// The Mandelbrot CUDA GPU thread function
/*
Version using software scheduling of thread blocks.
The idea here is to launch a fixed number of worker blocks to fill the
machine, and have each block loop over the available work until it is all done.
We use a counter in global memory to keep track of which tiles have been
claimed; each worker block increments it atomically to grab the next tile of work.
This method can achieve higher performance when blocks take a wide range of
different times to complete.
*/
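// A minimal sketch of how these software-scheduled kernels are driven (illustrative only;
// the exact host code appears in RunMandelbrot0_sm11/RunMandelbrot1_sm11 below):
//
//   unsigned int zero = 0;
//   cudaMemcpyToSymbol(blockCounter, &zero, sizeof(unsigned int));   // reset before every launch
//   Mandelbrot0_sm11<float><<<numWorkerBlocks, threads>>>(..., gridWidth, numBlocks, ...);
//
// Each resident worker block repeatedly claims the next tile index with
// atomicAdd(&blockCounter, 1) and exits once that index reaches numBlocks.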
__device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch
template<class T>
__global__ void Mandelbrot0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
#ifndef __DEVICE_EMULATION__ // device emu doesn't like syncthreads inside while()
__syncthreads();
#endif
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ);
// int m = blockIdx.x; // uncomment to see scheduling order
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
dst[pixel] = color;
} else {
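// Accumulate a running average over frames: new = (old * frame + sample + frame2) / frame1,
// where adding frame2 = frame1 / 2 rounds the integer division to nearest.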
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot0
// The Mandelbrot CUDA GPU thread function (double single version)
__global__ void MandelbrotDS0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1,
const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale,
const uchar4 colors, const int frame, const int animationFrame, const int gridWidth,
const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
// printf("add %i %i \n", blockIdx.x, blockIdx.y) ;
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
if (blockIndex >= numBlocks) break; // finish
// printf("run %i %i \n", blockX, blockY ) ;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
dst[pixel] = color;
} else {
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS0
// The Mandelbrot secondary AA pass CUDA GPU thread function
template<class T>
__global__ void Mandelbrot1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
count += CheckColors(pixelColor, dst[pixel - 1]);
if (ix + 1 < imageW)
count += CheckColors(pixelColor, dst[pixel + 1]);
if (iy > 0)
count += CheckColors(pixelColor, dst[pixel - imageW]);
if (iy + 1 < imageH)
count += CheckColors(pixelColor, dst[pixel + imageW]);
if (count) {
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot1
// The Mandelbrot secondary AA pass CUDA GPU thread function (double single version)
__global__ void MandelbrotDS1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch,
const float xOff0, const float xOff1, const float yOff0, const float yOff1,
const float xJP, const float yJP, const float scale, const uchar4 colors,
const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
count += CheckColors(pixelColor, dst[pixel - 1]);
if (ix + 1 < imageW)
count += CheckColors(pixelColor, dst[pixel + 1]);
if (iy > 0)
count += CheckColors(pixelColor, dst[pixel - imageW]);
if (iy + 1 < imageH)
count += CheckColors(pixelColor, dst[pixel + imageW]);
if (count) {
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS1
// The host CPU Mandelbrot thread spawner
void RunMandelbrot0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
// zero block counter
unsigned int hBlockCounter = 0;
cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) );
int numWorkerBlocks = numSMs;
switch(mode) {
default:
case 0:
Mandelbrot0_sm11<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
MandelbrotDS0_sm11<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 2:
Mandelbrot0_sm11<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
}
cutilCheckMsg("Mandelbrot0_sm11 kernel execution failed.\n");
} // RunMandelbrot0
// The host CPU Mandelbrot thread spawner
void RunMandelbrot1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
// zero block counter
unsigned int hBlockCounter = 0;
cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) );
int numWorkerBlocks = numSMs;
switch(mode) {
default:
case 0:
Mandelbrot1_sm11<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
MandelbrotDS1_sm11<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 2:
Mandelbrot1_sm11<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
}
cutilCheckMsg("Mandelbrot1_sm11 kernel execution failed.\n");
} // RunMandelbrot1
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
CUTLASS utilities are defined in the directory `tools/util`, and definitions appear in
namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is functional in host
and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables hardware-accelerated
numeric conversion on x86-64 CPUs that support the F16C extensions. In device code, all available
hardware is used to implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported layouts and manages
the corresponding host- and device-side memory allocations.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
This template function initializes the elements of a tensor according to a random Gaussian distribution. It
uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template class computes the general matrix product on the host. It supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
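/*
A compact usage sketch of the utilities described above (illustrative only; the complete,
compilable flow appears in TestCutlassGemm() below):

  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));
  cutlass::reference::device::TensorFillRandomGaussian(A.device_view(), seed, mean, stddev, 0);
  A.sync_host();   // copy device-side data back so host-side reference code can read it
*/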
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
cudaError_t cutlass_hgemm_nn(
int M,
int N,
int K,
cutlass::half_t alpha,
cutlass::half_t const *A,
cutlass::layout::ColumnMajor::Stride::Index lda,
cutlass::half_t const *B,
cutlass::layout::ColumnMajor::Stride::Index ldb,
cutlass::half_t beta,
cutlass::half_t *C,
cutlass::layout::ColumnMajor::Stride::Index ldc) {
// Define the GEMM operation
using Gemm = cutlass::gemm::device::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor // LayoutOutput
>;
Gemm gemm_op;
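// Launch the GEMM. The C operand appears twice because the arguments take both a source
// accumulator (C) and a destination (D); aliasing them computes D = alpha * A * B + beta * C
// in place.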
cutlass::Status status = gemm_op({
{M, N, K},
{A, lda},
{B, ldb},
{C, ldc},
{C, ldc},
{alpha, beta}
});
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a half-precision
/// CUTLASS GEMM kernel.
cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
cudaError_t result;
//
// Construct cutlass::HostTensor<> using the half-precision host-side type.
//
// cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2
// tensors in column-major layout. Explicit synchronization methods are offered to copy the
// tensor to the device or to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
// Arbitrary RNG seed value. Hard-coded for deterministic results.
uint64_t seed = 2080;
// Gaussian random distribution
cutlass::half_t mean = 0.0_hf;
cutlass::half_t stddev = 5.0_hf;
// Specify the number of bits to the right of the binary point that are permitted
// to be non-zero. A value of "0" here truncates random values to integers.
int bits_less_than_one = 0;
cutlass::reference::device::TensorFillRandomGaussian(
A.device_view(),
seed,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
B.device_view(),
seed * 2019,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
C_cutlass.device_view(),
seed * 1993,
mean,
stddev,
bits_less_than_one
);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::device_memory::copy_device_to_device(
C_reference.device_data(),
C_cutlass.device_data(),
C_cutlass.capacity());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
result = cutlass_hgemm_nn(
M,
N,
K,
alpha,
A.device_data(),
A.stride(0),
B.device_data(),
B.stride(0),
beta,
C_cutlass.device_data(),
C_cutlass.stride(0)
);
if (result != cudaSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this example is to
// use the host-side reference GEMM, so we must perform a device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference implementation.
cutlass::reference::host::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
cutlass::half_t,
cutlass::half_t
> gemm_ref;
gemm_ref(
{M, N, K}, // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (type: TensorRef<half_t, ColumnMajor>)
B.host_ref(), // B (type: TensorRef<half_t, ColumnMajor>)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref() // C (type: TensorRef<half_t, ColumnMajor>)
);
// Compare reference to computed results.
if (!cutlass::reference::host::TensorEquals(
C_reference.host_view(),
C_cutlass.host_view())) {
char const *filename = "errors_01_cutlass_utilities.csv";
std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host memory as
// arrays of type cutlass::half_t.
//
std::ofstream file(filename);
// Result of CUTLASS GEMM kernel
file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;
// Result of reference computation
file << "\n\nReference =\n" << C_reference.host_view() << std::endl;
// Return error code.
return cudaErrorUnknown;
}
// Passed error check
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
//
// This example uses half-precision and is only suitable for devices with compute capability 5.3 or greater.
//
cudaDeviceProp prop;
cudaError_t result = cudaGetDeviceProperties(&prop, 0);
if (result != cudaSuccess) {
std::cerr << "Failed to query device properties with error " << cudaGetErrorString(result) << std::endl;
return -1;
}
if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) {
std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n";
std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl;
return -1;
}
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values stored as
// cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero.
//
cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
typedef long long ll_t;
typedef unsigned long long ull_t;
typedef struct __builtin_align__(32) {
float s0, s1, s2, s3, s4, s5, s6, s7;
} _float8;
typedef union {
_float8 f8;
float val[8];
} float8;
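// CUDA provides no native atomicMax for float; the helper below emulates one with a
// compare-and-swap loop on the value's bit pattern, looping only while the candidate
// is strictly greater than the currently stored value.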
__device__ __forceinline__ float atomicMax(float *address, float val)
{
int ret = __float_as_int(*address);
while(val > __int_as_float(ret))
{
int old = ret;
if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
break;
}
return __int_as_float(ret);
}
__device__ void init_cCache(
float8 cCache[8]
) {
#pragma unroll
for (int i=0; i<8; i++){
#pragma unroll
for (int j=0; j<8; j++){
cCache[i].val[j] = 0.f;
}
}
}
__device__ void SM2Cache(
float cache[8][4],
volatile float SM[8][128+4],
int vy, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int mi=0; mi<4; mi++){
cache[ki][mi] = SM[ki][8*vy + 4*p + mi];
}
}
}
__device__ void thread_matmul(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
cCache[mi + 4*p].val[ni] = fmaf(a, b, cCache[mi + 4*p].val[ni]);
}
}
}
}
// negative squared euclidean distance
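// Same inner loop as thread_matmul, but each element accumulates -(a - b)^2 via
// fmaf(-dif, dif, acc), so larger (less negative) results mean closer points.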
__device__ void thread_nseuclidean(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
float dif = a - b;
cCache[mi + 4*p].val[ni] = fmaf(- dif, dif, cCache[mi + 4*p].val[ni]);
}
}
}
}
// negative manhattan distance
__device__ void thread_nmanhattan(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
cCache[mi + 4*p].val[ni] -= fabsf(a - b);
}
}
}
}
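// The two reducers below fold the 8x8 per-thread tile of similarities into a column-wise or
// row-wise maximum. The value goes into C through the float atomicMax above, and the argmax
// index goes into D, guarded by re-reading C; note this value/index pairing is a best-effort
// scheme rather than a single atomic update.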
__device__ void reduce_dim_1(
float8 cCache[8],
float* C,
ll_t* D,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iN = gStartx + vx*8 + i;
float val = -INFINITY;
ll_t ind = 0;
#pragma unroll
for (int j=0; j<8; j++){
int iM = gStarty + vy*8 + j;
if (iM < _M_){
val = fmaxf(val, cCache[j].val[i]);
ind = val == cCache[j].val[i] ? iM : ind;
}
}
if (iN < N){
atomicMax(&C[(bid) * _N_ + iN], val);
if (C[(bid) * _N_ + iN] <= val){
D[(bid) * _N_ + iN] = ind;
}
}
}
}
__device__ void reduce_dim_2(
float8 cCache[8],
float* C,
ll_t* D,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
float val = -INFINITY;
ll_t ind = 0;
#pragma unroll
for (int j=0; j<8; j++){
int iN = gStartx + vx*8 + j;
if (iN < _N_){
val = fmaxf(val, cCache[i].val[j]);
ind = val == cCache[i].val[j] ? iN : ind;
}
}
if (iM < M){
atomicMax(&C[(bid) * _M_ + iM], val);
if (C[(bid) * _M_ + iM] <= val){
D[(bid) * _M_ + iM] = ind;
}
}
}
}
extern "C"
__global__ void max_sim_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid, gStartx, gStarty;
if (DIM == 2){
bid = blockIdx.z;
gStartx = blockIdx.y * 128;
gStarty = blockIdx.x * 128;
} else {
bid = blockIdx.z;
gStartx = blockIdx.x * 128;
gStarty = blockIdx.y * 128;
}
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + wx + i*32;
if (wy < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (wy)*_M_ + (iM)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (wy)*_N_ + (gStartx + wx + i*32)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
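// Main loop over K in steps of 8. Iterations alternate between two register buffers
// (aBuffer1/bBuffer1 and aBuffer2/bBuffer2): while the current tile is written to shared
// memory and consumed, the next tile is prefetched from global memory (software pipelining).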
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iK = gStartk + 8 + wy;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + wx;
if (iK < _K_){
if (iM < _M_)
aBuffer2[i] = A[(bid)*_M_*_K_ + (iK)*_M_ + (iM)];
if (iN < _N_)
bBuffer2[i] = B[(bid)*_N_*_K_ + (iK)*_N_ + (iN)];
} else {
aBuffer2[i] = 0.f;
bBuffer2[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer1[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + wx;
if (iK < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iK)*_M_ + (iM)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iK)*_N_ + (iN)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
// thread_matmul(aCache, bSM, cCache, vx, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid, gStartx, gStarty;
if (DIM == 2){
bid = blockIdx.z;
gStartx = blockIdx.y * 128;
gStarty = blockIdx.x * 128;
} else {
bid = blockIdx.z;
gStartx = blockIdx.x * 128;
gStarty = blockIdx.y * 128;
}
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + dy + i*32;
if (dx < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (dx)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (dx)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iK = gStartk + 8 + dx;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + dy;
if (iK < _K_){
if (iM < _M_)
aBuffer2[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iK)];
if (iN < _N_)
bBuffer2[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iK)];
} else {
aBuffer2[i] = 0.f;
bBuffer2[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer1[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + dy;
if (iK < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iK)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iK)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid, gStartx, gStarty;
if (DIM == 2){
bid = blockIdx.z;
gStartx = blockIdx.y * 128;
gStarty = blockIdx.x * 128;
} else {
bid = blockIdx.z;
gStartx = blockIdx.x * 128;
gStarty = blockIdx.y * 128;
}
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + wx + i*32;
if (iM < _M_){
if (dx < _K_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (dx)];
} else {
aBuffer1[i] = 0.f;
}
}
if (iN < _N_){
if (wy < _K_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (wy)*_N_ + (iN)];
} else {
bBuffer1[i] = 0.f;
}
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iKA = gStartk + 8 + dx;
int iKB = gStartk + 8 + wy;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (iKA < _K_){
if (iM < _M_){
aBuffer2[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iKA)];
}
} else {
aBuffer2[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer2[i] = B[(bid)*_N_*_K_ + (iKB)*_N_ + (iN)];
}
} else {
bBuffer2[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer1[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (iKA < _K_){
if (iM < _M_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iKA)];
}
} else {
aBuffer1[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iKB)*_N_ + (iN)];
}
} else {
bBuffer1[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM,cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid, gStartx, gStarty;
if (DIM == 2){
bid = blockIdx.z;
gStartx = blockIdx.y * 128;
gStarty = blockIdx.x * 128;
} else {
bid = blockIdx.z;
gStartx = blockIdx.x * 128;
gStarty = blockIdx.y * 128;
}
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + dy + i*32;
if (iM < _M_){
if (wy < _K_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (wy)*_M_ + (iM)];
} else {
aBuffer1[i] = 0.f;
}
}
if (iN < _N_){
if (dx < _K_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (dx)];
} else {
bBuffer1[i] = 0.f;
}
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iKA = gStartk + 8 + wy;
int iKB = gStartk + 8 + dx;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + dy;
if (iKA < _K_){
if (iM < _M_){
aBuffer2[i] = A[(bid)*_M_*_K_ + (iKA)*_M_ + (iM)];
}
} else {
aBuffer2[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer2[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iKB)];
}
} else {
bBuffer2[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer1[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + dy;
if (iKA < _K_){
if (iM < _M_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iKA)*_M_ + (iM)];
}
} else {
aBuffer1[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iKB)];
}
} else {
bBuffer1[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
namespace lightseq {
namespace cuda {
template <typename T>
void print_vec(const thrust::device_vector<T>& outv, std::string outn,
int num_output_ele) {
std::cout << outn << ": ";
if (num_output_ele > 0) {
num_output_ele = std::min(size_t(num_output_ele), outv.size());
thrust::copy(outv.begin(), outv.begin() + num_output_ele,
std::ostream_iterator<T>(std::cout, " "));
std::cout << " ...";
} else {
thrust::copy(outv.begin(), outv.end(),
std::ostream_iterator<T>(std::cout, " "));
}
std::cout << std::endl;
}
template void print_vec<float>(const thrust::device_vector<float>& outv,
std::string outn, int num_output_ele);
template void print_vec<int>(const thrust::device_vector<int>& outv,
std::string outn, int num_output_ele);
template void print_vec<int8_t>(const thrust::device_vector<int8_t>& outv,
std::string outn, int num_output_ele);
template <typename T>
void print_vec(thrust::device_ptr<T> outv, std::string outn,
int num_output_ele) {
std::cout << outn << ": ";
thrust::copy(outv, outv + num_output_ele,
std::ostream_iterator<T>(std::cout, ", "));
std::cout << std::endl;
}
template void print_vec<float>(thrust::device_ptr<float> outv, std::string outn,
int num_output_ele);
template void print_vec<int>(thrust::device_ptr<int> outv, std::string outn,
int num_output_ele);
template void print_vec<int8_t>(thrust::device_ptr<int8_t> outv,
std::string outn, int num_output_ele);
template <typename T>
void print_vec(const T* outv, std::string outn, int num_output_ele) {
std::cout << outn << ": ";
std::vector<T> hout(num_output_ele, (T)0);
CHECK_GPU_ERROR(cudaMemcpy(hout.data(), outv, num_output_ele * sizeof(T),
cudaMemcpyDeviceToHost));
for (int i = 0; i < num_output_ele; i++) {
std::cout << hout[i] << ", ";
}
std::cout << std::endl;
}
template <>
void print_vec<__half>(const __half* outv, std::string outn,
int num_output_ele) {
std::cout << outn << ": ";
std::vector<__half> hout(num_output_ele, (__half)0.f);
CHECK_GPU_ERROR(cudaMemcpy(hout.data(), outv, num_output_ele * sizeof(__half),
cudaMemcpyDeviceToHost));
for (int i = 0; i < num_output_ele; i++) {
std::cout << __half2float(hout[i]) << ", ";
}
std::cout << std::endl;
}
template <>
void print_vec<int8_t>(const int8_t* outv, std::string outn,
int num_output_ele) {
std::cout << outn << ": ";
std::vector<int8_t> hout(num_output_ele, (int8_t)0);
CHECK_GPU_ERROR(cudaMemcpy(hout.data(), outv, num_output_ele * sizeof(int8_t),
cudaMemcpyDeviceToHost));
for (int i = 0; i < num_output_ele; i++) {
std::cout << static_cast<int>(hout[i]) << ", ";
}
std::cout << std::endl;
}
template void print_vec<float>(const float* outv, std::string outn,
int num_output_ele);
template void print_vec<int>(const int* outv, std::string outn,
int num_output_ele);
template void print_vec<int8_t>(const int8_t* outv, std::string outn,
int num_output_ele);
template void print_vec<__half>(const __half* outv, std::string outn,
int num_output_ele);
template <typename T>
void print_vec(const T* outv, std::string outn, int start, int end) {
std::cout << outn << ": ";
thrust::copy(thrust::device_pointer_cast(outv + start),
thrust::device_pointer_cast(outv + end),
std::ostream_iterator<T>(std::cout, ", "));
std::cout << std::endl;
}
template <>
void print_vec<__half>(const __half* outv, std::string outn, int start,
int end) {
std::cout << outn << ": ";
int num_elements = end - start;
std::vector<__half> hout(num_elements, (__half)0.f);
CHECK_GPU_ERROR(cudaMemcpy(hout.data(), outv + start,
num_elements * sizeof(__half),
cudaMemcpyDeviceToHost));
for (int i = 0; i < num_elements; i++) {
std::cout << __half2float(hout[i]) << ", ";
}
std::cout << std::endl;
}
template void print_vec<float>(const float* outv, std::string outn, int start,
int end);
template void print_vec<int>(const int* outv, std::string outn, int start,
int end);
void print_time_duration(
const std::chrono::high_resolution_clock::time_point& start,
std::string duration_name, cudaStream_t stream) {
CHECK_GPU_ERROR(cudaStreamSynchronize(stream));
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
std::cout << duration_name
<< " duration time is: " << (elapsed).count() * 1000 << " ms"
<< std::endl;
return;
}
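// The functors below produce one pseudo-random value per index: generate_distribution() feeds a
// counting iterator into thrust::transform, and each call re-seeds a default_random_engine and
// discards n draws, so element n always receives the same value regardless of evaluation order.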
struct prg_uniform {
float a, b;
__host__ __device__ prg_uniform(float _a = 0.f, float _b = 1.f)
: a(_a), b(_b){};
__host__ __device__ float operator()(const unsigned int n) const {
thrust::default_random_engine rng;
thrust::uniform_real_distribution<float> dist(a, b);
rng.discard(n);
return dist(rng);
}
};
struct prg_norm {
float a, b;
__host__ __device__ prg_norm(float _a = 0.f, float _b = 1.f) : a(_a), b(_b){};
__host__ __device__ float operator()(const unsigned int n) const {
thrust::default_random_engine rng;
thrust::random::normal_distribution<float> dist(a, b);
rng.discard(n);
return dist(rng);
}
};
void generate_distribution(thrust::device_vector<float>& input_output,
std::string mode, float a, float b) {
thrust::counting_iterator<unsigned int> index_sequence_begin(0);
if (mode == "uniform")
thrust::transform(index_sequence_begin,
index_sequence_begin + input_output.size(),
input_output.begin(), prg_uniform(a, b));
if (mode == "norm")
thrust::transform(index_sequence_begin,
index_sequence_begin + input_output.size(),
input_output.begin(), prg_norm(a, b));
}
void read_batch_tokenids_from_file(std::string file_name, int& batch_size,
int& batch_seq_len,
std::vector<int>& input_ids) {
std::ifstream fin(file_name);
fin >> batch_size >> batch_seq_len;
input_ids = std::vector<int>(batch_size * batch_seq_len, 0);
for (int i = 0; i < batch_size; i++) {
for (int j = 0; j < batch_seq_len; j++) {
int idx = i * batch_seq_len + j;
fin >> input_ids[idx];
}
}
}
bool endswith(std::string const& full, std::string const& end) {
if (full.length() >= end.length()) {
return (0 == full.compare(full.length() - end.length(), end.length(), end));
}
return false;
}
int get_hdf5_dataset_size(hid_t dataset) {
hid_t dataspace = H5Dget_space(dataset); /* dataspace handle */
int n_dims = H5Sget_simple_extent_ndims(dataspace);
// return 1 for scalar
if (n_dims < 1) {
return 1;
}
// get dimensions for N-Dimension vector
hsize_t dims[n_dims];
int status = H5Sget_simple_extent_dims(dataspace, dims, NULL);
if (status != n_dims || status < 0) {
// return negative number on error
return -1;
}
// accumulate size from every dimension
int vec_size = 1;
for (int i = 0; i < n_dims; ++i) {
vec_size *= dims[i];
}
return vec_size;
}
int get_hdf5_dataset_size(hid_t hdf5_file, std::string dataset_name) {
// check if dataset exists or not
if (!H5Lexists(hdf5_file, dataset_name.c_str(), H5P_DEFAULT)) {
throw HDF5DatasetNotFoundError(
(dataset_name + " Not Found in HDF5 File").c_str());
}
// parse dataset size
hid_t ds = H5Dopen2(hdf5_file, dataset_name.c_str(), H5P_DEFAULT);
if (ds < 0) {
throw std::runtime_error("Failed to open HDF5 dataset: " + dataset_name);
}
int ds_size = get_hdf5_dataset_size(ds);
if (ds_size < 0) {
throw std::runtime_error("HDF5 parsing error: " + dataset_name);
}
H5Dclose(ds);
return ds_size;
}
int read_hdf5_dataset_data(hid_t hdf5_file, std::string dataset_name,
hid_t output_type, void* output_buf,
std::function<bool(int)> size_predicate,
std::string extra_msg) {
// check if dataset exists or not
if (!H5Lexists(hdf5_file, dataset_name.c_str(), H5P_DEFAULT)) {
throw HDF5DatasetNotFoundError(
(dataset_name + " Not Found in HDF5 File").c_str());
}
hid_t ds = H5Dopen2(hdf5_file, dataset_name.c_str(), H5P_DEFAULT);
if (ds < 0) {
throw std::runtime_error("Failed to open HDF5 dataset: " + dataset_name);
}
int ds_size = get_hdf5_dataset_size(ds);
// sanity (custom) check for size with extra message.
if (size_predicate(ds_size)) {
throw std::runtime_error("Invalid shape " + std::to_string(ds_size) + ". " +
extra_msg);
}
herr_t status =
H5Dread(ds, output_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, output_buf);
if (status < 0) {
throw std::runtime_error("Failed to read HDF5 dataset: " + dataset_name);
}
H5Dclose(ds);
return ds_size;
}
std::vector<float> read_hdf5_dataset_data_float(
hid_t hdf5_file, std::string dataset_name, hid_t output_type,
std::function<bool(int)> size_predicate, std::string extra_msg) {
// check if dataset exists or not
if (!H5Lexists(hdf5_file, dataset_name.c_str(), H5P_DEFAULT)) {
throw HDF5DatasetNotFoundError(
(dataset_name + " Not Found in HDF5 File").c_str());
}
hid_t ds = H5Dopen2(hdf5_file, dataset_name.c_str(), H5P_DEFAULT);
if (ds < 0) {
throw std::runtime_error("Failed to open HDF5 dataset: " + dataset_name);
}
int ds_size = get_hdf5_dataset_size(ds);
// sanity (custom) check for size with extra message.
if (size_predicate(ds_size)) {
throw std::runtime_error("Invalid shape " + std::to_string(ds_size) + ". " +
extra_msg);
}
std::vector<float> output_vec(ds_size);
herr_t status = H5Dread(ds, output_type, H5S_ALL, H5S_ALL, H5P_DEFAULT,
output_vec.data());
if (status < 0) {
throw std::runtime_error("Failed to read HDF5 dataset: " + dataset_name);
}
H5Dclose(ds);
return output_vec; // return with copy elision
}
std::vector<int> read_hdf5_dataset_data_int(
hid_t hdf5_file, std::string dataset_name, hid_t output_type,
std::function<bool(int)> size_predicate, std::string extra_msg) {
// check if dataset exists or not
if (!H5Lexists(hdf5_file, dataset_name.c_str(), H5P_DEFAULT)) {
throw HDF5DatasetNotFoundError(
(dataset_name + " Not Found in HDF5 File").c_str());
}
hid_t ds = H5Dopen2(hdf5_file, dataset_name.c_str(), H5P_DEFAULT);
if (ds < 0) {
throw std::runtime_error("Failed to open HDF5 dataset: " + dataset_name);
}
int ds_size = get_hdf5_dataset_size(ds);
// sanity (custom) check for size with extra message.
if (size_predicate(ds_size)) {
throw std::runtime_error("Invalid shape " + std::to_string(ds_size) + ". " +
extra_msg);
}
std::vector<int> output_vec(ds_size);
herr_t status = H5Dread(ds, output_type, H5S_ALL, H5S_ALL, H5P_DEFAULT,
output_vec.data());
if (status < 0) {
throw std::runtime_error("Failed to read HDF5 dataset: " + dataset_name);
}
H5Dclose(ds);
return output_vec; // return with copy elision
}
int read_hdf5_dataset_scalar(hid_t hdf5_file, std::string dataset_name,
hid_t output_type, void* output_buf) {
return read_hdf5_dataset_data(
hdf5_file, dataset_name, output_type, output_buf,
[](int size) { return size != 1; }, "Expect scalar with shape of 1.");
}
float dequantize(unsigned char i, float scale, float clip_max) {
return (float(i) - scale) * clip_max / scale;
}
void dequantize_array(std::vector<unsigned char>& i8, std::vector<float>& f,
float clip_max, float quant_range, int start, int num) {
for (int i = start; i < start + num; ++i) {
f[i] = dequantize(i8[i], quant_range, clip_max);
}
}
} // namespace cuda
} // namespace lightseq
#include "mask/mergerMask.hpp"
#include "backend/common/imageOps.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "backend/cuda/surface.hpp"
#include "cuda/util.hpp"
#include "gpu/core1/voronoi.hpp"
#include "gpu/memcpy.hpp"
#include "mask/mergerMaskConstant.hpp"
namespace VideoStitch {
namespace MergerMask {
#define MERGER_MASK_KERNEL_SIZE_X 16
#define MERGER_MASK_KERNEL_SIZE_Y 16
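// For each panorama pixel, compare the candidate camera's distortion against the value recorded
// so far: camId takes over the pixel when it is less distorted than an owner whose distortion
// exceeds the threshold, or when the pixel is not yet covered by any camera at all.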
__global__ void updateInputIndexByDistortionMapKernel(
const videoreaderid_t camId, const unsigned char distortionThreshold, const int2 camSize, const int2 camOffset,
const unsigned char* __restrict__ camDistortionBuffer, const int2 inputSize,
const uint32_t* __restrict__ inputNonOverlappingIndexBuffer,
const unsigned char* __restrict__ inputDistortionBuffer, uint32_t* __restrict__ nextNonOverlappingIndexBuffer,
unsigned char* __restrict__ nextDistortionBuffer) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < inputSize.x && y < inputSize.y) {
const int inputIndex = y * inputSize.x + x;
const unsigned char inputDistortion = inputDistortionBuffer[inputIndex];
const uint32_t inputNonOverlappingIndex = inputNonOverlappingIndexBuffer[inputIndex];
const int camX = (x - camOffset.x + inputSize.x) % inputSize.x;
const int camY = y - camOffset.y;
nextNonOverlappingIndexBuffer[inputIndex] = inputNonOverlappingIndex;
nextDistortionBuffer[inputIndex] = inputDistortion;
if (camX >= 0 && camX < camSize.x && camY >= 0 && camY < camSize.y) {
const int camIndex = camY * camSize.x + camX;
const unsigned char camDistortion = camDistortionBuffer[camIndex];
if ((camDistortion < inputDistortion && inputDistortion > distortionThreshold) ||
(inputNonOverlappingIndex == 0 && camDistortion < 255)) {
nextNonOverlappingIndexBuffer[inputIndex] = 1 << camId;
nextDistortionBuffer[inputIndex] = camDistortion;
}
}
}
}
Status MergerMask::updateInputIndexByDistortionMap(const videoreaderid_t camId, const int2 inputSize,
const GPU::Buffer<const uint32_t> inputNonOverlappingIndexBuffer,
const GPU::Buffer<const unsigned char> inputDistortionBuffer,
GPU::Buffer<uint32_t> nextNonOverlappingIndexBuffer,
GPU::Buffer<unsigned char> nextDistortionBuffer, GPU::Stream stream,
const bool original) {
const int2 camSize =
original ? make_int2((int)cachedOriginalMappedRects[camId].getWidth(),
(int)cachedOriginalMappedRects[camId].getHeight())
: make_int2((int)cachedMappedRects[camId].getWidth(), (int)cachedMappedRects[camId].getHeight());
const int2 camOffset =
original ? make_int2((int)cachedOriginalMappedRects[camId].left(), (int)cachedOriginalMappedRects[camId].top())
: make_int2((int)cachedMappedRects[camId].left(), (int)cachedMappedRects[camId].top());
const unsigned char distortionThreshold = mergerMaskConfig.getDistortionThreshold();
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1);
updateInputIndexByDistortionMapKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
camId, distortionThreshold, camSize, camOffset,
original ? originalDistortionMaps[camId].get() : distortionMaps[camId].get(), inputSize,
inputNonOverlappingIndexBuffer.get(), inputDistortionBuffer.get(), nextNonOverlappingIndexBuffer.get(),
nextDistortionBuffer.get());
return CUDA_STATUS;
}
__global__ void updateDistortionFromMaskKernel(videoreaderid_t camId, const int2 camSize, const int2 camOffset,
unsigned char* __restrict__ camDistortionBuffer, const int2 inputSize,
const uint32_t* __restrict__ srcMapBuffer) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < camSize.x && y < camSize.y) {
int inputX = (x + camOffset.x) % inputSize.x;
int inputY = (y + camOffset.y);
if (inputX >= 0 && inputX < inputSize.x && inputY >= 0 && inputY < inputSize.y) {
for (int i = -2; i <= 2; i++) {
for (int j = -2; j <= 2; j++) {
const int neiX = inputX + i;
const int neiY = inputY + j;
if (neiX >= 0 && neiX < inputSize.x && neiY >= 0 && neiY < inputSize.y) {
if ((srcMapBuffer[neiY * inputSize.x + neiX] & (1 << camId)) == 0) {
camDistortionBuffer[y * camSize.x + x] = 255;
return;
}
}
}
}
}
}
}
Status MergerMask::updateDistortionFromMask(const videoreaderid_t camId, const int2 distortionBufferSize,
const int2 distortionBufferOffset,
GPU::Buffer<unsigned char> distortionBuffer, const int2 inputSize,
const GPU::Buffer<const uint32_t> srcMap, GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(distortionBufferSize.x, dimBlock.x),
(unsigned)Cuda::ceilDiv(distortionBufferSize.y, dimBlock.y), 1);
updateDistortionFromMaskKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
camId, distortionBufferSize, distortionBufferOffset, distortionBuffer.get(), inputSize, srcMap.get());
return CUDA_STATUS;
}
__global__ void initializeMasksKernel(videoreaderid_t camId, const int2 camSize, const int2 camOffset,
const unsigned char* __restrict__ camDistortionBuffer, const int2 inputSize,
uint32_t* __restrict__ inputNonOverlappingIndexBuffer,
unsigned char* inputDistortionBuffer) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < inputSize.x && y < inputSize.y) {
const int inputIndex = y * inputSize.x + x;
const int camX = (x - camOffset.x + inputSize.x) % inputSize.x;
const int camY = y - camOffset.y;
inputNonOverlappingIndexBuffer[inputIndex] = 0;
inputDistortionBuffer[inputIndex] = 255;
if (camX >= 0 && camX < camSize.x && camY >= 0 && camY < camSize.y) {
unsigned char camDistortion = camDistortionBuffer[camY * camSize.x + camX];
if (camDistortion < 255) {
inputDistortionBuffer[inputIndex] = camDistortion;
inputNonOverlappingIndexBuffer[inputIndex] = 1 << camId;
}
}
}
}
Status MergerMask::initializeMasks(const int2 inputSize, const videoreaderid_t camId,
GPU::Buffer<uint32_t> inputNonOverlappingIndexBuffer,
GPU::Buffer<unsigned char> inputDistortionBuffer, GPU::Stream stream,
const bool original) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
const Core::Rect camRect = original ? cachedOriginalMappedRects[camId] : cachedMappedRects[camId];
dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1);
initializeMasksKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
camId, make_int2((int)camRect.getWidth(), (int)camRect.getHeight()),
make_int2((int)camRect.left(), (int)camRect.top()),
original ? originalDistortionMaps[camId].get() : distortionMaps[camId].get(), inputSize,
inputNonOverlappingIndexBuffer.get(), inputDistortionBuffer.get());
return CUDA_STATUS;
}
__global__ void transformDistortionKernel(const int2 inputSize, const float distortionParam,
unsigned char* __restrict__ distortionBuffer) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < inputSize.x && y < inputSize.y) {
const unsigned index = y * inputSize.x + x;
const unsigned char inputDistortion = distortionBuffer[index];
unsigned char remappedDistortion = (unsigned char)(pow(float(inputDistortion) / 255.0f, distortionParam) * 255.0f);
distortionBuffer[index] = remappedDistortion;
}
}
Status MergerMask::transformDistortion(const int2 inputSize, GPU::Buffer<unsigned char> distortionBuffer,
GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1);
const float distortionParam = mergerMaskConfig.getDistortionParam();
transformDistortionKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(inputSize, distortionParam, distortionBuffer.get());
return CUDA_STATUS;
}
__global__ void updateIndexMaskKernel(const videoreaderid_t camId, const int maxOverlappingCount,
const char* const __restrict__ cameraIndices, const int2 distortionBufferSize,
const int2 distortionBufferOffset,
const unsigned char* const __restrict__ distortionBuffer, const int2 size,
uint32_t* __restrict__ inputIndexBuffer, unsigned char* __restrict__ mask,
const uint32_t* const __restrict__ srcMap) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < size.x && y < size.y) {
const unsigned index = y * size.x + x;
uint32_t inputIndex = inputIndexBuffer[index];
if ((mask[index] < 255) && (srcMap[index] & (1 << camId))) {
int coordX = (x - distortionBufferOffset.x + size.x) % size.x;
int coordY = y - distortionBufferOffset.y;
// Make sure to only keep pixels that are not very distorted
if (coordX >= 0 && coordX < distortionBufferSize.x && coordY >= 0 && coordY < distortionBufferSize.y) {
unsigned char distortion = distortionBuffer[coordY * distortionBufferSize.x + coordX];
// if this pixel is already occupied and the distortion is large, just ignore it
if (inputIndex > 0 && distortion > 130) {
mask[index] = 0;
return;
}
}
int countBitOne = 0;
int count = 0;
int minCount = -1;
while (inputIndex > 0) {
if (inputIndex & 1) {
countBitOne++;
if (minCount < 0) {
minCount = count;
} else if (cameraIndices[count] < cameraIndices[minCount]) {
minCount = count;
}
}
inputIndex = inputIndex >> 1;
count++;
}
if (countBitOne < maxOverlappingCount) {
inputIndexBuffer[index] |= (1 << camId);
} else if (countBitOne == maxOverlappingCount) {
inputIndexBuffer[index] = (inputIndexBuffer[index] - (1 << minCount)) | (1 << camId);
}
mask[index] = 255;
} else {
mask[index] = 0;
}
}
}
Status MergerMask::updateIndexMask(const videoreaderid_t camId, const int maxOverlappingCount,
const GPU::Buffer<const char> cameraIndices, const int2 distortionBufferSize,
const int2 distortionBufferOffset,
const GPU::Buffer<const unsigned char> distortionBuffer, const int2 inputSize,
GPU::Buffer<uint32_t> inputIndexBuffer, GPU::Buffer<unsigned char> mask,
const GPU::Buffer<const uint32_t> srcMap, GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1);
updateIndexMaskKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
camId, maxOverlappingCount, cameraIndices.get(), distortionBufferSize, distortionBufferOffset,
distortionBuffer.get(), inputSize, inputIndexBuffer.get(), mask.get(), srcMap.get());
return CUDA_STATUS;
}
// Get the first 1 bit (from right to left), set it to 0
// For example input number = 1000100 --> return 2 and set number = 1000
// @NOTE: Faster implementation can be found at : https://graphics.stanford.edu/~seander/bithacks.html
__device__ int getFirstOnBitPosition(uint32_t& number) {
int x = -1;
int count = 0;
while ((number > 0) && ((number & 1) == 0)) {
count++;
number = number >> 1;
}
if ((number & 1) > 0) {
x = count;
number = number >> 1;
}
return x;
}
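// Illustrative alternative (not used above): the same "find and consume the lowest set bit"
// operation can be expressed with the CUDA __ffs() intrinsic, which returns the 1-based position
// of the least significant set bit (0 when the input is 0). The helper name below is hypothetical
// and only meant as a sketch.
__device__ int getFirstOnBitPositionFfs(uint32_t& number) {
  const int pos = __ffs((int)number) - 1;  // -1 when no bit is set
  if (pos >= 0) {
    // Shift the consumed bit and everything below it out, guarding against a full 32-bit shift.
    number = (pos + 1 < 32) ? (number >> (pos + 1)) : 0;
  }
  return pos;
}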
// Get index of the first two bit with value 1
__device__ int2 getFirstTwoOnBitPosition(const uint32_t input) {
uint32_t number = input;
int32_t x = getFirstOnBitPosition(number);
int32_t y = -1;
if (x >= 0) {
int32_t offsetY = getFirstOnBitPosition(number);
if (offsetY >= 0) {
y = x + offsetY + 1;
}
}
return make_int2(x, y);
}
__global__ void updateStitchingCostKernel(const size_t camCount, const int2 size, const int kernelSize,
const uint32_t* __restrict__ inputIndexBuffer,
const uint32_t* __restrict__ mappedOffset,
const int2* __restrict__ mappedRectOffset,
const int2* __restrict__ mappedRectSize,
const uint32_t* __restrict__ mappedBuffer, float* __restrict__ cost,
uint32_t* __restrict__ debugBuffer0, uint32_t* __restrict__ debugBuffer1) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < size.x && y < size.y) {
const unsigned index = y * size.x + x;
debugBuffer0[index] = 0;
debugBuffer1[index] = 0;
const uint32_t inputIndex = inputIndexBuffer[index];
const int2 firstTwo = getFirstTwoOnBitPosition(inputIndex);
if (firstTwo.x >= 0 && firstTwo.y >= 0) {
const int input0 = firstTwo.x;
const int input1 = firstTwo.y;
      debugBuffer1[index] = (1 << input0) | (1 << input1);
int x0 = (x - mappedRectOffset[input0].x + size.x) % size.x;
int y0 = y - mappedRectOffset[input0].y;
const unsigned index0 = y0 * mappedRectSize[input0].x + x0;
const uint32_t color0 = mappedBuffer[mappedOffset[input0] + index0];
debugBuffer0[index] = color0;
int x1 = x - mappedRectOffset[input1].x;
int y1 = y - mappedRectOffset[input1].y;
const unsigned index1 = y1 * mappedRectSize[input1].x + x1;
const uint32_t color1 = mappedBuffer[mappedOffset[input1] + index1];
debugBuffer1[index] = color1;
// Update stitching cost using min pooling metric
const int left = max(x1 - kernelSize, 0);
const int right = min(x1 + kernelSize, int(mappedRectSize[input1].x - 1));
const int top = max(y1 - kernelSize, 0);
const int bottom = min(y1 + kernelSize, int(mappedRectSize[input1].y - 1));
float sadMin = -1;
for (int i = left; i <= right; i++) {
for (int j = top; j <= bottom; j++) {
const unsigned warpI = (i + size.x) % size.x;
const unsigned index1 = j * mappedRectSize[input1].x + warpI;
const uint32_t color1 = mappedBuffer[mappedOffset[input1] + index1];
if (color1 != INVALID_VALUE) {
const float sadLab = abs((float(Image::RGBA::r(color0)) - Image::RGBA::r(color1)) / 255.0) +
abs((float(Image::RGBA::g(color0)) - Image::RGBA::g(color1)) / 255.0) +
abs((float(Image::RGBA::b(color0)) - Image::RGBA::b(color1)) / 255.0);
const float sadGradient = abs((float(Image::RGBA::a(color0)) - Image::RGBA::a(color1)) / 255.0);
const float sad = (sadLab + 2.0f * sadGradient) / (1.0f + 2.0f);
if (sad < sadMin || sadMin < 0) {
sadMin = sad;
}
}
}
}
if (sadMin >= 0) {
        // Prefer to focus the effort on the middle of the output panorama by giving these pixels more weight
float yDistance = min(1.0f, (float)(abs((size.y / 2) - y)) / (size.y / 2));
float yCost = max(0.0f, expf(yDistance * yDistance * (-0.5f)));
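        // With yDistance in [0, 1], this Gaussian-style falloff keeps yCost roughly in [0.61, 1]:
        // at the vertical center yDistance = 0 gives yCost = 1, while at the top/bottom border
        // yDistance = 1 gives yCost = exp(-0.5) ~= 0.607, so central rows contribute more cost.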
cost[index] += max(sadMin * yCost, 0.001);
}
}
}
}
Status MergerMask::updateStitchingCost(const int2 inputSize, const int kernelSize,
const GPU::Buffer<const uint32_t> inputIndexBuffer,
const GPU::Buffer<const uint32_t> mappedOffset,
const GPU::Buffer<const int2> mappedRectOffset,
const GPU::Buffer<const int2> mappedRectSize,
const GPU::Buffer<const uint32_t> mappedBuffer, GPU::Buffer<float> cost,
GPU::Buffer<uint32_t> debugBuffer0, GPU::Buffer<uint32_t> debugBuffer1,
GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1);
updateStitchingCostKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
pano.numInputs(), inputSize, kernelSize, inputIndexBuffer.get(), mappedOffset.get(), mappedRectOffset.get(),
mappedRectSize.get(), mappedBuffer.get(), cost.get(), debugBuffer0.get(), debugBuffer1.get());
return CUDA_STATUS;
}
__global__ void extractLayerFromIndexBufferKernel(const videoreaderid_t id, int2 bufferSize,
const uint32_t* const __restrict__ inputBuffer,
uint32_t* __restrict__ extractedBuffer) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < bufferSize.x && y < bufferSize.y) {
const unsigned index = y * bufferSize.x + x;
const uint32_t input = inputBuffer[index];
if ((input & id) > 0) {
extractedBuffer[index] = id;
} else {
extractedBuffer[index] = 0;
}
}
}
Status MergerMask::extractLayerFromIndexBuffer(const videoreaderid_t id, const int2 bufferSize,
const GPU::Buffer<const uint32_t> inputIndexBuffer,
GPU::Buffer<uint32_t> extractedBuffer, GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1);
extractLayerFromIndexBufferKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(id, bufferSize, inputIndexBuffer.get(),
extractedBuffer.get());
return CUDA_STATUS;
}
__global__ void updateIndexMaskAfterSeamKernel(const videoreaderid_t id0s, const videoreaderid_t id1, int2 bufferSize,
const unsigned char* const __restrict__ seamBuffer,
uint32_t* __restrict__ indexBuffer) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < bufferSize.x && y < bufferSize.y) {
const unsigned index = y * bufferSize.x + x;
uint32_t input = indexBuffer[index];
const uint32_t seam = seamBuffer[index];
if (seam == (1 << 0)) {
if ((input & id1) == id1) {
input -= id1;
}
} else if (seam == (1 << 1)) {
if ((input & id0s) > 0) {
input = (input & (~id0s));
}
}
indexBuffer[index] = input;
}
}
Status MergerMask::updateIndexMaskAfterSeam(const videoreaderid_t id0s, const videoreaderid_t id1,
const int2 bufferSize, const GPU::Buffer<const unsigned char> seamBuffer,
GPU::Buffer<uint32_t> indexBuffer, GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1);
updateIndexMaskAfterSeamKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(id0s, id1, bufferSize, seamBuffer.get(),
indexBuffer.get());
return CUDA_STATUS;
}
__global__ void lookupColorBufferFromInputIndexKernel(
const int wrapWidth, const int camCount, const unsigned char* const __restrict__ cameraIndices,
const int2* __restrict__ const mappedRectOffsets, const int2* __restrict__ const mappedRectSizes,
const uint32_t* __restrict__ const mappedOffsets, const uint32_t* __restrict__ const mappedBuffer,
const int2 bufferSize, const uint32_t* const __restrict__ inputIndexBuffer, uint32_t* __restrict__ outputBuffer) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < bufferSize.x && y < bufferSize.y) {
const unsigned index = y * bufferSize.x + x;
outputBuffer[index] = INVALID_VALUE;
    // Pick the highest-priority camera that covers this pixel (cameraIndices is scanned from back to front)
const uint32_t inputIndex = inputIndexBuffer[index];
for (int i = camCount - 1; i >= 0; i--)
if ((inputIndex & (1 << cameraIndices[i])) > 0) {
unsigned char camIndex = cameraIndices[i];
uint32_t camOffset = mappedOffsets[camIndex];
int2 camRectOffset = mappedRectOffsets[camIndex];
int2 camRectSize = mappedRectSizes[camIndex];
int32_t camX = (x - camRectOffset.x + wrapWidth) % wrapWidth;
int32_t camY = y - camRectOffset.y;
if (camX >= 0 && camX < camRectSize.x && camY >= 0 && camY < camRectSize.y) {
          int32_t pixelIndex = camY * camRectSize.x + camX;
          outputBuffer[index] = mappedBuffer[camOffset + pixelIndex];
}
break;
}
}
}
Status MergerMask::lookupColorBufferFromInputIndex(
const int wrapWidth, const GPU::Buffer<const unsigned char> camBuffer,
const GPU::Buffer<const int2> mappedRectOffsets, const GPU::Buffer<const int2> mappedRectSizes,
const GPU::Buffer<const uint32_t> mappedOffsets, const GPU::Buffer<const uint32_t> mappedBuffers,
const int2 bufferSize, const GPU::Buffer<const uint32_t> inputIndexBuffer, GPU::Buffer<uint32_t> outputBuffer,
GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1);
lookupColorBufferFromInputIndexKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
wrapWidth, (int)camBuffer.numElements(), camBuffer.get(), mappedRectOffsets.get(), mappedRectSizes.get(),
mappedOffsets.get(), mappedBuffers.get(), bufferSize, inputIndexBuffer.get(), outputBuffer.get());
return CUDA_STATUS;
}
__global__ void updateSeamMaskKernel(const videoreaderid_t id, const int2 size,
const uint32_t* __restrict__ const originalInputIndexBuffer,
const unsigned char* const __restrict__ distanceBuffer,
uint32_t* __restrict__ seamOuputIndexBuffer) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < size.x && y < size.y) {
const unsigned index = y * size.x + x;
if ((originalInputIndexBuffer[index] & (1 << id)) > 0) {
if (distanceBuffer[index] < 255) {
seamOuputIndexBuffer[index] |= (1 << id);
}
}
}
}
Status MergerMask::updateSeamMask(const videoreaderid_t id, const int2 size,
const GPU::Buffer<const uint32_t> originalInputIndexBuffer,
const GPU::Buffer<const unsigned char> distanceBuffer,
GPU::Buffer<uint32_t> seamOuputIndexBuffer, GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1);
updateSeamMaskKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(id, size, originalInputIndexBuffer.get(),
distanceBuffer.get(), seamOuputIndexBuffer.get());
return CUDA_STATUS;
}
__global__ void getInputMaskFromOutputIndicesKernel(const videoreaderid_t imId, const int scaleFactor,
const int2 outputSize,
const uint32_t* __restrict__ const maskBuffer, const int2 inputSize,
const float2* __restrict__ const inputCoordBuffer,
unsigned char* const __restrict__ inputMask) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < scaleFactor * inputSize.x && y < scaleFactor * inputSize.y) {
const unsigned index = y * inputSize.x * scaleFactor + x;
const float2 coord = inputCoordBuffer[index];
if (coord.x < 0 || coord.y < 0) {
return;
}
const int2 roundedCoord = make_int2(roundf(coord.x), roundf(coord.y));
inputMask[index] = 0;
if (roundedCoord.x >= 0 && roundedCoord.x < outputSize.x && roundedCoord.y >= 0 && roundedCoord.y < outputSize.y) {
if ((maskBuffer[roundedCoord.y * outputSize.x + (roundedCoord.x % outputSize.x)] & (1 << imId)) > 0) {
inputMask[index] = 255;
}
}
}
}
Status MergerMask::getInputMaskFromOutputIndices(const videoreaderid_t imId, const int scaleFactor,
const int2 outputSize, const GPU::Buffer<const uint32_t> maskBuffer,
const int2 inputSize, const GPU::Buffer<const float2> inputCoordBuffer,
GPU::Buffer<unsigned char> inputMask, GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(scaleFactor * inputSize.x, dimBlock.x),
(unsigned)Cuda::ceilDiv(scaleFactor * inputSize.y, dimBlock.y), 1);
FAIL_RETURN(
GPU::memsetToZeroBlocking<unsigned char>(inputMask, inputSize.x * inputSize.y * scaleFactor * scaleFactor));
getInputMaskFromOutputIndicesKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
imId, scaleFactor, outputSize, maskBuffer.get(), inputSize, inputCoordBuffer.get(), inputMask.get());
return CUDA_STATUS;
}
__global__ void getOutputIndicesFromInputMaskKernel(const videoreaderid_t imId, const int scaleFactor,
const int2 inputSize,
const unsigned char* const __restrict__ inputMask,
const int2 outputSize, cudaTextureObject_t coordBuffer,
uint32_t* __restrict__ const maskBuffer) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < outputSize.x && y < outputSize.y) {
const unsigned index = y * outputSize.x + x;
const float2 coord = tex2D<float2>(coordBuffer, x, y);
const int2 roundedCoord = make_int2(roundf(coord.x * scaleFactor), roundf(coord.y * scaleFactor));
if (roundedCoord.x >= 0 && roundedCoord.x < scaleFactor * inputSize.x && roundedCoord.y >= 0 &&
roundedCoord.y < scaleFactor * inputSize.y) {
if (inputMask[roundedCoord.y * (scaleFactor * inputSize.x) + roundedCoord.x] > 0) {
maskBuffer[index] |= (1 << imId);
}
}
}
}
Status MergerMask::getOutputIndicesFromInputMask(const videoreaderid_t imId, const int scaleFactor,
const int2 inputSize, const GPU::Buffer<const unsigned char> inputMask,
const int2 outputSize, const GPU::Surface& coordBuffer,
GPU::Buffer<uint32_t> maskBuffer, GPU::Stream stream) {
dim3 dimBlock(MERGER_MASK_KERNEL_SIZE_X, MERGER_MASK_KERNEL_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(outputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(outputSize.y, dimBlock.y), 1);
getOutputIndicesFromInputMaskKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
imId, scaleFactor, inputSize, inputMask.get(), outputSize, coordBuffer.get().texture(), maskBuffer.get());
return CUDA_STATUS;
}
} // namespace MergerMask
} // namespace VideoStitch
#include "HugeCTR/include/general_buffer2.hpp"
#include "HugeCTR/include/resource_managers/resource_manager_ext.hpp"
#include "HugeCTR/include/tensor2.hpp"
#include "gtest/gtest.h"
#include "input_generator.hpp"
#include "utest/test_utils.h"
// all your base are belong to us
#define private public
#define protected public
#include "HugeCTR/include/embeddings/hybrid_sparse_embedding.hpp"
using namespace HugeCTR;
constexpr bool debug_print = false;
int global_seed = 0;
template <typename dtype, typename emtype>
void end_to_end_impl(std::vector<int> device_list, HybridEmbeddingInputGenerator<dtype> *generator,
size_t batch_size, size_t embedding_vec_size, double bw_ratio_a2a_over_ar,
size_t seed, size_t num_evals) {
constexpr double epsilon = sizeof(emtype) < 4 ? 1e-2 : 1e-3;
int rank = 0, num_procs = 1;
#ifdef ENABLE_MPI
HCTR_MPI_THROW(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
HCTR_MPI_THROW(MPI_Comm_size(MPI_COMM_WORLD, &num_procs));
#endif
HCTR_LIB_THROW(nvmlInit_v2());
std::vector<std::vector<int>> vvgpu;
size_t num_local_gpus = device_list.size();
// size_t num_total_gpus = num_procs*num_local_gpus;
  // if there are multiple nodes, we assume each node has the same GPU device_list
for (int i = 0; i < num_procs; i++) {
vvgpu.push_back(device_list);
}
const auto resource_manager = ResourceManagerExt::create(vvgpu, seed);
size_t total_gpu_count = resource_manager->get_global_gpu_count();
size_t local_gpu_count = resource_manager->get_local_gpu_count();
size_t local_batch_size = batch_size / total_gpu_count;
assert(batch_size % total_gpu_count == 0);
auto table_sizes = generator->get_table_sizes();
size_t num_tables = table_sizes.size();
  size_t total_categories = std::accumulate(table_sizes.begin(), table_sizes.end(), size_t{0});
HCTR_LOG(INFO, WORLD, "total categories: %lu\n", total_categories);
size_t num_init_batches = 50;
SparseTensors<dtype> inputs;
SparseTensors<dtype> inits;
for (size_t i = 0; i < local_gpu_count; i++) {
CudaDeviceContext context(resource_manager->get_local_gpu(i)->get_device_id());
auto buf = GeneralBuffer2<CudaManagedAllocator>::create();
Tensor2<dtype> value_tensor;
buf->reserve({batch_size, num_tables}, &value_tensor);
auto dummy_row_offset_tensor = Tensor2<dtype>();
std::shared_ptr<size_t> dummy_nnz(new size_t);
inputs.emplace_back(SparseTensor<dtype>(value_tensor, dummy_row_offset_tensor, dummy_nnz));
buf->reserve({num_init_batches * batch_size, num_tables}, &value_tensor);
inits.emplace_back(SparseTensor<dtype>(value_tensor, dummy_row_offset_tensor, dummy_nnz));
buf->allocate();
}
const float lr = 0.42f;
GpuLearningRateSchedulers lr_scheds;
for (size_t i = 0; i < local_gpu_count; i++) {
lr_scheds.emplace_back(new GpuLearningRateScheduler(2 * lr, 2, 0, 1, 2.f, 0.f,
resource_manager->get_local_gpu(i)));
lr_scheds.back()->update();
}
HybridSparseEmbeddingParams params = {
batch_size,
batch_size,
num_init_batches,
2 * num_tables * batch_size,
-1,
0.01, // p_max_dup ?
embedding_vec_size,
num_tables,
generator->get_table_sizes(),
num_procs == 1 ? hybrid_embedding::CommunicationType::NVLink_SingleNode
: hybrid_embedding::CommunicationType::IB_NVLink,
1.0,
bw_ratio_a2a_over_ar,
1.0,
false,
false,
HybridEmbeddingType::Distributed,
OptParams{Optimizer_t::SGD, lr, {}, Update_t::Global, 1.0f}};
std::vector<std::shared_ptr<BufferBlock2<emtype>>> placeholder(
resource_manager->get_local_gpu_count(), NULL);
auto embedding = std::make_unique<HybridSparseEmbedding<dtype, emtype>>(
inputs, inputs, params, placeholder, lr_scheds, false, resource_manager, false);
// Table offsets
std::vector<size_t> table_offsets(num_tables);
size_t total = 0;
for (size_t table = 0; table < num_tables; table++) {
table_offsets[table] = total;
total += generator->get_table_sizes()[table];
}
auto initial_input = generator->generate_categorical_input(num_init_batches * batch_size);
if (debug_print) {
std::map<dtype, int> unique_cat;
HCTR_LOG(INFO, ROOT, "Generated INIT unique categories: ");
for (size_t i = 0; i < num_init_batches * batch_size; i++) {
for (size_t j = 0; j < num_tables; j++) {
unique_cat[initial_input[i * num_tables + j] + table_offsets[j]] = 1;
}
}
for (auto c : unique_cat) {
HCTR_PRINT(INFO, " %d", (int)c.first);
}
HCTR_PRINT(INFO, "\n");
}
for (size_t lgpu = 0; lgpu < local_gpu_count; ++lgpu) {
CudaDeviceContext context(resource_manager->get_local_gpu(lgpu)->get_device_id());
auto stream = resource_manager->get_local_gpu(lgpu)->get_stream();
upload_tensor(initial_input, inits[lgpu].get_value_tensor(), stream);
}
size_t tmp_size = 0;
embedding->init_model(inits, tmp_size);
size_t num_frequent = embedding->model_[0].num_frequent;
if (rank == 0) {
HCTR_LOG(INFO, WORLD, "Number of frequent categories: %ld\n", num_frequent);
}
std::vector<size_t> num_infrequent(local_gpu_count);
for (size_t i = 0; i < local_gpu_count; i++) {
num_infrequent[i] = embedding->model_[i].h_infrequent_model_table_offsets[num_tables];
// if (debug_print) {
HCTR_LOG(INFO, WORLD, "local_gpu = %ld, Number of infrequent categories: %ld\n", i,
num_infrequent[i]);
//}
}
std::vector<float> full_emb_table(total_categories * embedding_vec_size);
{
std::mt19937 gen(seed + 2);
std::uniform_real_distribution<float> distr(-1, 1);
for (auto &e : full_emb_table) {
e = distr(gen);
}
}
// Set frequent embeddings
for (size_t device = 0; device < local_gpu_count; device++) {
CudaDeviceContext context(resource_manager->get_local_gpu(device)->get_device_id());
std::vector<dtype> h_frequent_categories;
download_tensor(h_frequent_categories, embedding->model_[device].frequent_categories, 0);
for (size_t i = 0; i < num_frequent; ++i) {
dtype cat = h_frequent_categories[i];
cudaMemcpy(embedding->frequent_embeddings_[device].frequent_embedding_vectors_.get_ptr() +
i * embedding_vec_size,
full_emb_table.data() + cat * embedding_vec_size,
sizeof(float) * embedding_vec_size, cudaMemcpyHostToDevice);
}
if (debug_print && device == 0) {
HCTR_LOG(INFO, ROOT, "Frequent categories: ");
for (size_t i = 0; i < num_frequent; i++) {
HCTR_PRINT(INFO, " %d", h_frequent_categories[i]);
}
HCTR_PRINT(INFO, "\n");
}
}
// Set infrequent embeddings
for (size_t device = 0; device < local_gpu_count; device++) {
CudaDeviceContext context(resource_manager->get_local_gpu(device)->get_device_id());
int global_id = resource_manager->get_local_gpu(device)->get_global_id();
cudaDeviceSynchronize();
size_t num_infrequent = embedding->model_[device].h_infrequent_model_table_offsets[num_tables];
float *h_infrequent_embedding_vectors;
dtype *h_category_location;
cudaMallocHost((void **)&h_infrequent_embedding_vectors,
(num_infrequent + 1) * embedding_vec_size * sizeof(float));
cudaMallocHost((void **)&h_category_location, total_categories * 2 * sizeof(dtype));
cudaMemcpy(h_category_location, embedding->model_[device].category_location.get_ptr(),
total_categories * 2 * sizeof(dtype), cudaMemcpyDeviceToHost);
if (debug_print) {
HCTR_LOG(INFO, ROOT, "Category location array:\n");
for (size_t i = 0; i < total_categories; i++) {
HCTR_PRINT(INFO, " (%d, %d)\n", h_category_location[2 * i],
h_category_location[2 * i + 1]);
}
}
for (size_t i = 0; i < total_categories; ++i) {
if ((int)h_category_location[2 * i] == global_id &&
(size_t)h_category_location[2 * i + 1] < total_categories) {
auto loc = h_category_location[2 * i + 1];
memcpy(h_infrequent_embedding_vectors + loc * embedding_vec_size,
full_emb_table.data() + i * embedding_vec_size, sizeof(float) * embedding_vec_size);
/*
if(device == 0)
{
HCTR_LOG(INFO, WORLD, "i = %ld, loc = %d, embed[0] = %f\n", i, loc,
*(h_infrequent_embedding_vectors+loc*embedding_vec_size));
}
*/
}
}
cudaMemcpy(embedding->infrequent_embeddings_[device].infrequent_embedding_vectors_.get_ptr(),
h_infrequent_embedding_vectors, num_infrequent * embedding_vec_size * sizeof(float),
cudaMemcpyHostToDevice);
// HCTR_LOG(INFO, WORLD, "gpu = %ld, num_infrequent = %ld, infrequent_embedding_vectors_ =
// 0x%lx\n", device, num_infrequent,
// (size_t)(embedding->infrequent_embeddings_[device].infrequent_embedding_vectors_.get_ptr()));
cudaFreeHost(h_infrequent_embedding_vectors);
cudaFreeHost(h_category_location);
}
if (debug_print) {
HCTR_LOG(INFO, ROOT, "Generated full embedding table\n");
for (size_t i = 0; i < full_emb_table.size(); i++) {
HCTR_PRINT(INFO, "%8.5f ", (float)full_emb_table[i]);
if (i % embedding_vec_size == embedding_vec_size - 1) {
HCTR_PRINT(INFO, "\n");
}
}
HCTR_PRINT(INFO, "\n");
}
auto outputs = embedding->get_train_output_tensors();
//======================================================================================
// Do the forward step
//======================================================================================
auto input = generator->generate_categorical_input(batch_size);
for (size_t lgpu = 0; lgpu < local_gpu_count; ++lgpu) {
CudaDeviceContext context(resource_manager->get_local_gpu(lgpu)->get_device_id());
auto stream = resource_manager->get_local_gpu(lgpu)->get_stream();
upload_tensor(input, inputs[lgpu].get_value_tensor(), stream);
}
if (debug_print) {
HCTR_LOG(INFO, ROOT, "Generated input:\n");
HCTR_PRINT(INFO, " Table sizes: ");
for (auto sz : generator->get_table_sizes()) {
HCTR_PRINT(INFO, "%ld ", sz);
}
HCTR_PRINT(INFO, "\n");
HCTR_PRINT(INFO, " Input:\n");
for (size_t i = 0; i < batch_size; i++) {
HCTR_PRINT(INFO, " [ ");
for (size_t j = 0; j < num_tables; j++) {
HCTR_PRINT(INFO, "%7d ", input[i * num_tables + j]);
}
HCTR_PRINT(INFO, " ]\n");
}
}
embedding->forward(true);
if (debug_print) {
const int device = 0;
CudaDeviceContext context(resource_manager->get_local_gpu(device)->get_device_id());
int global_id = resource_manager->get_local_gpu(device)->get_global_id();
cudaDeviceSynchronize();
{
std::vector<dtype> tmp;
download_tensor(tmp, embedding->infrequent_embeddings_[device].indices_->model_indices_, 0);
HCTR_LOG(INFO, ROOT, "Instance %d model indices: ", global_id);
for (size_t j = 0; j < tmp.size(); j++) {
HCTR_PRINT(INFO, " %d", (int)tmp[j]);
}
HCTR_PRINT(INFO, "\n");
HCTR_LOG(INFO, ROOT, "Instance %d model indices OFFSETS: ", global_id);
for (int j = 0; j < num_procs + 1; j++) {
HCTR_PRINT(INFO, " %d",
(int)embedding->infrequent_embeddings_[device]
.indices_->model_indices_offsets_.get_ptr()[j]);
}
HCTR_PRINT(INFO, "\n");
int num_batch_frequent;
HCTR_LIB_THROW(cudaMemcpy(&num_batch_frequent,
embedding->frequent_embeddings_[device]
.indices_->d_num_frequent_sample_indices_.get_ptr(),
sizeof(uint32_t), cudaMemcpyDeviceToHost));
HCTR_LOG(INFO, ROOT, "Instance %d found %d frequent categories in positions: ", global_id,
num_batch_frequent);
download_tensor(
tmp, embedding->frequent_embeddings_[device].indices_->frequent_sample_indices_, 0);
for (int j = 0; j < num_batch_frequent; j++) {
HCTR_PRINT(INFO, " %d", (int)tmp[j]);
}
HCTR_PRINT(INFO, "\n");
}
{
std::vector<dtype> tmp;
download_tensor(tmp, embedding->infrequent_embeddings_[device].indices_->network_indices_, 0);
HCTR_LOG(INFO, ROOT, "Instance %d network indices: ", global_id);
for (size_t j = 0; j < tmp.size(); j++) {
HCTR_PRINT(INFO, " %d", (int)tmp[j]);
}
HCTR_PRINT(INFO, "\n");
HCTR_LOG(INFO, ROOT, "Instance %d network indices OFFSETS: ", global_id);
for (int j = 0; j < num_procs + 1; j++) {
HCTR_PRINT(INFO, " %d",
(int)embedding->infrequent_embeddings_[device]
.indices_->network_indices_offsets_.get_ptr()[j]);
}
HCTR_PRINT(INFO, "\n");
}
}
// Check
for (size_t device = 0; device < local_gpu_count; device++) {
CudaDeviceContext context(resource_manager->get_local_gpu(device)->get_device_id());
int global_id = resource_manager->get_local_gpu(device)->get_global_id();
cudaDeviceSynchronize();
std::vector<emtype> h_output;
std::vector<emtype> expected(embedding_vec_size);
ASSERT_EQ(local_batch_size, embedding->get_batch_size_per_gpu(true));
download_tensor(h_output, Tensor2<emtype>::stretch_from(outputs[device]), 0);
ASSERT_EQ(h_output.size() % embedding_vec_size, 0);
ASSERT_EQ(h_output.size(), local_batch_size * num_tables * embedding_vec_size);
for (size_t i = 0; i < h_output.size() / embedding_vec_size; i++) {
size_t table = i % num_tables;
size_t cat_id = table_offsets[table] + input[i + global_id * local_batch_size * num_tables];
auto expected_ptr = full_emb_table.data() + cat_id * embedding_vec_size;
auto actual_ptr = h_output.data() + i * embedding_vec_size;
if (debug_print) {
HCTR_LOG(INFO, ROOT, " Instance %d sample %ld slot %ld comparing category %ld: ", global_id,
i, table, cat_id);
for (size_t j = 0; j < embedding_vec_size; j++) {
HCTR_PRINT(INFO, " (%8.5f : %8.5f) ", (float)actual_ptr[j], (float)expected_ptr[j]);
}
HCTR_PRINT(INFO, "\n");
}
for (size_t j = 0; j < embedding_vec_size; j++) {
expected[j] = (emtype)expected_ptr[j];
}
ASSERT_EQ(memcmp(expected.data(), actual_ptr, embedding_vec_size * sizeof(emtype)), 0)
<< "Data mismatch on instance " << global_id << " in sample " << i / num_tables
<< " feature " << table << std::endl;
}
}
//======================================================================================
// Do the backward step and update
//======================================================================================
for (size_t device = 0; device < local_gpu_count; device++) {
CudaDeviceContext context(resource_manager->get_local_gpu(device)->get_device_id());
std::vector<emtype> h_output(local_batch_size * num_tables * embedding_vec_size);
// Per-GPU generator
std::mt19937 gen(seed + 3 + resource_manager->get_local_gpu(device)->get_global_id());
std::uniform_real_distribution<float> distr(-1, 1);
for (auto &grad : h_output) {
grad = (emtype)distr(gen);
}
upload_tensor(h_output, Tensor2<emtype>::stretch_from(outputs[device]), 0);
}
  // We can't allreduce the __half type with MPI, so we need to recreate all the output tensors locally.
std::vector<double> gradients(total_categories * embedding_vec_size, 0);
for (size_t device = 0; device < total_gpu_count; device++) {
std::mt19937 gen(seed + 3 + device);
std::uniform_real_distribution<float> distr(-1, 1);
for (size_t i = 0; i < local_batch_size * num_tables; i++) {
size_t table = i % num_tables;
size_t cat_id = table_offsets[table] + input[i + device * local_batch_size * num_tables];
auto grad_ptr = gradients.data() + cat_id * embedding_vec_size;
for (size_t j = 0; j < embedding_vec_size; j++) {
grad_ptr[j] += distr(gen);
}
}
}
if (debug_print) {
HCTR_LOG(INFO, ROOT, "Generated embedding gradients");
for (size_t i = 0; i < gradients.size(); i++) {
if (i % embedding_vec_size == 0) {
HCTR_PRINT(INFO, "\nRank %d cat %ld :: ", rank, i / embedding_vec_size);
}
HCTR_PRINT(INFO, "%8.5f ", (float)gradients[i]);
}
HCTR_PRINT(INFO, "\n");
}
embedding->backward();
embedding->update_params();
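  // Reference update that the checks below verify (plain SGD with Update_t::Global):
  //   w_new[cat][j] = w_old[cat][j] - lr * grad[cat][j], with lr = 0.42f.
  // For instance, w_old = 0.25 with an accumulated gradient of 0.10 gives
  // w_new = 0.25 - 0.42 * 0.10 = 0.208 (compared within `epsilon`).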
// Check
// Check frequent embeddings
for (size_t device = 0; device < local_gpu_count; device++) {
CudaDeviceContext context(resource_manager->get_local_gpu(device)->get_device_id());
int global_id = resource_manager->get_local_gpu(device)->get_global_id();
cudaDeviceSynchronize();
std::vector<dtype> h_frequent_categories;
download_tensor(h_frequent_categories, embedding->model_[device].frequent_categories, 0);
float *h_frequent_embedding_vectors;
cudaMallocHost((void **)&h_frequent_embedding_vectors, embedding_vec_size * sizeof(float));
// Only checking the categories that the instance owns
size_t chunk = num_frequent / resource_manager->get_global_gpu_count();
ASSERT_EQ(num_frequent % resource_manager->get_global_gpu_count(), 0);
size_t start = device * chunk;
size_t end = (device + 1) * chunk;
for (size_t i = start; i < end; ++i) {
dtype cat_id = h_frequent_categories[i];
cudaMemcpy(h_frequent_embedding_vectors,
embedding->frequent_embeddings_[device].frequent_embedding_vectors_.get_ptr() +
i * embedding_vec_size,
sizeof(float) * embedding_vec_size, cudaMemcpyDeviceToHost);
for (size_t j = 0; j < embedding_vec_size; j++) {
ASSERT_NEAR((double)h_frequent_embedding_vectors[j],
(double)full_emb_table.data()[cat_id * embedding_vec_size + j] -
(double)gradients.data()[cat_id * embedding_vec_size + j] * lr,
epsilon)
<< "Gradient (frequent) mismatch on instance " << global_id << " in category " << cat_id
<< " dimension " << j << "/" << embedding_vec_size << std::endl;
}
}
cudaFreeHost(h_frequent_embedding_vectors);
}
// Check infrequent embeddings
for (size_t device = 0; device < local_gpu_count; device++) {
CudaDeviceContext context(resource_manager->get_local_gpu(device)->get_device_id());
int global_id = resource_manager->get_local_gpu(device)->get_global_id();
size_t num_infrequent = embedding->model_[device].h_infrequent_model_table_offsets[num_tables];
float *h_infrequent_embedding_vectors;
dtype *h_category_location;
cudaMallocHost((void **)&h_infrequent_embedding_vectors,
num_infrequent * embedding_vec_size * sizeof(float));
cudaMallocHost((void **)&h_category_location, total_categories * 2 * sizeof(dtype));
cudaMemcpy(h_category_location, embedding->model_[device].category_location.get_ptr(),
total_categories * 2 * sizeof(dtype), cudaMemcpyDeviceToHost);
cudaMemcpy(h_infrequent_embedding_vectors,
embedding->infrequent_embeddings_[device].infrequent_embedding_vectors_.get_ptr(),
num_infrequent * embedding_vec_size * sizeof(float), cudaMemcpyDeviceToHost);
for (size_t cat_id = 0; cat_id < total_categories; ++cat_id) {
if ((int)h_category_location[2 * cat_id] == global_id) {
auto local_cat_id = h_category_location[2 * cat_id + 1];
for (size_t j = 0; j < embedding_vec_size; j++) {
ASSERT_NEAR((double)h_infrequent_embedding_vectors[local_cat_id * embedding_vec_size + j],
(double)full_emb_table.data()[cat_id * embedding_vec_size + j] -
(double)gradients.data()[cat_id * embedding_vec_size + j] * lr,
epsilon)
<< "Gradient (infrequent) mismatch on instance " << global_id << " in category "
<< cat_id << " dimension " << j << "/" << embedding_vec_size << std::endl;
}
}
}
cudaFreeHost(h_infrequent_embedding_vectors);
cudaFreeHost(h_category_location);
}
}
template <typename dtype, typename emtype>
void end_to_end(std::vector<int> device_list, size_t num_tables, size_t total_categories,
size_t batch_size, size_t embedding_vec_size, double bw_ratio_a2a_over_ar,
size_t seed = 42, size_t num_evals = 1) {
int num_procs = 1;
#ifdef ENABLE_MPI
HCTR_MPI_THROW(MPI_Comm_size(MPI_COMM_WORLD, &num_procs));
#endif
size_t num_total_gpus = num_procs * device_list.size();
HybridEmbeddingConfig<dtype> test_config = {
(size_t)num_procs,
num_total_gpus,
num_tables,
embedding_vec_size,
(dtype)total_categories,
      (dtype)0,  // irrelevant here
      1.0f,      // irrelevant here
num_procs == 1 ? hybrid_embedding::CommunicationType::NVLink_SingleNode
: hybrid_embedding::CommunicationType::IB_NVLink,
};
auto generator = std::make_unique<HybridEmbeddingInputGenerator<dtype>>(test_config, seed + 1);
end_to_end_impl<dtype, emtype>(device_list, generator.get(), batch_size, embedding_vec_size,
bw_ratio_a2a_over_ar, seed, num_evals);
}
template <typename dtype, typename emtype>
void end_to_end(std::vector<int> device_list, std::vector<size_t> table_sizes, size_t batch_size,
size_t embedding_vec_size, double bw_ratio_a2a_over_ar, size_t seed = 42,
size_t num_evals = 1) {
int num_procs = 1;
#ifdef ENABLE_MPI
HCTR_MPI_THROW(MPI_Comm_size(MPI_COMM_WORLD, &num_procs));
#endif
size_t num_total_gpus = num_procs * device_list.size();
HybridEmbeddingConfig<dtype> test_config = {
(size_t)num_procs,
num_total_gpus,
      0,         // irrelevant here
      embedding_vec_size,
      0,         // irrelevant here
      (dtype)0,  // irrelevant here
      1.0f,      // irrelevant here
num_procs == 1 ? hybrid_embedding::CommunicationType::NVLink_SingleNode
: hybrid_embedding::CommunicationType::IB_NVLink,
};
auto generator =
std::make_unique<HybridEmbeddingInputGenerator<dtype>>(test_config, table_sizes, seed + 1);
end_to_end_impl<dtype, emtype>(device_list, generator.get(), batch_size, embedding_vec_size,
bw_ratio_a2a_over_ar, seed, num_evals);
}
class MPIEnvironment : public ::testing::Environment {
protected:
virtual void SetUp() { test::mpi_init(); }
virtual void TearDown() { test::mpi_finalize(); }
virtual ~MPIEnvironment(){};
};
::testing::Environment *const mpi_env = ::testing::AddGlobalTestEnvironment(new MPIEnvironment);
//
TEST(hybrid_e2e, test1) { end_to_end<uint32_t, float>({0}, 2, 16, 20, 2, 1.0e10, global_seed); }
TEST(hybrid_e2e, test2) { end_to_end<uint32_t, float>({0}, 2, 16, 20, 2, 1.0e-10, global_seed++); }
TEST(hybrid_e2e, test3) {
end_to_end<uint32_t, float>({0, 1}, 2, 128, 20, 2, 1.0e10, global_seed++);
}
TEST(hybrid_e2e, test4) {
end_to_end<uint32_t, float>({0, 1}, 2, 128, 20, 2, 1.0e-10, global_seed++);
}
TEST(hybrid_e2e, test5) { end_to_end<uint32_t, float>({0, 1}, 2, 128, 20, 2, 1.0, global_seed++); }
TEST(hybrid_e2e, test6) { end_to_end<uint32_t, float>({0, 1}, 7, 128, 20, 2, 1.0, global_seed++); }
TEST(hybrid_e2e, test7) {
end_to_end<uint32_t, float>({0, 1, 2}, 3, 192, 96, 5, 1.0, global_seed++);
}
TEST(hybrid_e2e, test8) {
end_to_end<uint32_t, float>({0, 1, 2, 3}, 6, 651, 96, 128, 1.5, global_seed++);
}
TEST(hybrid_e2e, test9) {
end_to_end<uint32_t, float>({0, 1, 2, 3}, 18, 6531, 256, 64, 1.7, global_seed++);
}
TEST(hybrid_e2e, test10) {
end_to_end<uint32_t, float>({0, 1, 2, 3, 4, 5, 6, 7}, 18, 6531, 256, 64, 1.7, global_seed++);
}
TEST(hybrid_e2e, test11) {
end_to_end<uint32_t, float>({0, 1, 2, 3, 4, 5, 6, 7}, 26, 16531, 512, 48, 1.33, global_seed++);
}
TEST(hybrid_e2e, test12) {
end_to_end<uint32_t, float>({0, 1, 6, 7}, 13, 21345, 256, 32, 0.6, global_seed++);
}
TEST(hybrid_e2e, test13) {
std::vector<size_t> slot_size_array{
39884406, 39043, 17289, 7420, 20263, 3, 7120, 1543, 63,
38532951, 2953546, 403346, 10, 2208, 11938, 155, 4, 976,
14, 39979771, 25641295, 39664984, 585935, 12972, 108, 36};
// for (auto& s : slot_size_array) {
// s = s/16 + 1;
// }
end_to_end<uint32_t, float>({0, 1, 2, 3, 4, 5, 6, 7}, slot_size_array, 1024, 128, 1.9 / 1.3,
global_seed++);
}
TEST(hybrid_e2e, test21) { end_to_end<uint32_t, __half>({0}, 2, 16, 20, 2, 1.0e10, global_seed++); }
TEST(hybrid_e2e, test22) {
end_to_end<uint32_t, __half>({0}, 2, 16, 20, 2, 1.0e-10, global_seed++);
}
TEST(hybrid_e2e, test23) {
end_to_end<uint32_t, __half>({0, 1}, 2, 128, 20, 2, 1.0e10, global_seed++);
}
TEST(hybrid_e2e, test24) {
end_to_end<uint32_t, __half>({0, 1}, 2, 128, 20, 2, 1.0e-10, global_seed++);
}
TEST(hybrid_e2e, test25) {
end_to_end<uint32_t, __half>({0, 1}, 2, 128, 20, 2, 1.0, global_seed++);
}
TEST(hybrid_e2e, test26) {
end_to_end<uint32_t, __half>({0, 1}, 7, 128, 20, 2, 1.0, global_seed++);
}
TEST(hybrid_e2e, test27) {
end_to_end<uint32_t, __half>({0, 1, 2}, 3, 192, 96, 5, 1.0, global_seed++);
}
TEST(hybrid_e2e, test28) {
end_to_end<uint32_t, __half>({0, 1, 2, 3}, 6, 651, 96, 128, 1.5, global_seed++);
}
TEST(hybrid_e2e, test29) {
end_to_end<uint32_t, __half>({0, 1, 2, 3}, 18, 6531, 256, 64, 1.7, global_seed++);
}
TEST(hybrid_e2e, test30) {
end_to_end<uint32_t, __half>({0, 1, 2, 3, 4, 5, 6, 7}, 18, 6531, 256, 64, 1.7, global_seed++);
}
TEST(hybrid_e2e, test31) {
end_to_end<uint32_t, __half>({0, 1, 2, 3, 4, 5, 6, 7}, 26, 16531, 512, 48, 1.33, global_seed++);
}
TEST(hybrid_e2e, test32) {
end_to_end<uint32_t, __half>({0, 1, 6, 7}, 13, 21345, 256, 32, 0.6, global_seed++);
}
TEST(hybrid_e2e, test33) {
std::vector<size_t> slot_size_array{
39884406, 39043, 17289, 7420, 20263, 3, 7120, 1543, 63,
38532951, 2953546, 403346, 10, 2208, 11938, 155, 4, 976,
14, 39979771, 25641295, 39664984, 585935, 12972, 108, 36};
// for (auto& s : slot_size_array) {
// s = s/16 + 1;
// }
end_to_end<uint32_t, float>({0, 1, 2, 3, 4, 5, 6, 7}, slot_size_array, 1024, 128, 1.9 / 1.3,
global_seed++);
}
#include "nvblox/experiments/integrators/cuda/experimental_integrator_input_frames.cuh"
namespace nvblox {
namespace experiments {
__device__ inline float interpolateDepthTexture(
cudaTextureObject_t depth_texture, const Eigen::Vector2f& u_px) {
return tex2D<float>(depth_texture, u_px.x() + 0.5, u_px.y() + 0.5);
}
__device__ inline bool interpolateDepthImage(const float* image, int rows,
int cols, Eigen::Vector2f u_px,
float* value_ptr) {
// If the projected point does not lie on the image plane, fail. (Here "on the
// image plane" means having pixel centers surrounding the query point, ie no
// extrapolation).
if ((u_px.x() < 0.0f) || (u_px.y() < 0.0f) ||
(u_px.x() > static_cast<float>(cols) - 1.0f) ||
(u_px.y() > static_cast<float>(rows) - 1.0f)) {
return false;
}
  // Interpolation on a grid with 1-pixel spacing.
// https://en.wikipedia.org/wiki/Bilinear_interpolation#On_the_unit_square
// Get the pixel coordinates of the pixel on the low side
const Index2D u_low_side_px = (u_px).cast<int>();
// Get the 4-neighbours values and put them in a matrix
// clang-format off
const Eigen::Matrix2f value_matrix =
(Eigen::Matrix2f() <<
image::access(u_low_side_px.y(), u_low_side_px.x(), cols, image),
image::access(u_low_side_px.y() + 1, u_low_side_px.x(), cols, image),
image::access(u_low_side_px.y(), u_low_side_px.x() + 1, cols, image),
image::access(u_low_side_px.y() + 1, u_low_side_px.x() + 1, cols, image))
.finished();
// clang-format on
// Offset of the requested point to the low side center.
const Eigen::Vector2f u_offset = (u_px - u_low_side_px.cast<float>());
const Eigen::Vector2f x_vec(1.0f - u_offset.x(), u_offset.x());
const Eigen::Vector2f y_vec(1.0f - u_offset.y(), u_offset.y());
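  // Written out, with dx = u_offset.x() and dy = u_offset.y(), the product below expands to
  //   (1-dx)(1-dy) * I(y0,   x0  ) + (1-dx) * dy * I(y0+1, x0  )
  // +  dx  (1-dy) * I(y0,   x0+1) +  dx     * dy * I(y0+1, x0+1),
  // i.e. the standard bilinear weights on the unit square.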
*value_ptr = x_vec.transpose() * value_matrix * y_vec;
return true;
}
__global__ void intergrateBlocksTextureBasedInterpolation(
const Index3D* block_indices_device_ptr, const Camera* camera_device_ptr,
cudaTextureObject_t depth_texture, const Eigen::Matrix3f* R_C_L_device_ptr,
const Eigen::Vector3f* t_C_L_device_ptr, const float block_size,
const float truncation_distance_m, const float max_weight,
VoxelBlock<TsdfVoxel>** block_device_ptrs) {
// Linear index of thread within block
const int thread_index_linear =
threadIdx.x + blockDim.x * (threadIdx.y + (blockDim.y * threadIdx.z));
// Get the data which is common between all threads in a block into shared
// memory
// TODO(alexmillane): We could also get the camera into shared memory. But
// maybe let's profile things first and see what is actually affecting the
// performance.
__shared__ Eigen::Matrix3f R_C_L;
if (thread_index_linear < 9) {
R_C_L.data()[thread_index_linear] =
R_C_L_device_ptr->data()[thread_index_linear];
}
__shared__ Eigen::Vector3f t_C_L;
if (thread_index_linear >= 9 && thread_index_linear < 12) {
t_C_L.data()[thread_index_linear - 9] =
t_C_L_device_ptr->data()[thread_index_linear - 9];
}
__syncthreads();
// The indices of the voxel this thread will work on
// blockIdx.x - The index of the block we're working on (blockIdx.y/z
// should be zero)
// threadIdx.x/y/z - The indices of the voxel within the block (we
// expect the threadBlockDims == voxelBlockDims)
const Index3D block_idx = block_indices_device_ptr[blockIdx.x];
const Index3D voxel_idx(threadIdx.z, threadIdx.y, threadIdx.x);
// Get the Voxel we'll update in this thread
  // NOTE(alexmillane): Note that we've reversed the voxel indexing order such
  // that adjacent threads (x-major) access adjacent memory locations in the
  // block (z-major).
TsdfVoxel* voxel_ptr =
&(block_device_ptrs[blockIdx.x]
->voxels[threadIdx.z][threadIdx.y][threadIdx.x]);
// Voxel center point
const Vector3f p_voxel_center_L = getCenterPostionFromBlockIndexAndVoxelIndex(
block_size, block_idx, voxel_idx);
// To camera frame
const Vector3f p_voxel_center_C = R_C_L * p_voxel_center_L + t_C_L;
// Project to image plane
Eigen::Vector2f u_px;
if (!camera_device_ptr->project(p_voxel_center_C, &u_px)) {
return;
}
// If the projected point does not lie on the image plane, fail. (Here "on the
// image plane" means having pixel centers surrounding the query point, ie no
// extrapolation).
if ((u_px.x() < 0.0f) || (u_px.y() < 0.0f) ||
(u_px.x() > static_cast<float>(camera_device_ptr->width()) - 1.0f) ||
(u_px.y() > static_cast<float>(camera_device_ptr->height()) - 1.0f)) {
return;
}
// Get the MEASURED depth of the SURFACE, by interpolating the depth image
  const float surface_depth_measured =
      interpolateDepthTexture(depth_texture, u_px);
  // Get the MEASURED depth of the VOXEL
  const float voxel_distance_measured =
      surface_depth_measured - p_voxel_center_C.z();
// If we're behind the negative truncation distance, just continue.
if (voxel_distance_measured < -truncation_distance_m) {
return;
}
// Read CURRENT voxel values (from global GPU memory)
const float voxel_distance_current = voxel_ptr->distance;
const float voxel_weight_current = voxel_ptr->weight;
// NOTE(alexmillane): We could try to use CUDA math functions to speed up
// below
// https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE
// Fuse
constexpr float measurement_weight = 1.0f;
const float fused_distance = (voxel_distance_measured * measurement_weight +
voxel_distance_current * voxel_weight_current) /
(measurement_weight + voxel_weight_current);
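  // Worked example of the running weighted average above: a stored voxel at distance 0.20 m
  // with weight 3 fused with a new measurement of 0.05 m (weight 1) gives
  // (0.05 * 1 + 0.20 * 3) / (1 + 3) = 0.1625 m; the weight then becomes min(4, max_weight).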
// Write back to voxel (to global GPU memory)
voxel_ptr->distance = fused_distance > 0.0f
? fmin(truncation_distance_m, fused_distance)
: fmax(-truncation_distance_m, fused_distance);
voxel_ptr->weight =
fmin(measurement_weight + voxel_weight_current, max_weight);
}
__global__ void intergrateBlocksGlobalBasedInterpolation(
const Index3D* block_indices_device_ptr, const Camera* camera_device_ptr,
const float* image, int rows, int cols,
const Eigen::Matrix3f* R_C_L_device_ptr,
const Eigen::Vector3f* t_C_L_device_ptr, const float block_size,
const float truncation_distance_m, const float max_weight,
VoxelBlock<TsdfVoxel>** block_device_ptrs) {
// Linear index of thread within block
const int thread_index_linear =
threadIdx.x + blockDim.x * (threadIdx.y + (blockDim.y * threadIdx.z));
// Get the data which is common between all threads in a block into shared
// memory
// TODO(alexmillane): We could also get the camera into shared memory. But
// maybe let's profile things first and see what is actually affecting the
// performance.
__shared__ Eigen::Matrix3f R_C_L;
if (thread_index_linear < 9) {
R_C_L.data()[thread_index_linear] =
R_C_L_device_ptr->data()[thread_index_linear];
}
__shared__ Eigen::Vector3f t_C_L;
if (thread_index_linear >= 9 && thread_index_linear < 12) {
t_C_L.data()[thread_index_linear - 9] =
t_C_L_device_ptr->data()[thread_index_linear - 9];
}
__syncthreads();
// The indices of the voxel this thread will work on
// blockIdx.x - The index of the block we're working on (blockIdx.y/z
// should be zero)
// threadIdx.x/y/z - The indices of the voxel within the block (we
// expect the threadBlockDims == voxelBlockDims)
const Index3D block_idx = block_indices_device_ptr[blockIdx.x];
const Index3D voxel_idx(threadIdx.z, threadIdx.y, threadIdx.x);
// Get the Voxel we'll update in this thread
  // NOTE(alexmillane): Note that we've reversed the voxel indexing order such
  // that adjacent threads (x-major) access adjacent memory locations in the
  // block (z-major).
TsdfVoxel* voxel_ptr =
&(block_device_ptrs[blockIdx.x]
->voxels[threadIdx.z][threadIdx.y][threadIdx.x]);
// Voxel center point
const Vector3f p_voxel_center_L = getCenterPostionFromBlockIndexAndVoxelIndex(
block_size, block_idx, voxel_idx);
// To camera frame
const Vector3f p_voxel_center_C = R_C_L * p_voxel_center_L + t_C_L;
// Project to image plane
Eigen::Vector2f u_px;
if (!camera_device_ptr->project(p_voxel_center_C, &u_px)) {
return;
}
// Get the MEASURED depth of the SURFACE, by interpolating the depth image
  float surface_depth_measured;
  if (!interpolateDepthImage(image, rows, cols, u_px, &surface_depth_measured)) {
    return;
  }
  // Get the MEASURED depth of the VOXEL
  const float voxel_distance_measured =
      surface_depth_measured - p_voxel_center_C.z();
// If we're behind the negative truncation distance, just continue.
if (voxel_distance_measured < -truncation_distance_m) {
return;
}
// Read CURRENT voxel values (from global GPU memory)
const float voxel_distance_current = voxel_ptr->distance;
const float voxel_weight_current = voxel_ptr->weight;
// NOTE(alexmillane): We could try to use CUDA math functions to speed up
// below
// https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE
// Fuse
constexpr float measurement_weight = 1.0f;
const float fused_distance = (voxel_distance_measured * measurement_weight +
voxel_distance_current * voxel_weight_current) /
(measurement_weight + voxel_weight_current);
// Write back to voxel (to global GPU memory)
voxel_ptr->distance = fused_distance > 0.0f
? fmin(truncation_distance_m, fused_distance)
: fmax(-truncation_distance_m, fused_distance);
voxel_ptr->weight =
fmin(measurement_weight + voxel_weight_current, max_weight);
}
ProjectiveTsdfIntegratorExperimentsBase::
ProjectiveTsdfIntegratorExperimentsBase()
: ProjectiveTsdfIntegrator() {
checkCudaErrors(cudaStreamCreate(&integration_stream_));
}
ProjectiveTsdfIntegratorExperimentsBase::
~ProjectiveTsdfIntegratorExperimentsBase() {
finish();
checkCudaErrors(cudaStreamDestroy(integration_stream_));
}
void ProjectiveTsdfIntegratorExperimentsBase::finish() const {
cudaStreamSynchronize(integration_stream_);
}
void ProjectiveTsdfIntegratorExperimentsTexture::updateBlocks(
const std::vector<Index3D>& block_indices, const DepthImage& depth_frame,
const Transform& T_L_C, const Camera& camera,
const float truncation_distance_m, TsdfLayer* layer_ptr) {
CHECK_NOTNULL(layer_ptr);
// Create an integrator frame
  // Internally this object starts (asynchronous) transfers of its inputs to
  // device memory. Kernels launched on the passed stream can therefore use the
  // input frame's device-side members.
const IntegratorInputFrameExperimentsTexture input(
block_indices, depth_frame, T_L_C, camera, truncation_distance_m,
max_weight_, layer_ptr, integration_stream_);
// Kernel call - One ThreadBlock launched per VoxelBlock
constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;
const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide);
const int num_blocks = input.num_blocks;
// clang-format off
intergrateBlocksTextureBasedInterpolation<<<num_blocks, kThreadsPerBlock, 0, integration_stream_>>>(
input.block_indices_device_ptr,
input.camera_device_ptr,
input.depth_texture.texture_object(),
input.R_C_L_device_ptr,
input.t_C_L_device_ptr,
input.block_size,
input.truncation_distance_m,
input.max_weight,
input.block_device_ptrs);
// clang-format on
checkCudaErrors(cudaPeekAtLastError());
// Finish processing of the frame before returning control
finish();
}
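// Minimal usage sketch (commented out; the variable names and the 0.4 m truncation value are
// illustrative assumptions, not taken from this file). Note that updateBlocks() is synchronous,
// since it calls finish() on the integration stream before returning:
//   ProjectiveTsdfIntegratorExperimentsTexture integrator;
//   integrator.updateBlocks(block_indices, depth_frame, T_L_C, camera,
//                           /*truncation_distance_m=*/0.4f, &tsdf_layer);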
void ProjectiveTsdfIntegratorExperimentsGlobal::updateBlocks(
const std::vector<Index3D>& block_indices, const DepthImage& depth_frame,
const Transform& T_L_C, const Camera& camera,
const float truncation_distance_m, TsdfLayer* layer_ptr) {
CHECK_NOTNULL(layer_ptr);
// Create an integrator frame
  // Internally this object starts (asynchronous) transfers of its inputs to
  // device memory. Kernels launched on the passed stream can therefore use the
  // input frame's device-side members.
const IntegratorInputFrameExperimentsGlobal input(
block_indices, depth_frame, T_L_C, camera, truncation_distance_m,
max_weight_, layer_ptr, integration_stream_);
// Kernel call - One ThreadBlock launched per VoxelBlock
constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;
const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide);
const int num_blocks = input.num_blocks;
// clang-format off
intergrateBlocksGlobalBasedInterpolation<<<num_blocks, kThreadsPerBlock, 0, integration_stream_>>>(
input.block_indices_device_ptr,
input.camera_device_ptr,
input.depth_frame_unified_ptr,
input.depth_frame_rows,
input.depth_frame_cols,
input.R_C_L_device_ptr,
input.t_C_L_device_ptr,
input.block_size,
input.truncation_distance_m,
input.max_weight,
input.block_device_ptrs);
// clang-format on
checkCudaErrors(cudaPeekAtLastError());
// Finish processing of the frame before returning control
finish();
}
} // namespace experiments
} // namespace nvblox
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/quantize.hpp"
#include <float.h>
namespace caffe {
template <typename Dtype>
__global__ void gpu_fix_kernel1(const int n, const Dtype *x, Dtype *y,
Dtype step, Dtype lb, Dtype ub) {
CUDA_KERNEL_LOOP(i, n) { y[i] = min(max(round(x[i] / step) * step, lb), ub); }
}
// sigmoid kernel: y = sigmoid(x)
template <typename Dtype>
__global__ void gpu_sigmoid_kernel(const int n, const Dtype *x, Dtype *y) {
CUDA_KERNEL_LOOP(i, n) { y[i] = 1. / (1. + exp(-x[i])); }
}
template <typename Dtype>
__global__ void gpu_fix_kernel2(const int n, const Dtype *x, Dtype *y,
Dtype step, Dtype lb, Dtype ub) {
CUDA_KERNEL_LOOP(i, n) {
Dtype tmp = x[i] / step;
    // simulate the DPU rounding mode to save hardware resources: a negative value ending in
    // exactly .5 is rounded toward zero (ceil) instead of away from zero
if ( tmp < 0 && ( tmp - floor( tmp ) ) == 0.5 )
tmp = ceil( tmp );
else
tmp = round( tmp );
y[i] = min(max(tmp * step, lb), ub);
}
}
template <typename Dtype>
void caffe_gpu_fix(const int n, const Dtype *x, Dtype *y, const int bit_width,
const int p) {
Dtype step = std::pow(Dtype(2), -p);
Dtype lower_bound = -std::pow(Dtype(2), bit_width - 1) * step;
Dtype upper_bound = std::pow(Dtype(2), bit_width - 1) * step - step;
gpu_fix_kernel1<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, step, lower_bound, upper_bound);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void caffe_gpu_top_fix(const int n, const Dtype *x, Dtype *y, const int bit_width,
const int p) {
Dtype step = std::pow(Dtype(2), -p);
Dtype lower_bound = -std::pow(Dtype(2), bit_width - 1) * step;
Dtype upper_bound = std::pow(Dtype(2), bit_width - 1) * step - step;
gpu_fix_kernel2<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, step, lower_bound, upper_bound);
CUDA_POST_KERNEL_CHECK;
}
template void caffe_gpu_fix<float>(const int n, const float *x, float *y,
const int bit_width, const int p);
template void caffe_gpu_fix<double>(const int n, const double *x, double *y,
const int bit_width, const int p);
template void caffe_gpu_top_fix<float>(const int n, const float *x, float *y,
const int bit_width, const int p);
template void caffe_gpu_top_fix<double>(const int n, const double *x, double *y,
const int bit_width, const int p);
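// Worked example of the fix-point mapping above: with bit_width = 8 and p = 5,
// step = 2^-5 = 0.03125 and the representable range is [-4, 4 - step] = [-4, 3.96875].
// An input of 0.1 maps to round(0.1 / 0.03125) * 0.03125 = 3 * 0.03125 = 0.09375.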
// Overflow: choose the fix position so that none of the weights/activations overflow
// (i.e. the full dynamic range of the data is covered)
template <typename Dtype>
Dtype caffe_gpu_fix_pos_overflow(const int n, const Dtype *x,
const int bit_width) {
// Use half of step as a guard
Dtype fix_lb = -std::pow(2, bit_width - 1) - 0.5;
Dtype fix_ub = std::pow(2, bit_width - 1) - 0.5;
// Dynamic range [min, max]
// Find min and max value in GPU
auto min_max = thrust::minmax_element(thrust::device, x, x + n);
// Copy to Host
Dtype x_min, x_max;
cudaMemcpy(&x_min, min_max.first, sizeof(Dtype), cudaMemcpyDeviceToHost);
cudaMemcpy(&x_max, min_max.second, sizeof(Dtype), cudaMemcpyDeviceToHost);
Dtype step = std::max(x_min / fix_lb, x_max / fix_ub);
if (step == 0) {
return SHRT_MAX;
} else if(isnan(step)) {
return SHRT_MIN;
}
return std::log2(1 / step);
}
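// Worked example for the bound computation above: with bit_width = 8, fix_lb = -128.5 and
// fix_ub = 127.5. For data in [-3.0, 5.0], step = max(-3.0 / -128.5, 5.0 / 127.5) ~= 0.0392,
// so the returned position is log2(1 / 0.0392) ~= 4.67 (callers like caffe_gpu_fix_pos_diffs
// floor this to an integer fix position).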
template float caffe_gpu_fix_pos_overflow<float>(const int n, const float *x,
const int bit_width);
template double caffe_gpu_fix_pos_overflow<double>(const int n, const double *x,
const int bit_width);
// Diff_S: minimize the L2 norm of the difference between fixed and float weights/activations
template <typename Dtype>
Dtype caffe_gpu_fix_pos_diffs(const int n, const Dtype *x, const int bit_width,
const int range) {
// Calc search range for scale
int max_scale;
Dtype fix_lb = -std::pow(2, bit_width - 1) - 0.5;
Dtype fix_ub = std::pow(2, bit_width - 1) - 0.5;
auto min_max = thrust::minmax_element(thrust::device, x, x + n);
// Copy to Host
Dtype x_min, x_max;
cudaMemcpy(&x_min, min_max.first, sizeof(Dtype), cudaMemcpyDeviceToHost);
cudaMemcpy(&x_max, min_max.second, sizeof(Dtype), cudaMemcpyDeviceToHost);
// Find max_scale
Dtype step = std::max(x_min / fix_lb, x_max / fix_ub);
if (step == 0) {
return SHRT_MAX;
} else if(isnan(step)) {
return SHRT_MIN;
} else {
max_scale = std::floor(std::log2(1 / step));
}
  // Search for the fix position in the range [max_scale, max_scale + range)
Dtype final_scale;
final_scale = max_scale;
Dtype fixed_diff_min = FLT_MAX;
Dtype *buffer;
CUDA_CHECK(cudaMalloc((void **)&buffer, n * sizeof(Dtype)));
/* CHECK_NOTNULL(buffer); */
for (int scale = max_scale; scale < max_scale + range; scale++) {
caffe_gpu_fix<Dtype>(n, x, buffer, bit_width, scale);
caffe_gpu_sub<Dtype>(n, x, buffer, buffer);
caffe_gpu_powx<Dtype>(n, buffer, 2, buffer);
Dtype fixed_diff;
caffe_gpu_asum(n, buffer, &fixed_diff);
if (fixed_diff < fixed_diff_min) {
final_scale = scale;
fixed_diff_min = fixed_diff;
}
}
CUDA_CHECK(cudaFree(buffer));
return final_scale;
}
template float caffe_gpu_fix_pos_diffs<float>(const int n, const float *x,
const int bit_width,
const int range);
template double caffe_gpu_fix_pos_diffs<double>(const int n, const double *x,
const int bit_width,
const int range);
// Diff_S_Sigmoid: minimize the L2 norm of the difference between sigmoid(fixed) and sigmoid(float) weights/activations
template <typename Dtype>
Dtype caffe_gpu_fix_pos_diffs_sigmoid(const int n, const Dtype *x, const int bit_width,
const int range) {
// Calc search range for scale
int max_scale;
Dtype fix_lb = -std::pow(2, bit_width - 1) - 0.5;
Dtype fix_ub = std::pow(2, bit_width - 1) - 0.5;
auto min_max = thrust::minmax_element(thrust::device, x, x + n);
// Copy to Host
Dtype x_min, x_max;
cudaMemcpy(&x_min, min_max.first, sizeof(Dtype), cudaMemcpyDeviceToHost);
cudaMemcpy(&x_max, min_max.second, sizeof(Dtype), cudaMemcpyDeviceToHost);
// Find max_scale
Dtype step = std::max(x_min / fix_lb, x_max / fix_ub);
if (step == 0)
max_scale = 0;
else
max_scale = std::floor(std::log2(1 / step));
  // Search for the fix position in the range [max_scale, max_scale + range)
Dtype final_scale;
final_scale = max_scale;
Dtype fixed_diff_min = FLT_MAX;
Dtype *sigmoid_x, *buffer;
CUDA_CHECK(cudaMalloc((void **)&sigmoid_x, n * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc((void **)&buffer, n * sizeof(Dtype)));
gpu_sigmoid_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, sigmoid_x);
CUDA_POST_KERNEL_CHECK;
LOG(INFO) << "calib start";
for (int scale = max_scale; scale < max_scale + range; scale++) {
caffe_gpu_fix<Dtype>(n, x, buffer, bit_width, scale);
gpu_sigmoid_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, buffer, buffer);
CUDA_POST_KERNEL_CHECK;
caffe_gpu_sub<Dtype>(n, sigmoid_x, buffer, buffer);
caffe_gpu_powx<Dtype>(n, buffer, 2, buffer);
Dtype fixed_diff;
caffe_gpu_asum(n, buffer, &fixed_diff);
if (fixed_diff < fixed_diff_min) {
final_scale = scale;
fixed_diff_min = fixed_diff;
}
}
CUDA_CHECK(cudaFree(sigmoid_x));
CUDA_CHECK(cudaFree(buffer));
return final_scale;
}
template float caffe_gpu_fix_pos_diffs_sigmoid<float>(const int n, const float *x,
const int bit_width,
const int range);
template double caffe_gpu_fix_pos_diffs_sigmoid<double>(const int n, const double *x,
const int bit_width,
const int range);
/*
template <typename Dtype>
static __global__ void overflow_kernel(const int n, Dtype upper_bound, Dtype
lower_bound, const Dtype* x, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index]=(x[index]<=upper_bound && x[index]>=lower_bound)?Dtype(0):Dtype(1);
}
}
template <typename Dtype>
static bool test_overflow(const int n, Dtype upper_bound, Dtype lower_bound,
const Dtype* data, Dtype* buffer) {
overflow_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n,
upper_bound, lower_bound, data, buffer);
CUDA_POST_KERNEL_CHECK;
Dtype asum;
caffe_gpu_asum(n, buffer, &asum);
return asum>Dtype(0.5);
}
template <typename Dtype>
void caffe_gpu_fix_overflow(const int n, const Dtype* x, Dtype* y, const int
bit_level, const int max_scale, const int min_scale, int& final_scale) {
final_scale=std::max(std::min(final_scale, max_scale), min_scale);
int search_length=max_scale-min_scale+1;
if(search_length<2) {
final_scale=min_scale;
}
else {
Dtype* buffer=y;
if(x==y) {
buffer=static_cast<Dtype*>(Caffe::GpuBuffer(n*sizeof(Dtype)));
CHECK_NOTNULL(buffer);
}
vector<Dtype> upper_bound(search_length);
vector<Dtype> lower_bound(search_length);
for(int i=0; i<search_length; i++) {
upper_bound[i]=std::pow(Dtype(2), i+min_scale);
lower_bound[i]=-upper_bound[i]-std::pow(Dtype(2),
i+min_scale-bit_level);
}
vector<bool> overflow(search_length);
vector<bool> tested(search_length, false);
bool found=false;
overflow[final_scale-min_scale]=test_overflow(n,
upper_bound[final_scale-min_scale],
lower_bound[final_scale-min_scale], x, buffer);
tested[final_scale-min_scale]=true;
if(!overflow[final_scale-min_scale]) {
if(final_scale==min_scale) {
found=true;
}
else {
overflow[final_scale-min_scale-1]=test_overflow(n,
upper_bound[final_scale-min_scale-1],
lower_bound[final_scale-min_scale-1],
x, buffer);
tested[final_scale-min_scale-1]=true;
if(overflow[final_scale-min_scale-1]) {
found=true;
}
}
}
if(!found) {
overflow[0]=true;
tested[0]=true;
overflow[search_length-1]=false;
tested[search_length-1]=true;
int left=0;
int right=search_length-1;
for(;;) {
int middle=(left+right)/2;
if(!tested[middle]) {
overflow[middle]=test_overflow(n,
upper_bound[middle], lower_bound[middle], x, buffer);
tested[middle]=true;
}
if(!tested[middle+1]) {
overflow[middle+1]=test_overflow(n,
upper_bound[middle+1], lower_bound[middle+1], x, buffer);
tested[middle+1]=true;
}
if(overflow[middle] && !overflow[middle+1]) {
final_scale=min_scale+middle+1;
break;
}
else if(!overflow[middle]) {
right=middle;
}
else {
left=middle+1;
}
}
}
}
caffe_gpu_fix(n, x, y, bit_level, final_scale);
}
template void caffe_gpu_fix_overflow<float>(const int n, const float* x, float*
y, const int bit_level, const int max_scale, const int min_scale, int&
final_scale);
template void caffe_gpu_fix_overflow<double>(const int n, const double* x,
double* y, const int bit_level, const int max_scale, const int min_scale, int&
final_scale);
*/
template <typename Dtype>
__global__ void gpu_scale_kernel(const int n, const Dtype *x, Dtype *y,
Dtype step ) {
CUDA_KERNEL_LOOP(i, n) { y[i] = x[i] * step; }
}
template <typename Dtype>
void caffe_gpu_scale(const int n, const Dtype *x, Dtype *y, const int p) {
Dtype step;
if (p == SHRT_MAX) {
step = 1;
} else {
step = std::pow(Dtype(2), p);
}
gpu_scale_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, step);
CUDA_POST_KERNEL_CHECK;
}
template void caffe_gpu_scale<float>(const int n, const float *x, float *y, const int p);
template void caffe_gpu_scale<double>(const int n, const double *x, double *y, const int p);
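// Example (sketch): with p = 3 the kernel computes y[i] = x[i] * 2^3 = 8 * x[i];
// passing p == SHRT_MAX is treated as "no scaling" and simply copies x into y.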
template <typename Dtype>
__global__ void gpu_trunc_kernel(const int n, const Dtype *x, Dtype *y,
Dtype scale) {
CUDA_KERNEL_LOOP(i, n) { y[i] = ( (int)(x[i] / scale) ) * scale; }
}
template <typename Dtype>
void caffe_gpu_trunc(const int n, const Dtype *x, Dtype *y, const int p) {
Dtype scale = std::pow(Dtype(2), -p);
gpu_trunc_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, scale);
CUDA_POST_KERNEL_CHECK;
}
template void caffe_gpu_trunc<float>(const int n, const float *x, float *y, const int p);
template void caffe_gpu_trunc<double>(const int n, const double *x, double *y, const int p);
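// Example (sketch): with p = 2 the step is 2^-2 = 0.25, so x = 0.9 maps to
// (int)(0.9 / 0.25) * 0.25 = 3 * 0.25 = 0.75; the (int) cast truncates toward
// zero, so x = -0.9 maps to -0.75 rather than -1.0.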
template <typename Dtype>
void caffe_pooling_scale(const int n, const Dtype *x, Dtype *y, float scale) {
gpu_scale_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, scale);
CUDA_POST_KERNEL_CHECK;
}
template void caffe_pooling_scale<float>(const int n, const float *x, float *y, float scale);
template void caffe_pooling_scale<double>(const int n, const double *x, double *y, float scale);
} // namespace caffe
#include <cuda_runtime.h>
#include <cublasLt.h>
#include <algorithm>
#include <cstdio>
#include <vector>
#include "sample_cublasLt_LtSgemmCustomFind.h"
#include "helpers.h"
/* Structure to store information about different run trials */
typedef struct {
cublasLtMatmulAlgo_t algo;
cublasStatus_t status;
float time;
size_t workspaceSize; // actual memory workspace needed
cublasMath_t mathMode;
cublasLtReductionScheme_t reductionScheme;
int customOption;
float wavesCount;
} customMatmulPerf_t;
/* CAUTION : must match cublasLtMatmulTile_t */
const char * const matmulTileName[] = {
"UNDEF",
"8x8",
"8x16",
"16x8" ,
"8x32" ,
"16x16" ,
"32x8" ,
"8x64" ,
"16x32" ,
"32x16" ,
"64x8" ,
"32x32" ,
"32x64" ,
"64x32" ,
"32x128" ,
"64x64" ,
"128x32" ,
"64x128" ,
"128x64" ,
"64x256" ,
"128x128",
"256x64" ,
"64x512" ,
"128x256",
"256x128",
"512x64" ,
};
// Utility function to print customMatmulPerf_t structure
static void printPerfStructure(const customMatmulPerf_t &perf) {
int algoId, tile, swizzle, customOption, numSplitsK, reductionScheme, stages;
const cublasLtMatmulAlgo_t *matmulAlgo = &perf.algo;
cublasLtMatmulAlgoConfigGetAttribute( matmulAlgo, CUBLASLT_ALGO_CONFIG_ID, &algoId, sizeof(algoId), NULL);
cublasLtMatmulAlgoConfigGetAttribute( matmulAlgo, CUBLASLT_ALGO_CONFIG_TILE_ID, &tile, sizeof(tile), NULL);
cublasLtMatmulAlgoConfigGetAttribute( matmulAlgo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &numSplitsK, sizeof(numSplitsK), NULL);
cublasLtMatmulAlgoConfigGetAttribute( matmulAlgo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &reductionScheme, sizeof(reductionScheme), NULL);
cublasLtMatmulAlgoConfigGetAttribute( matmulAlgo, CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING, &swizzle, sizeof(swizzle), NULL);
cublasLtMatmulAlgoConfigGetAttribute( matmulAlgo, CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION, &customOption, sizeof(customOption), NULL);
cublasLtMatmulAlgoConfigGetAttribute( matmulAlgo, CUBLASLT_ALGO_CONFIG_STAGES_ID, &stages, sizeof(stages), NULL);
printf("algo={ Id=%d, tileIdx=%d (%s) splitK=%d reduc=%d swizzle=%d custom=%d stages=%d} status %d "
"time %f workspace=%d mathMode=%d waves=%f\n",
algoId, tile, matmulTileName[tile],
numSplitsK, reductionScheme,
swizzle, customOption, stages,
perf.status,
perf.time,
(int)perf.workspaceSize,
(int)perf.mathMode,
perf.wavesCount);
}
static inline bool time_compare(const customMatmulPerf_t &perf_a, const customMatmulPerf_t &perf_b) {
return ((perf_a.status == CUBLAS_STATUS_SUCCESS) && (perf_a.time < perf_b.time));
}
static cublasStatus_t customMatmulRun(cublasLtHandle_t ltHandle, // to get the capabilities (required a GPU)
cublasLtMatmulDesc_t operationDesc,
const void *alpha, /* host or device pointer */
const void *A,
cublasLtMatrixLayout_t Adesc,
const void *B,
cublasLtMatrixLayout_t Bdesc,
const void *beta, /* host or device pointer */
const void *C,
cublasLtMatrixLayout_t Cdesc,
void *D,
cublasLtMatrixLayout_t Ddesc,
const cublasLtMatmulAlgo_t &algo,
int kernelRepeats,
void *workSpace,
size_t workSpaceSizeInBytes,
customMatmulPerf_t &perfResults,
cudaStream_t stream,
cudaEvent_t &startEvent,
cudaEvent_t &stopEvent) {
cublasLtMatmulHeuristicResult_t heurResult;
/* Looping over the Algo */
int repeats = kernelRepeats;
cublasStatus_t algoStatus = cublasLtMatmulAlgoCheck( ltHandle,
operationDesc,
Adesc,
Bdesc,
Cdesc,
Ddesc,
&algo,
&heurResult);
if (algoStatus == CUBLAS_STATUS_SUCCESS) {
if (heurResult.workspaceSize <= workSpaceSizeInBytes) {
cudaError_t err, err1, err2, err3;
err = cudaEventRecord(startEvent, stream);
for (int loop = 0; loop < repeats; loop++) {
cublasStatus_t oneRunStatus = cublasLtMatmul( ltHandle,
operationDesc,
alpha,
A, Adesc,
B, Bdesc,
beta,
C, Cdesc,
D, Ddesc,
&algo,
workSpace,
workSpaceSizeInBytes,
stream);
if (oneRunStatus != CUBLAS_STATUS_SUCCESS) {
algoStatus = oneRunStatus;
break;
}
}
err1 = cudaEventRecord(stopEvent, stream);
err2 = cudaEventSynchronize(stopEvent);
float time;
err3 = cudaEventElapsedTime(&time, startEvent, stopEvent);
if ((err != cudaSuccess) || (err1 != cudaSuccess) || (err2 != cudaSuccess) || (err3 != cudaSuccess)) {
algoStatus = CUBLAS_STATUS_INTERNAL_ERROR;
}
// For the moment only add successful findings
if (algoStatus == CUBLAS_STATUS_SUCCESS) {
perfResults.algo = algo;
perfResults.time = time;
perfResults.workspaceSize = heurResult.workspaceSize;
perfResults.wavesCount = heurResult.wavesCount;
}
}
else {
algoStatus = CUBLAS_STATUS_NOT_SUPPORTED; //Not enough workspace
}
}
return algoStatus;
}
/// Sample wrapper running through multiple algo and config attribute combinations for single-precision GEMM using the cublasLt low-level API
void LtSgemmCustomFind(cublasLtHandle_t ltHandle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const float *alpha, /* host pointer */
const float *A,
int lda,
const float *B,
int ldb,
const float *beta, /* host pointer */
float *C,
int ldc,
void *workSpace,
size_t workSpaceSize) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
cublasLtMatmulDesc_t operationDesc = NULL;
cublasLtMatrixLayout_t Adesc = NULL, Bdesc = NULL, Cdesc = NULL;
cublasLtMatmulPreference_t preference = NULL;
cudaEvent_t startEvent = NULL, stopEvent = NULL;
cudaStream_t stream = NULL;
// SplitK values that we are going to try when SplitK is supported for a given algo
const int splitKSequenceA[] = {2, 3, 4, 5, 6, 8, 12, 16, 32};
// Let's try a fixed number of combinations
#define ALGO_COMBINATIONS 100
int AlgoCombinations = ALGO_COMBINATIONS;
int AlgoCount = 0;
int kernelRepeats = 10; // number of times the CUDA kernels will be run back to back
customMatmulPerf_t perfResults[ALGO_COMBINATIONS];
int nbAlgoIds = 0;
#define ALGO_IDS 4
int algoIdA[ALGO_IDS];
cudaDataType_t scaleType = CUDA_R_32F, Atype = CUDA_R_32F, Btype = CUDA_R_32F, Ctype = CUDA_R_32F;
cublasComputeType_t computeType = CUBLAS_COMPUTE_32F;
// create operation descriptor; see cublasLtMatmulDescAttributes_t for details about defaults; here we just need to
// set the transforms for A and B
checkCublasStatus(cublasLtMatmulDescCreate(&operationDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F));
checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa)));
checkCublasStatus(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transb)));
// create matrix descriptors, we are good with the details here so no need to set any extra attributes
checkCublasStatus(cublasLtMatrixLayoutCreate(&Adesc, CUDA_R_32F, transa == CUBLAS_OP_N ? m : k, transa == CUBLAS_OP_N ? k : m, lda));
checkCublasStatus(cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_32F, transb == CUBLAS_OP_N ? k : n, transb == CUBLAS_OP_N ? n : k, ldb));
checkCublasStatus(cublasLtMatrixLayoutCreate(&Cdesc, CUDA_R_32F, m, n, ldc));
// Request the first 4 AlgoIds available for SGEMM (computeType = scaleType = Atype = Btype = Ctype = Dtype = CUDA_R_32F)
checkCublasStatus(cublasLtMatmulAlgoGetIds(ltHandle, computeType, scaleType, Atype, Btype, Ctype, Ctype, ALGO_IDS, algoIdA, &nbAlgoIds));
// Create CUDA event to time the execution time of each algo
checkCudaStatus(cudaEventCreate(&startEvent, cudaEventBlockingSync));
checkCudaStatus(cudaEventCreate(&stopEvent, cudaEventBlockingSync));
// Loop over the Algo IDs
for (int idx = 0; (idx < nbAlgoIds) && (AlgoCount < AlgoCombinations); idx++) {
cublasLtMatmulAlgo_t algo;
size_t sizeWritten = 0;
/* Initialize the algo structure with the given Algo ID */
status = cublasLtMatmulAlgoInit(ltHandle, computeType, scaleType, Atype, Btype, Ctype, Ctype, algoIdA[idx], &algo);
if (status != CUBLAS_STATUS_SUCCESS) {
continue;
}
// Query the tiles enums supported by that algo
checkCublasStatus(cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_TILE_IDS, NULL, 0, &sizeWritten));
int nbTiles = int(sizeWritten/sizeof(int));
int *tileA = new int[ nbTiles == 0 ? 1:nbTiles];
if(nbTiles == 0){
tileA[0] = CUBLASLT_MATMUL_TILE_UNDEFINED;
nbTiles = 1;
}
checkCublasStatus(cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_STAGES_IDS, NULL, 0, &sizeWritten));
int nbStages = int(sizeWritten/sizeof(int));
std::vector<int> stagesA(nbStages == 0 ? 1 : nbStages);
if (nbStages == 0) {
stagesA[0] = CUBLASLT_MATMUL_STAGES_UNDEFINED;
nbStages = 1;
} else {
checkCublasStatus(cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_STAGES_IDS, stagesA.data(), sizeof(int)*nbStages, &sizeWritten));
}
int splitkSupport, redMask, swizzlingMax, customOptionMax;
// Retrieve algo capability attributes so we can set up the loop over the different configuration combinations
cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_TILE_IDS, tileA, sizeof(int)*nbTiles, &sizeWritten);
cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_SPLITK_SUPPORT, &splitkSupport, sizeof(splitkSupport), &sizeWritten);
cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK, &redMask, sizeof(redMask), &sizeWritten);
cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT, &swizzlingMax, sizeof(swizzlingMax), &sizeWritten);
cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX, &customOptionMax, sizeof(customOptionMax), &sizeWritten);
/* Loop over the different tiles */
for (int tileIdx = 0; tileIdx < nbTiles; tileIdx++) {
/* Loop over different stages count */
for (int stagesIdx = 0; stagesIdx < nbStages; stagesIdx++) {
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_STAGES_ID, &stagesA[stagesIdx], sizeof(stagesA[stagesIdx])));
/* loop over the different custom option if any */
for (int customOption = 0; customOption <= customOptionMax; customOption++) {
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION, &customOption, sizeof(customOption)));
/* loop over the CTAs swizzling support */
for (int k = 0; k <= swizzlingMax; k++) {
int splitK_trial = 0;
if (splitkSupport) {
splitK_trial += sizeof(splitKSequenceA) / sizeof(splitKSequenceA[0]);
}
// Loop over the splitK values in the fixed sequence splitKSequenceA, in addition to the case where splitK is not enabled
for (int l = 0; (l < (1 + splitK_trial)) && (AlgoCount < AlgoCombinations); l++) {
/* Setup attribute of the algo to run */
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_TILE_ID, &tileA[tileIdx], sizeof(tileA[tileIdx])));
int splitK_val = 0;
int redScheme = CUBLASLT_REDUCTION_SCHEME_NONE;
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &splitK_val, sizeof(splitK_val)));
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING, &k, sizeof(k)));
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &redScheme, sizeof(int)));
if (l > 0) { // Split-K case
splitK_val = splitKSequenceA[l - 1];
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &splitKSequenceA[l - 1], sizeof(splitKSequenceA[l - 1])));
/* Going over all the reduction scheme */
for (redScheme = 1 ; redScheme < (int)CUBLASLT_REDUCTION_SCHEME_MASK && (AlgoCount < AlgoCombinations); redScheme = redScheme << 1) {
if (redScheme & redMask) {
checkCublasStatus(cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &redScheme, sizeof(redScheme)));
status = customMatmulRun( ltHandle,
operationDesc,
alpha, /* host or device pointer */
A, Adesc,
B, Bdesc,
beta, /* host or device pointer */
C, Cdesc,
C, Cdesc,
algo,
kernelRepeats,
workSpace,
workSpaceSize,
perfResults[AlgoCount],
stream,
startEvent, stopEvent);
perfResults[AlgoCount].status = status;
if (status == CUBLAS_STATUS_SUCCESS) AlgoCount++;
} // end if
} // end for
} else { // Non-splitK case
/* if user preference is ok with workspace */
if (AlgoCount < AlgoCombinations) {
status = customMatmulRun( ltHandle,
operationDesc,
alpha, /* host or device pointer */
A, Adesc,
B, Bdesc,
beta, /* host or device pointer */
C, Cdesc,
C, Cdesc,
algo,
kernelRepeats,
workSpace,
workSpaceSize,
perfResults[AlgoCount],
stream,
startEvent, stopEvent);
perfResults[AlgoCount].status = status;
if (status == CUBLAS_STATUS_SUCCESS) AlgoCount++;
}
}
} // end l
} // end k
} //end customOption
} // end stagesIdx
} // end tileIdx
delete [] tileA;
} // end idx
// Sort the results per run duration
std::sort(perfResults, perfResults + AlgoCount, time_compare);
// Print timing and perf details
for (int i = 0; i < AlgoCount; i++) {
printf( "result %03d : ", i);
printPerfStructure(perfResults[i]);
}
// descriptors are no longer needed as all GPU work was already enqueued
if (preference) checkCublasStatus(cublasLtMatmulPreferenceDestroy(preference));
if (Cdesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Cdesc));
if (Bdesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Bdesc));
if (Adesc) checkCublasStatus(cublasLtMatrixLayoutDestroy(Adesc));
if (operationDesc) checkCublasStatus(cublasLtMatmulDescDestroy(operationDesc));
if (startEvent) checkCudaStatus(cudaEventDestroy(startEvent));
if (stopEvent) checkCudaStatus(cudaEventDestroy(stopEvent));
}
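/* Usage sketch (assumptions: dA, dB, dC are device buffers holding column-major
   m x k, k x n and m x n matrices; the 4 MiB workspace is only an example):
     cublasLtHandle_t ltHandle;
     checkCublasStatus(cublasLtCreate(&ltHandle));
     void *workspace;
     size_t workspaceSize = 4 * 1024 * 1024;
     checkCudaStatus(cudaMalloc(&workspace, workspaceSize));
     float alpha = 1.0f, beta = 0.0f;
     LtSgemmCustomFind(ltHandle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                       &alpha, dA, m, dB, k, &beta, dC, m,
                       workspace, workspaceSize);
     checkCudaStatus(cudaFree(workspace));
     checkCublasStatus(cublasLtDestroy(ltHandle));
*/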
/*
* Shabal implementation.
*
* ==========================(LICENSE BEGIN)============================
*
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ===========================(LICENSE END)=============================
*
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
*/
/*
* Part of this code was automatically generated (the part between
* the "BEGIN" and "END" markers).
*/
#define sM 16
#define O1 13
#define O2 9
#define O3 6
/*
* We copy the state into local variables, so that the compiler knows
* that it can optimize them at will.
*/
/* BEGIN -- automatically generated code. */
#define INPUT_BLOCK_ADD \
B0 = B0 + M0; \
B1 = B1 + M1; \
B2 = B2 + M2; \
B3 = B3 + M3; \
B4 = B4 + M4; \
B5 = B5 + M5; \
B6 = B6 + M6; \
B7 = B7 + M7; \
B8 = B8 + M8; \
B9 = B9 + M9; \
BA = BA + MA; \
BB = BB + MB; \
BC = BC + MC; \
BD = BD + MD; \
BE = BE + ME; \
BF = BF + MF;
#define INPUT_BLOCK_SUB \
C0 = C0 - M0; \
C1 = C1 - M1; \
C2 = C2 - M2; \
C3 = C3 - M3; \
C4 = C4 - M4; \
C5 = C5 - M5; \
C6 = C6 - M6; \
C7 = C7 - M7; \
C8 = C8 - M8; \
C9 = C9 - M9; \
CA = CA - MA; \
CB = CB - MB; \
CC = CC - MC; \
CD = CD - MD; \
CE = CE - ME; \
CF = CF - MF;
#define XOR_W \
A00 ^= Wlow; \
A01 ^= Whigh;
#define SWAP(v1, v2) \
	v1 ^= v2; \
	v2 ^= v1; \
	v1 ^= v2;
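// SWAP exchanges v1 and v2 in place via the classic XOR-swap trick: starting
// from v1 = a and v2 = b, the three XORs leave v1 = b and v2 = a without a
// temporary register.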
#define SWAP_BC \
SWAP(B0, C0); \
SWAP(B1, C1); \
SWAP(B2, C2); \
SWAP(B3, C3); \
SWAP(B4, C4); \
SWAP(B5, C5); \
SWAP(B6, C6); \
SWAP(B7, C7); \
SWAP(B8, C8); \
SWAP(B9, C9); \
SWAP(BA, CA); \
SWAP(BB, CB); \
SWAP(BC, CC); \
SWAP(BD, CD); \
SWAP(BE, CE); \
SWAP(BF, CF);
#define PERM_ELT(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \
xa0 = ((xa0 \
^ (ROTL32(xa1, 15) * 5U) \
^ xc) * 3U) \
^ xb1 ^ (xb2 & ~xb3) ^ xm; \
xb0 = (~(ROTL32(xb0, 1) ^ xa0));
#define PERM_STEP_0 \
PERM_ELT(A00, A0B, B0, BD, B9, B6, C8, M0); \
PERM_ELT(A01, A00, B1, BE, BA, B7, C7, M1); \
PERM_ELT(A02, A01, B2, BF, BB, B8, C6, M2); \
PERM_ELT(A03, A02, B3, B0, BC, B9, C5, M3); \
PERM_ELT(A04, A03, B4, B1, BD, BA, C4, M4); \
PERM_ELT(A05, A04, B5, B2, BE, BB, C3, M5); \
PERM_ELT(A06, A05, B6, B3, BF, BC, C2, M6); \
PERM_ELT(A07, A06, B7, B4, B0, BD, C1, M7); \
PERM_ELT(A08, A07, B8, B5, B1, BE, C0, M8); \
PERM_ELT(A09, A08, B9, B6, B2, BF, CF, M9); \
PERM_ELT(A0A, A09, BA, B7, B3, B0, CE, MA); \
PERM_ELT(A0B, A0A, BB, B8, B4, B1, CD, MB); \
PERM_ELT(A00, A0B, BC, B9, B5, B2, CC, MC); \
PERM_ELT(A01, A00, BD, BA, B6, B3, CB, MD); \
PERM_ELT(A02, A01, BE, BB, B7, B4, CA, ME); \
PERM_ELT(A03, A02, BF, BC, B8, B5, C9, MF);
#define PERM_STEP_1 \
PERM_ELT(A04, A03, B0, BD, B9, B6, C8, M0); \
PERM_ELT(A05, A04, B1, BE, BA, B7, C7, M1); \
PERM_ELT(A06, A05, B2, BF, BB, B8, C6, M2); \
PERM_ELT(A07, A06, B3, B0, BC, B9, C5, M3); \
PERM_ELT(A08, A07, B4, B1, BD, BA, C4, M4); \
PERM_ELT(A09, A08, B5, B2, BE, BB, C3, M5); \
PERM_ELT(A0A, A09, B6, B3, BF, BC, C2, M6); \
PERM_ELT(A0B, A0A, B7, B4, B0, BD, C1, M7); \
PERM_ELT(A00, A0B, B8, B5, B1, BE, C0, M8); \
PERM_ELT(A01, A00, B9, B6, B2, BF, CF, M9); \
PERM_ELT(A02, A01, BA, B7, B3, B0, CE, MA); \
PERM_ELT(A03, A02, BB, B8, B4, B1, CD, MB); \
PERM_ELT(A04, A03, BC, B9, B5, B2, CC, MC); \
PERM_ELT(A05, A04, BD, BA, B6, B3, CB, MD); \
PERM_ELT(A06, A05, BE, BB, B7, B4, CA, ME); \
PERM_ELT(A07, A06, BF, BC, B8, B5, C9, MF);
#define PERM_STEP_2 \
PERM_ELT(A08, A07, B0, BD, B9, B6, C8, M0); \
PERM_ELT(A09, A08, B1, BE, BA, B7, C7, M1); \
PERM_ELT(A0A, A09, B2, BF, BB, B8, C6, M2); \
PERM_ELT(A0B, A0A, B3, B0, BC, B9, C5, M3); \
PERM_ELT(A00, A0B, B4, B1, BD, BA, C4, M4); \
PERM_ELT(A01, A00, B5, B2, BE, BB, C3, M5); \
PERM_ELT(A02, A01, B6, B3, BF, BC, C2, M6); \
PERM_ELT(A03, A02, B7, B4, B0, BD, C1, M7); \
PERM_ELT(A04, A03, B8, B5, B1, BE, C0, M8); \
PERM_ELT(A05, A04, B9, B6, B2, BF, CF, M9); \
PERM_ELT(A06, A05, BA, B7, B3, B0, CE, MA); \
PERM_ELT(A07, A06, BB, B8, B4, B1, CD, MB); \
PERM_ELT(A08, A07, BC, B9, B5, B2, CC, MC); \
PERM_ELT(A09, A08, BD, BA, B6, B3, CB, MD); \
PERM_ELT(A0A, A09, BE, BB, B7, B4, CA, ME); \
PERM_ELT(A0B, A0A, BF, BC, B8, B5, C9, MF);
#define APPLY_P \
B0 = ROTL32(B0, 17); \
B1 = ROTL32(B1, 17); \
B2 = ROTL32(B2, 17); \
B3 = ROTL32(B3, 17); \
B4 = ROTL32(B4, 17); \
B5 = ROTL32(B5, 17); \
B6 = ROTL32(B6, 17); \
B7 = ROTL32(B7, 17); \
B8 = ROTL32(B8, 17); \
B9 = ROTL32(B9, 17); \
BA = ROTL32(BA, 17); \
BB = ROTL32(BB, 17); \
BC = ROTL32(BC, 17); \
BD = ROTL32(BD, 17); \
BE = ROTL32(BE, 17); \
BF = ROTL32(BF, 17); \
PERM_STEP_0; \
PERM_STEP_1; \
PERM_STEP_2; \
A0B = (A0B + C6); \
A0A = (A0A + C5); \
A09 = (A09 + C4); \
A08 = (A08 + C3); \
A07 = (A07 + C2); \
A06 = (A06 + C1); \
A05 = (A05 + C0); \
A04 = (A04 + CF); \
A03 = (A03 + CE); \
A02 = (A02 + CD); \
A01 = (A01 + CC); \
A00 = (A00 + CB); \
A0B = (A0B + CA); \
A0A = (A0A + C9); \
A09 = (A09 + C8); \
A08 = (A08 + C7); \
A07 = (A07 + C6); \
A06 = (A06 + C5); \
A05 = (A05 + C4); \
A04 = (A04 + C3); \
A03 = (A03 + C2); \
A02 = (A02 + C1); \
A01 = (A01 + C0); \
A00 = (A00 + CF); \
A0B = (A0B + CE); \
A0A = (A0A + CD); \
A09 = (A09 + CC); \
A08 = (A08 + CB); \
A07 = (A07 + CA); \
A06 = (A06 + C9); \
A05 = (A05 + C8); \
A04 = (A04 + C7); \
A03 = (A03 + C6); \
A02 = (A02 + C5); \
A01 = (A01 + C4); \
A00 = (A00 + C3);
#define APPLY_P_FINAL \
B0 = ROTL32(B0, 17); \
B1 = ROTL32(B1, 17); \
B2 = ROTL32(B2, 17); \
B3 = ROTL32(B3, 17); \
B4 = ROTL32(B4, 17); \
B5 = ROTL32(B5, 17); \
B6 = ROTL32(B6, 17); \
B7 = ROTL32(B7, 17); \
B8 = ROTL32(B8, 17); \
B9 = ROTL32(B9, 17); \
BA = ROTL32(BA, 17); \
BB = ROTL32(BB, 17); \
BC = ROTL32(BC, 17); \
BD = ROTL32(BD, 17); \
BE = ROTL32(BE, 17); \
BF = ROTL32(BF, 17); \
PERM_STEP_0; \
PERM_STEP_1; \
PERM_STEP_2;
#define INCR_W if ((Wlow = (Wlow + 1)) == 0) \
Whigh = (Whigh + 1);
#if 0 /* other hash sizes init */
static const uint32_t A_init_192[] = {
	0xFD749ED4, 0xB798E530, 0x33904B6F, 0x46BDA85E,
	0x076934B4, 0x454B4058, 0x77F74527, 0xFB4CF465,
	0x62931DA9, 0xE778C8DB, 0x22B3998E, 0xAC15CFB9
};
static const uint32_t B_init_192[] = {
	0x58BCBAC4, 0xEC47A08E, 0xAEE933B2, 0xDFCBC824,
	0xA7944804, 0xBF65BDB0, 0x5A9D4502, 0x59979AF7,
	0xC5CEA54E, 0x4B6B8150, 0x16E71909, 0x7D632319,
	0x930573A0, 0xF34C63D1, 0xCAF914B4, 0xFDD6612C
};
static const uint32_t C_init_192[] = {
	0x61550878, 0x89EF2B75, 0xA1660C46, 0x7EF3855B,
	0x7297B58C, 0x1BC67793, 0x7FB1C723, 0xB66FC640,
	0x1A48B71C, 0xF0976D17, 0x088CE80A, 0xA454EDF3,
	0x1C096BF4, 0xAC76224B, 0x5215781C, 0xCD5D2669
};
static const uint32_t A_init_224[] = {
	0xA5201467, 0xA9B8D94A, 0xD4CED997, 0x68379D7B,
	0xA7FC73BA, 0xF1A2546B, 0x606782BF, 0xE0BCFD0F,
	0x2F25374E, 0x069A149F, 0x5E2DFF25, 0xFAECF061
};
static const uint32_t B_init_224[] = {
	0xEC9905D8, 0xF21850CF, 0xC0A746C8, 0x21DAD498,
	0x35156EEB, 0x088C97F2, 0x26303E40, 0x8A2D4FB5,
	0xFEEE44B6, 0x8A1E9573, 0x7B81111A, 0xCBC139F0,
	0xA3513861, 0x1D2C362E, 0x918C580E, 0xB58E1B9C
};
static const uint32_t C_init_224[] = {
	0xE4B573A1, 0x4C1A0880, 0x1E907C51, 0x04807EFD,
	0x3AD8CDE5, 0x16B21302, 0x02512C53, 0x2204CB18,
	0x99405F2D, 0xE5B648A1, 0x70AB1D43, 0xA10C25C2,
	0x16F1AC05, 0x38BBEB56, 0x9B01DC60, 0xB1096D83
};
static const uint32_t A_init_256[] = {
	0x52F84552, 0xE54B7999, 0x2D8EE3EC, 0xB9645191,
	0xE0078B86, 0xBB7C44C9, 0xD2B5C1CA, 0xB0D2EB8C,
	0x14CE5A45, 0x22AF50DC, 0xEFFDBC6B, 0xEB21B74A
};
static const uint32_t B_init_256[] = {
	0xB555C6EE, 0x3E710596, 0xA72A652F, 0x9301515F,
	0xDA28C1FA, 0x696FD868, 0x9CB6BF72, 0x0AFE4002,
	0xA6E03615, 0x5138C1D4, 0xBE216306, 0xB38B8890,
	0x3EA8B96B, 0x3299ACE4, 0x30924DD4, 0x55CB34A5
};
static const uint32_t C_init_256[] = {
	0xB405F031, 0xC4233EBA, 0xB3733979, 0xC0DD9D55,
	0xC51C28AE, 0xA327B8E1, 0x56C56167, 0xED614433,
	0x88B59D60, 0x60E2CEBA, 0x758B4B8B, 0x83E82A7F,
	0xBC968828, 0xE6E00BF7, 0xBA839E55, 0x9B491C60
};
static const uint32_t A_init_384[] = {
	0xC8FCA331, 0xE55C504E, 0x003EBF26, 0xBB6B8D83,
	0x7B0448C1, 0x41B82789, 0x0A7C9601, 0x8D659CFF,
	0xB6E2673E, 0xCA54C77B, 0x1460FD7E, 0x3FCB8F2D
};
static const uint32_t B_init_384[] = {
	0x527291FC, 0x2A16455F, 0x78E627E5, 0x944F169F,
	0x1CA6F016, 0xA854EA25, 0x8DB98ABE, 0xF2C62641,
	0x30117DCB, 0xCF5C4309, 0x93711A25, 0xF9F671B8,
	0xB01D2116, 0x333F4B89, 0xB285D165, 0x86829B36
};
static const uint32_t C_init_384[] = {
	0xF764B11A, 0x76172146, 0xCEF6934D, 0xC6D28399,
	0xFE095F61, 0x5E6018B4, 0x5048ECF5, 0x51353261,
	0x6E6E36DC, 0x63130DAD, 0xA9C69BD6, 0x1E90EA0C,
	0x7C35073B, 0x28D95E6D, 0xAA340E0D, 0xCB3DEE70
};
#endif
/***************************************************/
// GPU Hash Function
__global__ __launch_bounds__(256, 4)
void x14_shabal512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *g_hash)
{
uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
const uint32_t d_A512[] = {
0x20728DFD, 0x46C0BD53, 0xE782B699,0x55304632,
0x71B4EF90, 0x0EA9E82C, 0xDBB930F1, 0xFAD06B8B,
0xBE0CAE40, 0x8BD14410, 0x76D2ADAC, 0x28ACAB7F
};
const uint32_t d_B512[] = {
0xC1099CB7, 0x07B385F3, 0xE7442C26, 0xCC8AD640,
0xEB6F56C7, 0x1EA81AA9, 0x73B9D314, 0x1DE85D08,
0x48910A5A, 0x893B22DB, 0xC5A0DF44, 0xBBC4324E,
0x72D2F240, 0x75941D99, 0x6D8BDE82, 0xA1A7502B
};
const uint32_t d_C512[] = {
0xD9BF68D1, 0x58BAD750, 0x56028CB2, 0x8134F359,
0xB5D469D8, 0x941A8CC2, 0x418B2A6E, 0x04052780,
0x7F07D787, 0x5194358F, 0x3C60D665, 0xBE97D79A,
0x950C3434, 0xAED9A06D, 0x2537DC8D, 0x7CDB5969
};
if (thread < threads)
{
uint32_t nounce = (startNounce + thread);
uint32_t hashPosition = nounce - startNounce;
uint32_t *Hash = &g_hash[hashPosition*16]; // [hashPosition * 8]
uint32_t A00 = d_A512[0], A01 = d_A512[1], A02 = d_A512[2], A03 = d_A512[3],
A04 = d_A512[4], A05 = d_A512[5], A06 = d_A512[6], A07 = d_A512[7],
A08 = d_A512[8], A09 = d_A512[9], A0A = d_A512[10], A0B = d_A512[11];
uint32_t B0 = d_B512[0], B1 = d_B512[1], B2 = d_B512[2], B3 = d_B512[3],
B4 = d_B512[4], B5 = d_B512[5], B6 = d_B512[6], B7 = d_B512[7],
B8 = d_B512[8], B9 = d_B512[9], BA = d_B512[10], BB = d_B512[11],
BC = d_B512[12], BD = d_B512[13], BE = d_B512[14], BF = d_B512[15];
uint32_t C0 = d_C512[0], C1 = d_C512[1], C2 = d_C512[2], C3 = d_C512[3],
C4 = d_C512[4], C5 = d_C512[5], C6 = d_C512[6], C7 = d_C512[7],
C8 = d_C512[8], C9 = d_C512[9], CA = d_C512[10], CB = d_C512[11],
CC = d_C512[12], CD = d_C512[13], CE = d_C512[14], CF = d_C512[15];
uint32_t M0, M1, M2, M3, M4, M5, M6, M7, M8, M9, MA, MB, MC, MD, ME, MF;
uint32_t msg[16];
uint28 *phash = (uint28*)Hash;
uint28 *outpt = (uint28*)msg;
outpt[0] = phash[0];
outpt[1] = phash[1];
M0 = msg[0];
M1 = msg[1];
M2 = msg[2];
M3 = msg[3];
M4 = msg[4];
M5 = msg[5];
M6 = msg[6];
M7 = msg[7];
M8 = msg[8];
M9 = msg[9];
MA = msg[10];
MB = msg[11];
MC = msg[12];
MD = msg[13];
ME = msg[14];
MF = msg[15];
INPUT_BLOCK_ADD;
A00 ^= 1;
APPLY_P;
INPUT_BLOCK_SUB;
SWAP_BC;
M0 = 0x80;
M1 = M2 = M3 = M4 = M5 = M6 = M7 = M8 = M9 = MA = MB = MC = MD = ME = MF = 0;
INPUT_BLOCK_ADD;
A00 ^= 2;
APPLY_P;
SWAP_BC;
A00 ^= 2;
APPLY_P;
SWAP_BC;
A00 ^= 2;
APPLY_P;
SWAP_BC;
A00 ^= 2;
APPLY_P_FINAL;
Hash[0] = B0;
Hash[1] = B1;
Hash[2] = B2;
Hash[3] = B3;
Hash[4] = B4;
Hash[5] = B5;
Hash[6] = B6;
Hash[7] = B7;
Hash[8] = B8;
Hash[9] = B9;
Hash[10] = BA;
Hash[11] = BB;
Hash[12] = BC;
Hash[13] = BD;
Hash[14] = BE;
Hash[15] = BF;
}
}
// #include <stdio.h>
__host__ void x14_shabal512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash)
{
const uint32_t threadsperblock = 64;
// compute how many thread blocks we need
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
x14_shabal512_gpu_hash_64<<<grid, block>>>(threads, startNounce, d_hash);
}
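// Usage sketch (assumption: d_hash already holds one 16-word (64-byte) input per
// thread on the device): calling
//   x14_shabal512_cpu_hash_64(thr_id, 1U << 20, startNounce, d_hash);
// launches (1 << 20) / 64 = 16384 blocks of 64 threads, each thread hashing its
// own 64-byte message in place.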
#include <algorithm>
#include <complex>
#include <iostream>
#include <thrust/complex.h>
// this is valid for compute capability 3.5 -> 8.0 (and beyond?)
//const unsigned int MAX_BLOCKS_X = 4294967295; // 2^32-1
const unsigned int MAX_BLOCKS_Y = 65535;
const unsigned int MAX_BLOCKS_Z = 65535;
namespace strumpack {
namespace gpu {
/**
* Get the real T type corresponding to a scalar, for instance T,
* std::complex<T> or thrust::complex<T>, to be used for instance
* to compute norms or absolute value.
*/
template<class T> struct real_type { typedef T value_type; };
template<class T> struct real_type<thrust::complex<T>> { typedef T value_type; };
template<class T> struct real_type<std::complex<T>> { typedef T value_type; };
/**
* The types float2 and double2 are binary the same as
* std::complex or thrust::complex, but they can be used as
* __shared__ variables, whereas thrust::complex cannot because it
* doesn't have a no-argument default constructor.
*/
template<class T> struct primitive_type { typedef T value_type; };
template<> struct primitive_type<thrust::complex<float>> { typedef float2 value_type; };
template<> struct primitive_type<thrust::complex<double>> { typedef double2 value_type; };
template<> struct primitive_type<std::complex<float>> { typedef float2 value_type; };
template<> struct primitive_type<std::complex<double>> { typedef double2 value_type; };
/**
* Get the corresponding thrust::complex for std::complex
*/
template<class T> struct cuda_type { typedef T value_type; };
template<class T> struct cuda_type<std::complex<T>> { typedef thrust::complex<T> value_type; };
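    // Sanity-check sketch of the three mappings above (not part of the original
    // code; would require <type_traits>):
    //   static_assert(std::is_same<real_type<thrust::complex<double>>::value_type,
    //                              double>::value, "");
    //   static_assert(std::is_same<primitive_type<std::complex<float>>::value_type,
    //                              float2>::value, "");
    //   static_assert(std::is_same<cuda_type<std::complex<double>>::value_type,
    //                              thrust::complex<double>>::value, "");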
/**
* Put elements of the sparse matrix in the F11 part of the front.
* The sparse elements are taken from F.e11, which is a list of
* triplets {r,c,v}. The front is assumed to be initialized to
* zero.
*
* Use this with a 1-dimensional grid, where the number of grid
* blocks in the x direction is the number of fronts, with f0
* being the first front pointed to by dat. The threadblock should
* also be 1d.
*/
template<typename T> __global__ void
assemble_11_kernel(unsigned int f0, AssembleData<T>* dat) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
auto& F = dat[blockIdx.y + f0];
if (idx >= F.n11) return;
auto& t = F.e11[idx];
F.F11[t.r + t.c*F.d1] = t.v;
}
/**
 * Put elements of the sparse matrix in the F12 and F21 parts of
* the front. These two are combined because F.n12 and F.n21 are
* (probably always?) equal, and to save on overhead of launching
* kernels/blocks.
*
* Use this with a 1-dimensional grid, where the number of grid
* blocks in the x direction is the number of fronts, with f0
* being the first front pointed to by dat. The threadblock should
* also be 1d.
*/
template<typename T> __global__ void
assemble_12_21_kernel(unsigned int f0, AssembleData<T>* dat) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
auto& F = dat[blockIdx.y + f0];
if (idx < F.n12) {
auto& t = F.e12[idx];
F.F12[t.r + t.c*F.d1] = t.v;
}
if (idx < F.n21) {
auto& t = F.e21[idx];
F.F21[t.r + t.c*F.d2] = t.v;
}
}
/**
* Single extend-add operation from one contribution block into
* the parent front. d1 is the size of F11, d2 is the size of F22.
*/
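    // Worked example (sketch): with d1 = 4, d2 = 3 and I = {1, 3, 5}, the CB
    // entry at (x = 2, y = 0) has Ix = I[2] = 5 and Iy = I[0] = 1, so it is
    // added to F12[Iy + (Ix - d1)*d1] = F12[5], i.e. row 1, column 1 of F12.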
template<typename T> __device__ void
ea_kernel(int x, int y, int d1, int d2, int dCB,
T* F11, T* F12, T* F21, T* F22,
T* CB, std::size_t* I) {
if (x >= dCB || y >= dCB) return;
auto Ix = I[x], Iy = I[y];
if (Ix < d1) {
if (Iy < d1) F11[Iy+Ix*d1] += CB[y+x*dCB];
else F21[Iy-d1+Ix*d2] += CB[y+x*dCB];
} else {
if (Iy < d1) F12[Iy+(Ix-d1)*d1] += CB[y+x*dCB];
else F22[Iy-d1+(Ix-d1)*d2] += CB[y+x*dCB];
}
}
template<typename T> __global__ void
extend_add_kernel_left(unsigned int by0, AssembleData<T>* dat) {
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = (blockIdx.y + by0) * blockDim.y + threadIdx.y;
auto& F = dat[blockIdx.z];
if (F.CB1)
ea_kernel(x, y, F.d1, F.d2, F.dCB1,
F.F11, F.F12, F.F21, F.F22, F.CB1, F.I1);
}
template<typename T> __global__ void
extend_add_kernel_right(unsigned int by0, AssembleData<T>* dat) {
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = (blockIdx.y + by0) * blockDim.y + threadIdx.y;
auto& F = dat[blockIdx.z];
if (F.CB2)
ea_kernel(x, y, F.d1, F.d2, F.dCB2,
F.F11, F.F12, F.F21, F.F22, F.CB2, F.I2);
}
template<typename T> void
assemble(unsigned int nf, AssembleData<T>* dat,
AssembleData<T>* ddat) {
{ // front assembly from sparse matrix
unsigned int nt1 = 128, nt2 = 32, nb1 = 0, nb2 = 0;
for (int f=0; f<nf; f++) {
unsigned int b = dat[f].n11 / nt1 + (dat[f].n11 % nt1 != 0);
if (b > nb1) nb1 = b;
b = dat[f].n12 / nt2 + (dat[f].n12 % nt2 != 0);
if (b > nb2) nb2 = b;
b = dat[f].n21 / nt2 + (dat[f].n21 % nt2 != 0);
if (b > nb2) nb2 = b;
}
for (unsigned int f=0; f<nf; f+=MAX_BLOCKS_Y) {
dim3 grid(nb1, std::min(nf-f, MAX_BLOCKS_Y));
assemble_11_kernel<<<grid,nt1>>>(f, ddat);
}
if (nb2)
for (unsigned int f=0; f<nf; f+=MAX_BLOCKS_Y) {
dim3 grid(nb2, std::min(nf-f, MAX_BLOCKS_Y));
assemble_12_21_kernel<<<grid,nt2>>>(f, ddat);
}
}
cudaDeviceSynchronize();
{ // extend-add
unsigned int nt = 16, nb = 0;
for (int f=0; f<nf; f++) {
int b = dat[f].dCB1 / nt + (dat[f].dCB1 % nt != 0);
if (b > nb) nb = b;
b = dat[f].dCB2 / nt + (dat[f].dCB2 % nt != 0);
if (b > nb) nb = b;
}
dim3 block(nt, nt);
using T_ = typename cuda_type<T>::value_type;
auto dat_ = reinterpret_cast<AssembleData<T_>*>(ddat);
for (unsigned int b1=0; b1<nb; b1+=MAX_BLOCKS_Y) {
int nb1 = std::min(nb-b1, MAX_BLOCKS_Y);
for (unsigned int f=0; f<nf; f+=MAX_BLOCKS_Z) {
dim3 grid(nb, nb1, std::min(nf-f, MAX_BLOCKS_Z));
extend_add_kernel_left<<<grid, block>>>(b1, dat_+f);
}
}
cudaDeviceSynchronize();
for (unsigned int b1=0; b1<nb; b1+=MAX_BLOCKS_Y) {
int nb1 = std::min(nb-b1, MAX_BLOCKS_Y);
for (unsigned int f=0; f<nf; f+=MAX_BLOCKS_Z) {
dim3 grid(nb, nb1, std::min(nf-f, MAX_BLOCKS_Z));
extend_add_kernel_right<<<grid, block>>>(b1, dat_+f);
}
}
}
}
// /**
// * This only works if value >= 0.
// * It's assuming two's complement for the int.
// * __float_as_int is like reinterpret_cast<int&>(value)
// */
// __device__ __forceinline__ void atomicAbsMax(float* data, float value) {
// atomicMax((int *)data, __float_as_int(value));
// }
// __device__ __forceinline__ void atomicAbsMax(double* addr, double value) {
// // why does this not compile?
// atomicMax((long long int *)addr, __double_as_longlong(value));
// }
/**
* LU with row pivoting, with a single NTxNT thread block. The
* matrix size n must be less than NT.
*
* This is a naive implementation. The goal here is to reduce
* kernel launch overhead by batching many small LU
* factorizations.
*
* Use thrust::complex instead of std::complex.
*/
template<typename T, int NT> __device__ int
LU_block_kernel(int n, T* F, int* piv) {
using cuda_primitive_t = typename primitive_type<T>::value_type;
using real_t = typename real_type<T>::value_type;
__shared__ int p;
__shared__ cuda_primitive_t M_[NT*NT];
T* M = reinterpret_cast<T*>(M_);
__shared__ real_t Mmax, cabs[NT];
int info = 0;
int j = threadIdx.x, i = threadIdx.y;
// copy F from global device storage into shared memory
if (i < n && j < n)
M[i+j*NT] = F[i+j*n];
__syncthreads();
for (int k=0; k<n; k++) {
// only 1 thread looks for the pivot element
// this should be optimized?
// #if 0
// real_t pa = 0.;
// Mmax = 0.;
// piv[k] = k + 1;
// __syncthreads();
// if (j == k && i >= k) {
// pa = abs(M[i+k*NT]);
// // see above, not working for double?
// atomicAbsMax(&Mmax, pa);
// }
// __syncthreads();
// if (j == k && i > k)
// if (Mmax == pa)
// atomicMin(piv+k, i+1);
#if 0
if (j == k && i == k) {
p = k;
Mmax = abs(M[k+k*NT]);
for (int l=k+1; l<n; l++) {
auto tmp = abs(M[l+k*NT]);
if (tmp > Mmax) {
Mmax = tmp;
p = l;
}
}
piv[k] = p + 1;
}
#else
if (j == k && i >= k)
cabs[i] = abs(M[i+j*NT]);
__syncthreads();
if (j == k && i == k) {
p = k;
Mmax = cabs[k]; //abs(M[k+k*NT]);
for (int l=k+1; l<n; l++) {
auto tmp = cabs[l]; //abs(M[l+k*NT]);
if (tmp > Mmax) {
Mmax = tmp;
p = l;
}
}
piv[k] = p + 1;
}
#endif
__syncthreads();
if (Mmax == T(0.)) {
if (info == 0)
info = k;
} else {
// swap row k with the pivot row
if (j < n && i == k && p != k) {
auto tmp = M[k+j*NT];
M[k+j*NT] = M[p+j*NT];
M[p+j*NT] = tmp;
}
__syncthreads();
// divide by the pivot element
if (j == k && i > k && i < n)
M[i+k*NT] /= M[k+k*NT];
__syncthreads();
// Schur update
if (j > k && i > k && j < n && i < n)
M[i+j*NT] -= M[i+k*NT] * M[k+j*NT];
__syncthreads();
}
}
// write back from shared to global device memory
if (i < n && j < n)
F[i+j*n] = M[i+j*NT];
return info;
}
__device__ float real_part(float& a) { return a; }
__device__ double real_part(double& a) { return a; }
__device__ float real_part(thrust::complex<float>& a) { return a.real(); }
__device__ double real_part(thrust::complex<double>& a) { return a.real(); }
template<typename T, int NT, typename real_t> __global__ void
LU_block_kernel_batched(FrontData<T>* dat, bool replace, real_t thresh) {
FrontData<T>& A = dat[blockIdx.x];
int info = LU_block_kernel<T,NT>(A.n1, A.F11, A.piv);
if (info || replace) {
int i = threadIdx.x, j = threadIdx.y;
if (i == j && i < A.n1) {
std::size_t k = i + i*A.n1;
if (abs(A.F11[k]) < thresh)
A.F11[k] = (real_part(A.F11[k]) < 0) ? -thresh : thresh;
}
}
}
template<typename T, typename real_t> __global__ void
replace_pivots_kernel(int n, T* A, real_t thresh) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
std::size_t k = i + i*n;
if (abs(A[k]) < thresh)
A[k] = (real_part(A[k]) < 0) ? -thresh : thresh;
}
}
template<typename T, typename real_t>
void replace_pivots(int n, T* A, real_t thresh, gpu::Stream& s) {
if (!n) return;
using T_ = typename cuda_type<T>::value_type;
int NT = 128;
replace_pivots_kernel<T_,real_t><<<(n+NT)/NT, NT, 0, s>>>
(n, reinterpret_cast<T_*>(A), thresh);
}
/**
* LU solve with matrix F factor in LU, with pivot vector piv. F
* is n x n, and n <= NT. X is the right hand side, and is n x
* m. Both F and X have leading dimension n.
*
* NTxNT is the dimension of the thread block.
*
* This doesn't work for T = std::complex<?>, use
* T=thrust::complex<?> instead.
*/
template<typename T, int NT> __device__ void
solve_block_kernel(int n, int m, T* F, T* X, int* piv) {
using primitive_t = typename primitive_type<T>::value_type;
__shared__ int P[NT];
__shared__ primitive_t A_[NT*NT], B_[NT*NT];
T *B = reinterpret_cast<T*>(B_), *A = reinterpret_cast<T*>(A_);
int j = threadIdx.x, i = threadIdx.y;
if (j == 0)
P[i] = i;
__syncthreads();
if (i == 0 && j == 0)
for (int k=0; k<n; k++) {
auto p = piv[k]-1;
auto tmp = P[k];
P[k] = P[p];
P[p] = tmp;
}
// put matrix F in shared memory
if (i < n && j < n)
A[j+i*NT] = F[i+j*n];
__syncthreads();
// loop over blocks of NT columns of X
for (int b=0; b<m; b+=NT) {
int c = b + j;
// put X in shared memory, while applying the permutation
if (i < n && c < m)
B[j+i*NT] = X[P[i]+c*n];
__syncthreads();
// solve with L (unit diagonal)
for (int k=0; k<n; k++) {
if (i > k && i < n && c < m)
B[j+i*NT] -= A[k+i*NT] * B[j+k*NT];
__syncthreads();
}
// solve with U
for (int k=n-1; k>=0; k--) {
if (i == k && c < m)
B[j+i*NT] /= A[i+i*NT];
__syncthreads();
if (i < k && c < m)
B[j+i*NT] -= A[k+i*NT] * B[j+k*NT];
__syncthreads();
}
// write from shared back to global device memory
if (i < n && c < m)
X[i+c*n] = B[j+i*NT];
}
}
template<typename T, int NT> __global__ void
solve_block_kernel_batched(FrontData<T>* dat) {
FrontData<T>& A = dat[blockIdx.x];
solve_block_kernel<T,NT>(A.n1, A.n2, A.F11, A.F12, A.piv);
}
/**
* Compute F -= F21 * F12, where F is d2 x d2 and F12 is d1 x d2.
* d1 is <= NT. This should be called with a single NT x NT thread
* block.
*/
template<typename T, int NT> __device__ void
Schur_block_kernel(int d1, int d2, T* F12, T* F21, T* F22) {
using cuda_primitive_t = typename primitive_type<T>::value_type;
__shared__ cuda_primitive_t B_[NT*NT], A_[NT*NT];
T *B = reinterpret_cast<T*>(B_), *A = reinterpret_cast<T*>(A_);
int j = threadIdx.x, i = threadIdx.y;
A[j+i*NT] = B[j+i*NT] = 0.;
for (int cb=0; cb<d2; cb+=NT) {
int c = cb + j;
// put NT columns of F12 in shared memory B
if (i < d1 && c < d2)
B[j+i*NT] = F12[i+c*d1];
__syncthreads();
for (int rb=0; rb<d2; rb+=NT) {
int r = rb + i;
// put NT rows of F21 in shared memory A
if (r < d2 && j < d1)
A[j+i*NT] = F21[r+j*d2];
__syncthreads(); // wait for A and B
if (c < d2 && r < d2) {
T tmp(0.);
// k < d1 <= NT, by using k<NT this can be unrolled
for (int k=0; k<NT; k++)
tmp += A[k+i*NT] * B[j+k*NT];
F22[r+c*d2] -= tmp;
}
__syncthreads(); // sync before reading new A/B
}
}
}
template<typename T, int NT> __global__ void
Schur_block_kernel_batched(FrontData<T>* dat) {
FrontData<T>& A = dat[blockIdx.x];
Schur_block_kernel<T,NT>(A.n1, A.n2, A.F12, A.F21, A.F22);
}
template<typename T, int NT, typename real_t>
void factor_block_batch(unsigned int count, FrontData<T>* dat,
bool replace, real_t thresh) {
if (!count) return;
using T_ = typename cuda_type<T>::value_type;
auto dat_ = reinterpret_cast<FrontData<T_>*>(dat);
dim3 block(NT, NT); //, grid(count, 1, 1);
LU_block_kernel_batched<T_,NT,real_t><<<count, block>>>
(dat_, replace, thresh);
solve_block_kernel_batched<T_,NT><<<count, block>>>(dat_);
Schur_block_kernel_batched<T_,NT><<<count, block>>>(dat_);
}
template<typename T, int NT> __global__ void
solve_block_kernel_batched(int nrhs, FrontData<T>* dat) {
FrontData<T>& A = dat[blockIdx.x];
solve_block_kernel<T,NT>(A.n1, nrhs, A.F11, A.F12, A.piv);
}
template<typename T, int NT, int alpha, int beta> __device__ void
gemmNN_block_inner_kernel(int m, int n, int k,
T* Aglobal, T* Bglobal, T* Cglobal) {
using cuda_primitive_t = typename primitive_type<T>::value_type;
__shared__ cuda_primitive_t B_[NT*NT], A_[NT*NT];
T *B = reinterpret_cast<T*>(B_), *A = reinterpret_cast<T*>(A_);
int j = threadIdx.x, i = threadIdx.y;
for (int cb=0; cb<n; cb+=NT) {
int c = cb + j;
// put NT columns of Bglobal in shared memory B
if (i < k && c < n)
B[j+i*NT] = Bglobal[i+c*k];
__syncthreads();
for (int rb=0; rb<m; rb+=NT) {
int r = rb + i;
// put NT rows of Aglobal in shared memory A
if (r < m && j < k)
A[j+i*NT] = Aglobal[r+j*m];
__syncthreads(); // wait for A and B
if (c < n && r < m) {
T tmp(0.);
// l < k <= NT, by using l<NT this can be unrolled
for (int l=0; l<k; l++)
tmp += A[l+i*NT] * B[j+l*NT];
Cglobal[r+c*m] = T(alpha) * tmp + T(beta) * Cglobal[r+c*m];
}
__syncthreads(); // sync before reading new A/B
}
}
}
/**
* Compute C = alpha*A*B + beta*C, with a single thread block,
 * with NT x NT threads. A is m x k, and k <= NT. B is k x n and C
 * is m x n.
*/
template<typename T, int NT, int alpha, int beta> __global__ void
gemmNN_block_inner_kernel_batched(int nrhs, FrontData<T>* dat) {
FrontData<T>& A = dat[blockIdx.x];
gemmNN_block_inner_kernel<T,NT,alpha,beta>
(A.n2, nrhs, A.n1, A.F21, A.F12, A.F22);
}
/**
 * Compute a matrix-vector product C = alpha*A*B + beta*C with a
 * single 1 x NT thread block. A is m x k, with k <= NT. B is k x
* 1, C is m x 1.
*/
template<typename T, int NT, int alpha, int beta> __device__ void
gemvN_block_inner_kernel(int m, int k,
T* Aglobal, T* Bglobal, T* Cglobal) {
using cuda_primitive_t = typename primitive_type<T>::value_type;
__shared__ cuda_primitive_t B_[NT];
T *B = reinterpret_cast<T*>(B_);
int i = threadIdx.y;
B[i] = Bglobal[i];
__syncthreads();
for (int r=i; r<m; r+=NT) {
T tmp(0.);
for (int j=0; j<k; j++) // j < k <= NT
tmp += Aglobal[r+j*m] * B[j];
Cglobal[r] = T(alpha) * tmp + T(beta) * Cglobal[r];
}
}
template<typename T, int NT, int alpha, int beta> __global__ void
gemvN_block_inner_kernel_batched(FrontData<T>* dat) {
FrontData<T>& A = dat[blockIdx.x];
gemvN_block_inner_kernel<T,NT,alpha,beta>
(A.n2, A.n1, A.F21, A.F12, A.F22);
}
/**
* Single extend-add operation along the column dimension, for the
* solve. d1 is the size of F11, d2 is the size of F22.
*/
template<typename T> __device__ void
ea_rhs_kernel(int x, int y, int nrhs, int dsep, int dupd, int dCB,
T* b, T* bupd, T* CB, std::size_t* I) {
if (x >= nrhs || y >= dCB) return;
auto Iy = I[y];
if (Iy < dsep) b[Iy+x*dsep] += CB[y+x*dCB];
else bupd[Iy-dsep+x*dupd] += CB[y+x*dCB];
}
template<typename T> __global__ void
extend_add_rhs_kernel_left
(int nrhs, unsigned int by0, AssembleData<T>* dat) {
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = (blockIdx.y + by0) * blockDim.y + threadIdx.y;
auto& F = dat[blockIdx.z];
if (F.CB1)
ea_rhs_kernel(x, y, nrhs, F.d1, F.d2, F.dCB1,
F.F11, F.F21, F.CB1, F.I1);
}
template<typename T> __global__ void
extend_add_rhs_kernel_right
(int nrhs, unsigned int by0, AssembleData<T>* dat) {
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = (blockIdx.y + by0) * blockDim.y + threadIdx.y;
auto& F = dat[blockIdx.z];
if (F.CB2)
ea_rhs_kernel(x, y, nrhs, F.d1, F.d2, F.dCB2,
F.F11, F.F21, F.CB2, F.I2);
}
template<typename T> void
extend_add_rhs(int nrhs, unsigned int nf,
AssembleData<T>* dat, AssembleData<T>* ddat) {
unsigned int nty = 64, nby = 0;
for (int f=0; f<nf; f++) {
int b = dat[f].dCB1 / nty + (dat[f].dCB1 % nty != 0);
if (b > nby) nby = b;
b = dat[f].dCB2 / nty + (dat[f].dCB2 % nty != 0);
if (b > nby) nby = b;
}
int ntx = (nrhs == 1) ? 1 : 16;
int nbx = nrhs / ntx + (nrhs % ntx != 0);
dim3 block(ntx, nty);
using T_ = typename cuda_type<T>::value_type;
auto dat_ = reinterpret_cast<AssembleData<T_>*>(ddat);
for (unsigned int by=0; by<nby; by+=MAX_BLOCKS_Y) {
int nbyy = std::min(nby-by, MAX_BLOCKS_Y);
for (unsigned int f=0; f<nf; f+=MAX_BLOCKS_Z) {
dim3 grid(nbx, nbyy, std::min(nf-f, MAX_BLOCKS_Z));
extend_add_rhs_kernel_left<<<grid, block>>>(nrhs, by, dat_+f);
}
}
cudaDeviceSynchronize();
for (unsigned int by=0; by<nby; by+=MAX_BLOCKS_Y) {
int nbyy = std::min(nby-by, MAX_BLOCKS_Y);
for (unsigned int f=0; f<nf; f+=MAX_BLOCKS_Z) {
dim3 grid(nbx, nbyy, std::min(nf-f, MAX_BLOCKS_Z));
extend_add_rhs_kernel_right<<<grid, block>>>(nrhs, by, dat_+f);
}
}
}
template<typename T, int NT> void
fwd_block_batch(int nrhs, unsigned int count,
FrontData<T>* dat) {
if (!count) return;
using T_ = typename cuda_type<T>::value_type;
auto dat_ = reinterpret_cast<FrontData<T_>*>(dat);
dim3 block(NT, NT);
solve_block_kernel_batched<T_,NT><<<count, block>>>(nrhs, dat_);
if (nrhs == 1) {
dim3 block1(1, NT);
gemvN_block_inner_kernel_batched
<T_,NT,-1,1><<<count, block1>>>(dat_);
} else
gemmNN_block_inner_kernel_batched
<T_,NT,-1,1><<<count, block>>>(nrhs, dat_);
}
/**
 * Extract the contribution-block part of the right-hand side from the
 * front (b and bupd) into CB, for the solve. dsep is the size of F11,
 * dupd is the size of F22.
*/
template<typename T> __device__ void
extract_rhs_kernel(int x, int y, int nrhs, int dsep, int dupd, int dCB,
T* b, T* bupd, T* CB, std::size_t* I) {
if (x >= nrhs || y >= dCB) return;
auto Iy = I[y];
if (Iy < dsep) CB[y+x*dCB] = b[Iy+x*dsep];
else CB[y+x*dCB] = bupd[Iy-dsep+x*dupd];
}
template<typename T> __global__ void
extract_rhs_kernel(int nrhs, unsigned int by0, AssembleData<T>* dat) {
int x = blockIdx.x * blockDim.x + threadIdx.x,
y = (blockIdx.y + by0) * blockDim.y + threadIdx.y;
auto& F = dat[blockIdx.z];
if (F.CB1)
extract_rhs_kernel(x, y, nrhs, F.d1, F.d2, F.dCB1,
F.F11, F.F21, F.CB1, F.I1);
if (F.CB2)
extract_rhs_kernel(x, y, nrhs, F.d1, F.d2, F.dCB2,
F.F11, F.F21, F.CB2, F.I2);
}
template<typename T> void
extract_rhs(int nrhs, unsigned int nf, AssembleData<T>* dat,
AssembleData<T>* ddat) {
unsigned int nty = 64, nby = 0;
for (int f=0; f<nf; f++) {
int b = dat[f].dCB1 / nty + (dat[f].dCB1 % nty != 0);
if (b > nby) nby = b;
b = dat[f].dCB2 / nty + (dat[f].dCB2 % nty != 0);
if (b > nby) nby = b;
}
int ntx = (nrhs == 1) ? 1 : 16;
int nbx = nrhs / ntx + (nrhs % ntx != 0);
dim3 block(ntx, nty);
using T_ = typename cuda_type<T>::value_type;
auto dat_ = reinterpret_cast<AssembleData<T_>*>(ddat);
for (unsigned int by=0; by<nby; by+=MAX_BLOCKS_Y) {
int nbyy = std::min(nby-by, MAX_BLOCKS_Y);
for (unsigned int f=0; f<nf; f+=MAX_BLOCKS_Z) {
dim3 grid(nbx, nbyy, std::min(nf-f, MAX_BLOCKS_Z));
extract_rhs_kernel<<<grid, block>>>(nrhs, by, dat_+f);
}
}
}
/**
 * Compute a matrix-vector product C = alpha*A*B + beta*C with a
 * single 1 x NT thread block. A is m x k, with m <= NT. B is k x
* 1, C is m x 1.
*/
template<typename T, int NT, int alpha, int beta> __device__ void
gemvN_block_outer_kernel(int m, int k,
T* Aglobal, T* Bglobal, T* Cglobal) {
using cuda_primitive_t = typename primitive_type<T>::value_type;
__shared__ cuda_primitive_t B_[NT], C_[NT];
T *B = reinterpret_cast<T*>(B_), *C = reinterpret_cast<T*>(C_);
int i = threadIdx.y;
C[i] = T(0.);
for (int c=0; c<k; c+=NT) {
B[i] = Bglobal[c+i];
__syncthreads();
if (i < m) {
T tmp(0.);
for (int j=0; j<min(NT, k-c); j++)
tmp += Aglobal[i+(c+j)*m] * B[j];
C[i] += tmp;
}
}
if (i < m)
Cglobal[i] = T(alpha) * C[i] + T(beta) * Cglobal[i];
}
template<typename T, int NT, int alpha, int beta> __global__ void
gemvN_block_outer_kernel_batched(FrontData<T>* dat) {
FrontData<T>& A = dat[blockIdx.x];
// A.F12 is used as-is, A.F21 holds yupd, A.F11 holds y
gemvN_block_outer_kernel<T,NT,alpha,beta>
(A.n1, A.n2, A.F12, A.F21, A.F11);
}
template<typename T, int NT, int alpha, int beta> __device__ void
gemmNN_block_outer_kernel(int m, int n, int k,
T* Aglobal, T* Bglobal, T* Cglobal) {
using cuda_primitive_t = typename primitive_type<T>::value_type;
__shared__ cuda_primitive_t B_[NT*NT], A_[NT*NT], C_[NT*NT];
T *A = reinterpret_cast<T*>(A_), *B = reinterpret_cast<T*>(B_),
*C = reinterpret_cast<T*>(C_);
int j = threadIdx.x, i = threadIdx.y;
for (int nb=0; nb<n; nb+=NT) {
int n_ = nb + j;
C[i+j*NT] = T(0.);
for (int kb=0; kb<k; kb+=NT) {
int dk = min(NT, k-kb);
if (j < dk && i < m)
A[i+j*NT] = Aglobal[i+(kb+j)*m];
if (i < dk && n_ < n)
B[i+j*NT] = Bglobal[(kb+i)+n_*k];
__syncthreads();
if (i < m && n_ < n) {
T tmp(0.);
for (int l=0; l<dk; l++)
tmp += A[i+l*NT] * B[l+j*NT];
C[i+j*NT] += tmp;
}
__syncthreads();
}
if (i < m && n_ < n)
Cglobal[i+n_*m] = T(alpha) * C[i+j*NT] + T(beta) * Cglobal[i+n_*m];
}
}
/**
* Compute C = alpha*A*B + beta*C, with a single thread block,
 * with NT x NT threads. A is m x k, and m <= NT. B is k x n and C
 * is m x n.
*/
template<typename T, int NT, int alpha, int beta> __global__ void
gemmNN_block_outer_kernel_batched(int nrhs, FrontData<T>* dat) {
FrontData<T>& A = dat[blockIdx.x];
gemmNN_block_outer_kernel<T,NT,alpha,beta>
(A.n1, nrhs, A.n2, A.F12, A.F21, A.F11);
}
template<typename T, int NT> void
bwd_block_batch(int nrhs, unsigned int count,
FrontData<T>* dat) {
if (!count) return;
using T_ = typename cuda_type<T>::value_type;
auto dat_ = reinterpret_cast<FrontData<T_>*>(dat);
if (nrhs == 1) {
dim3 block(1, NT);
gemvN_block_outer_kernel_batched
<T_,NT,-1,1><<<count, block>>>(dat_);
} else {
dim3 block(NT, NT);
gemmNN_block_outer_kernel_batched
<T_,NT,-1,1><<<count, block>>>(nrhs, dat_);
}
}
// explicit template instantiations
template void assemble(unsigned int, AssembleData<float>*, AssembleData<float>*);
template void assemble(unsigned int, AssembleData<double>*, AssembleData<double>*);
template void assemble(unsigned int, AssembleData<std::complex<float>>*, AssembleData<std::complex<float>>*);
template void assemble(unsigned int, AssembleData<std::complex<double>>*, AssembleData<std::complex<double>>*);
template void extend_add_rhs(int, unsigned int, AssembleData<float>*, AssembleData<float>*);
template void extend_add_rhs(int, unsigned int, AssembleData<double>*, AssembleData<double>*);
template void extend_add_rhs(int, unsigned int, AssembleData<std::complex<float>>*, AssembleData<std::complex<float>>*);
template void extend_add_rhs(int, unsigned int, AssembleData<std::complex<double>>*, AssembleData<std::complex<double>>*);
template void extract_rhs(int, unsigned int, AssembleData<float>*, AssembleData<float>*);
template void extract_rhs(int, unsigned int, AssembleData<double>*, AssembleData<double>*);
template void extract_rhs(int, unsigned int, AssembleData<std::complex<float>>*, AssembleData<std::complex<float>>*);
template void extract_rhs(int, unsigned int, AssembleData<std::complex<double>>*, AssembleData<std::complex<double>>*);
template void factor_block_batch<float,8,float>(unsigned int, FrontData<float>*, bool, float);
template void factor_block_batch<double,8,double>(unsigned int, FrontData<double>*, bool, double);
template void factor_block_batch<std::complex<float>,8,float>(unsigned int, FrontData<std::complex<float>>*, bool, float);
template void factor_block_batch<std::complex<double>,8,double>(unsigned int, FrontData<std::complex<double>>*, bool, double);
template void factor_block_batch<float,16,float>(unsigned int, FrontData<float>*, bool, float);
template void factor_block_batch<double,16,double>(unsigned int, FrontData<double>*, bool, double);
template void factor_block_batch<std::complex<float>,16,float>(unsigned int, FrontData<std::complex<float>>*, bool, float);
template void factor_block_batch<std::complex<double>,16,double>(unsigned int, FrontData<std::complex<double>>*, bool, double);
template void factor_block_batch<float,24,float>(unsigned int, FrontData<float>*, bool, float);
template void factor_block_batch<double,24,double>(unsigned int, FrontData<double>*, bool, double);
template void factor_block_batch<std::complex<float>,24,float>(unsigned int, FrontData<std::complex<float>>*, bool, float);
template void factor_block_batch<std::complex<double>,24,double>(unsigned int, FrontData<std::complex<double>>*, bool, double);
template void factor_block_batch<float,32,float>(unsigned int, FrontData<float>*, bool, float);
template void factor_block_batch<double,32,double>(unsigned int, FrontData<double>*, bool, double);
template void factor_block_batch<std::complex<float>,32,float>(unsigned int, FrontData<std::complex<float>>*, bool, float);
template void factor_block_batch<std::complex<double>,32,double>(unsigned int, FrontData<std::complex<double>>*, bool, double);
template void replace_pivots(int, float*, float, gpu::Stream&);
template void replace_pivots(int, double*, double, gpu::Stream&);
template void replace_pivots(int, std::complex<float>*, float, gpu::Stream&);
template void replace_pivots(int, std::complex<double>*, double, gpu::Stream&);
template void fwd_block_batch<float,8>(int, unsigned int, FrontData<float>*);
template void fwd_block_batch<double,8>(int, unsigned int, FrontData<double>*);
template void fwd_block_batch<std::complex<float>,8>(int, unsigned int, FrontData<std::complex<float>>*);
template void fwd_block_batch<std::complex<double>,8>(int, unsigned int, FrontData<std::complex<double>>*);
template void fwd_block_batch<float,16>(int, unsigned int, FrontData<float>*);
template void fwd_block_batch<double,16>(int, unsigned int, FrontData<double>*);
template void fwd_block_batch<std::complex<float>,16>(int, unsigned int, FrontData<std::complex<float>>*);
template void fwd_block_batch<std::complex<double>,16>(int, unsigned int, FrontData<std::complex<double>>*);
template void fwd_block_batch<float,24>(int, unsigned int, FrontData<float>*);
template void fwd_block_batch<double,24>(int, unsigned int, FrontData<double>*);
template void fwd_block_batch<std::complex<float>,24>(int, unsigned int, FrontData<std::complex<float>>*);
template void fwd_block_batch<std::complex<double>,24>(int, unsigned int, FrontData<std::complex<double>>*);
template void fwd_block_batch<float,32>(int, unsigned int, FrontData<float>*);
template void fwd_block_batch<double,32>(int, unsigned int, FrontData<double>*);
template void fwd_block_batch<std::complex<float>,32>(int, unsigned int, FrontData<std::complex<float>>*);
template void fwd_block_batch<std::complex<double>,32>(int, unsigned int, FrontData<std::complex<double>>*);
template void bwd_block_batch<float,8>(int, unsigned int, FrontData<float>*);
template void bwd_block_batch<double,8>(int, unsigned int, FrontData<double>*);
template void bwd_block_batch<std::complex<float>,8>(int, unsigned int, FrontData<std::complex<float>>*);
template void bwd_block_batch<std::complex<double>,8>(int, unsigned int, FrontData<std::complex<double>>*);
template void bwd_block_batch<float,16>(int, unsigned int, FrontData<float>*);
template void bwd_block_batch<double,16>(int, unsigned int, FrontData<double>*);
template void bwd_block_batch<std::complex<float>,16>(int, unsigned int, FrontData<std::complex<float>>*);
template void bwd_block_batch<std::complex<double>,16>(int, unsigned int, FrontData<std::complex<double>>*);
template void bwd_block_batch<float,24>(int, unsigned int, FrontData<float>*);
template void bwd_block_batch<double,24>(int, unsigned int, FrontData<double>*);
template void bwd_block_batch<std::complex<float>,24>(int, unsigned int, FrontData<std::complex<float>>*);
template void bwd_block_batch<std::complex<double>,24>(int, unsigned int, FrontData<std::complex<double>>*);
template void bwd_block_batch<float,32>(int, unsigned int, FrontData<float>*);
template void bwd_block_batch<double,32>(int, unsigned int, FrontData<double>*);
template void bwd_block_batch<std::complex<float>,32>(int, unsigned int, FrontData<std::complex<float>>*);
template void bwd_block_batch<std::complex<double>,32>(int, unsigned int, FrontData<std::complex<double>>*);
} // end namespace gpu
} // end namespace strumpack
|
the_stack
|
#include "caffe/layers/eltwise_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype, typename MItype, typename MOtype>
void EltwiseLayer<Dtype, MItype, MOtype>::GenerateProgram() {
this->device_program_ = this->device_->CreateProgram();
stringstream ss;
ss << this->device_program_->setup();
ss << this->device_program_->template define_type<Dtype>("Dtype");
ss << this->device_program_->template define_type<MItype>("MItype");
ss << this->device_program_->template define_type<MOtype>("MOtype");
#ifdef USE_HALF
if (std::is_same<MItype, half_fp>::value) {
ss << "#define DTYPE_MAX HALF_MAX" << std::endl;
ss << "#define DTYPE_MIN HALF_MIN" << std::endl;
} else if (std::is_same<MItype, float>::value
|| std::is_same<MItype, double>::value) {
#endif
ss << "#define DTYPE_MAX FLT_MAX" << std::endl;
ss << "#define DTYPE_MIN FLT_MIN" << std::endl;
#ifdef USE_HALF
} else {
ss << "#define DTYPE_MAX " << type_max_val<MItype>() << std::endl;
ss << "#define DTYPE_MIN " << 0 << std::endl;
}
#endif
KernelArgs fw_args;
fw_args.push_back(this->device_program_->template create_kernel_arg<uint_tp>(
"nthreads", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"bottom_data_a", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"bottom_data_b", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"blob_idx", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<MOtype>(
"top_data", KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"mask", KERNEL_ARG_GLOBAL_MEM));
ss << this->device_program_->function("MaxForward", fw_args);
ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads");
ss << "Dtype maxval = -DTYPE_MAX;" << std::endl;
ss << "int_tp maxidx = -1;" << std::endl;
ss << "if (bottom_data_a[index] > bottom_data_b[index]) {" << std::endl;
// only update for very first bottom_data blob (blob_idx == 0)
ss << "if (blob_idx == 0) {" << std::endl;
ss << "maxval = bottom_data_a[index];" << std::endl;
ss << "top_data[index] = maxval;" << std::endl;
ss << "maxidx = blob_idx;" << std::endl;
ss << "mask[index] = maxidx;" << std::endl;
ss << "}" << std::endl;
ss << "} else {" << std::endl;
ss << "maxval = bottom_data_b[index];" << std::endl;
ss << "top_data[index] = maxval;" << std::endl;
ss << "maxidx = blob_idx + 1;" << std::endl;
ss << "mask[index] = maxidx;" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
KernelArgs bw_args;
bw_args.push_back(this->device_program_->template create_kernel_arg<uint_tp>(
"nthreads", KERNEL_ARG_CONST));
bw_args.push_back(this->device_program_->template create_kernel_arg<MOtype>(
"top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
bw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"blob_idx", KERNEL_ARG_CONST));
bw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"mask", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
bw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"bottom_diff", KERNEL_ARG_GLOBAL_MEM));
ss << this->device_program_->function("MaxBackward", bw_args);
ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads");
ss << "Dtype gradient = 0;" << std::endl;
ss << "if (mask[index] == blob_idx) {" << std::endl;
ss << "gradient += top_diff[index];" << std::endl;
ss << "}" << std::endl;
ss << "bottom_diff[index] = gradient;" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
this->device_program_->set_source(ss.str());
this->device_program_->Compile(true, true);
}
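/*
 For reference, the string pieces assembled above yield roughly the following
 MaxForward body once setup()/function()/kernel_loop() add their backend
 specific boilerplate (sketch only; the exact wrapper code depends on whether
 the device program targets CUDA or OpenCL):

   for each index in [0, nthreads):
     Dtype maxval = -DTYPE_MAX;
     int_tp maxidx = -1;
     if (bottom_data_a[index] > bottom_data_b[index]) {
       if (blob_idx == 0) {        // only the first pairwise pass may write the a-side
         maxval = bottom_data_a[index];
         top_data[index] = maxval;
         maxidx = blob_idx;
         mask[index] = maxidx;
       }
     } else {
       maxval = bottom_data_b[index];
       top_data[index] = maxval;
       maxidx = blob_idx + 1;
       mask[index] = maxidx;
     }

 MaxBackward is the matching scatter: bottom_diff[index] receives
 top_diff[index] only where mask[index] == blob_idx, and 0 otherwise.
*/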
template<typename Dtype, typename MItype, typename MOtype>
void EltwiseLayer<Dtype, MItype, MOtype>::Forward_gpu(
const vector<Blob<MItype>*>& bottom,
const vector<Blob<MOtype>*>& top) {
vptr<int_tp> mask;
const int_tp count = top[0]->count();
vptr<Dtype> top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD: {
this->device_->template mul<Dtype>(count, bottom[0]->gpu_data(),
bottom[1]->gpu_data(),
top_data);
for (int_tp i = 2; i < bottom.size(); ++i) {
this->device_->template mul<Dtype>(count, top_data,
bottom[i]->gpu_data(), top_data);
}
break;
}
case EltwiseParameter_EltwiseOp_SUM: {
this->device_->set(count, Dtype(0.), top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int_tp i = 0; i < bottom.size(); ++i) {
this->device_->axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
break;
}
case EltwiseParameter_EltwiseOp_MAX: {
mask = max_idx_.mutable_gpu_data();
shared_ptr<DeviceKernel> kernel =
this->device_program_->GetKernel("MaxForward");
vptr<const Dtype> bottom_0_data = bottom[0]->gpu_data();
vptr<const Dtype> bottom_1_data = bottom[1]->gpu_data();
int_tp idx = 0;
kernel->add_arg(&count);
kernel->add_arg(&bottom_0_data);
kernel->add_arg(&bottom_1_data);
kernel->add_arg(&idx);
kernel->add_arg(&top_data);
kernel->add_arg(&mask);
vector<size_t> work_size(1, count);
vector<size_t> group;
vector<size_t> local;
this->device_->get_threads(&work_size, &group, &local, kernel.get(),
true);
kernel->Execute(group, local);
for (int_tp i = 2; i < bottom.size(); ++i) {
vptr<const Dtype> bottom_data = bottom[i]->gpu_data();
int_tp idx = i - 1;
kernel->add_arg(&count);
kernel->add_arg(&top_data);
kernel->add_arg(&bottom_data);
kernel->add_arg(&idx);
kernel->add_arg(&top_data);
kernel->add_arg(&mask);
vector<size_t> work_size(1, count);
vector<size_t> group;
vector<size_t> local;
this->device_->get_threads(&work_size, &group, &local, kernel.get(),
true);
kernel->Execute(group, local);
}
break;
}
default: {
LOG(FATAL)<< "Unknown elementwise operation.";
}
}
}
template<typename Dtype, typename MItype, typename MOtype>
void EltwiseLayer<Dtype, MItype, MOtype>::Backward_gpu(
const vector<Blob<MOtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<MItype>*>& bottom) {
vptr<const int_tp> mask;
const int_tp count = top[0]->count();
vptr<const Dtype> top_data = top[0]->gpu_data();
vptr<const Dtype> top_diff = top[0]->gpu_diff();
for (int_tp i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
vptr<const Dtype> bottom_data = bottom[i]->gpu_data();
vptr<Dtype> bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD: {
if (stable_prod_grad_) {
bool initialized = false;
for (int_tp j = 0; j < bottom.size(); ++j) {
if (i == j) {
continue;
}
if (!initialized) {
this->device_->copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
this->device_->template mul<Dtype>(count, bottom[j]->gpu_data(),
bottom_diff, bottom_diff);
}
}
} else {
this->device_->template div<Dtype>(count, top_data, bottom_data,
bottom_diff);
}
this->device_->template mul<Dtype>(count, bottom_diff, top_diff,
bottom_diff);
break;
}
case EltwiseParameter_EltwiseOp_SUM: {
if (coeffs_[i] == Dtype(1.)) {
this->device_->copy(count, top_diff, bottom_diff);
} else {
this->device_->scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
}
case EltwiseParameter_EltwiseOp_MAX: {
mask = max_idx_.gpu_data();
shared_ptr<DeviceKernel> kernel =
this->device_program_->GetKernel("MaxBackward");
kernel->add_arg(&count);
kernel->add_arg(&top_diff);
kernel->add_arg(&i);
kernel->add_arg(&mask);
kernel->add_arg(&bottom_diff);
vector<size_t> work_size(1, count);
vector<size_t> group;
vector<size_t> local;
this->device_->get_threads(&work_size, &group, &local, kernel.get(),
true);
kernel->Execute(group, local);
break;
}
default: {
LOG(FATAL)<< "Unknown elementwise operation.";
}
}
}
}
}
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, GenerateProgram,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, GenerateProgram,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, GenerateProgram,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, Forward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, Forward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, Forward_gpu,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, Backward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, Backward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(EltwiseLayer, Backward_gpu,
(double), (double), (double));
} // namespace caffe
|
the_stack
|
float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Sdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
double result;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Ddot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
#ifdef CUDA_HALF_TENSOR
float THCudaBlas_Hdot(THCState *state, int64_t n, half *x, int64_t incx, half *y, int64_t incy)
{
#if CUDA_VERSION >= 8000
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDotEx(handle, i_n, x, CUDA_R_16F, i_incx, y, CUDA_R_16F, i_incy, &result, CUDA_R_32F, CUDA_R_32F));
return result;
}
THError("Cublas_Hdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
#else
THError("Cublas_Hdot requires CUDA 8.0+");
return 0;
#endif
}
#endif
/* Level 2 */
void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy)
{
if(n == 1)
lda = m;
  cublasOperation_t op = CUBLAS_OP_N;  // default, so op is never read uninitialized
  if (trans == 't') op = CUBLAS_OP_T;
  else if (trans == 'n') op = CUBLAS_OP_N;
  else if (trans == 'c') op = CUBLAS_OP_C;
  else THError("trans must be one of: t, n, c");
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_Sgemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy)
{
if(n == 1)
lda = m;
  cublasOperation_t op = CUBLAS_OP_N;  // default, so op is never read uninitialized
  if (trans == 't') op = CUBLAS_OP_T;
  else if (trans == 'n') op = CUBLAS_OP_N;
  else if (trans == 'c') op = CUBLAS_OP_C;
  else THError("trans must be one of: t, n, c");
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemv(handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_Dgemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
cublasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return CUBLAS_OP_T;
else if (trans == 'n') return CUBLAS_OP_N;
else if (trans == 'c') return CUBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return CUBLAS_OP_T;
}
}
void adjustLd(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transa_)
{
if(m == 1)
*lda = k;
}
else
{
if(k == 1)
*lda = m;
}
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
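/*
 Illustrative example of the adjustment above (values chosen for illustration
 only): with transa = 'n', transb = 'n', m = 5, n = 1, k = 3, the call rewrites
   *ldc = m = 5   (because n == 1)
   *ldb = k = 3   (because transb is not transposed and n == 1)
 and leaves *lda untouched (it would only change if k == 1). This keeps the
 leading dimensions legal for cuBLAS when a problem dimension collapses to 1.
*/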
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_Sgemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
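/*
 Minimal usage sketch (illustrative; assumes a valid THCState* and contiguous
 column-major device buffers d_A of size m x k, d_B of size k x n and d_C of
 size m x n):

   THCudaBlas_Sgemm(state, 'n', 'n', m, n, k,
                    1.0f, d_A, m,      // lda = number of rows of A
                          d_B, k,      // ldb = number of rows of B
                    0.0f, d_C, m);     // ldc = number of rows of C

 which computes C = A * B on the current stream of the current cuBLAS handle.
*/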
#ifdef CUDA_HALF_TENSOR
// In CUDA 8.0, definition of data types for sgemmex changed
#if CUDA_VERSION < 8000
# define CUDA_R_16F CUBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, half alpha, half *a, int64_t lda, half *b, int64_t ldb, half beta, half *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
// Simulated Hgemm
float fAlpha = THC_half2float(alpha);
float fBeta = THC_half2float(beta);
#if CUDA_VERSION < 9000
THCublasCheck(cublasSgemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
i_ldb, &fBeta, c, CUDA_R_16F, i_ldc));
#else
cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state);
if (prop->major >= 5){
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
THCublasCheck(cublasGemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
i_ldb, &fBeta, c, CUDA_R_16F, i_ldc,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
}else{
THCublasCheck(cublasSgemmEx(handle, opa, opb,
i_m, i_n, i_k, &fAlpha,
a, CUDA_R_16F, i_lda, b, CUDA_R_16F,
i_ldb, &fBeta, c, CUDA_R_16F, i_ldc));
}
#endif
return;
}
THError("Cublas_Hgemm only supports m, n, k, lda, ldb, ldc"
"with th bound [val] <= %d", INT_MAX);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemm(handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_Dgemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
#if CUDA_VERSION >= 8000
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
#if CUDA_VERSION >= 8000
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
/* Inverse */
void THCudaBlas_Sgetrf(THCState *state, int n, float **a, int lda, int *pivot, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Sgetrf only supports n, lda, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}
void THCudaBlas_Dgetrf(THCState *state, int n, double **a, int lda, int *pivot, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrf only supports n, lda, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgetrfBatched(handle, n, a, lda, pivot, info, batchSize));
}
THC_API void THCudaBlas_Sgetrs(THCState *state, char transa, int n, int nrhs, const float **a, int lda, int *pivot, float **b, int ldb, int *info, int batchSize)
{
if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
// no need to adjust leading dimensions, since matrices are square
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}
THC_API void THCudaBlas_Dgetrs(THCState *state, char transa, int n, int nrhs, const double **a, int lda, int *pivot, double **b, int ldb, int *info, int batchSize)
{
if( (n >= INT_MAX) || (nrhs >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetrs only supports n, nrhs, lda, ldb, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
// no need to adjust leading dimensions, since matrices are square
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgetrsBatched(handle, opa, n, nrhs, a, lda, pivot, b, ldb, info, batchSize));
}
void THCudaBlas_Sgetri(THCState *state, int n, const float **a, int lda, int *pivot, float **c, int ldc, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Sgetri only supports n, lda, ldc, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasSgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}
void THCudaBlas_Dgetri(THCState *state, int n, const double **a, int lda, int *pivot, double **c, int ldc, int *info, int batchSize) {
if( (n >= INT_MAX) || (lda >= INT_MAX)|| (ldc >= INT_MAX) || (batchSize >= INT_MAX) )
{
THError("Cublas_Dgetri only supports n, lda, ldc, batchSize"
"with the bound [val] <= %d", INT_MAX);
}
cublasHandle_t handle = THCState_getCurrentBlasHandle(state);
cublasSetStream(handle, THCState_getCurrentStream(state));
THCublasCheck(cublasDgetriBatched(handle, n, a, lda, pivot, c, ldc, info, batchSize));
}
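/*
 Typical batched-inverse flow built from the getrf/getri wrappers above
 (sketch; d_Aarray, d_Carray, d_pivot and d_info are illustrative names for
 device allocations owned by the caller):

   THCudaBlas_Sgetrf(state, n, d_Aarray, n, d_pivot, d_info, batchSize);   // in-place LU factorization
   THCudaBlas_Sgetri(state, n, (const float**)d_Aarray, n, d_pivot,
                     d_Carray, n, d_info, batchSize);                      // inverses written to d_Carray

 Each entry of info should be checked afterwards: a non-zero value flags a
 singular matrix in the batch.
*/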
|
the_stack
|
#define USE_SHARED 1
#include "cuda_helper.h"
//static uint32_t *d_nonce[MAX_GPUS];
#define SPH_ROTL32(x, n) ROTL32(x, n)
#define SPH_ROTR32(x, n) ROTR32(x, n)
#define F1(x6, x5, x4, x3, x2, x1, x0) \
(((x1) & ((x0) ^ (x4))) ^ ((x2) & (x5)) ^ ((x3) & (x6)) ^ (x0))
#define F2(x6, x5, x4, x3, x2, x1, x0) \
(((x2) & (((x1) & ~(x3)) ^ ((x4) & (x5)) ^ (x6) ^ (x0))) \
^ ((x4) & ((x1) ^ (x5))) ^ ((x3 & (x5)) ^ (x0)))
#define F3(x6, x5, x4, x3, x2, x1, x0) \
(((x3) & (((x1) & (x2)) ^ (x6) ^ (x0))) \
^ ((x1) & (x4)) ^ ((x2) & (x5)) ^ (x0))
#define F4(x6, x5, x4, x3, x2, x1, x0) \
(((x3) & (((x1) & (x2)) ^ ((x4) | (x6)) ^ (x5))) \
^ ((x4) & ((~(x2) & (x5)) ^ (x1) ^ (x6) ^ (x0))) \
^ ((x2) & (x6)) ^ (x0))
#define F5(x6, x5, x4, x3, x2, x1, x0) \
(((x0) & ~(((x1) & (x2) & (x3)) ^ (x5))) \
^ ((x1) & (x4)) ^ ((x2) & (x5)) ^ ((x3) & (x6)))
#define STEP1(x7, x6, x5, x4, x3, x2, x1, x0, w) { \
uint32_t t = F1(x3, x4, x1, x0, x5, x2, x6); \
(x7) =(SPH_ROTR32(t, 7) + SPH_ROTR32((x7), 11) \
+ (w)); \
}
#define STEP2(x7, x6, x5, x4, x3, x2, x1, x0, w, c) { \
uint32_t t = F2(x6, x2, x1, x0, x3, x4, x5); \
(x7) =(SPH_ROTR32(t, 7) + SPH_ROTR32((x7), 11) \
+ (w) + (c)); \
}
#define STEP3(x7, x6, x5, x4, x3, x2, x1, x0, w, c) { \
uint32_t t = F3(x2, x6, x0, x4, x3, x1, x5); \
(x7) =(SPH_ROTR32(t, 7) + SPH_ROTR32((x7), 11) \
+ (w) + (c)); \
}
#define STEP4(x7, x6, x5, x4, x3, x2, x1, x0, w, c) { \
uint32_t t = F4(x1, x5, x3, x2, x0, x4, x6); \
(x7) =(SPH_ROTR32(t, 7) + SPH_ROTR32((x7), 11) \
+ (w) + (c)); \
}
#define STEP5(x7, x6, x5, x4, x3, x2, x1, x0, w, c) { \
uint32_t t = F5(x2, x5, x0, x6, x4, x3, x1); \
(x7) =(SPH_ROTR32(t, 7) + SPH_ROTR32((x7), 11) \
+ (w) + (c)); \
}
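/*
 For reference, the very first STEP1 call in the kernel below,
 STEP1(s7, s6, s5, s4, s3, s2, s1, s0, buf[0]), expands after macro
 substitution to:

   uint32_t t = F1(s3, s4, s1, s0, s5, s2, s6);
   s7 = SPH_ROTR32(t, 7) + SPH_ROTR32(s7, 11) + buf[0];

 Each following call rotates the s0..s7 arguments by one position, so a
 different state word is updated at every step of the pass.
*/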
__global__
void x17_haval256_gpu_hash_64(uint32_t threads, uint32_t startNounce, const uint64_t *const __restrict__ g_hash)
{
const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
// if (thread < threads)
{
uint32_t *inpHash = (uint32_t*)&g_hash[8 * thread];
uint32_t hash[16];
uint32_t buf[32] = { 0 };
uint32_t s0 = 0x243F6A88;
uint32_t s1 = 0x85A308D3;
uint32_t s2 = 0x13198A2E;
uint32_t s3 = 0x03707344;
uint32_t s4 = 0xA4093822;
uint32_t s5 = 0x299F31D0;
uint32_t s6 = 0x082EFA98;
uint32_t s7 = 0xEC4E6C89;
#pragma unroll
for (int i = 0; i<16; i++)
{
hash[i] = inpHash[i];
}
///////// input big /////////////////////
#pragma unroll
for (int i = 0; i<16; i++)
{
buf[i] = hash[i];
}
buf[16] = 0x00000001;
buf[29] = 0x40290000;
buf[30] = 0x00000200;
STEP1(s7, s6, s5, s4, s3, s2, s1, s0, buf[0]);
STEP1(s6, s5, s4, s3, s2, s1, s0, s7, buf[1]);
STEP1(s5, s4, s3, s2, s1, s0, s7, s6, buf[2]);
STEP1(s4, s3, s2, s1, s0, s7, s6, s5, buf[3]);
STEP1(s3, s2, s1, s0, s7, s6, s5, s4, buf[4]);
STEP1(s2, s1, s0, s7, s6, s5, s4, s3, buf[5]);
STEP1(s1, s0, s7, s6, s5, s4, s3, s2, buf[6]);
STEP1(s0, s7, s6, s5, s4, s3, s2, s1, buf[7]);
STEP1(s7, s6, s5, s4, s3, s2, s1, s0, buf[8]);
STEP1(s6, s5, s4, s3, s2, s1, s0, s7, buf[9]);
STEP1(s5, s4, s3, s2, s1, s0, s7, s6, buf[10]);
STEP1(s4, s3, s2, s1, s0, s7, s6, s5, buf[11]);
STEP1(s3, s2, s1, s0, s7, s6, s5, s4, buf[12]);
STEP1(s2, s1, s0, s7, s6, s5, s4, s3, buf[13]);
STEP1(s1, s0, s7, s6, s5, s4, s3, s2, buf[14]);
STEP1(s0, s7, s6, s5, s4, s3, s2, s1, buf[15]);
STEP1(s7, s6, s5, s4, s3, s2, s1, s0, buf[16]);
STEP1(s6, s5, s4, s3, s2, s1, s0, s7, buf[17]);
STEP1(s5, s4, s3, s2, s1, s0, s7, s6, buf[18]);
STEP1(s4, s3, s2, s1, s0, s7, s6, s5, buf[19]);
STEP1(s3, s2, s1, s0, s7, s6, s5, s4, buf[20]);
STEP1(s2, s1, s0, s7, s6, s5, s4, s3, buf[21]);
STEP1(s1, s0, s7, s6, s5, s4, s3, s2, buf[22]);
STEP1(s0, s7, s6, s5, s4, s3, s2, s1, buf[23]);
STEP1(s7, s6, s5, s4, s3, s2, s1, s0, buf[24]);
STEP1(s6, s5, s4, s3, s2, s1, s0, s7, buf[25]);
STEP1(s5, s4, s3, s2, s1, s0, s7, s6, buf[26]);
STEP1(s4, s3, s2, s1, s0, s7, s6, s5, buf[27]);
STEP1(s3, s2, s1, s0, s7, s6, s5, s4, buf[28]);
STEP1(s2, s1, s0, s7, s6, s5, s4, s3, buf[29]);
STEP1(s1, s0, s7, s6, s5, s4, s3, s2, buf[30]);
STEP1(s0, s7, s6, s5, s4, s3, s2, s1, buf[31]);
STEP2(s7, s6, s5, s4, s3, s2, s1, s0, buf[5], SPH_C32(0x452821E6));
STEP2(s6, s5, s4, s3, s2, s1, s0, s7, buf[14], SPH_C32(0x38D01377));
STEP2(s5, s4, s3, s2, s1, s0, s7, s6, buf[26], SPH_C32(0xBE5466CF));
STEP2(s4, s3, s2, s1, s0, s7, s6, s5, buf[18], SPH_C32(0x34E90C6C));
STEP2(s3, s2, s1, s0, s7, s6, s5, s4, buf[11], SPH_C32(0xC0AC29B7));
STEP2(s2, s1, s0, s7, s6, s5, s4, s3, buf[28], SPH_C32(0xC97C50DD));
STEP2(s1, s0, s7, s6, s5, s4, s3, s2, buf[7], SPH_C32(0x3F84D5B5));
STEP2(s0, s7, s6, s5, s4, s3, s2, s1, buf[16], SPH_C32(0xB5470917));
STEP2(s7, s6, s5, s4, s3, s2, s1, s0, buf[0], SPH_C32(0x9216D5D9));
STEP2(s6, s5, s4, s3, s2, s1, s0, s7, buf[23], SPH_C32(0x8979FB1B));
STEP2(s5, s4, s3, s2, s1, s0, s7, s6, buf[20], SPH_C32(0xD1310BA6));
STEP2(s4, s3, s2, s1, s0, s7, s6, s5, buf[22], SPH_C32(0x98DFB5AC));
STEP2(s3, s2, s1, s0, s7, s6, s5, s4, buf[1], SPH_C32(0x2FFD72DB));
STEP2(s2, s1, s0, s7, s6, s5, s4, s3, buf[10], SPH_C32(0xD01ADFB7));
STEP2(s1, s0, s7, s6, s5, s4, s3, s2, buf[4], SPH_C32(0xB8E1AFED));
STEP2(s0, s7, s6, s5, s4, s3, s2, s1, buf[8], SPH_C32(0x6A267E96));
STEP2(s7, s6, s5, s4, s3, s2, s1, s0, buf[30], SPH_C32(0xBA7C9045));
STEP2(s6, s5, s4, s3, s2, s1, s0, s7, buf[3], SPH_C32(0xF12C7F99));
STEP2(s5, s4, s3, s2, s1, s0, s7, s6, buf[21], SPH_C32(0x24A19947));
STEP2(s4, s3, s2, s1, s0, s7, s6, s5, buf[9], SPH_C32(0xB3916CF7));
STEP2(s3, s2, s1, s0, s7, s6, s5, s4, buf[17], SPH_C32(0x0801F2E2));
STEP2(s2, s1, s0, s7, s6, s5, s4, s3, buf[24], SPH_C32(0x858EFC16));
STEP2(s1, s0, s7, s6, s5, s4, s3, s2, buf[29], SPH_C32(0x636920D8));
STEP2(s0, s7, s6, s5, s4, s3, s2, s1, buf[6], SPH_C32(0x71574E69));
STEP2(s7, s6, s5, s4, s3, s2, s1, s0, buf[19], SPH_C32(0xA458FEA3));
STEP2(s6, s5, s4, s3, s2, s1, s0, s7, buf[12], SPH_C32(0xF4933D7E));
STEP2(s5, s4, s3, s2, s1, s0, s7, s6, buf[15], SPH_C32(0x0D95748F));
STEP2(s4, s3, s2, s1, s0, s7, s6, s5, buf[13], SPH_C32(0x728EB658));
STEP2(s3, s2, s1, s0, s7, s6, s5, s4, buf[2], SPH_C32(0x718BCD58));
STEP2(s2, s1, s0, s7, s6, s5, s4, s3, buf[25], SPH_C32(0x82154AEE));
STEP2(s1, s0, s7, s6, s5, s4, s3, s2, buf[31], SPH_C32(0x7B54A41D));
STEP2(s0, s7, s6, s5, s4, s3, s2, s1, buf[27], SPH_C32(0xC25A59B5));
STEP3(s7, s6, s5, s4, s3, s2, s1, s0, buf[19], SPH_C32(0x9C30D539));
STEP3(s6, s5, s4, s3, s2, s1, s0, s7, buf[9], SPH_C32(0x2AF26013));
STEP3(s5, s4, s3, s2, s1, s0, s7, s6, buf[4], SPH_C32(0xC5D1B023));
STEP3(s4, s3, s2, s1, s0, s7, s6, s5, buf[20], SPH_C32(0x286085F0));
STEP3(s3, s2, s1, s0, s7, s6, s5, s4, buf[28], SPH_C32(0xCA417918));
STEP3(s2, s1, s0, s7, s6, s5, s4, s3, buf[17], SPH_C32(0xB8DB38EF));
STEP3(s1, s0, s7, s6, s5, s4, s3, s2, buf[8], SPH_C32(0x8E79DCB0));
STEP3(s0, s7, s6, s5, s4, s3, s2, s1, buf[22], SPH_C32(0x603A180E));
STEP3(s7, s6, s5, s4, s3, s2, s1, s0, buf[29], SPH_C32(0x6C9E0E8B));
STEP3(s6, s5, s4, s3, s2, s1, s0, s7, buf[14], SPH_C32(0xB01E8A3E));
STEP3(s5, s4, s3, s2, s1, s0, s7, s6, buf[25], SPH_C32(0xD71577C1));
STEP3(s4, s3, s2, s1, s0, s7, s6, s5, buf[12], SPH_C32(0xBD314B27));
STEP3(s3, s2, s1, s0, s7, s6, s5, s4, buf[24], SPH_C32(0x78AF2FDA));
STEP3(s2, s1, s0, s7, s6, s5, s4, s3, buf[30], SPH_C32(0x55605C60));
STEP3(s1, s0, s7, s6, s5, s4, s3, s2, buf[16], SPH_C32(0xE65525F3));
STEP3(s0, s7, s6, s5, s4, s3, s2, s1, buf[26], SPH_C32(0xAA55AB94));
STEP3(s7, s6, s5, s4, s3, s2, s1, s0, buf[31], SPH_C32(0x57489862));
STEP3(s6, s5, s4, s3, s2, s1, s0, s7, buf[15], SPH_C32(0x63E81440));
STEP3(s5, s4, s3, s2, s1, s0, s7, s6, buf[7], SPH_C32(0x55CA396A));
STEP3(s4, s3, s2, s1, s0, s7, s6, s5, buf[3], SPH_C32(0x2AAB10B6));
STEP3(s3, s2, s1, s0, s7, s6, s5, s4, buf[1], SPH_C32(0xB4CC5C34));
STEP3(s2, s1, s0, s7, s6, s5, s4, s3, buf[0], SPH_C32(0x1141E8CE));
STEP3(s1, s0, s7, s6, s5, s4, s3, s2, buf[18], SPH_C32(0xA15486AF));
STEP3(s0, s7, s6, s5, s4, s3, s2, s1, buf[27], SPH_C32(0x7C72E993));
STEP3(s7, s6, s5, s4, s3, s2, s1, s0, buf[13], SPH_C32(0xB3EE1411));
STEP3(s6, s5, s4, s3, s2, s1, s0, s7, buf[6], SPH_C32(0x636FBC2A));
STEP3(s5, s4, s3, s2, s1, s0, s7, s6, buf[21], SPH_C32(0x2BA9C55D));
STEP3(s4, s3, s2, s1, s0, s7, s6, s5, buf[10], SPH_C32(0x741831F6));
STEP3(s3, s2, s1, s0, s7, s6, s5, s4, buf[23], SPH_C32(0xCE5C3E16));
STEP3(s2, s1, s0, s7, s6, s5, s4, s3, buf[11], SPH_C32(0x9B87931E));
STEP3(s1, s0, s7, s6, s5, s4, s3, s2, buf[5], SPH_C32(0xAFD6BA33));
STEP3(s0, s7, s6, s5, s4, s3, s2, s1, buf[2], SPH_C32(0x6C24CF5C));
STEP4(s7, s6, s5, s4, s3, s2, s1, s0, buf[24], SPH_C32(0x7A325381));
STEP4(s6, s5, s4, s3, s2, s1, s0, s7, buf[4], SPH_C32(0x28958677));
STEP4(s5, s4, s3, s2, s1, s0, s7, s6, buf[0], SPH_C32(0x3B8F4898));
STEP4(s4, s3, s2, s1, s0, s7, s6, s5, buf[14], SPH_C32(0x6B4BB9AF));
STEP4(s3, s2, s1, s0, s7, s6, s5, s4, buf[2], SPH_C32(0xC4BFE81B));
STEP4(s2, s1, s0, s7, s6, s5, s4, s3, buf[7], SPH_C32(0x66282193));
STEP4(s1, s0, s7, s6, s5, s4, s3, s2, buf[28], SPH_C32(0x61D809CC));
STEP4(s0, s7, s6, s5, s4, s3, s2, s1, buf[23], SPH_C32(0xFB21A991));
STEP4(s7, s6, s5, s4, s3, s2, s1, s0, buf[26], SPH_C32(0x487CAC60));
STEP4(s6, s5, s4, s3, s2, s1, s0, s7, buf[6], SPH_C32(0x5DEC8032));
STEP4(s5, s4, s3, s2, s1, s0, s7, s6, buf[30], SPH_C32(0xEF845D5D));
STEP4(s4, s3, s2, s1, s0, s7, s6, s5, buf[20], SPH_C32(0xE98575B1));
STEP4(s3, s2, s1, s0, s7, s6, s5, s4, buf[18], SPH_C32(0xDC262302));
STEP4(s2, s1, s0, s7, s6, s5, s4, s3, buf[25], SPH_C32(0xEB651B88));
STEP4(s1, s0, s7, s6, s5, s4, s3, s2, buf[19], SPH_C32(0x23893E81));
STEP4(s0, s7, s6, s5, s4, s3, s2, s1, buf[3], SPH_C32(0xD396ACC5));
STEP4(s7, s6, s5, s4, s3, s2, s1, s0, buf[22], SPH_C32(0x0F6D6FF3));
STEP4(s6, s5, s4, s3, s2, s1, s0, s7, buf[11], SPH_C32(0x83F44239));
STEP4(s5, s4, s3, s2, s1, s0, s7, s6, buf[31], SPH_C32(0x2E0B4482));
STEP4(s4, s3, s2, s1, s0, s7, s6, s5, buf[21], SPH_C32(0xA4842004));
STEP4(s3, s2, s1, s0, s7, s6, s5, s4, buf[8], SPH_C32(0x69C8F04A));
STEP4(s2, s1, s0, s7, s6, s5, s4, s3, buf[27], SPH_C32(0x9E1F9B5E));
STEP4(s1, s0, s7, s6, s5, s4, s3, s2, buf[12], SPH_C32(0x21C66842));
STEP4(s0, s7, s6, s5, s4, s3, s2, s1, buf[9], SPH_C32(0xF6E96C9A));
STEP4(s7, s6, s5, s4, s3, s2, s1, s0, buf[1], SPH_C32(0x670C9C61));
STEP4(s6, s5, s4, s3, s2, s1, s0, s7, buf[29], SPH_C32(0xABD388F0));
STEP4(s5, s4, s3, s2, s1, s0, s7, s6, buf[5], SPH_C32(0x6A51A0D2));
STEP4(s4, s3, s2, s1, s0, s7, s6, s5, buf[15], SPH_C32(0xD8542F68));
STEP4(s3, s2, s1, s0, s7, s6, s5, s4, buf[17], SPH_C32(0x960FA728));
STEP4(s2, s1, s0, s7, s6, s5, s4, s3, buf[10], SPH_C32(0xAB5133A3));
STEP4(s1, s0, s7, s6, s5, s4, s3, s2, buf[16], SPH_C32(0x6EEF0B6C));
STEP4(s0, s7, s6, s5, s4, s3, s2, s1, buf[13], SPH_C32(0x137A3BE4));
STEP5(s7, s6, s5, s4, s3, s2, s1, s0, buf[27], SPH_C32(0xBA3BF050));
STEP5(s6, s5, s4, s3, s2, s1, s0, s7, buf[3], SPH_C32(0x7EFB2A98));
STEP5(s5, s4, s3, s2, s1, s0, s7, s6, buf[21], SPH_C32(0xA1F1651D));
STEP5(s4, s3, s2, s1, s0, s7, s6, s5, buf[26], SPH_C32(0x39AF0176));
STEP5(s3, s2, s1, s0, s7, s6, s5, s4, buf[17], SPH_C32(0x66CA593E));
STEP5(s2, s1, s0, s7, s6, s5, s4, s3, buf[11], SPH_C32(0x82430E88));
STEP5(s1, s0, s7, s6, s5, s4, s3, s2, buf[20], SPH_C32(0x8CEE8619));
STEP5(s0, s7, s6, s5, s4, s3, s2, s1, buf[29], SPH_C32(0x456F9FB4));
STEP5(s7, s6, s5, s4, s3, s2, s1, s0, buf[19], SPH_C32(0x7D84A5C3));
STEP5(s6, s5, s4, s3, s2, s1, s0, s7, buf[0], SPH_C32(0x3B8B5EBE));
STEP5(s5, s4, s3, s2, s1, s0, s7, s6, buf[12], SPH_C32(0xE06F75D8));
STEP5(s4, s3, s2, s1, s0, s7, s6, s5, buf[7], SPH_C32(0x85C12073));
STEP5(s3, s2, s1, s0, s7, s6, s5, s4, buf[13], SPH_C32(0x401A449F));
STEP5(s2, s1, s0, s7, s6, s5, s4, s3, buf[8], SPH_C32(0x56C16AA6));
STEP5(s1, s0, s7, s6, s5, s4, s3, s2, buf[31], SPH_C32(0x4ED3AA62));
STEP5(s0, s7, s6, s5, s4, s3, s2, s1, buf[10], SPH_C32(0x363F7706));
STEP5(s7, s6, s5, s4, s3, s2, s1, s0, buf[5], SPH_C32(0x1BFEDF72));
STEP5(s6, s5, s4, s3, s2, s1, s0, s7, buf[9], SPH_C32(0x429B023D));
STEP5(s5, s4, s3, s2, s1, s0, s7, s6, buf[14], SPH_C32(0x37D0D724));
STEP5(s4, s3, s2, s1, s0, s7, s6, s5, buf[30], SPH_C32(0xD00A1248));
STEP5(s3, s2, s1, s0, s7, s6, s5, s4, buf[18], SPH_C32(0xDB0FEAD3));
STEP5(s2, s1, s0, s7, s6, s5, s4, s3, buf[6], SPH_C32(0x49F1C09B));
STEP5(s1, s0, s7, s6, s5, s4, s3, s2, buf[28], SPH_C32(0x075372C9));
STEP5(s0, s7, s6, s5, s4, s3, s2, s1, buf[24], SPH_C32(0x80991B7B));
STEP5(s7, s6, s5, s4, s3, s2, s1, s0, buf[2], SPH_C32(0x25D479D8));
STEP5(s6, s5, s4, s3, s2, s1, s0, s7, buf[23], SPH_C32(0xF6E8DEF7));
STEP5(s5, s4, s3, s2, s1, s0, s7, s6, buf[16], SPH_C32(0xE3FE501A));
STEP5(s4, s3, s2, s1, s0, s7, s6, s5, buf[22], SPH_C32(0xB6794C3B));
STEP5(s3, s2, s1, s0, s7, s6, s5, s4, buf[ 4], SPH_C32(0x976CE0BD));
STEP5(s2, s1, s0, s7, s6, s5, s4, s3, buf[ 1], SPH_C32(0x04C006BA));
STEP5(s1, s0, s7, s6, s5, s4, s3, s2, buf[25], SPH_C32(0xC1A94FB6));
STEP5(s0, s7, s6, s5, s4, s3, s2, s1, buf[15], SPH_C32(0x409F60C4));
inpHash[0] = s0 + 0x243F6A88;
inpHash[1] = s1 + 0x85A308D3;
inpHash[2] = s2 + 0x13198A2E;
inpHash[3] = s3 + 0x03707344;
inpHash[4] = s4 + 0xA4093822;
inpHash[5] = s5 + 0x299F31D0;
inpHash[6] = s6 + 0x082EFA98;
inpHash[7] = s7 + 0xEC4E6C89;
/* if (s7 + 0xEC4E6C89 <= target)
{
uint32_t tmp = atomicExch(ret, startNounce + thread);
if (tmp != 0xffffffff)
ret[1] = tmp;
}
*/
} // threads
}
__host__
void x17_haval256_cpu_init(int thr_id, uint32_t threads)
{
// cudaMemcpyToSymbol(initVector,c_initVector,sizeof(c_initVector),0, cudaMemcpyHostToDevice);
}
__host__
void x17_haval256_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash)
{
	const uint32_t threadsperblock = 256; // alignment with the mixtab size. DO NOT CHANGE
	// compute how many thread blocks we need
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
x17_haval256_gpu_hash_64<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash);
}
|
the_stack
|
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <ops/declarable/helpers/transforms.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void clipByNormCuda(const void* vClipNorm, const void* vNorm, const sd::LongType* normShapeInfo,
void* vz, const sd::LongType* zShapeInfo, const int* dimensions, const int dimsLen,
const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
T* z = reinterpret_cast<T*>(vz);
__shared__ sd::LongType zLen, tadLen, totalThreads;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
int zCoords[SD_MAX_RANK], normCoords[SD_MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j) normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen
: norm[shape::getOffset(normShapeInfo, normCoords)];
if (actualNorm > clipNorm) z[shape::getOffset(zShapeInfo, zCoords)] *= clipNorm / actualNorm;
}
}
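// Worked example of the rule above: with useAverage == false, a TAD whose L2
// norm is 10.0 under a clipNorm of 5.0 has every element scaled by
// 5.0 / 10.0 = 0.5, so its norm becomes exactly clipNorm, while a TAD whose
// norm is already <= clipNorm is left untouched. With useAverage == true the
// same comparison is applied to norm / tadLen instead of the raw norm.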
//////////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST static void clipByNormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock,
const cudaStream_t* stream, const void* vClipNorm, const void* vNorm,
const sd::LongType* normShapeInfo, void* vz, const sd::LongType* zShapeInfo,
const int* dimensions, const int dimsLen, const bool useAverage) {
clipByNormCuda<T><<<blocksPerGrid, threadsPerBlock, 512, *stream>>>(vClipNorm, vNorm, normShapeInfo, vz, zShapeInfo,
dimensions, dimsLen, useAverage);
}
//////////////////////////////////////////////////////////////////////////
void clipByNorm(sd::LaunchContext* context, NDArray& input, NDArray& output, const std::vector<int>& dims,
const NDArray& clipNorm, const bool isInplace, const bool useAverage) {
NDArray* z = nullptr;
if (isInplace) {
z = &input;
} else {
output.assign(input);
z = &output;
}
if (dims.empty()) {
const NDArray actualNorm = useAverage ? z->reduceAlongDimension(reduce::Norm2, {}) / z->lengthOf()
: z->reduceAlongDimension(reduce::Norm2, {});
if (actualNorm.e<float>(0) > clipNorm.e<float>(0)) *z *= clipNorm / actualNorm;
} else {
const NDArray actualNorms = z->reduceAlongDimension(reduce::Norm2, dims);
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(z->rankOf(), dims);
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (z->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "clipByNorm");
const int* dimensions = reinterpret_cast<const int*>(
manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
NDArray::prepareSpecialUse({z}, {z, &actualNorms, &clipNorm});
BUILD_SINGLE_SELECTOR(z->dataType(), clipByNormCudaLauncher,
(blocksPerGrid, threadsPerBlock, context->getCudaStream(), clipNorm.specialBuffer(),
actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), z->specialBuffer(),
z->specialShapeInfo(), dimensions, (int)dimsToExclude.size(), useAverage),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({z}, {z, &actualNorms, &clipNorm});
manager.synchronize();
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL static void clipByNormBpCuda(const void* vClipNorm, const void* vx, const sd::LongType* xShapeInfo, // input
const void* vy, const sd::LongType* yShapeInfo, // gradO
const void* vNorm, const sd::LongType* normShapeInfo, const void* vSum,
const sd::LongType* sumShapeInfo, void* vz,
const sd::LongType* zShapeInfo, // gradI
const int* dimensions, const int dimsLen, const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
const T* sum = reinterpret_cast<const T*>(vSum);
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
__shared__ sd::LongType zLen, tadLen, totalThreads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo);
totalThreads = gridDim.x * blockDim.x;
sameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo, zShapeInfo);
}
__syncthreads();
int zCoords[SD_MAX_RANK], normCoords[SD_MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
const auto zOffset = shape::getOffset(zShapeInfo, zCoords);
const auto yOffset = sameOffsets ? zOffset : shape::getOffset(yShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j) normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen
: norm[shape::getOffset(normShapeInfo, normCoords)];
if (actualNorm > clipNorm) {
const T sumVal = sum[shape::getOffset(sumShapeInfo, normCoords)];
const auto xOffset = sameOffsets ? zOffset : shape::getOffset(xShapeInfo, zCoords);
z[zOffset] = (clipNorm / actualNorm) * y[yOffset] *
(static_cast<T>(1.f) - (x[xOffset] * sumVal) / (actualNorm * actualNorm));
} else
z[zOffset] = y[yOffset];
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
void clipByNormBp_(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI,
const std::vector<int>& dims, const NDArray& clipNorm, const bool useAverage) {
const int rank = input.rankOf();
auto actualNorms = input.reduceAlongDimension(reduce::Norm2, dims);
if (actualNorms.lengthOf() == 1) {
const T norm = useAverage ? actualNorms.e<T>(0) / static_cast<T>(input.lengthOf()) : actualNorms.e<T>(0);
auto clipVal = clipNorm.e<T>(0);
if (norm > clipVal) {
const T sum = input.reduceNumber(reduce::Sum).e<T>(0); // reduce to scalar
const T factor1 = clipVal / norm;
      const T factor2 = static_cast<T>(1.f) / (norm * norm);  // 1 / (norm * norm); combined with factor1 this gives clipVal / norm^3
auto lambda = LAMBDA_TT(x, y, sum, factor1, factor2) {
return factor1 * y * (static_cast<T>(1.f) - factor2 * x * sum);
};
const_cast<NDArray&>(input).applyPairwiseLambda(const_cast<NDArray&>(gradO), lambda, gradI);
} else
gradI.assign(gradO);
} else {
const NDArray actualNorms = input.reduceAlongDimension(reduce::Norm2, dims);
const NDArray sums = input.reduceAlongDimension(reduce::Sum, dims);
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(gradI.rankOf(), dims);
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "clipByNormBp");
const int* dimensions = reinterpret_cast<const int*>(
manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
NDArray::prepareSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
clipByNormBpCuda<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(
clipNorm.specialBuffer(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(),
gradO.specialShapeInfo(), actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), sums.specialBuffer(),
sums.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), dimensions, (int)dimsToExclude.size(),
useAverage);
NDArray::registerSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
manager.synchronize();
}
}
BUILD_SINGLE_TEMPLATE(template void clipByNormBp_,
(sd::LaunchContext * context, const NDArray& input, const NDArray& gradO, NDArray& gradI,
const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage),
SD_FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
void clipByNormBp(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI,
const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage) {
const NDArray& castedInput = gradI.dataType() == input.dataType() ? input : input.cast(gradI.dataType());
BUILD_SINGLE_SELECTOR(gradI.dataType(), clipByNormBp_,
(context, castedInput, gradO, gradI, dimensions, clipNorm, useAverage), SD_FLOAT_TYPES);
}
template <typename T>
void clipByGlobalNorm_(sd::LaunchContext* context, std::vector<NDArray*> const& inputs, double clipNorm,
sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
NDArray globalNorm =
NDArrayFactory::create<T>(0, inputs[0]->getContext()); // sqrt(sum([l2norm(t)**2 for t in t_list]))
for (auto i = 0; i < inputs.size(); i++) {
auto input = inputs[i];
auto l2norm = input->reduceNumber(reduce::Norm2);
globalNorm += l2norm * l2norm;
}
globalNorm.applyTransform(transform::Sqrt, globalNorm); // = sd::math::sd_sqrt(globalNorm);
outputs[inputs.size()]->p(0, globalNorm);
globalNorm.syncToHost();
const T factor = static_cast<T>(clipNorm) / globalNorm.e<T>(0);
for (size_t e = 0; e < inputs.size(); e++) {
// all-reduce
auto input = inputs[e];
auto output = outputs[e];
if (globalNorm.e<double>(0) <= clipNorm) {
output->assign(input);
} else {
auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
input->applyLambda(lambda, *output);
}
}
}
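// Worked example of the formula above: for two tensors with L2 norms 3 and 4,
// globalNorm = sqrt(3*3 + 4*4) = 5. With clipNorm = 2.5 the factor is
// 2.5 / 5 = 0.5 and every input is scaled by 0.5; with clipNorm >= 5 the
// inputs are simply copied through unchanged.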
void clipByGlobalNorm(sd::LaunchContext* context, std::vector<NDArray*> const& inputs, double clipNorm,
sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_,
(context, inputs, clipNorm, workspace, outputs, isInplace), SD_FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_,
(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm,
sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace),
SD_FLOAT_TYPES);
template <typename T>
static void SD_KERNEL clipByValueKernel(void* input, const sd::LongType* inputShape, void* output,
const sd::LongType* outputShape, double leftBound, double rightBound) {
__shared__ T* outputBuf;
__shared__ T* inputBuf;
__shared__ sd::LongType length;
__shared__ bool linearBuffers;
if (threadIdx.x == 0) {
outputBuf = reinterpret_cast<T*>(output);
inputBuf = reinterpret_cast<T*>(input);
length = shape::length(inputShape);
linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) &&
shape::elementWiseStride(inputShape) == 1;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (sd::LongType e = tid; e < length; e += step) {
if (linearBuffers) {
if (inputBuf[e] > rightBound)
outputBuf[e] = (T)rightBound;
else if (inputBuf[e] < leftBound)
outputBuf[e] = (T)leftBound;
else
outputBuf[e] = inputBuf[e];
} else {
auto inputOffset = shape::getIndexOffset(e, inputShape);
auto outputOffset = shape::getIndexOffset(e, outputShape);
if (inputBuf[inputOffset] > rightBound)
outputBuf[outputOffset] = (T)rightBound;
else if (inputBuf[inputOffset] < leftBound)
outputBuf[outputOffset] = (T)leftBound;
else
        outputBuf[outputOffset] = inputBuf[inputOffset];
}
}
}
template <typename T>
static void clipByValue_(sd::LaunchContext* context, NDArray& input, double leftBound, double rightBound,
NDArray& output) {
auto stream = context->getCudaStream();
if (!input.isActualOnDeviceSide()) input.syncToDevice();
NDArray::prepareSpecialUse({&output}, {&input});
clipByValueKernel<T><<<256, 512, 8192, *stream>>>(input.specialBuffer(), input.specialShapeInfo(),
output.specialBuffer(), output.specialShapeInfo(), leftBound,
rightBound);
NDArray::registerSpecialUse({&output}, {&input});
}
void clipByValue(sd::LaunchContext* context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output),
SD_FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_,
                      (sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output),
                      SD_FLOAT_TYPES);
} // namespace helpers
} // namespace ops
} // namespace sd
|
the_stack
|
#include <cub/cub.cuh>
#include "cuda-decoder-kernels.h"
#include "cuda-decoder-kernels-utils.h"
namespace kaldi {
namespace cuda_decoder {
// Initialize the hashmap with NO_VAL
// Called in InitDeviceData, when building the CudaDecoder object
__global__ void init_hashmap_kernel(DeviceParams cst_dev_params) {
const int max_nlanes = cst_dev_params.max_nlanes;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, max_nlanes) {
const int capacity = cst_dev_params.hashmap_capacity;
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(idx, capacity) {
cst_dev_params.d_hashmap_values.lane(ilane)[idx] =
KALDI_CUDA_DECODER_HASHMAP_NO_VAL;
}
}
}
// Initialize initial channel on device
// Called by ComputeInitialChannel
// It is NOT called in InitDecoding.
// In InitDecoding we clone the initial channel into the channel that
// InitDecoding was called on.
// Here we actually create that initial channel;
// we do that once, in the CudaDecoder constructor.
//
// The initial channel is the state of a channel when
// it will start decoding a new utterance
// THREADS : (1, 1, 1)
// BLOCKS : (1, 1, 1)
__global__ void initialize_initial_lane_kernel(DeviceParams cst_dev_params) {
const int init_ichannel = cst_dev_params.init_channel_id;
const int init_ilane = 0;
ChannelCounters *init_channel_counters =
cst_dev_params.d_channels_counters.channel(init_ichannel);
LaneCounters *lane_counters =
cst_dev_params.d_lanes_counters.lane(init_ilane);
  // Make the data look like an ExpandArcsEmitting kernel just executed,
  // and put the StartState in the aux_q. We will then pick up normal
  // execution from there
  // (calling PruneAndPreprocess, then ExpandArcsNonEmitting...)
lane_counters->aux_q_end = 0;
lane_counters->aux_q_requested = 0;
lane_counters->post_expand_aux_q_end = 1;
lane_counters->main_q_global_offset = 0;
lane_counters->main_q_local_offset = 0;
lane_counters->main_q_n_extra_prev_tokens = 0;
lane_counters->int_cutoff = INT_MAX;
lane_counters->main_q_n_emitting_tokens = 0; // all non emitting
lane_counters->int_beam = floatToOrderedInt(cst_dev_params.default_beam);
lane_counters->main_q_narcs_and_end = {0, 0};
lane_counters->main_q_requested = 0;
lane_counters->prev_arg_min_int_cost = 0;
const StateId init_state = cst_dev_params.init_state;
const CostType init_cost = cst_dev_params.init_cost;
IntegerCostType int_init_cost = floatToOrderedInt(init_cost);
cst_dev_params.d_aux_q_state_and_cost.lane(init_ilane)[0] = {init_state,
int_init_cost};
lane_counters->min_int_cost = int_init_cost;
CostType cutoff = orderedIntToFloat(int_init_cost);
lane_counters->int_cutoff =
floatToOrderedInt(cutoff + cst_dev_params.default_beam);
cst_dev_params.d_aux_q_info.lane(init_ilane)[0] = {INT_MIN, -1};
}
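// Note on floatToOrderedInt / orderedIntToFloat used above: costs are kept as
// integers whose ordering matches the float ordering, so cutoffs and minima
// can be maintained with plain integer atomics. A common way to implement such
// a mapping (shown purely as an illustration -- the decoder's actual helpers
// live in cuda-decoder-kernels-utils.h) is:
//
//   __device__ __forceinline__ int32 floatToOrderedIntSketch(float f) {
//     int32 i = __float_as_int(f);
//     return (i >= 0) ? i : i ^ 0x7FFFFFFF;  // flip magnitude bits of negatives
//   }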
// Called by InitDecoding
// Called when some channels will start decoding a new utterance
// does everything that needs to be done on the device to start decoding a new
// utterance with those channels
// It clones the initial channel (created in initialize_initial_lane_kernel)
// into the channels we want to InitDecoding on
__global__ void init_decoding_on_device_kernel(DeviceParams cst_dev_params,
KernelParams params) {
const int init_ichannel = cst_dev_params.init_channel_id;
const ChannelCounters *init_channel_counters =
cst_dev_params.d_channels_counters.channel(init_ichannel);
const int32 init_main_q_end =
init_channel_counters->prev_main_q_narcs_and_end.y;
const int32 nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(idx, init_main_q_end) {
const LaneCounters *lane_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[idx] =
cst_dev_params.d_main_q_state_and_cost.channel(init_ichannel)[idx];
cst_dev_params.d_main_q_degrees_prefix_sum.channel(ichannel)[idx] =
cst_dev_params.d_main_q_degrees_prefix_sum.channel(
init_ichannel)[idx];
cst_dev_params.d_main_q_arc_offsets.channel(ichannel)[idx] =
cst_dev_params.d_main_q_arc_offsets.channel(init_ichannel)[idx];
if (idx == 0) {
ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
channel_counters->prev_main_q_narcs_and_end =
init_channel_counters->prev_main_q_narcs_and_end;
channel_counters->prev_main_q_n_extra_prev_tokens =
init_channel_counters->prev_main_q_n_extra_prev_tokens;
channel_counters->prev_main_q_global_offset = 0;
channel_counters->prev_main_q_extra_prev_tokens_global_offset = 0;
channel_counters->prev_beam = cst_dev_params.default_beam;
}
}
}
}
// Context switch : load
// Called by LoadChannelsStateToLanes
// THREADS : (1, 1, 1)
// BLOCKS : (1, nlanes_used, 1)
__global__ void load_channels_state_in_lanes_kernel(DeviceParams cst_dev_params,
KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
const ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
int2 main_q_narcs_and_end = channel_counters->prev_main_q_narcs_and_end;
lane_counters->main_q_narcs_and_end = main_q_narcs_and_end;
lane_counters->main_q_n_extra_prev_tokens =
channel_counters->prev_main_q_n_extra_prev_tokens;
CostType beam = channel_counters->prev_beam;
IntegerCostType int_beam = floatToOrderedInt(beam);
lane_counters->int_beam = int_beam;
lane_counters->adaptive_int_beam_with_validity_index.x = int_beam;
lane_counters->adaptive_int_beam_with_validity_index.y =
cst_dev_params.adaptive_beam_static_segment;
lane_counters->main_q_global_offset =
channel_counters
->prev_main_q_global_offset; // we'll update it after emitting
lane_counters->main_q_extra_prev_tokens_global_offset =
channel_counters->prev_main_q_extra_prev_tokens_global_offset;
}
}
// Context switch : store
// Called by SaveChannelsStateFromLanes
// THREADS : (1, 1, 1)
// BLOCKS : (1, nchannel_to_compute, 1)
__global__ void save_channels_state_from_lanes_kernel(
DeviceParams cst_dev_params, KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
const LaneCounters *lane_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
channel_counters->prev_main_q_global_offset =
lane_counters->main_q_global_offset;
channel_counters->prev_main_q_extra_prev_tokens_global_offset =
lane_counters->main_q_extra_prev_tokens_global_offset;
channel_counters->prev_main_q_narcs_and_end =
lane_counters->main_q_narcs_and_end;
channel_counters->prev_main_q_n_extra_prev_tokens =
lane_counters->main_q_n_extra_prev_tokens;
channel_counters->prev_beam = orderedIntToFloat(lane_counters->int_beam);
}
}
// compute_lane_offsets_kernel
// the kernel concatenate_lanes_data concatenates multiple arrays into a single
// contiguous array
// compute_lane_offsets_kernel computes the offset of each array into this
// contiguous array
// This kernel is 1D : the lanes are on the X dimension, because we want to
// compute the offset of those lanes
__global__ void compute_lane_offsets_kernel(DeviceParams cst_dev_params,
KernelParams params) {
typedef cub::BlockScan<int4, KALDI_CUDA_DECODER_1D_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
const int nlanes = params.nlanes_used;
int4 sum_so_far = {0, 0, 0, 0};
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(
block_offset, thread_idx,
nlanes + 1) { // +1 because we are doing an exclusive sum, and we want
// all the values
int32 ilane = block_offset + thread_idx;
int4 zero4 = {0, 0, 0, 0};
int4 lane_offsets = zero4;
if (ilane < nlanes) { // nlanes, not nlanes+1, because we cannot read +1
// values (undefined)
LaneCounters *d_lane_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
int32 main_q_end = d_lane_counters->main_q_narcs_and_end.y;
int32 n_emitting_tokens = d_lane_counters->main_q_n_emitting_tokens;
int32 main_q_n_extra_prev_tokens =
d_lane_counters->main_q_n_extra_prev_tokens;
lane_offsets = {main_q_end, n_emitting_tokens, main_q_n_extra_prev_tokens,
0};
}
int4 block_aggregate;
BlockScan(temp_storage)
.ExclusiveScan(lane_offsets, lane_offsets, zero4, PlusPlusPlusPlus(),
block_aggregate);
PlusPlusPlusPlus pppp;
lane_offsets = pppp(lane_offsets, sum_so_far);
sum_so_far = pppp(sum_so_far, block_aggregate);
if (ilane < (nlanes + 1)) { // nlanes+1, to write the output
LaneCounters *d_lane_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
LaneCounters *h_lane_counters =
cst_dev_params.h_lanes_counters.lane(ilane);
h_lane_counters->main_q_end_lane_offset =
d_lane_counters->main_q_end_lane_offset = lane_offsets.x;
h_lane_counters->main_q_n_emitting_tokens_lane_offset =
d_lane_counters->main_q_n_emitting_tokens_lane_offset =
lane_offsets.y;
h_lane_counters->main_q_n_extra_prev_tokens_lane_offset =
d_lane_counters->main_q_n_extra_prev_tokens_lane_offset =
lane_offsets.z;
}
__syncthreads(); // reusing temp_storage
}
}
// concatenate_lanes_data
// Called by PerformConcatenatedCopy
// Creates a concatenated array in concat,
// by concatenating all the arrays src.lane(ilane)
// for ilane=0..params.nlanes_used
// Used to prepare data for copy to Host. We want to avoid small Device2Host
// copies.
template <typename T>
__global__ void concatenate_lanes_data_kernel(DeviceParams cst_dev_params,
KernelParams params,
LaneMatrixView<T> src, T *concat,
int32 *lane_offsets) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
const int32 stride =
sizeof(LaneCounters) / sizeof(int32); // offsets are in LaneCounters
int32 beg = *(lane_offsets + ilane * stride);
int32 end = *(lane_offsets + (ilane + 1) * stride);
int32 vec_size = end - beg;
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(idx, vec_size) {
T d = src.lane(ilane)[idx];
concat[beg + idx] = d;
}
}
}
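// Illustrative host-side sketch of the "offsets + concatenate" pattern
// implemented on the device by compute_lane_offsets_kernel and
// concatenate_lanes_data_kernel. This is an assumption made for clarity (it is
// not used by the decoder, and all names are hypothetical): an exclusive
// prefix sum of the per-lane sizes gives each lane its offset in the
// concatenated buffer, then each lane copies its elements at that offset.
// 'sizes' plays the role of main_q_end per lane; 'offsets' must have room for
// nlanes+1 entries (the last entry is the total size of 'concat').
static inline void ExampleConcatenateLanesHost(const int *sizes,
                                               const float *const *lanes,
                                               int nlanes, int *offsets,
                                               float *concat) {
  offsets[0] = 0;
  for (int ilane = 0; ilane < nlanes; ++ilane)
    offsets[ilane + 1] = offsets[ilane] + sizes[ilane];  // exclusive prefix sum
  for (int ilane = 0; ilane < nlanes; ++ilane) {
    const int beg = offsets[ilane];
    for (int idx = 0; idx < sizes[ilane]; ++idx)
      concat[beg + idx] = lanes[ilane][idx];  // lane data is contiguous in concat
  }
}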
// nonemitting_preprocess_and_contract_kernel
// Called from PruneAndPreprocess
// This kernel prunes the aux_q, moves the surviving tokens to the main_q,
// and adds the preprocessing information necessary for the next ExpandArcs
// (the expand that follows PruneAndPreprocess is always non-emitting)
// It prunes the tokens using the cutoff, and prepares the data necessary for
// ExpandArcs:
// d_main_q_degrees_prefix_sum, d_main_q_arc_offsets
// The prefix sum is done in one pass here, using a trick (we compute the
// prefix sum as we fill the main_q)
__global__ void nonemitting_preprocess_and_contract_kernel(
DeviceParams cst_dev_params, KernelParams params) {
typedef cub::BlockScan<int2, KALDI_CUDA_DECODER_1D_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage sh_temp_storage;
  // We need to move the surviving tokens to the main_q
  //
  // sh_main_q_global_block_offset has two purposes :
  // (1) to know where to store the surviving tokens in the main_q
  // (2) to perform the prefix sum of the degrees (of the surviving tokens)
__shared__ int2 sh_main_q_global_block_offset;
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 aux_q_end = lane_counters->post_expand_aux_q_end;
const IntegerCostType int_cutoff = lane_counters->int_cutoff;
// Keeping whole CTA alive. We'll use __syncthreads()
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(block_offset, thread_idx,
aux_q_end) {
const int32 aux_q_idx = block_offset + thread_idx;
const int32 ichannel = lane_counters->channel_to_compute;
int32 degree = 0;
int32 arc_start = -1;
StateId token_state;
IntegerCostType token_int_cost;
      // We've kept the whole CTA alive. Now we keep only those with a valid
      // token
if (aux_q_idx < aux_q_end) {
const int2 both =
cst_dev_params.d_aux_q_state_and_cost.lane(ilane)[aux_q_idx];
token_state = both.x;
token_int_cost = both.y;
if (token_int_cost < int_cutoff) {
// We'll keep that token. Loading its arc degree/csr offset now.
arc_start = cst_dev_params.d_arc_ne_offsets[token_state];
const int32 arc_end =
cst_dev_params.d_arc_ne_offsets[token_state + 1];
degree = arc_end - arc_start;
}
}
// If we've set a different arc_start,
// this thread has a valid unpruned token
int32 is_pruned = (arc_start == -1);
// We now know which tokens will be moved to the main_q, the remaining
// will be pruned
// we now compute a prefix sum inside the CUDA block to determine the
// local indexes of the unpruned tokens
      // the first unpruned token will have an index of 0, the second 1, ...
// We also need to compute the prefix sum of the arc degrees
// we start by doing a local prefix sum inside the CUDA block
int2 block_prefix_sum_narcs_and_end = {degree, (is_pruned ? 0 : 1)};
const int2 zero2 = {0, 0};
// Computing the prefix sum (exclusive)
BlockScan(sh_temp_storage)
.ExclusiveScan(block_prefix_sum_narcs_and_end,
block_prefix_sum_narcs_and_end, zero2, PlusPlus());
if (KALDI_CUDA_DECODER_IS_LAST_1D_THREAD()) {
// This conditional branch is entered by the last thread
// Because it is the last, the prefix_sum of that thread contains the
// sum of all elements
// We also add the value from this thread - the prefix sum is exclusive
// For the sum, we want it inclusive
int2 block_sum = block_prefix_sum_narcs_and_end;
block_sum.x += degree;
block_sum.y += is_pruned ? 0 : 1;
// Doing two things at the same time :
        // requesting a spot in the main_q to store the surviving tokens from
        // this CTA
// We also increment the narcs value. atomic64.x will contain the number
// of
// arcs in the main_q up until the atomic64.y index
// That's all we need to finish our prefix sum. We add this global
// offset.
// First atomic to check if we are not overflowing main_q.
int block_offset =
atomicAdd(&lane_counters->main_q_requested, block_sum.y);
// Verify that we do not overflow
if (block_offset + block_sum.y < cst_dev_params.main_q_capacity) {
          // we don't overflow; we can safely grab a spot in the main_q
sh_main_q_global_block_offset =
atomicAddI2(&lane_counters->main_q_narcs_and_end, block_sum);
} else {
// our update would overflow
lane_counters->q_overflow |= OVERFLOW_MAIN_Q; // for the host
sh_main_q_global_block_offset.y =
cst_dev_params.main_q_capacity; // used as flag to broadcast the
// information in the CTA
}
}
// Syncing because :
// - Broadcasting sh_main_q_global_block_offset
// - We may reuse sh_temp_storage (cf CUB doc)
__syncthreads();
// Checking if we are overflowing the main_q
// All threads are executing the next line
if (sh_main_q_global_block_offset.y == cst_dev_params.main_q_capacity)
goto end_lane; // done for this lane
// If we are executing the following lines it means that we are not
// overflowing the queue
// We then continue what we were doing
if (!is_pruned) {
bool moving_emitting_tokens = (lane_counters->main_q_local_offset == 0);
// we will move our unpruned token to the main_q, at index main_q_idx
InfoToken tok_info = cst_dev_params.d_aux_q_info.lane(ilane)[aux_q_idx];
const int32 main_q_idx =
sh_main_q_global_block_offset.y + block_prefix_sum_narcs_and_end.y;
CostType acoustic_cost = 0.0f;
if (moving_emitting_tokens && tok_info.arc_idx != -1) {
const int32 arc_ilabel =
cst_dev_params.d_arc_pdf_ilabels[tok_info.arc_idx];
acoustic_cost = -lane_counters->loglikelihoods[arc_ilabel];
}
cst_dev_params.d_main_q_info.lane(ilane)[main_q_idx] = tok_info;
// Moving the token to the main q
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[main_q_idx] = {
token_state, token_int_cost};
cst_dev_params.d_main_q_acoustic_cost.lane(ilane)[main_q_idx] =
acoustic_cost;
// Saving the global prefix sum
const int32 prefix_sum_narcs =
sh_main_q_global_block_offset.x + block_prefix_sum_narcs_and_end.x;
cst_dev_params.d_main_q_degrees_prefix_sum.channel(
ichannel)[main_q_idx] = prefix_sum_narcs;
// Saving the CSR arc offset for that token's state
// it will be used by the expand kernel, and avoid doing a new random
// memory access in the expand kernel
cst_dev_params.d_main_q_arc_offsets.channel(ichannel)[main_q_idx] =
arc_start;
}
}
end_lane:; // empty statement
}
}
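// Illustrative host-side sketch of the one-pass "prune + contract + prefix
// sum" idea used by nonemitting_preprocess_and_contract_kernel above. This is
// an assumption made for clarity (sequential, single lane, hypothetical names),
// whereas the device kernel does the same thing in parallel with a CUB block
// scan and atomics on the lane counters. Costs are already in the ordered-int
// domain, so a plain < comparison against int_cutoff is valid.
static inline int ExamplePruneAndContractHost(const int *aux_q_costs,
                                              const int *aux_q_degrees,
                                              int aux_q_end, int int_cutoff,
                                              int *main_q_costs,
                                              int *main_q_degrees_prefix_sum) {
  int main_q_end = 0;    // also the index where the next survivor is stored
  int narcs_so_far = 0;  // running exclusive prefix sum of the degrees
  for (int aux_q_idx = 0; aux_q_idx < aux_q_end; ++aux_q_idx) {
    if (aux_q_costs[aux_q_idx] >= int_cutoff) continue;  // pruned
    main_q_costs[main_q_end] = aux_q_costs[aux_q_idx];
    main_q_degrees_prefix_sum[main_q_end] = narcs_so_far;
    narcs_so_far += aux_q_degrees[aux_q_idx];
    ++main_q_end;
  }
  return main_q_end;  // number of survivors moved to the main_q
}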
// UpdateAdaptiveBeam is used in ExpandArcs
// When we generate new tokens by traversing arcs,
// we can end up creating a lot of tokens, for instance if the loglikelihoods
// generated for the current frame are too uniform (we don't have
// any good tokens that would reduce the cutoff, so we end up generating
// a lot of tokens)
// To avoid overflowing the aux_q, we apply a decreasing beam.
// With aux_q_end being the current aux_q size, we have a decreasing function f, with
// adaptive_beam = f(aux_q_end)
// f is a decreasing piecewise constant function
// Please note that when processing tokens, we usually have tens of thousands of threads
// generating tokens. Those are already in flight, and will not reload the beam immediately.
// It means that we need to start reducing the beam as soon as we detect that we are
// generating more tokens than expected.
// We can configure the function f using KALDI_CUDA_DECODER_ADAPTIVE_BEAM_STATIC_SEGMENT
// and KALDI_CUDA_DECODER_ADAPTIVE_BEAM_NSTEPS.
// We will use default_beam for the first max_tokens_per_frame/KALDI_CUDA_DECODER_ADAPTIVE_BEAM_STATIC_SEGMENT
// tokens in the aux_q.
// Once we reach that number, we will decrease the adaptive beam linearly from default_beam to 0,
// using KALDI_CUDA_DECODER_ADAPTIVE_BEAM_NSTEPS steps
//
// x-axis : aux_q_end. How many tokens are already in the aux_q
// y-axis : adaptive_beam = f(aux_q_end)
// default_beam _| ________________
// | /\ _________
// | | _________
// 0 _| static_segment _________
// |________________________________________________
// | |
// aux_q_end= 0 max_tokens_per_frame
// We have :
// static_segment = max_tokens_per_frame/KALDI_CUDA_DECODER_ADAPTIVE_BEAM_STATIC_SEGMENT
// and KALDI_CUDA_DECODER_ADAPTIVE_BEAM_NSTEPS = 3
__device__ void UpdateAdaptiveBeam(const DeviceParams &cst_dev_params,
const int aux_q_index_block_offset,
IntegerCostType min_int_cost,
int2 *adaptive_int_beam_with_validity_index,
LaneCounters *lane_counters) {
int32 beam_valid_until_idx = adaptive_int_beam_with_validity_index->y;
if (aux_q_index_block_offset < beam_valid_until_idx) return; // nothing to do
CostType beam = orderedIntToFloat(adaptive_int_beam_with_validity_index->x);
while (aux_q_index_block_offset >= beam_valid_until_idx) {
beam /= 2;
beam_valid_until_idx += cst_dev_params.adaptive_beam_bin_width;
}
IntegerCostType new_int_cutoff = (min_int_cost < INT_MAX)
? floatToOrderedInt(orderedIntToFloat(min_int_cost) + beam)
: INT_MAX;
IntegerCostType int_beam = floatToOrderedInt(beam);
adaptive_int_beam_with_validity_index->x = int_beam;
adaptive_int_beam_with_validity_index->y = beam_valid_until_idx;
// We can have races between the two atomics
  // However the worst that can happen is a CTA delaying its update of the beam
// This is not a critical bug. However, once we have a floatToOrderedInt
// that is generating unsigned ints, we could merge the two atomics into a
// single atomic64
atomicMin(&lane_counters->adaptive_int_beam_with_validity_index.x, int_beam);
atomicMax(&lane_counters->adaptive_int_beam_with_validity_index.y,
beam_valid_until_idx);
atomicMin(&lane_counters->int_cutoff, new_int_cutoff);
}
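// Illustrative host-side sketch of the decreasing piecewise-constant function
// f(aux_q_end) described above. This is an assumption made for clarity (all
// parameter names are hypothetical, and the decoder does not call it): it
// mirrors the loop in UpdateAdaptiveBeam, which halves the beam once per bin
// of width bin_width crossed after the static segment. bin_width is assumed
// to be > 0.
static inline float ExampleAdaptiveBeamHost(int aux_q_end, int static_segment,
                                            int bin_width, float default_beam) {
  float beam = default_beam;
  // Static segment: the beam is left untouched for the first tokens
  for (int valid_until = static_segment; aux_q_end >= valid_until;
       valid_until += bin_width)
    beam /= 2;  // one halving per bin crossed after the static segment
  return beam;
}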
// One CTA / lane
__global__ void reset_for_frame_and_estimate_cutoff_kernel(
DeviceParams cst_dev_params, KernelParams params) {
typedef cub::BlockReduce<CostType, KALDI_CUDA_DECODER_1D_BLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
if (threadIdx.x == 0) {
const CostType current_beam = orderedIntToFloat(lane_counters->int_beam);
// Do some initialization
lane_counters->q_overflow = OVERFLOW_NONE;
lane_counters->main_q_n_emitting_tokens = INT_MAX;
lane_counters->int_cutoff = INT_MAX;
lane_counters->min_int_cost = INT_MAX;
lane_counters->q_overflow = OVERFLOW_NONE;
lane_counters->aux_q_requested = 0;
lane_counters->main_q_requested = 0;
lane_counters->main_q_local_offset = 0;
lane_counters->compute_max_active =
false; // will be set to true if necessary
channel_counters->min_int_cost_and_arg_with_final.x =
INT_MAX; // it will be set with atomicMins
const CostType new_beam =
fmin(cst_dev_params.default_beam,
current_beam * KALDI_CUDA_DECODER_ADAPTIVE_BEAM_RECOVER_RATE);
lane_counters->int_beam = floatToOrderedInt(new_beam);
}
const int32 prev_arg_min = lane_counters->prev_arg_min_int_cost;
int2 both =
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[prev_arg_min];
int32 int_cost = both.y;
CostType previous_cost = orderedIntToFloat(int_cost);
const int32 prev_arg_min_state = both.x;
int32 arc_start = cst_dev_params.d_arc_e_offsets[prev_arg_min_state];
int32 arc_end = cst_dev_params.d_arc_e_offsets[prev_arg_min_state + 1];
int32 narcs = arc_end - arc_start;
// no loop - we only process the first KALDI_CUDA_DECODER_1D_BLOCK arcs
// we just want an estimate
CostType total_cost = FLT_MAX;
if (threadIdx.x < narcs) {
int32 iarc = arc_start + threadIdx.x;
CostType arc_fixed_cost = cst_dev_params.d_arc_weights[iarc];
const int32 arc_ilabel = cst_dev_params.d_arc_pdf_ilabels[iarc];
CostType acoustic_cost = -lane_counters->loglikelihoods[arc_ilabel];
total_cost = previous_cost + arc_fixed_cost +
acoustic_cost; // +0.0f, best prev cost is normalized to 0
}
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(bin_id, KALDI_CUDA_DECODER_HISTO_NBINS) {
cst_dev_params.d_histograms.lane(ilane)[bin_id] = 0; // reset for this frame
}
CostType min = BlockReduce(temp_storage).Reduce(total_cost, cub::Min());
if (narcs > 0 && threadIdx.x == 0) {
// narcs > 0 to have at least one valid element in the reduce
CostType new_cutoff = min + orderedIntToFloat(lane_counters->int_beam);
IntegerCostType new_int_cutoff = floatToOrderedInt(new_cutoff);
lane_counters->int_cutoff = new_int_cutoff;
lane_counters->min_int_cost = floatToOrderedInt(min);
}
}
}
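// Illustrative host-side sketch of the cutoff estimate computed by
// reset_for_frame_and_estimate_cutoff_kernel above. This is an assumption made
// for clarity (sequential, hypothetical names): it assumes acoustic_costs[i]
// already holds -loglikelihood for the ilabel of arc i. The kernel does the
// same min-reduction with cub::BlockReduce over at most one CUDA block of arcs.
static inline float ExampleEstimateCutoffHost(float prev_best_cost,
                                              const float *arc_weights,
                                              const float *acoustic_costs,
                                              int narcs, float beam) {
  if (narcs <= 0) return FLT_MAX;  // no estimate available
  float min_cost = prev_best_cost + arc_weights[0] + acoustic_costs[0];
  for (int i = 1; i < narcs; ++i) {
    float c = prev_best_cost + arc_weights[i] + acoustic_costs[i];
    if (c < min_cost) min_cost = c;
  }
  return min_cost + beam;  // estimated cutoff for the new frame
}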
// ExpandArc kernel
// This kernel does the actual work of traversing arcs
//
// Pseudo code :
// for all token tok in main_q[main_q_offset...end]:
// u = tok.next_state
// for all arc a(u->v) in the FST:
//        v_cost = tok.cost + a.cost + acoustic_cost
//
// if v_cost < cutoff and v_cost < best_state_cost[v]
// generate token associated to v, add to aux_q
// if necessary update cutoff
// if aux_q is getting full, reduce beam
//
// For more information please refer to http://kaldi-asr.org/doc/decoders.html
//
// ExpandArc relies on some preprocessed data to be able to function
// for instance, it needs the prefix sum of the arc degrees of all token.states in
// the main_q
// We need to call a Preprocess kernel before ExpandArc
//
// ExpandArc is used for both emitting and nonemitting phases
// Differences between emitting and nonemitting :
// 1) params.d_q_arc_offset contains offsets to either emitting or
// nonemitting arcs.
// It is transparent for this kernel. The differentiation was done in
// the Preprocess kernel,
// which is responsible for filling the params.d_q_arc_offset array
// 2) Computation of the acoustic cost. If nonemitting, it is equal to 0.
// If emitting, we need
// to use values from the acoustic model (through the d_loglikelihoods
// array)
//
// Note : ExpandArc is not the only kernel able to traverse arcs.
// FinalizeProcessNonemitting contains a simplified version of expand for only
// one CUDA block
template <bool IS_EMITTING>
__global__ void expand_arcs_kernel(DeviceParams cst_dev_params,
KernelParams params) {
// BlockScan that we will use to compute token indexes in the output queue,
// and to find the min cost in the block
typedef cub::BlockScan<int2, KALDI_CUDA_DECODER_1D_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage sh_temp_storage_scan;
// This kernel writes the new token to the output queue aux_q
// We will request a spot to store all the new tokens created by threads in
// this CUDA block
// sh_aux_q_index_block_offset indicates where to store them in the aux_q
  // tokens created in this CUDA block will be stored in :
// aux_q[sh_aux_q_index_block_offset], aux_q[sh_aux_q_index_block_offset + 1],
__shared__ int32 sh_aux_q_index_block_offset;
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 main_q_offset = lane_counters->main_q_local_offset;
const int32 main_q_end = lane_counters->main_q_narcs_and_end.y;
const int32 total_narcs = lane_counters->main_q_narcs_and_end.x;
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(block_offset, thread_idx,
total_narcs) {
int2 adaptive_int_beam_with_validity_index =
lane_counters->adaptive_int_beam_with_validity_index;
const int32 ichannel = lane_counters->channel_to_compute;
      // Important : this thread is not responsible for a token in the input
      // queue main_q,
      // but for an arc going out of a token in the main_q.
      // The main_q contains total_narcs arcs in total,
// and this thread will compute the main_q_arc_index-th arc of the main_q
// For instance, first thread in the grid with threadIdx.x == 0 and
// blockIdx.x == 0
// will process the first arc of the token in main_q[main_q_offset + 0]
// (if that token has at least one arc)
//
      // This ensures a perfect one thread = one arc load balancing,
// but we have work to do to know exactly which arc is the
// main_q_arc_index-th arc
      // (what's its source ? its destination ? its arc_idx in the FST CSR ?)
int32 main_q_arc_index = block_offset + thread_idx;
// We'll need those variables later in the kernel
// we declare them outside of the "valid_input" scope
// to be able to access them later
int32 main_q_idx;
int32 arc_idx;
StateId arc_next_state;
IntegerCostType int_total_cost = INT_MAX;
if (main_q_arc_index < total_narcs) {
// Current thread must take care of main_q_arc_index-th arc
        // we need to know what the source of that arc is,
        // ie which token.state in main_q does it start from ?
        // We use a binary search in the prefix sum of the tokens' degrees to get
        // that information
//
// Example : main_q contains 3 tokens
// - First token is associated to a state which has 3 outgoing arc
// - Second token is associated to a state which has 0 outgoing arc
// - Third token is associated to a state which has 2 outgoing arc
//
// We store the degrees in an array :
// [3, 0, 2]
//
// We then compute the exclusive prefix sum of that array :
// [0, 3, 3, 5]
//
// In total, we have 5 arcs in the main_q. ExpandArc will use 5 threads.
//
// Let's say we are the fifth thread in ExpandArc.
// we have threadIdx.x == 4, and blockIdx.x == 0
// it gives us main_q_arc_index == 4
// From there we have no idea what we're supposed to do next, we need to
// have information about the
// arc that we're supposed to traverse
//
// To do that, we look for the maximum index maxle_i in the prefix sum
        // array such that prefix_sum[maxle_i] <= 4
//
// [0, 3, 3, 5]
// |
// here
// maxle_i = 2
// it means that our source token is at index 2 in the main_q
// and we are computing the arc at index (main_q_arc_index -
// prefix_sum[maxle_i]) of that token
        // ie the arc at index (4-3) = 1, the second arc of the token at index 2
        // in the main_q
// Searching for the source of the arc that we will process
// (main_q_arc_index)
// we could preprocess the search in the preprocess kernels - for now
// this kernel is fast enough
const int32 *degrees_prefix_sum =
cst_dev_params.d_main_q_degrees_prefix_sum.channel(ichannel);
main_q_idx = binsearch_maxle(degrees_prefix_sum, main_q_arc_index,
main_q_offset, main_q_end - 1);
// state_first_arc_idx_in_main_q
// d_main_q_degrees_prefix_sum contains the prefix sum of the
// degrees of all tokens in the main_q
        // d_main_q_degrees_prefix_sum[main_q_idx] contains the number of arcs
        // in the main_q before that token
const int32 state_first_arc_idx_in_main_q =
degrees_prefix_sum[main_q_idx];
// arc_offset_start is the offset in the CSR, to find the arcs
// related to the state main_q_state_[main_q_idx]
// it was set by the preprocess kernel
const int32 arc_offset_start =
cst_dev_params.d_main_q_arc_offsets.channel(ichannel)[main_q_idx];
// local_arc_index is the arc index for that state
// if local_arc_index == 2, we will process the second arc
// of state main_q_state_[main_q_idx]
const int32 local_arc_index =
main_q_arc_index - state_first_arc_idx_in_main_q;
// corresponding arc_idx in the FST
arc_idx = arc_offset_start + local_arc_index;
// Destination of that arc
arc_next_state = cst_dev_params.d_arc_nextstates[arc_idx];
// Building the total cost incrementally
// we'll add the acoustic cost and the old token's cost
const CostType arc_fixed_cost = cst_dev_params.d_arc_weights[arc_idx];
const CostType prev_token_cost = orderedIntToFloat(
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[main_q_idx]
.y);
CostType total_cost = prev_token_cost + arc_fixed_cost;
const int32 prev_state =
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[main_q_idx]
.x;
if (IS_EMITTING) {
const int32 arc_ilabel = cst_dev_params.d_arc_pdf_ilabels[arc_idx];
CostType acoustic_cost = -lane_counters->loglikelihoods[arc_ilabel];
total_cost += acoustic_cost;
}
int_total_cost = floatToOrderedInt(total_cost);
// If the total_cost is too large compared to our cutoff (beam search)
// then let's drop it
const IntegerCostType int_cutoff = lane_counters->int_cutoff;
if (int_total_cost >= int_cutoff) int_total_cost = INT_MAX;
}
// If int_total_cost < INT_MAX, it means that :
// - this thread had a valid input (main_q_arc_index < total_narcs)
// - the total_cost of the generated token is < cutoff
// We will then add that new token in the output queue, aux_q
// We need to know where to put that token in the aux_q
// we'll first compute its index inside the CUDA block
// the first valid output token in the CUDA block will have index 0,
// the second index 1... We compute that using a prefix sum
//
// We also need to find the overall min cost in the CUDA block
// a prefix sum is a scan operation, and a min a reduce operation
// we can perform a reduce operation using a scan (using the last value)
      // we compute the prefix sum and the min in one scan, using an int2
      // and the MinPlus operator
const int32 has_successor = (int_total_cost < INT_MAX) ? 1 : 0;
int2 int_cost_and_index = {int_total_cost, has_successor};
BlockScan(sh_temp_storage_scan)
.InclusiveScan(int_cost_and_index, int_cost_and_index, MinPlus());
if (KALDI_CUDA_DECODER_IS_LAST_1D_THREAD()) {
// We are in a divergent branch
// This is the last thread. The last value of the inclusive scan is the
// total
const int32 total_successors_in_block = int_cost_and_index.y;
// Requesting a spot of size total_successors_in_block in the aux_q
// note: using 2 atomics here to avoid adding another kernel
// first request more space
const int aux_q_index_block_offset = atomicAdd(
&lane_counters->aux_q_requested, total_successors_in_block);
// check for overflow in aux_q
// We try to prevent an overflow from happening using an adaptive beam
        // (cf UpdateAdaptiveBeam)
if (aux_q_index_block_offset + total_successors_in_block <
cst_dev_params.aux_q_capacity) {
// no overflow
// grab the aux_q offset
sh_aux_q_index_block_offset =
atomicAdd(&lane_counters->aux_q_end, total_successors_in_block);
// We are not overflowing the queue, updating the global values
IntegerCostType global_min_int_cost = lane_counters->min_int_cost;
IntegerCostType local_min_int_cost = int_cost_and_index.x;
// if we found a lower min_cost, update the global value
if (local_min_int_cost < global_min_int_cost) {
global_min_int_cost = local_min_int_cost;
atomicMin(&lane_counters->min_int_cost, global_min_int_cost);
CostType beam =
orderedIntToFloat(adaptive_int_beam_with_validity_index.x);
IntegerCostType new_int_cutoff = floatToOrderedInt(
orderedIntToFloat(local_min_int_cost) + beam);
atomicMin(&lane_counters->int_cutoff, new_int_cutoff);
}
int32 beam_valid_until_idx =
adaptive_int_beam_with_validity_index.y;
if (aux_q_index_block_offset >= beam_valid_until_idx) {
// This beam is no longer valid. Updating it
UpdateAdaptiveBeam(
cst_dev_params, aux_q_index_block_offset, global_min_int_cost,
&adaptive_int_beam_with_validity_index, lane_counters);
}
} else {
// sh_aux_q_index_block_offset is in shared memory
// its value is currently invalid (overflow)
// we set it to a special value and use it as a flag to broadcast
// the fact that we have an overflow and that all threads should exit
sh_aux_q_index_block_offset = cst_dev_params.aux_q_capacity;
// Setting the flag for the host. It will be used to print a warning
// to stderr
lane_counters->q_overflow |= OVERFLOW_AUX_Q;
// We do not jump to end_lane now, because only
// the first thread (threadIdx.x == 0) is executing this
// We wait until the end of the divergent branch
}
}
// Sync'ing for two reasons :
// - Broadcasting sh_aux_q_index_block_offset
// - reusing sh_temp_storage (cf CUB's doc)
__syncthreads();
// The only case where we can have that condition met,
      // is if we detected an overflow in the previous lines
if (sh_aux_q_index_block_offset == cst_dev_params.aux_q_capacity)
goto end_lane; // done for this lane
//
// If we're executing the following lines it means everything
// is valid and we are not overflowing the aux_q
//
int_cost_and_index.y -= has_successor; // we want the exclusive sum now
const int32 aux_q_block_index = int_cost_and_index.y;
const int32 aux_q_index = sh_aux_q_index_block_offset + aux_q_block_index;
if (has_successor) {
// We save the new token to the aux_q
cst_dev_params.d_aux_q_state_and_cost.lane(ilane)[aux_q_index] = {
arc_next_state, int_total_cost};
// Index of the parent token
// the parent is the token used as input (source of arc)
// that parent is at index main_q_idx in the GPU memory
        // However, the main_q is emptied before processing a new frame,
        // so we need to add the offset accounting for the previous frames:
        // we add lane_counters->main_q_global_offset
const int32 prev_token =
lane_counters->main_q_global_offset + main_q_idx;
assert(main_q_idx >= 0 && main_q_idx < cst_dev_params.main_q_capacity);
cst_dev_params.d_aux_q_info.lane(ilane)[aux_q_index] = {prev_token,
arc_idx};
}
}
end_lane:; // ";" is an empty statement
}
}
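// Illustrative host-side sketch of the "arc index -> source token" lookup used
// by expand_arcs_kernel. This is an assumption made for clarity (hypothetical
// name, not called by the decoder); the device code uses binsearch_maxle from
// cuda-decoder-kernels-utils.h, whose exact signature may differ. Given the
// exclusive prefix sum of the token degrees, it returns the largest index i in
// [low, high] such that prefix_sum[i] <= arc_index.
static inline int ExampleBinsearchMaxleHost(const int *prefix_sum,
                                            int arc_index, int low, int high) {
  // Invariant: prefix_sum[low] <= arc_index and the answer lies in [low, high]
  while (low < high) {
    int mid = low + (high - low + 1) / 2;  // upper mid, so the loop terminates
    if (prefix_sum[mid] <= arc_index)
      low = mid;
    else
      high = mid - 1;
  }
  return low;
}
// With degrees {3, 0, 2} the exclusive prefix sum is {0, 3, 3, 5}.
// ExampleBinsearchMaxleHost(prefix_sum, /*arc_index=*/4, /*low=*/0, /*high=*/2)
// returns 2: arc 4 is the arc with local index 4 - 3 = 1 of the token stored
// at main_q index 2, matching the example in the comments above.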
// post_expand_kernel
// Called after expand_arcs_kernel
// Takes care of what needs to be done after an expand_arcs_kernel
// execution. Mostly resetting the beam (if adaptive beam was triggered,
// the max_active_ kernels will take care of selecting a good beam),
// resetting the number of arcs in the main_q (we've processed them all),
// etc.
// Threads (1,1,1)
// Blocks (1, nlanes_used, 1)
template <bool IS_EMITTING>
__global__ void post_expand_kernel(DeviceParams cst_dev_params,
KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
LaneCounters *h_lane_counters = cst_dev_params.h_lanes_counters.lane(ilane);
const int prev_main_q_end = lane_counters->main_q_narcs_and_end.y;
const int prev_n_extra_prev_tokens =
lane_counters->main_q_n_extra_prev_tokens;
const int aux_q_end = lane_counters->aux_q_end;
CostType min_cost = orderedIntToFloat(lane_counters->min_int_cost);
// The next step is the contracting step from aux_q to main_q
// It will need the aux_q_end value. But it will also empty the aux_q
// We're resetting aux_q_end to 0 now, but we're saving its old value
// in another place
lane_counters->post_expand_aux_q_end = aux_q_end;
h_lane_counters->post_expand_aux_q_end = aux_q_end; // pinned memory
h_lane_counters->q_overflow = lane_counters->q_overflow; // pinned memory
lane_counters->aux_q_end = 0;
lane_counters->aux_q_requested = 0;
// We are done processing those arcs
lane_counters->main_q_narcs_and_end.x = 0;
// Resetting the adaptive beam
lane_counters->adaptive_int_beam_with_validity_index.x =
lane_counters->int_beam;
lane_counters->adaptive_int_beam_with_validity_index.y =
cst_dev_params.adaptive_beam_static_segment;
CostType beam = orderedIntToFloat(lane_counters->int_beam);
lane_counters->int_cutoff = floatToOrderedInt(min_cost + beam);
// If the adaptive beam kicked in, we want to reset the beam
// the max-active process will take care of selecting the right beam
if (IS_EMITTING) {
// the main_q contains the tokens from the previous frame
// after emitting, we won't use them anymore to create new tokens
// we reset the main_q
lane_counters->main_q_narcs_and_end = {0, 0};
lane_counters->main_q_requested = 0;
// The main_q was flushed - we need to update the global_offset
lane_counters->main_q_global_offset += prev_main_q_end;
if (threadIdx.x == 0 && blockIdx.x == 0)
lane_counters->main_q_extra_prev_tokens_global_offset +=
prev_n_extra_prev_tokens;
      // Moving local offset. Tokens created by the last expand
      // will be pruned, and the survivors will be moved to the end
      // of the main_q. Those tokens will be placed after local_offset
lane_counters->main_q_requested = 0;
CostType min_cost = orderedIntToFloat(lane_counters->min_int_cost);
lane_counters->min_histo_cost = min_cost;
lane_counters->max_histo_cost = min_cost + beam;
lane_counters->histo_bin_width = beam / (KALDI_CUDA_DECODER_HISTO_NBINS-1);
} else {
lane_counters->main_q_local_offset = prev_main_q_end;
// reset requested to end of queue
lane_counters->main_q_requested = prev_main_q_end;
}
}
}
__global__ void post_contract_and_preprocess_kernel(DeviceParams cst_dev_params,
KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
LaneCounters *h_lane_counters = cst_dev_params.h_lanes_counters.lane(ilane);
int2 main_q_narcs_and_end = lane_counters->main_q_narcs_and_end;
h_lane_counters->main_q_narcs_and_end =
main_q_narcs_and_end; // pinned memory
h_lane_counters->q_overflow = lane_counters->q_overflow; // pinned memory
atomicMin(&lane_counters->main_q_n_emitting_tokens, main_q_narcs_and_end.y);
}
}
// Meta-kernel (merging preprocess and expand) but only works with 1 CUDA block
// Used to avoid calling multiple main kernels (such as expand_arcs_kernel)
// for the tail of non emitting (lots of iterations with small number of arcs)
//
// Code is greatly simplified because we use only one CTA / lane
//
// Repeat until new queue empty:
// 1) Preprocess
// 2) Expand arcs
//
// The preprocess stage is not done on the first iteration, because it was
// already done by the ProcessAndContract kernel. We always call
// PruneAndPreprocess before calling FinalizeProcessNonemitting
//
// At the end, this kernel finalizes the computation for the current frame,
// so that it's ready for the next ProcessEmitting
//
// This kernel works, but can be greatly simplified now.
__launch_bounds__(KALDI_CUDA_DECODER_LARGEST_1D_BLOCK, 1) __global__
void finalize_process_non_emitting_kernel(DeviceParams cst_dev_params,
KernelParams params) {
typedef cub::BlockScan<int2, KALDI_CUDA_DECODER_LARGEST_1D_BLOCK>
Int2BlockScan;
typedef cub::BlockScan<int, KALDI_CUDA_DECODER_LARGEST_1D_BLOCK> IntBlockScan;
__shared__ typename IntBlockScan::TempStorage sh_temp_storage_int_scan;
__shared__ typename Int2BlockScan::TempStorage sh_temp_storage_int2_scan;
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
int2 both = lane_counters->main_q_narcs_and_end;
int32 main_q_narcs = both.x;
int32 main_q_end = both.y;
int32 main_q_local_offset = lane_counters->main_q_local_offset;
const int32 main_q_global_offset = lane_counters->main_q_global_offset;
// aux_q is empty when this kernel is called
int32 aux_q_end = 0;
IntegerCostType int_cutoff = lane_counters->int_cutoff;
while (main_q_narcs > 0) {
// Step 1 : ExpandArcs
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(offset, thread_idx,
main_q_narcs) {
const int32 main_q_arc_idx = offset + thread_idx;
// For details on how this code works, please refer to comments in
// expand_arcs
IntegerCostType total_int_cost = INT_MAX;
int32 arc_idx;
StateId arc_next_state;
int32 main_q_idx;
if (main_q_arc_idx < main_q_narcs) {
main_q_idx = binsearch_maxle(
cst_dev_params.d_main_q_degrees_prefix_sum.channel(ichannel),
main_q_arc_idx, main_q_local_offset, main_q_end - 1);
const int32 state_first_arc_idx_in_main_q =
cst_dev_params.d_main_q_degrees_prefix_sum.channel(
ichannel)[main_q_idx];
const int32 arc_offset_start =
cst_dev_params.d_main_q_arc_offsets.channel(ichannel)[main_q_idx];
arc_idx = arc_offset_start +
(main_q_arc_idx - state_first_arc_idx_in_main_q);
arc_next_state = cst_dev_params.d_arc_nextstates[arc_idx];
CostType arc_weight = cst_dev_params.d_arc_weights[arc_idx];
CostType prev_token_cost =
orderedIntToFloat(cst_dev_params.d_main_q_state_and_cost
.channel(ichannel)[main_q_idx]
.y);
total_int_cost = floatToOrderedInt(arc_weight + prev_token_cost);
if(total_int_cost < lane_counters->min_int_cost)
atomicMin(&lane_counters->min_int_cost, total_int_cost);
if (total_int_cost >= int_cutoff) {
total_int_cost = INT_MAX; // above cutoff
}
}
const int32 has_successor = (total_int_cost < INT_MAX) ? 1 : 0;
int32 local_aux_q_idx;
int32 nsuccessors;
IntBlockScan(sh_temp_storage_int_scan)
.ExclusiveSum(has_successor, local_aux_q_idx,
nsuccessors); // aggregate
// Checking if we are overflowing the aux_q
if ((aux_q_end + nsuccessors) >= cst_dev_params.aux_q_capacity) {
lane_counters->q_overflow |= OVERFLOW_AUX_Q;
// nothing to revert in global memory
goto finalize_lane;
}
if (has_successor) {
const int32 aux_q_idx = aux_q_end + local_aux_q_idx;
const int32 prev_token_idx = main_q_global_offset + main_q_idx;
cst_dev_params.d_aux_q_state_and_cost.lane(ilane)[aux_q_idx] = {
arc_next_state, total_int_cost};
cst_dev_params.d_aux_q_info.lane(ilane)[aux_q_idx] = {prev_token_idx,
arc_idx};
}
aux_q_end += nsuccessors;
        // sync: reusing sh_temp_storage_int_scan
__syncthreads();
}
// Step 2 : PreprocessAndContract
// Reset for new iteration
main_q_narcs = 0;
main_q_local_offset = main_q_end;
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(offset, thread_idx,
aux_q_end) {
const int32 aux_q_idx = offset + thread_idx;
int32 degree = 0;
int32 start = -1;
StateId token_state;
IntegerCostType token_int_cost;
if (aux_q_idx < aux_q_end) {
int2 both =
cst_dev_params.d_aux_q_state_and_cost.lane(ilane)[aux_q_idx];
token_state = both.x;
token_int_cost = both.y;
// beam may have changed since generation
// We are non-emitting in this kernel, using ne offsets
start = cst_dev_params.d_arc_ne_offsets[token_state];
int32 end = cst_dev_params.d_arc_ne_offsets[token_state + 1];
degree = end - start;
}
int has_valid_nonpruned_token = (start != -1) ? 1 : 0;
int2 narcs_and_ntokens_prefix_sum = {degree, has_valid_nonpruned_token};
int2 aggregate, zero2 = {0, 0};
Int2BlockScan(sh_temp_storage_int2_scan)
.ExclusiveScan(narcs_and_ntokens_prefix_sum,
narcs_and_ntokens_prefix_sum, zero2, PlusPlus(),
aggregate);
// Checking if we are not overflowing the main_q
const int32 total_ntokens = aggregate.y;
if ((main_q_end + total_ntokens) >= cst_dev_params.main_q_capacity) {
lane_counters->q_overflow |= OVERFLOW_MAIN_Q;
goto finalize_lane;
}
const int32 degree_prefix_sum =
main_q_narcs + narcs_and_ntokens_prefix_sum.x;
const int32 degree_sum = aggregate.x;
main_q_narcs += degree_sum;
if (has_valid_nonpruned_token) {
const int32 local_main_q_idx = narcs_and_ntokens_prefix_sum.y;
const int32 main_q_idx = main_q_end + local_main_q_idx;
cst_dev_params.d_main_q_arc_offsets.channel(ichannel)[main_q_idx] =
start;
cst_dev_params.d_main_q_degrees_prefix_sum.channel(
ichannel)[main_q_idx] = degree_prefix_sum;
cst_dev_params.d_main_q_state_and_cost.channel(
ichannel)[main_q_idx] = {token_state, token_int_cost};
cst_dev_params.d_main_q_info.lane(ilane)[main_q_idx] =
cst_dev_params.d_aux_q_info.lane(ilane)[aux_q_idx];
cst_dev_params.d_main_q_acoustic_cost.lane(ilane)[main_q_idx] =
0.0f; // we are always nonemitting in this kernel
}
main_q_end += total_ntokens;
__syncthreads();
}
aux_q_end = 0; // aux_q is now empty
}
finalize_lane:
if (threadIdx.x == 0) {
// This main_q is now final for that frame
lane_counters->main_q_narcs_and_end = {0, main_q_end};
cst_dev_params.h_lanes_counters.lane(ilane)->main_q_narcs_and_end = {
0, main_q_end}; // pinned memory
}
}
}
// GetBestCost :
// Finds all tokens with a cost in [min_cost;min_cost+lattice_beam[
// Adds the final_costs if use_final_probs
// Does the computation in two steps
//
// Step 1: Find the value of min_cost, i.e. the minimum cost in the last token
// queue
// (the queue generated by the last frame computed)
// We set channel_counters->min_int_cost_and_arg_with_final here
// (min_int_cost_and_arg_without_final is set while postprocessing the main_q,
// in fill_hashmap_with_main_q_kernel)
// The latter adds final_cost[token.state] before looking for the min
__global__ void get_best_cost_step1_kernel(DeviceParams cst_dev_params,
KernelParams params,
bool use_final_probs,
CostType fst_zero) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
const int32 main_q_end = channel_counters->prev_main_q_narcs_and_end.y;
const int32 global_offset = channel_counters->prev_main_q_global_offset;
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(idx, main_q_end) {
if (idx == 0)
lane_counters->n_within_lattice_beam =
0; // will be used in the next kernel
const int2 both =
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[idx];
const int token_state = both.x;
const int token_int_cost = both.y;
CostType cost = orderedIntToFloat(token_int_cost);
IntegerCostType int_cost = floatToOrderedInt(cost);
int32 global_idx = global_offset + idx;
// We know what is the min cost (without final costs)
// we just need to have the index of one token with that min cost
if (use_final_probs) {
const CostType final_cost =
cst_dev_params.d_fst_final_costs[token_state];
IntegerCostType int_cost_with_final =
floatToOrderedInt(cost + final_cost);
if (final_cost != fst_zero) {
int2 min_and_arg = {int_cost_with_final,
global_idx}; // sort by cost, put it first
atomicMinI2(&channel_counters->min_int_cost_and_arg_with_final,
min_and_arg);
}
}
}
}
}
// Step2: Now that step1 found the min_cost (with and without final cost)
// If at least one final token (token associated with a final fst state)
// exists in the token queue, AND if use_final_probs is true,
// We can detect all tokens with a cost within [min_cost;min_cost+lattice_beam]
// and list them into d_list_final_tokens_in_main_q
__global__ void get_best_cost_step2_kernel(DeviceParams cst_dev_params,
KernelParams params,
bool use_final_probs,
CostType fst_zero) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
const ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
const int32 main_q_end = channel_counters->prev_main_q_narcs_and_end.y;
const int32 global_offset = channel_counters->prev_main_q_global_offset;
const int2 min_int_cost_and_arg_with_final =
channel_counters->min_int_cost_and_arg_with_final;
const int2 min_int_cost_and_arg_without_final =
channel_counters->min_int_cost_and_arg_without_final;
bool has_reached_final = (min_int_cost_and_arg_with_final.x != INT_MAX);
// Use final if we want to use final (use_final_probs is true) and if we
// found a final state in the token list
bool compute_final = use_final_probs && has_reached_final;
IntegerCostType min_cost_to_use =
compute_final ? min_int_cost_and_arg_with_final.x
: min_int_cost_and_arg_without_final.x;
// if token.cost < lattice_cutoff, that token will belong in the output
// lattice
CostType lattice_cutoff =
orderedIntToFloat(min_cost_to_use) + cst_dev_params.lattice_beam;
IntegerCostType lattice_int_cutoff = floatToOrderedInt(lattice_cutoff);
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(idx, main_q_end) {
// First thread of each lane will move the results into lane counters.
// That's because we never move channel counters back to host,
// so we move those values to the lane counters, and those lane counters
// will be moved to host after this kernel
if (idx == 0) {
// The lane counters will be copied to host
lane_counters->min_int_cost_and_arg =
compute_final ? min_int_cost_and_arg_with_final
: min_int_cost_and_arg_without_final;
lane_counters->has_reached_final = has_reached_final;
}
// Looking for a token with its int_cost < lattice_int_cutoff
const int2 both =
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[idx];
const int32 token_state = both.x;
int32 token_int_cost = both.y;
if (compute_final) {
const CostType final_cost =
cst_dev_params.d_fst_final_costs[token_state];
const CostType token_cost = orderedIntToFloat(token_int_cost);
// final_cost == fst_zero -> this state is not final
token_int_cost = (final_cost != fst_zero)
? floatToOrderedInt(token_cost + final_cost)
: INT_MAX;
}
if (token_int_cost < lattice_int_cutoff) {
// That token will be included in the lattice (last frame)
// save it
int list_idx = atomicAdd(&lane_counters->n_within_lattice_beam, 1);
cst_dev_params.h_list_final_tokens_in_main_q.lane(ilane)[list_idx] = {
global_offset + idx, token_int_cost};
}
}
}
}
__global__ void get_best_cost_step3_kernel(DeviceParams cst_dev_params,
KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *d_lanes_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
LaneCounters *h_lanes_counters =
cst_dev_params.h_lanes_counters.lane(ilane);
h_lanes_counters->min_int_cost_and_arg =
d_lanes_counters->min_int_cost_and_arg;
h_lanes_counters->has_reached_final = d_lanes_counters->has_reached_final;
h_lanes_counters->n_within_lattice_beam =
d_lanes_counters->n_within_lattice_beam;
}
}
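// Illustrative host-side sketch of what the three get_best_cost_step* kernels
// compute. This is an assumption made for clarity (single channel, plain
// floats, hypothetical names, final_costs[i] is the final cost of token i's
// state, equal to fst_zero when that state is not final): find the best cost
// of the last token queue, optionally adding final costs, then list the token
// indices whose cost lies within [best; best + lattice_beam[.
static inline int ExampleGetBestCostHost(const float *token_costs,
                                         const float *final_costs,
                                         int main_q_end, bool use_final_probs,
                                         float fst_zero, float lattice_beam,
                                         int *list_out) {
  // Step 1: find the minimum cost, with and without final costs
  float best_with_final = FLT_MAX, best_without_final = FLT_MAX;
  for (int i = 0; i < main_q_end; ++i) {
    if (token_costs[i] < best_without_final) best_without_final = token_costs[i];
    if (final_costs[i] != fst_zero) {  // this token reached a final state
      const float c = token_costs[i] + final_costs[i];
      if (c < best_with_final) best_with_final = c;
    }
  }
  const bool has_reached_final = (best_with_final < FLT_MAX);
  const bool compute_final = use_final_probs && has_reached_final;
  const float best = compute_final ? best_with_final : best_without_final;
  // Step 2: list every token within the lattice beam of the best cost
  const float cutoff = best + lattice_beam;
  int n_within_lattice_beam = 0;
  for (int i = 0; i < main_q_end; ++i) {
    float c = token_costs[i];
    if (compute_final) {
      if (final_costs[i] == fst_zero) continue;  // not a final state
      c += final_costs[i];
    }
    if (c < cutoff) list_out[n_within_lattice_beam++] = i;
  }
  return n_within_lattice_beam;
}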
// compute_costs_histogram_kernel
// Used in ApplyMaxActiveAndReduceBeam
// Computes the histogram of the token costs in the main_q (or in the aux_q if use_aux_q)
__global__ void compute_costs_histogram_kernel(DeviceParams cst_dev_params,
KernelParams params,
bool use_aux_q) {
const int nlanes = params.nlanes_used;
typedef cub::BlockHistogram<BinId, KALDI_CUDA_DECODER_1D_BLOCK, 1,
KALDI_CUDA_DECODER_HISTO_NBINS + 1>
BlockHistogram;
__shared__ typename BlockHistogram::TempStorage temp_storage;
__shared__ unsigned int smem_histogram[KALDI_CUDA_DECODER_HISTO_NBINS + 1];
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
const int32 q_end = use_aux_q ? lane_counters->post_expand_aux_q_end
: lane_counters->main_q_narcs_and_end.y;
bool compute_max_active = lane_counters->compute_max_active;
if (!compute_max_active) {
if (q_end <= cst_dev_params.max_active) continue; // nothing to do
// Otherwise let's turn max active on for this frame and lane
lane_counters->compute_max_active = true;
}
// Reset local histogram for this lane
BlockHistogram(temp_storage).InitHistogram(smem_histogram);
CostType min_histo_cost = lane_counters->min_histo_cost;
CostType max_histo_cost = lane_counters->max_histo_cost;
CostType bin_width = lane_counters->histo_bin_width;
// We have a sync inside the loop, keeping all threads alive
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(block_offset, thread_idx,
q_end) {
const int32 q_idx = block_offset + thread_idx;
// The last bin is for everything we don't want to count:
// cost already above the beam, or non-valid tokens
// It is the default bin
BinId bin_id[1];
bin_id[0] = KALDI_CUDA_DECODER_HISTO_NBINS;
if (q_idx < q_end) {
IntegerCostType int_cost =
use_aux_q
? cst_dev_params.d_aux_q_state_and_cost.lane(ilane)[q_idx].y
: cst_dev_params.d_main_q_state_and_cost
.channel(ichannel)[q_idx]
.y;
CostType cost = orderedIntToFloat(int_cost);
CostType extra = cost - min_histo_cost;
if(extra <= 0.0f)
bin_id[0] = 0;
else if (extra < max_histo_cost) {
bin_id[0] = (BinId)__fdiv_rd(extra, bin_width)+1; // +1 because first bin is cost < min_histo_cost
}
}
BlockHistogram(temp_storage).Composite(bin_id, smem_histogram); // sync
__syncthreads(); // reusing temp_storage
}
// Not using the macros 1D_LOOP because that loop is only within a CTA
for (int32 bin_id_w = threadIdx.x;
bin_id_w < KALDI_CUDA_DECODER_HISTO_NBINS;
bin_id_w += KALDI_CUDA_DECODER_1D_BLOCK) {
// Writing the local histo to global
// We don't care about the last bin (cf above)
int32 s_count = (int32)smem_histogram[bin_id_w];
atomicAdd(&cst_dev_params.d_histograms.lane(ilane)[bin_id_w], s_count);
}
// Making sure we're done reading from smem
__syncthreads();
}
}
// update_beam_using_histogram_kernel
// used in ApplyMaxActiveAndReduceBeam
// uses the histogram computed in compute_costs_histogram_kernel
// to find where to cut (where to set the beam)
// to keep only ~max_active_ tokens.
// Important: use only one CTA per lane
__global__ void update_beam_using_histogram_kernel(DeviceParams cst_dev_params,
KernelParams params,
bool use_aux_q) {
typedef cub::BlockScan<int, KALDI_CUDA_DECODER_1D_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
const int nlanes = params.nlanes_used;
const int max_active = cst_dev_params.max_active;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
bool compute_max_active = lane_counters->compute_max_active;
if (!compute_max_active) continue; // nothing to do
CostType beam = orderedIntToFloat(lane_counters->int_beam);
CostType min_histo_cost = lane_counters->min_histo_cost;
CostType bin_width = lane_counters->histo_bin_width;
// We now have our histogram of the token costs (computed in the previous
// kernel)
// Each thread i is responsible for a bin i, with that bin containing ni
// tokens.
    // We compute the exclusive prefix sum of those ni, ending up for each
    // thread i with si = sum of nj for j < i
    // If thread i detects that si < max_active_ and si + ni >= max_active_,
    // then we will cut the beam at
    // the upper boundary of bin i (i.e. bin_width * i above min_histo_cost)
//
// Assert : one thread in a CTA is responsible for at most one bin
// we will not iterate over bins
assert(KALDI_CUDA_DECODER_HISTO_NBINS < KALDI_CUDA_DECODER_1D_BLOCK);
int bin_id = threadIdx.x;
int val = 0;
if (bin_id < KALDI_CUDA_DECODER_HISTO_NBINS)
val = cst_dev_params.d_histograms.lane(ilane)[bin_id];
int prefix_sum;
BlockScan(temp_storage).ExclusiveSum(val, prefix_sum);
if (prefix_sum < max_active && (prefix_sum + val) >= max_active) {
      // We found our new beam relative to min_histo_cost
      // However, the current min_cost could be lower than min_histo_cost
      // we need to add that diff to the new beam
CostType new_beam_for_histo_min_cost = bin_width * bin_id;
CostType current_min_cost = orderedIntToFloat(lane_counters->min_int_cost);
CostType new_beam = (min_histo_cost - current_min_cost) + new_beam_for_histo_min_cost;
IntegerCostType new_int_beam = floatToOrderedInt(new_beam);
// Saving our new beam for this lane
lane_counters->int_beam = new_int_beam;
lane_counters->adaptive_int_beam_with_validity_index.x = new_int_beam;
lane_counters->int_cutoff = floatToOrderedInt(current_min_cost + new_beam);
}
}
}
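// Illustrative host-side sketch of the histogram-based beam selection done by
// compute_costs_histogram_kernel + update_beam_using_histogram_kernel. This is
// an assumption made for clarity (sequential, hypothetical names): once the
// per-bin token counts are known, the beam is cut at the first bin where the
// running count reaches max_active. The device version computes the running
// count with a block-wide exclusive prefix sum, one thread per bin.
static inline float ExampleBeamFromHistogramHost(const int *histogram, int nbins,
                                                 int max_active, float bin_width,
                                                 float min_histo_cost,
                                                 float current_min_cost,
                                                 float current_beam) {
  int count = 0;  // exclusive prefix sum of the bin counts
  for (int bin_id = 0; bin_id < nbins; ++bin_id) {
    if (count + histogram[bin_id] >= max_active) {
      // Cut at this bin: the beam is measured from min_histo_cost, then shifted
      // so that it is expressed relative to the current min cost
      return (min_histo_cost - current_min_cost) + bin_width * bin_id;
    }
    count += histogram[bin_id];
  }
  return current_beam;  // fewer than max_active tokens: keep the beam as is
}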
//
// PostProcessingMainQueue kernels.
// all the following kernels are called when postprocessing a frame
//
// Filling hashmap values with the tokens that we have in the main queue
// We do that because multiple tokens associated with the same FST state
// (but with different arc_idx) can exist in the main_q. We need to detect
// that situation, count those tokens, and find the min_cost for that FST state.
// It is done using a hashmap
__global__ void fill_hashmap_with_main_q_kernel(DeviceParams cst_dev_params,
KernelParams params) {
// Operator for the prefix sum inside the CUDA block
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
ChannelCounters *channel_counters =
cst_dev_params.d_channels_counters.channel(ichannel);
const int32 main_q_end = lane_counters->main_q_narcs_and_end.y;
int32 min_int_cost = lane_counters->min_int_cost;
CostType min_cost = orderedIntToFloat(min_int_cost);
const int32 global_offset = channel_counters->prev_main_q_global_offset;
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(main_q_idx, main_q_end) {
// Position of considered token in the main_q
if (main_q_idx < main_q_end) {
int2 both = cst_dev_params.d_main_q_state_and_cost.channel(
ichannel)[main_q_idx];
StateId token_state = both.x;
IntegerCostType token_int_cost = both.y;
if (min_int_cost == token_int_cost) {
          // remove offset = min_cost, set it to 0 explicitly
token_int_cost = floatToOrderedInt(0.0f);
channel_counters->min_int_cost_and_arg_without_final = {
token_int_cost, global_offset + main_q_idx};
lane_counters->prev_arg_min_int_cost = main_q_idx;
} else {
// remove offset = min_cost
CostType token_cost = orderedIntToFloat(token_int_cost) - min_cost;
token_int_cost = floatToOrderedInt(token_cost);
}
int local_idx, hash_idx;
hashmap_insert_or_aggregate(cst_dev_params.d_hashmap_values.lane(ilane),
token_state, token_int_cost, main_q_idx,
cst_dev_params.hashmap_capacity, &local_idx,
&hash_idx);
cst_dev_params.d_main_q_n_extra_prev_tokens_local_idx.lane(
ilane)[main_q_idx] = local_idx;
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[main_q_idx].y =
token_int_cost;
// If we have the min, saving its index for get best cost and the min
// cost estimate of the next frame
// Saving where that token.state ended up in the hashmap
// false = this token is not the representative of this state
// We will update representing_state once we know more (in the next
// kernel)
          // We first need to add all tokens to the hashmap, which will be the
          // case once
          // this kernel returns.
SetFSTStateHashIndex(
hash_idx, false,
&cst_dev_params.d_main_q_state_hash_idx.lane(ilane)[main_q_idx]);
}
if (main_q_idx == 0) {
lane_counters->int_cutoff = floatToOrderedInt(
orderedIntToFloat(lane_counters->int_cutoff) - min_cost);
}
}
}
}
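// Illustrative host-side sketch of the per-FST-state aggregation performed by
// fill_hashmap_with_main_q_kernel. This is an assumption made for clarity
// (sequential, O(n^2) linear search instead of the device hashmap, hypothetical
// names): for each distinct FST state present in the main_q, count how many
// tokens point to it and remember the index of one token with the lowest cost.
// uniq_states, counts and argmin_idx must each have room for main_q_end entries.
static inline int ExampleAggregateStatesHost(const int *token_states,
                                             const int *token_costs,
                                             int main_q_end, int *uniq_states,
                                             int *counts, int *argmin_idx) {
  int n_uniq = 0;
  for (int i = 0; i < main_q_end; ++i) {
    int j = 0;
    while (j < n_uniq && uniq_states[j] != token_states[i]) ++j;
    if (j == n_uniq) {  // first time we see this FST state
      uniq_states[n_uniq] = token_states[i];
      counts[n_uniq] = 1;
      argmin_idx[n_uniq] = i;
      ++n_uniq;
    } else {  // several tokens share the same FST state: aggregate
      ++counts[j];
      if (token_costs[i] < token_costs[argmin_idx[j]]) argmin_idx[j] = i;
    }
  }
  return n_uniq;  // number of distinct FST states in the main_q
}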
// preprocess_and_list_extra_prev_tokens_kernel_step[i] kernels
// Called in PostProcessingMainQueue
// They do two things:
// - do the "emitting preprocessing". I.e. doing the preprocessing necessary for
// the future ExpandArcsEmitting that may be done next (if the current frame is
// not the last one)
// It consists of filling the d_main_q_degrees_prefix_sum of the emitting arc
// degrees of the tokens + setting d_main_q_arc_offsets
// - when we have multiple tokens associated with the same FST state S, we will
// list them in d_main_q_extra_prev_tokens. We need to know where to put them in
// that array,
// so we'll compute a prefix_sum also to compute those indexes. We'll then save
// the location of each extra tokens list (its offset and size in
// d_main_q_extra_prev_tokens),
// and save it into d_main_q_info for later lattice processing
//
// First step : Reading the hashmap, detecting which token is representative for
// each FST state, which is decided by fill_hashmap_with_main_q_kernel()
// (we pick one of the best ones, with the best ones being the ones with the
// lowest cost)
// this representative will be responsible for K tokens, with K being the number
// of tokens associated with that FST state. We only consider the cases where K
// > 1,
// because if K == 1, then we will not store that token in the special list
// d_main_q_extra_prev_tokens
// Each representative is also the only token that will propagate emitting arcs
// for that FST state. Because a representative has the min_cost for that FST
// state, it is enough to only propagate
// that one
// Each representative counts the number of emitting arcs it is responsible for,
// and we will compute the prefix sum of the arc degrees
__global__ void emitting_preprocess_and_list_extra_prev_tokens_step1_kernel(
DeviceParams cst_dev_params, KernelParams params) {
// Operator for the prefix sum inside the CUDA block
typedef cub::BlockScan<int2, KALDI_CUDA_DECODER_1D_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage sh_temp_storage;
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
const LaneCounters *lane_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
const int32 main_q_end = lane_counters->main_q_narcs_and_end.y;
    // Final cutoff from the last ExpandArc execution.
    // The cutoff may have decreased since the tokens were moved to the main_q:
    // min_cost cannot be lower than before (we only ran non-emitting phases
    // since then), but the adaptive beam may have lowered the beam
const IntegerCostType int_cutoff = lane_counters->int_cutoff;
// Keeping all threads in CTA alive
// We'll __syncthreads()
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(block_offset, thread_idx,
main_q_end) {
// We'll take care of the token at index main_q_idx
const int32 main_q_idx = block_offset + thread_idx;
const int32 ichannel = lane_counters->channel_to_compute;
      // Whether that token is the representative of its FST state
      // (token.next_state). The representative of an FST state is the token
      // with the lowest token.cost for that FST state.
      // If multiple tokens have token1.cost == token2.cost ==
      // min_cost_for_that_state, then one is picked (first come, first served;
      // decided in fill_hashmap_with_main_q_kernel)
bool representing_state = false;
// Number of emitting arcs for that token
// Only the token representative of that FST state can have degree > 0
int32 degree = 0;
// If that token is representative of a FST state S,
// and if multiple tokens are associated with that state S,
// then n_extra_prev_token will contain their count
int32 n_extra_prev_token = 0;
if (main_q_idx < main_q_end) {
int2 both = cst_dev_params.d_main_q_state_and_cost.channel(
ichannel)[main_q_idx];
StateId token_state = both.x;
IntegerCostType token_int_cost = both.y;
        // Loading info about token.next_state. Are there multiple tokens for
        // that state? How many? What's the min token.cost for that state?
int32 hash_idx; // we saved the hash_idx after inserting
bool bool_buffer; // will always be false. We just need it to call the
// function
GetFSTStateHashIndex(
cst_dev_params.d_main_q_state_hash_idx.lane(ilane)[main_q_idx],
&hash_idx, &bool_buffer);
HashmapValueT h_val =
cst_dev_params.d_hashmap_values.lane(ilane)[hash_idx];
        // Token index of one of the tokens with the lowest token.cost for that
        // state
uint32_t state_best_int_cost_argmin;
GetArgFromPackedArgminUInt64(h_val.min_and_argmin_int_cost_u64, &state_best_int_cost_argmin);
// Checking if we're the representative of that state
representing_state = (main_q_idx == state_best_int_cost_argmin);
// Saving the hash_idx of that fst state + if we're responsible for that
// state
SetFSTStateHashIndex(
hash_idx, representing_state,
&cst_dev_params.d_main_q_state_hash_idx.lane(ilane)[main_q_idx]);
        // One of the best tokens for that state will represent that state in
        // the next frame
if (representing_state) {
if (token_int_cost < int_cutoff) {
// Next step is emitting (next frame), using emitting offsets
const int32 start = cst_dev_params.d_arc_e_offsets[token_state];
const int32 end = cst_dev_params.d_arc_e_offsets[token_state + 1];
degree = end - start;
// Saving the start offset for the expand kernel
// avoid a new random memory access
cst_dev_params.d_main_q_arc_offsets.channel(ichannel)[main_q_idx] =
start;
}
          // If that FST state has only one token associated with it, we store
          // that token directly in d_main_q_info (its original place).
          // We only move it into the d_main_q_extra_prev_tokens list if
          // multiple tokens are associated with that state
n_extra_prev_token = (h_val.count > 1) ? (h_val.count) : 0;
}
}
// Computing a local prefix sum inside that CUDA block
      // Other kernels will take care of adding the necessary offset to those
// local prefix sums
int2 zeroi2 = {0, 0};
int2 vali2 = {degree, n_extra_prev_token};
int2 aggi2;
BlockScan(sh_temp_storage)
.ExclusiveScan(vali2, aggi2, zeroi2, PlusPlus());
int32 degree_local_prefix_sum = aggi2.x;
int32 n_extra_prev_token_prefix_sum = aggi2.y;
if (main_q_idx < main_q_end) {
// This is not the final global prefix sum
// Other kernels will add the necessary offset
cst_dev_params.d_main_q_degrees_prefix_sum.channel(
ichannel)[main_q_idx] = degree_local_prefix_sum;
cst_dev_params.d_main_q_extra_prev_tokens_prefix_sum.lane(
ilane)[main_q_idx] = n_extra_prev_token_prefix_sum;
}
if (KALDI_CUDA_DECODER_IS_LAST_1D_THREAD()) {
// Saving the local sum of degrees of that CUDA block
// That's necessary to compute the global offset of that CUDA block,
// and that offset is what we need to transform the local prefix sum
// into a global prefix sum
const int local_sum_index = block_offset / KALDI_CUDA_DECODER_1D_BLOCK;
// the prefix sum was exclusive, adding missing value
const int degree_inclusive_sum = degree_local_prefix_sum + degree;
const int n_extra_prev_tokens_inclusive_sum =
n_extra_prev_token_prefix_sum + n_extra_prev_token;
cst_dev_params.d_main_q_block_sums_prefix_sum.lane(
ilane)[local_sum_index] = {degree_inclusive_sum,
n_extra_prev_tokens_inclusive_sum};
}
// Synchronization because:
// - we may need to reuse sh_temp_storage if the for loop iterates (cf
// CUB's doc)
__syncthreads();
}
}
}
// In step1, we've computed the local (CTA-wide) prefix sums. We also have the
// local sum of each individual CTA
// In this kernel, we will compute the offset of each CTA in the global prefix
// sum. We will then add those offsets in step3
// Only one CTA / lane
__global__ void emitting_preprocess_and_list_extra_prev_tokens_step2_kernel(
DeviceParams cst_dev_params, KernelParams params) {
typedef cub::BlockScan<int2, KALDI_CUDA_DECODER_1D_BLOCK> BlockScan;
__shared__ typename BlockScan::TempStorage sh_temp_storage;
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int main_q_end = lane_counters->main_q_narcs_and_end.y;
const int ntiles = KALDI_CUDA_DECODER_DIV_ROUND_UP(
main_q_end, KALDI_CUDA_DECODER_1D_BLOCK);
// Using block_offset loop to keep entire CTA alive (we're going to use
// __syncthreads in CUB)
int2 sum_so_far = {0, 0};
KALDI_CUDA_DECODER_1D_BLOCK_OFFSET_KERNEL_LOOP(offset, thread_idx, ntiles) {
const int32 itile = offset + thread_idx;
const int2 zeroi2 = {0, 0};
const int2 val =
(itile < ntiles)
? cst_dev_params.d_main_q_block_sums_prefix_sum.lane(ilane)[itile]
: zeroi2;
int2 prefix_sum, sum;
BlockScan(sh_temp_storage)
.ExclusiveScan(val, prefix_sum, zeroi2, PlusPlus(), sum);
PlusPlus pp;
prefix_sum = pp(prefix_sum, sum_so_far);
sum_so_far = pp(sum_so_far, sum);
if (itile < ntiles) {
cst_dev_params.d_main_q_block_sums_prefix_sum.lane(ilane)[itile] =
prefix_sum;
}
if (itile == (ntiles - 1)) {
const int32 total_narcs = prefix_sum.x + val.x;
const int32 total_n_extra_prev_tokens = prefix_sum.y + val.y;
lane_counters->main_q_narcs_and_end.x = total_narcs;
lane_counters->main_q_n_extra_prev_tokens = total_n_extra_prev_tokens;
assert(total_n_extra_prev_tokens >= 0 &&
total_n_extra_prev_tokens <= main_q_end);
}
}
}
}
// Step3: Uses the CTA offsets computed in step2 to transform the CTA-wide
// prefix sums into global prefix sums
// The representative of each FST state saves into the hashmap the location of
// the extra_prev_tokens of that state in d_main_q_extra_prev_tokens.
// That way each extra token will know where to write itself in the next
// kernel.
__global__ void emitting_preprocess_and_list_extra_prev_tokens_step3_kernel(
DeviceParams cst_dev_params, KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
const LaneCounters *lane_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
const int main_q_end = lane_counters->main_q_narcs_and_end.y;
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(main_q_idx, main_q_end) {
const int32 local_sum_idx = main_q_idx / KALDI_CUDA_DECODER_1D_BLOCK;
const int2 local_sum_offset =
cst_dev_params.d_main_q_block_sums_prefix_sum.lane(
ilane)[local_sum_idx];
cst_dev_params.d_main_q_degrees_prefix_sum.channel(
ichannel)[main_q_idx] += local_sum_offset.x;
int extra_prev_tokens_offset =
cst_dev_params.d_main_q_extra_prev_tokens_prefix_sum.lane(
ilane)[main_q_idx] +
local_sum_offset.y;
      // Loading the hash index associated with token.state
// If representative, store the location of the extra prev tokens list for
// that state in the hashmap
bool is_representative;
int32 hash_idx;
GetFSTStateHashIndex(
cst_dev_params.d_main_q_state_hash_idx.lane(ilane)[main_q_idx],
&hash_idx, &is_representative);
if (is_representative) {
HashmapValueT &val =
cst_dev_params.d_hashmap_values.lane(
ilane)[hash_idx];
uint32_t min;
GetMinFromPackedArgminUInt64(
val.min_and_argmin_int_cost_u64, &min);
unsigned long long new_pack;
PackArgminInUInt64(min, extra_prev_tokens_offset,
&new_pack);
val.min_and_argmin_int_cost_u64 = new_pack;
}
}
}
}
// Step4: We now know where to store our extra prev tokens in
// d_main_q_extra_prev_tokens.
// We now move the tokens that need to be moved (those whose FST state has
// multiple associated tokens) into d_main_q_extra_prev_tokens. In
// d_main_q_info, we store the location of that list as [offset,size],
// so that when backtracking and reading d_main_q_info[token_idx], we know
// where to find the list of the same-state tokens.
// It is the last step of the
// emitting_preprocess_and_list_extra_prev_tokens_step[i]_kernel pipeline
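//
// Worked example (illustrative numbers only): suppose FST state S has 3 tokens
// in the main_q, at main_q indices 5, 9 and 12, with local indices 0, 1, 2
// (assigned in fill_hashmap_with_main_q_kernel) and
// extra_prev_tokens_offset = 7 (computed in step3). The three tokens are then
// written at indices 7, 8, 9 of d_main_q_extra_prev_tokens, and each of their
// d_main_q_info entries stores the list descriptor [prev_global_idx + 7, 3].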
__global__ void emitting_preprocess_and_list_extra_prev_tokens_step4_kernel(
DeviceParams cst_dev_params, KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
const LaneCounters *lane_counters =
cst_dev_params.d_lanes_counters.lane(ilane);
const int32 ichannel = lane_counters->channel_to_compute;
const int main_q_end = lane_counters->main_q_narcs_and_end.y;
    // Previous frames have filled d_main_q_extra_prev_tokens.
    // d_main_q_extra_prev_tokens was then flushed to host. We want to set the
    // global offset (global in the sense "for all frames") telling where to
    // read it in h_all_tokens_extra_prev_tokens_ on the host.
    // We add main_q_extra_prev_tokens_global_offset for that
const int prev_global_idx =
lane_counters->main_q_extra_prev_tokens_global_offset;
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(main_q_idx, main_q_end) {
// We'll take care of token at main_q_idx
// Loading hashmap information about token.state
bool is_representative;
int32 hash_idx;
GetFSTStateHashIndex(
cst_dev_params.d_main_q_state_hash_idx.lane(ilane)[main_q_idx],
&hash_idx, &is_representative);
HashmapValueT val = cst_dev_params.d_hashmap_values.lane(ilane)[hash_idx];
// How many tokens are associated with that fst state token.state
int same_count = val.count;
bool must_move_to_extra_prev_tokens = (same_count > 1);
if (must_move_to_extra_prev_tokens) {
// Moving to the extra_prev_tokens list.
// Some of those tokens have an extra cost (compared to the best cost
// for that FST state)
// Generating and saving that extra cost. We will use it when generating
// the lattice.
CostType token_cost = orderedIntToFloat(
cst_dev_params.d_main_q_state_and_cost.channel(ichannel)[main_q_idx]
.y);
uint32_t best_int_cost;
// Where to write this state list in d_main_q_extra_prev_tokens
uint32_t extra_prev_tokens_offset;
unsigned long long pack = val.min_and_argmin_int_cost_u64;
GetMinFromPackedArgminUInt64(pack, &best_int_cost);
GetArgFromPackedArgminUInt64(pack, &extra_prev_tokens_offset);
CostType best_cost = orderedIntToFloat((int)best_int_cost);
CostType extra_cost = token_cost - best_cost;
assert(!is_representative || extra_cost == 0.0f);
// Loading the token to be moved
InfoToken inf_tok =
cst_dev_params.d_main_q_info.lane(ilane)[main_q_idx];
CostType acoustic_cost =
cst_dev_params.d_main_q_acoustic_cost.lane(ilane)[main_q_idx];
// Place of that specific token in the extra_prev_tokens sublist of that
// specific FST state
int32 local_idx =
cst_dev_params.d_main_q_n_extra_prev_tokens_local_idx.lane(
ilane)[main_q_idx];
// Saving the location of the extra prev tokens for that state into that
// InfoToken
SetSameFSTStateTokensList(
prev_global_idx + extra_prev_tokens_offset, same_count,
&cst_dev_params.d_main_q_info.lane(ilane)[main_q_idx]);
// Where to write this token in d_main_q_extra_prev_tokens
int32 list_idx = extra_prev_tokens_offset + local_idx;
// Moving token. Also saving extra_cost
cst_dev_params.d_main_q_extra_prev_tokens.lane(ilane)[list_idx] =
inf_tok;
cst_dev_params.d_main_q_extra_and_acoustic_cost.lane(
ilane)[list_idx] = {extra_cost, acoustic_cost};
assert(inf_tok.prev_token >= (lane_counters->main_q_global_offset -
cst_dev_params.main_q_capacity) &&
inf_tok.prev_token <=
(lane_counters->main_q_global_offset + main_q_end));
}
}
}
}
// Clear the hashmaps after use
// Each element in the map has a representative in the main_q
// Every one of those representatives is responsible for resetting its
// corresponding value in the hashmap
// Once this kernel returns, the hashmaps are cleared
__global__ void clear_hashmap_kernel(DeviceParams cst_dev_params,
KernelParams params) {
const int nlanes = params.nlanes_used;
KALDI_CUDA_DECODER_BATCH_KERNEL_LOOP(ilane, nlanes) {
LaneCounters *lane_counters = cst_dev_params.d_lanes_counters.lane(ilane);
const int main_q_end = lane_counters->main_q_narcs_and_end.y;
KALDI_CUDA_DECODER_1D_KERNEL_LOOP(main_q_idx, main_q_end) {
bool is_representative;
int32 hash_idx;
GetFSTStateHashIndex(
cst_dev_params.d_main_q_state_hash_idx.lane(ilane)[main_q_idx],
&hash_idx, &is_representative);
// Representative owns a state. Each representative resets its associated
// token.state
// in the hashmap
if (is_representative) {
cst_dev_params.d_hashmap_values.lane(ilane)[hash_idx] =
KALDI_CUDA_DECODER_HASHMAP_NO_VAL; // clear
}
}
}
}
// Kernels wrappers
void SaveChannelsStateFromLanesKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
save_channels_state_from_lanes_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
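// Usage sketch for these wrappers (illustrative only; the real launch
// configurations live in the host-side decoder code, and compute_st_ below is
// an assumed stream name, not defined in this file):
//   dim3 block(KALDI_CUDA_DECODER_1D_BLOCK, 1, 1);
//   dim3 grid(KALDI_CUDA_DECODER_DIV_ROUND_UP(main_q_capacity,
//                                             KALDI_CUDA_DECODER_1D_BLOCK),
//             nlanes_used, 1);
//   SaveChannelsStateFromLanesKernel(grid, block, compute_st_, cst_dev_params,
//                                    kernel_params);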
void LoadChannelsStateInLanesKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
load_channels_state_in_lanes_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void InitDecodingOnDeviceKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
init_decoding_on_device_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void InitializeInitialLaneKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params) {
initialize_initial_lane_kernel<<<grid, block, 0, st>>>(cst_dev_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void ResetForFrameAndEstimateCutoffKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
  reset_for_frame_and_estimate_cutoff_kernel<<<grid, block, 0, st>>>(
      cst_dev_params, kernel_params);
  KALDI_DECODER_CUDA_CHECK_ERROR();
}
template <bool IS_EMITTING>
void ExpandArcsKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
expand_arcs_kernel<IS_EMITTING><<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
template <bool IS_EMITTING>
void PostExpandKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
post_expand_kernel<IS_EMITTING><<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void PostContractAndPreprocessKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
post_contract_and_preprocess_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void NonEmittingPreprocessAndContractKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
nonemitting_preprocess_and_contract_kernel<<<grid, block, 0, st>>>(
cst_dev_params, kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void FillHashmapWithMainQKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
fill_hashmap_with_main_q_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void EmittingPreprocessAndListExtraPrevTokensStep1Kernel(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams &kernel_params) {
emitting_preprocess_and_list_extra_prev_tokens_step1_kernel<<<grid, block, 0,
st>>>(
cst_dev_params, kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void EmittingPreprocessAndListExtraPrevTokensStep2Kernel(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams &kernel_params) {
emitting_preprocess_and_list_extra_prev_tokens_step2_kernel<<<grid, block, 0,
st>>>(
cst_dev_params, kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void EmittingPreprocessAndListExtraPrevTokensStep3Kernel(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams &kernel_params) {
emitting_preprocess_and_list_extra_prev_tokens_step3_kernel<<<grid, block, 0,
st>>>(
cst_dev_params, kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void EmittingPreprocessAndListExtraPrevTokensStep4Kernel(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams &kernel_params) {
emitting_preprocess_and_list_extra_prev_tokens_step4_kernel<<<grid, block, 0,
st>>>(
cst_dev_params, kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void ComputeLaneOffsetsKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
compute_lane_offsets_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
template <typename T>
void ConcatenateLanesDataKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params,
const LaneMatrixView<T> &src, T *concat,
int32 *lane_offsets) {
concatenate_lanes_data_kernel<<<grid, block, 0, st>>>(
cst_dev_params, kernel_params, src, concat, lane_offsets);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void InitHashmapKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params) {
init_hashmap_kernel<<<grid, block, 0, st>>>(cst_dev_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void ClearHashmapKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
clear_hashmap_kernel<<<grid, block, 0, st>>>(cst_dev_params, kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void ComputeCostsHistogramKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params,
bool use_aux_q) {
compute_costs_histogram_kernel<<<grid, block, 0, st>>>(
cst_dev_params, kernel_params, use_aux_q);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void UpdateBeamUsingHistogramKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params,
bool use_aux_q) {
update_beam_using_histogram_kernel<<<grid, block, 0, st>>>(
cst_dev_params, kernel_params, use_aux_q);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void FinalizeProcessNonEmittingKernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
finalize_process_non_emitting_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void GetBestCostStep1Kernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params, bool isfinal,
CostType fst_zero) {
get_best_cost_step1_kernel<<<grid, block, 0, st>>>(
cst_dev_params, kernel_params, isfinal, fst_zero);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void GetBestCostStep2Kernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params, bool isfinal,
CostType fst_zero) {
get_best_cost_step2_kernel<<<grid, block, 0, st>>>(
cst_dev_params, kernel_params, isfinal, fst_zero);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
void GetBestCostStep3Kernel(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams &kernel_params) {
get_best_cost_step3_kernel<<<grid, block, 0, st>>>(cst_dev_params,
kernel_params);
KALDI_DECODER_CUDA_CHECK_ERROR();
}
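// Explicit template instantiations for the templated wrappers above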
template void ExpandArcsKernel<true>(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams ¶ms);
template void ExpandArcsKernel<false>(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams ¶ms);
template void PostExpandKernel<true>(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams ¶ms);
template void PostExpandKernel<false>(const dim3 &grid, const dim3 &block,
const cudaStream_t &st,
const DeviceParams &cst_dev_params,
const KernelParams ¶ms);
template void ConcatenateLanesDataKernel<InfoToken>(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams ¶ms,
const LaneMatrixView<InfoToken> &src, InfoToken *concat,
int32 *lane_offsets);
template void ConcatenateLanesDataKernel<CostType>(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams ¶ms,
const LaneMatrixView<CostType> &src, CostType *concat, int32 *lane_offsets);
template void ConcatenateLanesDataKernel<float2>(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams ¶ms,
const LaneMatrixView<float2> &src, float2 *concat, int32 *lane_offsets);
template void ConcatenateLanesDataKernel<int32>(
const dim3 &grid, const dim3 &block, const cudaStream_t &st,
const DeviceParams &cst_dev_params, const KernelParams ¶ms,
const LaneMatrixView<int32> &src, int32 *concat, int32 *lane_offsets);
} // end namespace cuda_decoder
} // end namespace kaldi
#include "../config.cuh"
#include "../thread/thread_search.cuh"
#include "../util_math.cuh"
#include "../util_namespace.cuh"
#include "../util_ptx.cuh"
#include "../util_type.cuh"
#include "block_scan.cuh"
#include <limits>
#include <type_traits>
CUB_NAMESPACE_BEGIN
/**
* \brief The BlockRunLengthDecode class supports decoding a run-length encoded array of items. That is, given
* the two arrays run_value[N] and run_lengths[N], run_value[i] is repeated run_lengths[i] many times in the output
* array.
* Due to the nature of the run-length decoding algorithm ("decompression"), the output size of the run-length decoded
* array is runtime-dependent and potentially without any upper bound. To address this, BlockRunLengthDecode allows
* retrieving a "window" from the run-length decoded array. The window's offset can be specified and BLOCK_THREADS *
* DECODED_ITEMS_PER_THREAD (i.e., referred to as window_size) decoded items from the specified window will be returned.
*
* \note: Trailing runs of length 0 are supported (i.e., they may only appear at the end of the run_lengths array).
* A run of length zero may not be followed by a run length that is not zero.
*
* \par
* \code
* __global__ void ExampleKernel(...)
* {
* // Specialising BlockRunLengthDecode to run-length decode items of type uint64_t
* using RunItemT = uint64_t;
* // Type large enough to index into the run-length decoded array
* using RunLengthT = uint32_t;
*
* // Specialising BlockRunLengthDecode for a 1D block of 128 threads
* constexpr int BLOCK_DIM_X = 128;
* // Specialising BlockRunLengthDecode to have each thread contribute 2 run-length encoded runs
* constexpr int RUNS_PER_THREAD = 2;
* // Specialising BlockRunLengthDecode to have each thread hold 4 run-length decoded items
* constexpr int DECODED_ITEMS_PER_THREAD = 4;
*
 * // Specialize BlockRunLengthDecode for a 1D block of 128 threads
* using BlockRunLengthDecodeT =
* cub::BlockRunLengthDecode<RunItemT, BLOCK_DIM_X, RUNS_PER_THREAD, DECODED_ITEMS_PER_THREAD>;
*
* // Allocate shared memory for BlockRunLengthDecode
* __shared__ typename BlockRunLengthDecodeT::TempStorage temp_storage;
*
* // The run-length encoded items and how often they shall be repeated in the run-length decoded output
* RunItemT run_values[RUNS_PER_THREAD];
* RunLengthT run_lengths[RUNS_PER_THREAD];
* ...
*
* // Initialize the BlockRunLengthDecode with the runs that we want to run-length decode
* uint32_t total_decoded_size = 0;
* BlockRunLengthDecodeT block_rld(temp_storage, run_values, run_lengths, total_decoded_size);
*
* // Run-length decode ("decompress") the runs into a window buffer of limited size. This is repeated until all runs
* // have been decoded.
* uint32_t decoded_window_offset = 0U;
* while (decoded_window_offset < total_decoded_size)
* {
* RunLengthT relative_offsets[DECODED_ITEMS_PER_THREAD];
* RunItemT decoded_items[DECODED_ITEMS_PER_THREAD];
*
* // The number of decoded items that are valid within this window (aka pass) of run-length decoding
* uint32_t num_valid_items = total_decoded_size - decoded_window_offset;
* block_rld.RunLengthDecode(decoded_items, relative_offsets, decoded_window_offset);
*
* decoded_window_offset += BLOCK_DIM_X * DECODED_ITEMS_PER_THREAD;
*
* ...
* }
* }
* \endcode
* \par
* Suppose the set of input \p run_values across the block of threads is
* <tt>{ [0, 1], [2, 3], [4, 5], [6, 7], ..., [254, 255] }</tt> and
* \p run_lengths is <tt>{ [1, 2], [3, 4], [5, 1], [2, 3], ..., [5, 1] }</tt>.
* The corresponding output \p decoded_items in those threads will be <tt>{ [0, 1, 1, 2], [2, 2, 3, 3], [3, 3, 4, 4],
* [4, 4, 4, 5], ..., [169, 169, 170, 171] }</tt> and \p relative_offsets will be <tt>{ [0, 0, 1, 0], [1, 2, 0, 1], [2,
* 3, 0, 1], [2, 3, 4, 0], ..., [3, 4, 0, 0] }</tt> during the first iteration of the while loop.
*
* \tparam ItemT The data type of the items being run-length decoded
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam RUNS_PER_THREAD The number of consecutive runs that each thread contributes
* \tparam DECODED_ITEMS_PER_THREAD The maximum number of decoded items that each thread holds
* \tparam DecodedOffsetT Type used to index into the block's decoded items (large enough to hold the sum over all the
* runs' lengths)
* \tparam BLOCK_DIM_Y The thread block length in threads along the Y dimension
* \tparam BLOCK_DIM_Z The thread block length in threads along the Z dimension
*/
template <typename ItemT,
int BLOCK_DIM_X,
int RUNS_PER_THREAD,
int DECODED_ITEMS_PER_THREAD,
typename DecodedOffsetT = uint32_t,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1>
class BlockRunLengthDecode
{
//---------------------------------------------------------------------
// CONFIGS & TYPE ALIASES
//---------------------------------------------------------------------
private:
/// The thread block size in threads
static constexpr int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
/// The number of runs that the block decodes (out-of-bounds items may be padded with run lengths of '0')
static constexpr int BLOCK_RUNS = BLOCK_THREADS * RUNS_PER_THREAD;
/// BlockScan used to determine the beginning of each run (i.e., prefix sum over the runs' length)
using RunOffsetScanT = BlockScan<DecodedOffsetT, BLOCK_DIM_X, BLOCK_SCAN_RAKING_MEMOIZE, BLOCK_DIM_Y, BLOCK_DIM_Z>;
/// Type used to index into the block's runs
using RunOffsetT = uint32_t;
/// Shared memory type required by this thread block
union _TempStorage
{
typename RunOffsetScanT::TempStorage offset_scan;
struct
{
ItemT run_values[BLOCK_RUNS];
DecodedOffsetT run_offsets[BLOCK_RUNS];
} runs;
}; // union TempStorage
/// Internal storage allocator (used when the user does not provide pre-allocated shared memory)
__device__ __forceinline__ _TempStorage &PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
uint32_t linear_tid;
public:
struct TempStorage : Uninitialized<_TempStorage>
{};
//---------------------------------------------------------------------
// CONSTRUCTOR
//---------------------------------------------------------------------
/**
* \brief Constructor specialised for user-provided temporary storage, initializing using the runs' lengths. The
* algorithm's temporary storage may not be repurposed between the constructor call and subsequent
* <b>RunLengthDecode</b> calls.
*/
template <typename RunLengthT, typename TotalDecodedSizeT>
__device__ __forceinline__ BlockRunLengthDecode(TempStorage &temp_storage,
ItemT (&run_values)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
TotalDecodedSizeT &total_decoded_size)
: temp_storage(temp_storage.Alias())
, linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{
InitWithRunLengths(run_values, run_lengths, total_decoded_size);
}
/**
* \brief Constructor specialised for user-provided temporary storage, initializing using the runs' offsets. The
* algorithm's temporary storage may not be repurposed between the constructor call and subsequent
* <b>RunLengthDecode</b> calls.
*/
template <typename UserRunOffsetT>
__device__ __forceinline__ BlockRunLengthDecode(TempStorage &temp_storage,
ItemT (&run_values)[RUNS_PER_THREAD],
UserRunOffsetT (&run_offsets)[RUNS_PER_THREAD])
: temp_storage(temp_storage.Alias())
, linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{
InitWithRunOffsets(run_values, run_offsets);
}
/**
* \brief Constructor specialised for static temporary storage, initializing using the runs' lengths.
*/
template <typename RunLengthT, typename TotalDecodedSizeT>
__device__ __forceinline__ BlockRunLengthDecode(ItemT (&run_values)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
TotalDecodedSizeT &total_decoded_size)
: temp_storage(PrivateStorage())
, linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{
InitWithRunLengths(run_values, run_lengths, total_decoded_size);
}
/**
* \brief Constructor specialised for static temporary storage, initializing using the runs' offsets.
*/
template <typename UserRunOffsetT>
__device__ __forceinline__ BlockRunLengthDecode(ItemT (&run_values)[RUNS_PER_THREAD],
UserRunOffsetT (&run_offsets)[RUNS_PER_THREAD])
: temp_storage(PrivateStorage())
, linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{
InitWithRunOffsets(run_values, run_offsets);
}
private:
/**
* \brief Returns the offset of the first value within \p input which compares greater than \p val. This version takes
* \p MAX_NUM_ITEMS, an upper bound of the array size, which will be used to determine the number of binary search
* iterations at compile time.
*/
template <int MAX_NUM_ITEMS,
typename InputIteratorT,
typename OffsetT,
typename T>
__device__ __forceinline__ OffsetT StaticUpperBound(InputIteratorT input, ///< [in] Input sequence
OffsetT num_items, ///< [in] Input sequence length
T val) ///< [in] Search key
{
OffsetT lower_bound = 0;
OffsetT upper_bound = num_items;
#pragma unroll
for (int i = 0; i <= Log2<MAX_NUM_ITEMS>::VALUE; i++)
{
OffsetT mid = cub::MidPoint<OffsetT>(lower_bound, upper_bound);
mid = (cub::min)(mid, num_items - 1);
if (val < input[mid])
{
upper_bound = mid;
}
else
{
lower_bound = mid + 1;
}
}
return lower_bound;
}
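  // Worked example (illustrative): for run offsets input = {0, 2, 3, 6},
  // num_items = 4 and val = 3, the first element comparing greater than 3 is 6,
  // at offset 3, so StaticUpperBound returns 3. RunLengthDecode below then uses
  // (that result - 1) = 2 as the run that decoded offset 3 belongs to
  // (run 2 covers decoded offsets [3, 6)).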
template <typename RunOffsetT>
__device__ __forceinline__ void InitWithRunOffsets(ItemT (&run_values)[RUNS_PER_THREAD],
RunOffsetT (&run_offsets)[RUNS_PER_THREAD])
{
// Keep the runs' items and the offsets of each run's beginning in the temporary storage
RunOffsetT thread_dst_offset = static_cast<RunOffsetT>(linear_tid) * static_cast<RunOffsetT>(RUNS_PER_THREAD);
#pragma unroll
for (int i = 0; i < RUNS_PER_THREAD; i++)
{
temp_storage.runs.run_values[thread_dst_offset] = run_values[i];
temp_storage.runs.run_offsets[thread_dst_offset] = run_offsets[i];
thread_dst_offset++;
}
    // Ensure run offsets and run values have been written to shared memory
CTA_SYNC();
}
template <typename RunLengthT, typename TotalDecodedSizeT>
__device__ __forceinline__ void InitWithRunLengths(ItemT (&run_values)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
TotalDecodedSizeT &total_decoded_size)
{
// Compute the offset for the beginning of each run
DecodedOffsetT run_offsets[RUNS_PER_THREAD];
#pragma unroll
for (int i = 0; i < RUNS_PER_THREAD; i++)
{
run_offsets[i] = static_cast<DecodedOffsetT>(run_lengths[i]);
}
DecodedOffsetT decoded_size_aggregate;
RunOffsetScanT(this->temp_storage.offset_scan).ExclusiveSum(run_offsets, run_offsets, decoded_size_aggregate);
total_decoded_size = static_cast<TotalDecodedSizeT>(decoded_size_aggregate);
// Ensure the prefix scan's temporary storage can be reused (may be superfluous, but depends on scan implementation)
CTA_SYNC();
InitWithRunOffsets(run_values, run_offsets);
}
public:
/**
* \brief Run-length decodes the runs previously passed via a call to Init(...) and returns the run-length decoded
* items in a blocked arrangement to \p decoded_items. If the number of run-length decoded items exceeds the
* run-length decode buffer (i.e., <b>DECODED_ITEMS_PER_THREAD * BLOCK_THREADS</b>), only the items that fit within
* the buffer are returned. Subsequent calls to <b>RunLengthDecode</b> adjusting \p from_decoded_offset can be
* used to retrieve the remaining run-length decoded items. Calling __syncthreads() between any two calls to
* <b>RunLengthDecode</b> is not required.
* \p item_offsets can be used to retrieve each run-length decoded item's relative index within its run. E.g., the
* run-length encoded array of `3, 1, 4` with the respective run lengths of `2, 1, 3` would yield the run-length
* decoded array of `3, 3, 1, 4, 4, 4` with the relative offsets of `0, 1, 0, 0, 1, 2`.
* \smemreuse
*
* \param[out] decoded_items The run-length decoded items to be returned in a blocked arrangement
* \param[out] item_offsets The run-length decoded items' relative offset within the run they belong to
   * \param[in] from_decoded_offset Invoking with a from_decoded_offset larger than total_decoded_size results in
   * undefined behavior.
*/
template <typename RelativeOffsetT>
__device__ __forceinline__ void RunLengthDecode(ItemT (&decoded_items)[DECODED_ITEMS_PER_THREAD],
RelativeOffsetT (&item_offsets)[DECODED_ITEMS_PER_THREAD],
DecodedOffsetT from_decoded_offset = 0)
{
// The (global) offset of the first item decoded by this thread
DecodedOffsetT thread_decoded_offset = from_decoded_offset + linear_tid * DECODED_ITEMS_PER_THREAD;
// The run that the first decoded item of this thread belongs to
// If this thread's <thread_decoded_offset> is already beyond the total decoded size, it will be assigned to the
// last run
RunOffsetT assigned_run =
StaticUpperBound<BLOCK_RUNS>(temp_storage.runs.run_offsets, BLOCK_RUNS, thread_decoded_offset) -
static_cast<RunOffsetT>(1U);
DecodedOffsetT assigned_run_begin = temp_storage.runs.run_offsets[assigned_run];
// If this thread is getting assigned the last run, we make sure it will not fetch any other run after this
DecodedOffsetT assigned_run_end = (assigned_run == BLOCK_RUNS - 1)
? thread_decoded_offset + DECODED_ITEMS_PER_THREAD
: temp_storage.runs.run_offsets[assigned_run + 1];
ItemT val = temp_storage.runs.run_values[assigned_run];
#pragma unroll
for (DecodedOffsetT i = 0; i < DECODED_ITEMS_PER_THREAD; i++)
{
decoded_items[i] = val;
item_offsets[i] = thread_decoded_offset - assigned_run_begin;
if (thread_decoded_offset == assigned_run_end - 1)
{
        // We make sure that a thread does not re-enter this conditional once it has been assigned to the last run,
        // by extending the last run's length to cover all of the thread's items
assigned_run++;
assigned_run_begin = temp_storage.runs.run_offsets[assigned_run];
// If this thread is getting assigned the last run, we make sure it will not fetch any other run after this
assigned_run_end = (assigned_run == BLOCK_RUNS - 1) ? thread_decoded_offset + DECODED_ITEMS_PER_THREAD
: temp_storage.runs.run_offsets[assigned_run + 1];
val = temp_storage.runs.run_values[assigned_run];
}
thread_decoded_offset++;
}
}
/**
* \brief Run-length decodes the runs previously passed via a call to Init(...) and returns the run-length decoded
* items in a blocked arrangement to \p decoded_items. If the number of run-length decoded items exceeds the
* run-length decode buffer (i.e., <b>DECODED_ITEMS_PER_THREAD * BLOCK_THREADS</b>), only the items that fit within
* the buffer are returned. Subsequent calls to <b>RunLengthDecode</b> adjusting \p from_decoded_offset can be
* used to retrieve the remaining run-length decoded items. Calling __syncthreads() between any two calls to
* <b>RunLengthDecode</b> is not required.
*
* \param[out] decoded_items The run-length decoded items to be returned in a blocked arrangement
   * \param[in] from_decoded_offset Invoking with a from_decoded_offset larger than total_decoded_size results in
   * undefined behavior.
*/
__device__ __forceinline__ void RunLengthDecode(ItemT (&decoded_items)[DECODED_ITEMS_PER_THREAD],
DecodedOffsetT from_decoded_offset = 0)
{
DecodedOffsetT item_offsets[DECODED_ITEMS_PER_THREAD];
RunLengthDecode(decoded_items, item_offsets, from_decoded_offset);
}
};
CUB_NAMESPACE_END
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38F
#endif
#define EPSILON_ABS_ZERO 1e-10
#define EPSILON_DIV_ZERO 1e-4
// for the older gpus atomicAdd with double arguments does not exist
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
static __inline__ __device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
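// Usage sketch (illustrative only): with the fallback above, the same
// atomicAdd call works on both pre-sm_60 and sm_60+ devices, e.g.:
//   __global__ void accumulate(double *sum, const double *vals, int n) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n) atomicAdd(sum, vals[i]);
//   }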
namespace{
/*
Above-triangle test
Method: consider the tetrahedron constructed from the triangle and the query point and
check whether the signed volume of the tetrahedron is non-negative
*/
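/*
In other words, above_triangle_test computes the scalar triple product
((v1 - v0) x (v2 - v0)) . (p - v0), i.e. the 3x3 determinant of the three edge
vectors. The signed volume of the tetrahedron (v0, v1, v2, p) is that
determinant divided by 6, so its sign tells on which side of the triangle's
plane the query point p lies.
*/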
template<typename scalar_t>
__device__ bool above_triangle_test(
const scalar_t *v0, const scalar_t *v1, const scalar_t *v2, const scalar_t *p) {
const scalar_t x1 = v1[0] - v0[0], y1 = v1[1] - v0[1], z1 = v1[2] - v0[2];
const scalar_t x2 = v2[0] - v0[0], y2 = v2[1] - v0[1], z2 = v2[2] - v0[2];
const scalar_t x3 = p[0] - v0[0], y3 = p[1] - v0[1], z3 = p[2] - v0[2];
return (x1*y2*z3 - x1*y3*z2 - x2*y1*z3 + x2*y3*z1 + x3*y1*z2 - x3*y2*z1) >= 0;
}
/*
In-tetrahedron test
Method: check whether the query point is on the same side of ("above" or "below") all four triangles of the tetrahedron
*/
template<typename scalar_t>
__device__ bool in_tetrahedron_test(const scalar_t *tet, const scalar_t* p) {
bool flags[4];
const int tris[3*4] {
/* root, edge1, edge2 */
0, 2, 1,
0, 3, 2,
0, 1, 3,
1, 2, 3
};
#pragma unroll
for (int k = 0; k < 4; k++) {
const scalar_t* v0 = tet + 3 * tris[3*k+0];
const scalar_t* v1 = tet + 3 * tris[3*k+1];
const scalar_t* v2 = tet + 3 * tris[3*k+2];
flags[k] = above_triangle_test(v0, v1, v2, p);
}
return flags[0] == flags[1] && flags[0] == flags[2] && flags[0] == flags[3];
}
/*
Voxel labeling
*/
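/*
Note on the atomics below: for the float version, non-negative IEEE-754 floats
keep their ordering when reinterpreted as ints, so an atomicMax on the int bit
pattern of 1.0f marks the voxel as occupied. For the double version, atomicCAS
only swaps 0.0 -> 1.0, which is enough because voxels start at 0 and are only
ever set to 1.
*/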
__device__ void label_occupied_voxel(float *voxel) {
atomicMax((int*)voxel, __float_as_int(1.0f));
}
__device__ void label_occupied_voxel(double *voxel) {
atomicCAS((unsigned long long*)voxel, __double_as_longlong(0), __double_as_longlong(1.0));
}
/*
Distance Calculator
*/
__device__ float calc_squared_dist(const float *p1, const float *p2) {
const float x = p1[0] - p2[0];
const float y = p1[1] - p2[1];
const float z = p1[2] - p2[2];
return x*x + y*y + z*z;
}
__device__ double calc_squared_dist(const double *p1, const double *p2) {
const double x = p1[0] - p2[0];
const double y = p1[1] - p2[1];
const double z = p1[2] - p2[2];
return x*x + y*y + z*z;
}
// __device__ float calc_dist(const float *p1, const float *p2) {
// return __fsqrt_rn(calc_squared_dist(p1, p2));
// }
// __device__ double calc_dist(const double *p1, const double *p2) {
// return __dsqrt_rn(calc_squared_dist(p1, p2));
// }
template<typename scalar_t>
__global__ void forward_voxelize_cuda_kernel(
const scalar_t* __restrict__ tetrahedrons,
scalar_t* __restrict__ out_volume,
int batch_size,
int num_tets,
int volume_res) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * num_tets) {
return;
}
const int vr = volume_res;
const int bi = i / num_tets;
const int ti = i % num_tets;
const scalar_t voxel_size = 1.0 / volume_res;
const int vr_half = vr / 2;
const scalar_t* tet = &tetrahedrons[i * 12];
scalar_t xmin = tet[0], ymin = tet[1], zmin = tet[2];
scalar_t xmax = tet[0], ymax = tet[1], zmax = tet[2];
#pragma unroll
for (int k = 1; k < 4; k++) {
xmin = fminf(xmin, tet[3*k + 0]);
xmax = fmaxf(xmax, tet[3*k + 0]);
ymin = fminf(ymin, tet[3*k + 1]);
ymax = fmaxf(ymax, tet[3*k + 1]);
zmin = fminf(zmin, tet[3*k + 2]);
zmax = fmaxf(zmax, tet[3*k + 2]);
}
// checks the voxels in the bounding box of that tetrahedron
const int xvmin = max(int(xmin/voxel_size + vr_half), 0);
const int xvmax = min(int(xmax/voxel_size + vr_half) + 1, vr-1);
const int yvmin = max(int(ymin/voxel_size + vr_half), 0);
const int yvmax = min(int(ymax/voxel_size + vr_half) + 1, vr-1);
const int zvmin = max(int(zmin/voxel_size + vr_half), 0);
const int zvmax = min(int(zmax/voxel_size + vr_half) + 1, vr-1);
for (int zz = zvmin; zz <= zvmax; zz++)
for (int yy = yvmin; yy <= yvmax; yy++)
for (int xx = xvmin; xx <= xvmax; xx++) {
#if 0
const int dx[8] = {0, 1, 0, 1, 0, 1, 0, 1};
const int dy[8] = {0, 0, 1, 1, 0, 0, 1, 1};
const int dz[8] = {0, 0, 0, 0, 1, 1, 1, 1};
      // if at least one corner of the voxel is inside the tetrahedron,
      // then we consider the voxel to be inside the tetrahedron
#pragma unroll
for (int k = 0; k < 8; k++)
{
const scalar_t px = (xx + dx[k] - vr_half) * voxel_size;
const scalar_t py = (yy + dy[k] - vr_half) * voxel_size;
const scalar_t pz = (zz + dz[k] - vr_half) * voxel_size;
const scalar_t pt[3] = {px, py, pz};
if (in_tetrahedron_test(tet, pt)) {
const int i_ = bi*vr*vr*vr + zz*vr*vr + yy*vr + xx;
label_occupied_voxel(&(out_volume[i_]));
break;
}
}
#else
const scalar_t px = (xx + 0.5 - vr_half) * voxel_size;
const scalar_t py = (yy + 0.5 - vr_half) * voxel_size;
const scalar_t pz = (zz + 0.5 - vr_half) * voxel_size;
const scalar_t pt[3] = {px, py, pz};
if (in_tetrahedron_test(tet, pt)) {
const int i_ = bi*vr*vr*vr + zz*vr*vr + yy*vr + xx;
label_occupied_voxel(&(out_volume[i_]));
}
#endif
}
}
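/*
The kernel below computes, for each occupied voxel center p, a distance-weighted
average of the per-vertex semantic codes:
    w_k     = exp(-||p - v_k||^2 / sigma^2)
    code(p) = sum_k w_k * code_k / (1e-10 + sum_k w_k)
and also stores the (regularized) weight sum per voxel.
*/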
template<typename scalar_t>
__global__ void forward_calc_semantic_volume_cuda_kernel(
const scalar_t* __restrict__ occ_volume,
const scalar_t* __restrict__ smpl_vertices,
const scalar_t* __restrict__ smpl_vertex_code,
scalar_t* __restrict__ semantic_volume,
scalar_t* __restrict__ weight_sum_volume,
float sigma,
int batch_size,
int num_vertex,
int volume_res) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * volume_res * volume_res * volume_res) {
return;
}
if (occ_volume[i] < 1e-3) { // empty voxel
return;
}
const int vr = volume_res;
const int vn = num_vertex;
const int vr_half = vr / 2;
const scalar_t voxel_size = 1.0 / volume_res;
const int bi = i / (vr*vr*vr);
const int vi = i % (vr*vr*vr);
const int xv = vi % vr;
const int yv = (vi/vr) % vr;
const int zv = vi / (vr*vr);
const scalar_t px = (xv + 0.5 - vr_half) * voxel_size;
const scalar_t py = (yv + 0.5 - vr_half) * voxel_size;
const scalar_t pz = (zv + 0.5 - vr_half) * voxel_size;
const scalar_t pt[3] = {px, py, pz};
const scalar_t* sv = smpl_vertices + bi * vn * 3;
const scalar_t* sc = smpl_vertex_code + bi * vn * 3;
scalar_t weight_sum = 1e-10;
scalar_t code[3] = {(scalar_t)0};
for (int k = 0; k < vn; k++) {
const scalar_t d = calc_squared_dist(pt, sv + k*3);
const scalar_t w = __expf(-d/(sigma*sigma));
code[0] += w * sc[k * 3 + 0];
code[1] += w * sc[k * 3 + 1];
code[2] += w * sc[k * 3 + 2];
weight_sum += w;
}
semantic_volume[3 * i + 0] = code[0] / weight_sum;
semantic_volume[3 * i + 1] = code[1] / weight_sum;
semantic_volume[3 * i + 2] = code[2] / weight_sum;
weight_sum_volume[i] = weight_sum;
}
}
std::vector<at::Tensor> forward_semantic_voxelization_cuda(
at::Tensor smpl_vertices,
at::Tensor smpl_vertex_code,
at::Tensor smpl_tetrahedrons,
at::Tensor occ_volume,
at::Tensor semantic_volume,
at::Tensor weight_sum_volume,
float sigma) {
const auto batch_size = smpl_vertices.size(0);
const auto num_vertex = smpl_vertices.size(1);
const auto num_tets = smpl_tetrahedrons.size(1);
const auto volume_res = occ_volume.size(1);
const int threads = 512;
const dim3 blocks_1 ((batch_size * num_tets - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(smpl_vertices.scalar_type(), "forward_voxelize_cuda_kernel", ([&] {
forward_voxelize_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
smpl_tetrahedrons.data_ptr<scalar_t>(),
occ_volume.data_ptr<scalar_t>(),
batch_size,
num_tets,
volume_res);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_voxelize_cuda_kernel: %s\n", cudaGetErrorString(err));
const dim3 blocks_2 ((batch_size * volume_res * volume_res * volume_res - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(smpl_vertices.scalar_type(), "forward_calc_semantic_volume_cuda_kernel", ([&] {
forward_calc_semantic_volume_cuda_kernel<scalar_t><<<blocks_2, threads>>>(
occ_volume.data_ptr<scalar_t>(),
smpl_vertices.data_ptr<scalar_t>(),
smpl_vertex_code.data_ptr<scalar_t>(),
semantic_volume.data_ptr<scalar_t>(),
weight_sum_volume.data_ptr<scalar_t>(),
sigma,
batch_size,
num_vertex,
volume_res);
}));
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_calc_semantic_volume_cuda_kernel: %s\n", cudaGetErrorString(err));
return {occ_volume, semantic_volume, weight_sum_volume};
}
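// Usage sketch (illustrative only; the tensor shapes below are assumptions
// about the caller, not something enforced in this file):
//   smpl_vertices     : float tensor [B, N, 3]
//   smpl_vertex_code  : float tensor [B, N, 3]
//   smpl_tetrahedrons : float tensor [B, T, 4, 3] (12 scalars per tetrahedron)
//   occ_volume        : float tensor [B, R, R, R], zero-initialized
//   semantic_volume   : float tensor [B, R, R, R, 3], zero-initialized
//   weight_sum_volume : float tensor [B, R, R, R], zero-initialized
//   auto out = forward_semantic_voxelization_cuda(
//       smpl_vertices, smpl_vertex_code, smpl_tetrahedrons,
//       occ_volume, semantic_volume, weight_sum_volume, 0.05f /* sigma */);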
#include <thrust/sequence.h>
#include <random>
template <typename T>
struct TypedColumnTest : public cudf::test::BaseFixture {
cudf::data_type type() { return cudf::data_type{cudf::type_to_id<T>()}; }
TypedColumnTest()
: data{_num_elements * cudf::size_of(type()), rmm::cuda_stream_default},
mask{cudf::bitmask_allocation_size_bytes(_num_elements), rmm::cuda_stream_default}
{
auto typed_data = static_cast<char*>(data.data());
auto typed_mask = static_cast<char*>(mask.data());
thrust::sequence(thrust::device, typed_data, typed_data + data.size());
thrust::sequence(thrust::device, typed_mask, typed_mask + mask.size());
}
cudf::size_type num_elements() { return _num_elements; }
std::random_device r;
std::default_random_engine generator{r()};
std::uniform_int_distribution<cudf::size_type> distribution{200, 1000};
cudf::size_type _num_elements{distribution(generator)};
rmm::device_buffer data{};
rmm::device_buffer mask{};
rmm::device_buffer all_valid_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)};
rmm::device_buffer all_null_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)};
};
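// The fixture above provides a random element count in [200, 1000], `data` and
// `mask` device buffers filled with a byte sequence, and pre-built
// all-valid / all-null masks sized for that element count.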
TYPED_TEST_CASE(TypedColumnTest, cudf::test::Types<int32_t>);
/**
* @brief Verifies equality of the properties and data of a `column`'s views.
*
* @param col The `column` to verify
*/
void verify_column_views(cudf::column col)
{
cudf::column_view view = col;
cudf::mutable_column_view mutable_view = col;
EXPECT_EQ(col.type(), view.type());
EXPECT_EQ(col.type(), mutable_view.type());
EXPECT_EQ(col.size(), view.size());
EXPECT_EQ(col.size(), mutable_view.size());
EXPECT_EQ(col.null_count(), view.null_count());
EXPECT_EQ(col.null_count(), mutable_view.null_count());
EXPECT_EQ(col.nullable(), view.nullable());
EXPECT_EQ(col.nullable(), mutable_view.nullable());
EXPECT_EQ(col.num_children(), view.num_children());
EXPECT_EQ(col.num_children(), mutable_view.num_children());
EXPECT_EQ(view.head(), mutable_view.head());
EXPECT_EQ(view.data<char>(), mutable_view.data<char>());
EXPECT_EQ(view.offset(), mutable_view.offset());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask), 0};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull)
{
cudf::column col{this->type(),
this->num_elements(),
std::move(this->data),
std::move(this->all_null_mask),
this->num_elements()};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
rmm::device_buffer empty_null_mask{};
EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
auto invalid_size_null_mask =
create_null_mask(std::min(this->num_elements() - 50, 0), cudf::mask_state::ALL_VALID);
EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_NO_THROW(col.set_null_count(0));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_NO_THROW(col.set_null_count(this->num_elements()));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllNull)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)};
EXPECT_EQ(this->num_elements(), col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllValid)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_EQ(0, col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, CopyDataNoMask)
{
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size());
}
TYPED_TEST(TypedColumnTest, MoveDataNoMask)
{
void* original_data = this->data.data();
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
}
TYPED_TEST(TypedColumnTest, CopyDataAndMask)
{
cudf::column col{this->type(),
this->num_elements(),
rmm::device_buffer{this->data, rmm::cuda_stream_default},
rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
EXPECT_NE(v.null_mask(), this->all_valid_mask.data());
CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size());
CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.null_mask(), this->all_valid_mask.data(), this->mask.size());
}
TYPED_TEST(TypedColumnTest, MoveDataAndMask)
{
void* original_data = this->data.data();
void* original_mask = this->all_valid_mask.data();
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
EXPECT_EQ(v.null_mask(), original_mask);
}
TYPED_TEST(TypedColumnTest, CopyConstructorNoMask)
{
cudf::column original{this->type(), this->num_elements(), std::move(this->data)};
cudf::column copy{original};
verify_column_views(copy);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
}
TYPED_TEST(TypedColumnTest, CopyConstructorWithMask)
{
cudf::column original{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
cudf::column copy{original};
verify_column_views(copy);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
TYPED_TEST(TypedColumnTest, MoveConstructorNoMask)
{
cudf::column original{this->type(), this->num_elements(), std::move(this->data)};
auto original_data = original.view().head();
cudf::column moved_to{std::move(original)};
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
verify_column_views(moved_to);
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
}
TYPED_TEST(TypedColumnTest, MoveConstructorWithMask)
{
cudf::column original{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
auto original_data = original.view().head();
auto original_mask = original.view().null_mask();
cudf::column moved_to{std::move(original)};
verify_column_views(moved_to);
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
EXPECT_EQ(original_mask, moved_to_view.null_mask());
}
TYPED_TEST(TypedColumnTest, ConstructWithChildren)
{
std::vector<std::unique_ptr<cudf::column>> children;
children.emplace_back(std::make_unique<cudf::column>(
cudf::data_type{cudf::type_id::INT8},
42,
rmm::device_buffer{this->data, rmm::cuda_stream_default},
rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}));
children.emplace_back(std::make_unique<cudf::column>(
cudf::data_type{cudf::type_id::FLOAT64},
314,
rmm::device_buffer{this->data, rmm::cuda_stream_default},
rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}));
cudf::column col{this->type(),
this->num_elements(),
rmm::device_buffer{this->data, rmm::cuda_stream_default},
rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default},
cudf::UNKNOWN_NULL_COUNT,
std::move(children)};
verify_column_views(col);
EXPECT_EQ(2, col.num_children());
EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type());
EXPECT_EQ(42, col.child(0).size());
EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type());
EXPECT_EQ(314, col.child(1).size());
}
TYPED_TEST(TypedColumnTest, ReleaseNoChildren)
{
cudf::column col{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(0u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ReleaseWithChildren)
{
std::vector<std::unique_ptr<cudf::column>> children;
children.emplace_back(std::make_unique<cudf::column>(
this->type(),
this->num_elements(),
rmm::device_buffer{this->data, rmm::cuda_stream_default},
rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}));
children.emplace_back(std::make_unique<cudf::column>(
this->type(),
this->num_elements(),
rmm::device_buffer{this->data, rmm::cuda_stream_default},
rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}));
cudf::column col{this->type(),
this->num_elements(),
rmm::device_buffer{this->data, rmm::cuda_stream_default},
rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default},
cudf::UNKNOWN_NULL_COUNT,
std::move(children)};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(2u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask)
{
cudf::column original{
this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)};
cudf::column_view original_view = original;
cudf::column copy{original_view};
verify_column_views(copy);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy);
// Verify deep copy
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
template <typename T>
struct ListsColumnTest : public cudf::test::BaseFixture {
};
using NumericTypesNotBool =
cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::FloatingPointTypes>;
TYPED_TEST_CASE(ListsColumnTest, NumericTypesNotBool);
TYPED_TEST(ListsColumnTest, ListsColumnViewConstructor)
{
cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}};
auto result = std::make_unique<cudf::column>(list);
cudf::test::expect_columns_equal(list, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructor)
{
cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}};
cudf::test::lists_column_wrapper<TypeParam> expect{{3, 4}, {5, 6, 7}};
auto sliced = cudf::slice(list, {1, 3}).front();
auto result = std::make_unique<cudf::column>(sliced);
cudf::test::expect_columns_equal(expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedIncludesEmpty)
{
cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {}, {3, 4}, {8, 9}};
cudf::test::lists_column_wrapper<TypeParam> expect{{}, {3, 4}};
auto sliced = cudf::slice(list, {1, 3}).front();
auto result = std::make_unique<cudf::column>(sliced);
cudf::test::expect_columns_equal(expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedNonNestedEmpty)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
// Column of List<int>
LCW list{{1, 2}, {}, {3, 4}, {8, 9}};
// Column of 1 row, an empty List<int>
LCW expect{LCW{}};
auto sliced = cudf::slice(list, {1, 2}).front();
auto result = std::make_unique<cudf::column>(sliced);
cudf::test::expect_columns_equal(expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedNestedEmpty)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>;
// Column of List<List<int>>, with incomplete hierarchy
LCW list{{LCW{1}, LCW{2}},
{}, // < ----------- empty List<List<int>>, slice this
{LCW{3}, LCW{4, 5}}};
// Make a 1-row column of type List<List<int>>; the row contains 0 elements.
// Well-formed memory layout:
// type: List<List<int>>
// Length: 1
// Mask: 1
// Offsets: 0, 0
// List<int>
// Length: 0
// Offset:
// INT
// Length: 0
auto leaf = std::make_unique<cudf::column>(cudf::column(LCW{}));
auto offset = std::make_unique<cudf::column>(cudf::column(FWCW_SZ{0, 0}));
auto null_mask = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED);
auto expect =
cudf::make_lists_column(1, std::move(offset), std::move(leaf), 0, std::move(null_mask));
auto sliced = cudf::slice(list, {1, 2}).front();
auto result = std::make_unique<cudf::column>(sliced);
cudf::test::expect_columns_equal(*expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNested)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>;
// Column of List<List<int>>, with incomplete hierarchy
LCW list{{LCW{1}, LCW{2}}, {}, {LCW{3}, LCW{4, 5}}};
auto expect = cudf::empty_like(list);
auto sliced = cudf::slice(list, {0, 0}).front();
auto result = std::make_unique<cudf::column>(sliced);
cudf::test::expect_columns_equal(*expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNonNested)
{
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>;
LCW list{{1, 2}, {}, {3, 4}, {8, 9}};
auto expect = cudf::empty_like(list);
auto sliced = cudf::slice(list, {0, 0}).front();
auto result = std::make_unique<cudf::column>(sliced);
cudf::test::expect_columns_equal(*expect, result->view());
}
TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructorWithNulls)
{
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
auto expect_valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 != 0; });
using LCW = cudf::test::lists_column_wrapper<TypeParam>;
cudf::test::lists_column_wrapper<TypeParam> list{
{{{{1, 2}, {3, 4}}, valids}, LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}},
valids};
cudf::test::lists_column_wrapper<TypeParam> expect{
{LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, expect_valids};
auto sliced = cudf::slice(list, {1, 5}).front();
auto result = std::make_unique<cudf::column>(sliced);
cudf::test::expect_columns_equal(expect, result->view());
// TODO: null mask equality is being checked separately because
// expect_columns_equal doesn't do the check for lists columns.
// This is fixed in https://github.com/rapidsai/cudf/pull/5904,
// so we should remove this check after that's merged:
cudf::test::expect_columns_equal(
cudf::mask_to_bools(result->view().null_mask(), 0, 4)->view(),
cudf::mask_to_bools(static_cast<cudf::column_view>(expect).null_mask(), 0, 4)->view());
}
CUDF_TEST_PROGRAM_MAIN()
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define IDX3C(c,i,j,in_h,in_w) ((c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX4C(n,c,i,j,in_c,in_h,in_w) ((n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX5C(t,n,c,i,j,in_n,in_c,in_h,in_w) ((t)*((in_n)*(in_c)*(in_h)*(in_w)) + (n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define BREAK return(enif_make_int(env, 0));
#define PI 3.14159265358979323846
#define SIGMOID(x) (1 / (1+exp(-1*x)))
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
return enif_make_int(env,10000+(int)error); \
} \
}
#define CUBLAS(call) \
{ \
const cublasStatus error = call; \
if (error != CUBLAS_STATUS_SUCCESS) \
{ \
return enif_make_int(env,11000+(int)error); \
} \
}
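/*
  Added note (sketch): the IDX macros above flatten multi-dimensional
  indices into a flat offset. IDX2C is column-major (the cuBLAS
  convention); IDX3C/IDX4C/IDX5C are row-major with the last index
  varying fastest, e.g.
    IDX4C(n,c,i,j,in_c,in_h,in_w) == ((n*in_c + c)*in_h + i)*in_w + j
  The helper below is illustrative only and is not used by any NIF.
*/
static int idx4c_equiv(int n, int c, int i, int j, int in_c, int in_h, int in_w)
{
    // nonzero when the macro matches the nested row-major formulation
    return IDX4C(n,c,i,j,in_c,in_h,in_w) == ((n*in_c + c)*in_h + i)*in_w + j;
}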
__global__ void pooling_kernel(float *a, float *b, float *c, int st_h, int st_w, int in_c, int in_h, int in_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,in_h2,in_w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w;
float max,fmax_h,fmax_w;
n1 = bid;
c1 = tid;
in_h2 = in_h / st_h;
in_w2 = in_w / st_w;
for(w2=0;w2<in_w2;w2++){
for(h2=0;h2<in_h2;h2++){
max = -999999999.0;
start_h1 = st_h*h2;
end_h1 = st_h*(h2+1);
start_w1 = st_w*w2;
end_w1 = st_w*(w2+1);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)] >= max){
max = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
max_h = h1;
max_w = w1;
}
}
}
b[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = max;
fmax_h = (float)max_h;
fmax_w = (float)max_w;
c[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = fmax_h * 1000.0 + fmax_w;
}
}
}
/*
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
6th arg stride height
7th arg stride width
return tuple {ts1,ts2}
ts1 is the result data for forward
ts2 is the result data for backward. this is a sparse matrix of max indices
e.g.
|0.1,0.2,0.3,0.4|
|0.5,0.6,0.7,0.8|
|0.9,1.0,1.1,1.2|
|1.3,1.4,1.5,1.6|
ts1
|0.6,0.8|
|1.4,1.6|
ts2
each element is row*1000+col
|1.0*1000+1.0,1.0*1000+3.0|
|3.0*1000+1.0,3.0*1000+3.0|
*/
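/*
  Added sketch (illustrative helper, not called by the NIF): pooling1
  encodes the argmax position of each pooling window as row*1000+col in
  ts2. The pair can be recovered on the host the same way
  unpooling_kernel recovers it on the device, via floor/fmod:
*/
static void decode_pool_index(float elt, int *max_h, int *max_w)
{
    // e.g. elt == 1003.0 -> max_h == 1, max_w == 3
    *max_h = (int) floorf(elt / 1000.0f);
    *max_w = (int) fmodf(elt, 1000.0f);
}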
static ERL_NIF_TERM
pooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin,c_bin,tuple;
int in_n,in_c,in_h,in_w,st_h,st_w, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &st_h)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st_w)) return enif_make_int(env,7);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h / st_h) * (in_w / st_w);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n2 * sizeof(float), &b_bin);
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n2 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(in_c,1,1);
pooling_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, st_h, st_w, in_c, in_h, in_w);
// copy to host b,c from GPU dev_b,dev_c
CHECK(cudaMemcpy(b, dev_b, n2 * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(c, dev_c, n2 * sizeof(float), cudaMemcpyDeviceToHost));
// return forward data and backward data with tuple {b_bin,c_bin}
tuple = enif_make_tuple2(env,b_bin,c_bin);
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(tuple);
}
__global__ void unpooling_kernel(float *a, float *b, float *c, int st_h, int st_w, int in_c, int in_h, int in_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w,in_h1,in_w1;
float loss,elt;
n1 = bid;
c1 = tid;
in_h1 = in_h * st_h;
in_w1 = in_w * st_w;
for(h2=0;h2<in_h;h2++){
for(w2=0;w2<in_w;w2++){
start_h1 = st_h*h2;
end_h1 = st_h*(h2+1);
start_w1 = st_w*w2;
end_w1 = st_w*(w2+1);
elt = a[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
loss = b[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
max_h = (int) floor(elt / 1000.0);
max_w = (int) fmodf(elt,1000.0);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 == max_h && w1 == max_w){
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = loss;
}
else{
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = 0.0;
}
}
}
}
}
}
/*
1st arg in_n of sparse-tensor
2nd arg in_c of sparse-tensor
3rd arg in_h of sparse-tensor
4th arg in_w of sparse-tensor
5th arg binary of sparse-tensor
6th arg binary of loss-tensor
7th arg stride height
8th arg stride width
return gradient tensor
e.g.
ts1 index-tensor
each element is row*1000+col
|1.0*1000+1.0,1.0*1000+3.0|
|3.0*1000+1.0,3.0*1000+3.0|
ts2 loss-tensor
|0.1,0.2|
|0.3,0.4|
return
|0.0,0.0,0.0,0.0|
|0.0,0.1,0.0,0.2|
|0.0,0.0,0.0,0.0|
|0.0,0.3,0.0,0.4|
*/
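/*
  Added note (sketch): the index-tensor entries decode exactly as in
  unpooling_kernel above; e.g. for elt = 1.0*1000 + 3.0 = 1003.0,
    max_h = floor(1003.0 / 1000.0) = 1
    max_w = fmod(1003.0, 1000.0)   = 3
  so the corresponding loss value is routed to position (1,3) of the
  unpooled output and every other position in that window is set to 0.
*/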
static ERL_NIF_TERM
unpooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,st_h,st_w, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &st_w)) return enif_make_int(env,8);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h * st_h) * (in_w * st_w);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n2 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(in_c,1,1);
unpooling_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, st_h, st_w, in_c, in_h, in_w);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n2 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void convolute1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w,
int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int oh, int ow)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
sum = 0.0;
start_h1 = st_h*h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = st_w*w2-pad;
end_w1 = start_w1 + filt_w;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
elt2 = b[IDX4C(c2,c1,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,c2,h2,w2,filt_n,oh,ow)] = sum;
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input tensor
10th arg binary of filter tensor
11th arg stride height
12th arg stride width
13th arg padding
*/
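/*
  Added note (sketch): the output spatial size computed below follows
  the standard convolution formula
    oh = (in_h + 2*pad - filt_h)/st_h + 1
    ow = (in_w + 2*pad - filt_w)/st_w + 1
  e.g. in_h = in_w = 5, filt_h = filt_w = 3, pad = 1, st_h = st_w = 1
  gives oh = ow = (5 + 2 - 3)/1 + 1 = 5, a "same"-size convolution.
*/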
static ERL_NIF_TERM
convolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w, filt_n,filt_c,filt_h,filt_w, st_h,st_w,pad, n1, n2, n3, oh, ow;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
n1 = in_n * in_c * in_h * in_w;
n2 = filt_n * filt_c * filt_h * filt_w;
oh = (in_h+2*pad-filt_h)/st_h + 1;
ow = (in_w+2*pad-filt_w)/st_w + 1;
n3 = in_n * filt_n * oh * ow; // filt_n filters produce filt_n output channels
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
convolute1_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad, in_c, in_h, in_w, oh, ow);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void deconvolute1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w,
int st_h, int st_w, int pad1, int pad, int in_c, int in_h, int in_w, int oh, int ow, int oh1, int ow1)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
//full convolute. stride=1 always
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad1;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad1;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
for(c1=0;c1<filt_n;c1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)]; //loss tensor
elt2 = b[IDX4C(c1,c2,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)]; //filter tensor
sum = sum + elt1*elt2;
}
}
}
}
if(h2-pad >=0 && h2-pad < oh1 && w2-pad >= 0 && w2-pad < ow1){
c[IDX4C(n1,c2,h2-pad,w2-pad,filt_c,oh1,ow1)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input loss tensor
10th arg binary of filter tensor
11th arg stride height
12th arg stride width
13th arg padding
memo
ex padding = 1
loss 4*4
filter 2*2
input 3*3 padding=1
(3-2+2*1)/1 + 1 = 4
deconvolute computes 5*5 (the 3*3 input with padding=1) and saves only the 3*3 result range
*/
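/*
  Added check (sketch) of the sizes computed below, using the memo's
  numbers (loss 4*4, filter 2*2, original padding pad = 1, stride 1):
    pad1 = filt_h - 1 = 1
    oh   = (4 + 2*1 - 2)/1 + 1     = 5   (full-convolution size)
    oh1  = (4 + 2*(1-1) - 2)/1 + 1 = 3   (range that is actually stored)
  which matches "compute 5*5 and save the 3*3 range" above.
*/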
static ERL_NIF_TERM
deconvolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w, filt_n,filt_c,filt_h,filt_w, st_h,st_w,pad, pad1, n1, n2, n3, oh, ow, oh1, ow1, i,j,k,l;
float *a,*b, *b1, *c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
n1 = in_n * in_c * in_h * in_w;
n2 = filt_n * filt_c * filt_h * filt_w;
pad1 = filt_h - 1;
// pad1 = filt_h -1, pad is original padding size
oh = (in_h+2*pad1-filt_h)/st_h + 1;
ow = (in_w+2*pad1-filt_w)/st_w + 1;
oh1 = (in_h+2*(pad1-pad)-filt_h)/st_h + 1;
ow1 = (in_w+2*(pad1-pad)-filt_w)/st_w + 1;
n3 = in_n * filt_c * oh1 * ow1; // filt_c channels reconstruct a tensor with the same channel count as the input
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<filt_n;i++){
for(j=0;j<filt_c;j++){
for(k=0;k<filt_h;k++){
for(l=0;l<filt_w;l++){
b1[IDX4C(i,j,filt_h-k-1,filt_w-l-1,filt_c,filt_h,filt_w)] = b[IDX4C(i,j,k,l,filt_c,filt_h,filt_w)];
}
}
}
}
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b1,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b1, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_c,1,1);
deconvolute1_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad1, pad, in_c, in_h, in_w, oh, ow, oh1, ow1);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
enif_free(b1);
return(c_bin);
}
__global__ void deconvolute2_kernel(float *a1, float *a, float *b, float *c, int filt_n, int filt_c,int filt_h, int filt_w,
int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int loss_h, int loss_w)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
int j,k,l,k1,l1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
// caution! stride=1
oh = (in_h+2*pad-filt_h) + 1;
ow = (in_w+2*pad-filt_w) + 1;
//dilate loss tensor.
for(j=0;j<filt_n;j++){
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = a[IDX4C(n1,j,k,l,in_c,loss_h,loss_w)];
k1 = st_h*k;
l1 = st_w*l;
a1[IDX4C(n1,j,k1,l1,in_c,in_h,in_w)] = elt1;
}
}
}
//full convolute. stride=1
for(c2=0;c2<filt_c;c2++){
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
for(c1=0;c1<filt_n;c1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a1[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)]; //loss tensor
elt2 = b[IDX4C(c1,c2,h1-start_h1,w1-start_w1,filt_c,filt_h,filt_w)]; //filter tensor
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,c2,h2,w2,filt_c,oh,ow)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
/*
1st arg in_n of input loss tensor
2nd arg in_c of input loss tensor
3rd arg in_h of input loss tensor
4th arg in_w of input loss tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg binary of input loss tensor
10th arg binary of filter tensor
11th arg stride height
12th arg stride width
13th arg padding
*/
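/*
  Added sketch (illustrative helper, not called by the NIF): dilation of
  a single 2-D loss plane as described above. Each element of an
  lh x lw plane is written into a zero-initialised
  (lh+(lh-1)*(st_h-1)) x (lw+(lw-1)*(st_w-1)) plane at stride st_h/st_w;
  deconvolute2_kernel performs the same step per sample and channel on
  the device.
*/
static void dilate_plane(const float *src, float *dst, int lh, int lw, int st_h, int st_w)
{
    // dst is assumed to be zero-initialised by the caller
    int h, w;
    int dw = lw + (lw - 1) * (st_w - 1); // dilated width
    for (h = 0; h < lh; h++) {
        for (w = 0; w < lw; w++) {
            dst[(h * st_h) * dw + (w * st_w)] = src[h * lw + w];
        }
    }
}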
static ERL_NIF_TERM
deconvolute2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h, filt_w, st_h, st_w,pad, pad1, n1, n2, n3, oh, ow, i,j,k,l, loss_h, loss_w;
float *a, *a1, *b, *b1, *c;
float *dev_a, *dev_a1, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &loss_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &loss_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st_h)) return enif_make_int(env,11);
if (!enif_get_int(env, argv[11], &st_w)) return enif_make_int(env,12);
if (!enif_get_int(env, argv[12], &pad)) return enif_make_int(env,13);
// size for dilate
in_h = loss_h + (loss_h - 1)*(st_h - 1);
in_w = loss_w + (loss_w - 1)*(st_w - 1);
n1 = in_n * in_c * in_h * in_w; //loss tensor size
n2 = filt_n * filt_c * filt_h * filt_w; //filter tensor size
pad1 = (filt_h - 1) + pad; //padding size with dilate
oh = (in_h+2*pad1-filt_h) + 1; //output deconvolute tensor size. caution stride=1.
ow = (in_w+2*pad1-filt_w) + 1; //
n3 = in_n * filt_c * oh * ow; //
a = (float *) a_bin.data;
b = (float *) b_bin.data;
a1 = (float *) enif_alloc(n1 * sizeof(float));
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<filt_n;i++){
for(j=0;j<filt_c;j++){
for(k=0;k<filt_h;k++){
for(l=0;l<filt_w;l++){
b1[IDX4C(i,j,filt_h-k-1,filt_w-l-1,filt_c,filt_h,filt_w)] = b[IDX4C(i,j,k,l,filt_c,filt_h,filt_w)];
}
}
}
}
// dilate
for(i=0;i<n1;i++){
a1[i] = 0.0;
}
CHECK(cudaMalloc((void**)&dev_a1, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_a, in_n*1*loss_h*loss_w * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
CHECK(cudaMemcpy(dev_a1, a1, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_a, a, in_n*1*loss_h*loss_w * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b1, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_c,1,1);
deconvolute2_kernel <<<blocks, threads>>>(dev_a1, dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, st_h, st_w, pad1, in_c, in_h, in_w, loss_h, loss_w);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_a1);
cudaFree(dev_b);
cudaFree(dev_c);
enif_free(a1);
enif_free(b1);
return(c_bin);
}
__global__ void gradfilter1_kernel(float *a, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w, int loss_c, int loss_h, int loss_w, int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int n)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,h3,w3;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
for(c1=0;c1<filt_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h;h2++){
for(w2=0;w2<loss_w;w2++){
//h3,w3 is index of input tensor
h3 = h1 - pad + h2;
w3 = w1 - pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b[IDX4C(n1,c2,h2,w2,loss_c,loss_h,loss_w)]; //loss tensor
sum = sum + elt1*elt2;
}
}
}
//set filter tensor
c[IDX5C(n1,c2,c1,h1,w1,filt_n,filt_c,filt_h,filt_w)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg loss_c of loss tensor
10th arg loss_h of loss tensor
11th arg loss_w of loss tensor
12th arg binary of input tensor
13th arg binary of loss tensor
14th arg stride height
15th arg stride width
16th arg padding
*/
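/*
  Added note (sketch): for each sample n, output channel c2 and input
  channel c1, the kernel below accumulates
    grad[n][c2][c1][h1][w1] =
      sum over (h2,w2) of input[n][c1][h1 - pad + h2][w1 - pad + w2]
                         * loss[n][c2][h2][w2]
  (out-of-range input positions are skipped), and the host code then
  averages the per-sample gradients over the batch of in_n samples.
*/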
static ERL_NIF_TERM
gradfilter1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h,filt_w,loss_c,loss_h,loss_w,st_h,st_w,pad,n1,n2,n3,n4,i,j,k,l,m;
float *a,*b,*c,*d;
float *dev_a, *dev_b, *dev_c;
float elt;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &loss_c)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &loss_h)) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &loss_w)) return enif_make_int(env,11);
if (!enif_inspect_binary(env, argv[11], &a_bin )) return enif_make_int(env,12);
if (!enif_inspect_binary(env, argv[12], &b_bin )) return enif_make_int(env,13);
if (!enif_get_int(env, argv[13], &st_h)) return enif_make_int(env,14);
if (!enif_get_int(env, argv[14], &st_w)) return enif_make_int(env,15);
if (!enif_get_int(env, argv[15], &pad)) return enif_make_int(env,16);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_c * loss_h * loss_w;
n3 = in_n * filt_n * filt_c * filt_h * filt_w;
n4 = filt_n * filt_c * filt_h * filt_w;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n4 * sizeof(float), &d_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
gradfilter1_kernel <<<blocks, threads>>>(dev_a, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, loss_c, loss_h, loss_w, st_h, st_w, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
//average
// clear d
for(i=0;i<n4;i++){
d[i] = 0.0;
}
// copy from c to d and compute sum
for(i=0;i<in_n;i++){
for(j=0;j<filt_n;j++){
for(k=0;k<filt_c;k++){
for(l=0;l<filt_h;l++){
for(m=0;m<filt_w;m++){
elt = c[IDX5C(i,j,k,l,m,filt_n,filt_c,filt_h,filt_w)];
d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] = d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] + elt;
}
}
}
}
}
// average
for(i=0;i<n4;i++){
d[i] = d[i] / (float)in_n;
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(d_bin);
}
__global__ void gradfilter2_kernel(float *a, float *b1, float *b, float *c, int filt_n, int filt_c, int filt_h, int filt_w, int loss_c, int loss_h, int loss_w, int st_h, int st_w, int pad, int in_c, int in_h, int in_w, int n)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int n1,c1,c2,h1,w1,h2,w2,h3,w3,loss_h1,loss_w1,j,k,l,k1,l1;
float sum,elt1,elt2;
n1 = bid;
c2 = tid;
//dilated loss tensor size
loss_h1 = loss_h+(loss_h-1)*(st_h-1);
loss_w1 = loss_w+(loss_w-1)*(st_w-1);
//dilate loss tensor.
for(j=0;j<loss_c;j++){
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = b[IDX4C(n1,j,k,l,loss_c,loss_h,loss_w)];
k1 = st_h*k;
l1 = st_w*l;
b1[IDX4C(n1,j,k1,l1,loss_c,loss_h1,loss_w1)] = elt1;
}
}
}
//convolute input tensor with dilated loss tensor. caution: stride is always 1.
for(c1=0;c1<filt_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h1;h2++){
for(w2=0;w2<loss_w1;w2++){
//h3,w3 is index of input tensor
h3 = h1 - pad + h2;
w3 = w1 - pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b1[IDX4C(n1,c2,h2,w2,loss_c,loss_h1,loss_w1)]; //loss tensor
sum = sum + elt1*elt2;
}
}
}
//set filter tensor
c[IDX5C(n1,c2,c1,h1,w1,filt_n,filt_c,filt_h,filt_w)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
/*
gradfilter2 is for stride >= 2. This one requires dilation of the loss tensor
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_n of filter tensor
6th arg filt_c of filter tensor
7th arg filt_h of filter tensor
8th arg filt_w of filter tensor
9th arg loss_c of loss tensor
10th arg loss_h of loss tensor
11th arg loss_w of loss tensor
12th arg binary of input tensor
13th arg binary of loss tensor
14th arg stride height
15th arg stride width
16th arg padding
*/
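/*
  Added note (sketch): the dilated loss size used below is
    loss_h1 = loss_h + (loss_h - 1)*(st_h - 1)
    loss_w1 = loss_w + (loss_w - 1)*(st_w - 1)
  e.g. a 2*2 loss with stride 2 dilates to 3*3, as in the example above.
  After dilation the filter gradient is computed as in gradfilter1
  (stride 1) and averaged over the batch on the host.
*/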
static ERL_NIF_TERM
gradfilter2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin;
int in_n,in_c,in_h,in_w,filt_n,filt_c,filt_h,filt_w,loss_c,loss_h,loss_w,st_h,st_w,pad,n1,n2,n3,n4,n5,i,j,k,l,m;
float *a,*b,*b1,*c,*d;
float *dev_a, *dev_b, *dev_b1, *dev_c;
float elt;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_n)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_c)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &filt_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &filt_w)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &loss_c)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &loss_h)) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &loss_w)) return enif_make_int(env,11);
if (!enif_inspect_binary(env, argv[11], &a_bin )) return enif_make_int(env,12);
if (!enif_inspect_binary(env, argv[12], &b_bin )) return enif_make_int(env,13);
if (!enif_get_int(env, argv[13], &st_h)) return enif_make_int(env,14);
if (!enif_get_int(env, argv[14], &st_w)) return enif_make_int(env,15);
if (!enif_get_int(env, argv[15], &pad)) return enif_make_int(env,16);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_c * loss_h * loss_w;
n3 = in_n * filt_n * filt_c * filt_h * filt_w;
n4 = filt_n * filt_c * filt_h * filt_w;
n5 = in_n * loss_c * (loss_h+(loss_h-1)*(st_h-1)) * (loss_w+(loss_w-1)*(st_w-1)); // dilated loss tensor size
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n5 * sizeof(float)); // dilate loss tensor area
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n4 * sizeof(float), &d_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
//initialize b1
for(i=0;i<n5;i++){
b1[i] = 0.0;
}
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b1, n5 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b1, b1, n5 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
dim3 blocks(in_n,1,1);
dim3 threads(filt_n,1,1);
gradfilter2_kernel <<<blocks, threads>>>(dev_a, dev_b1, dev_b, dev_c, filt_n, filt_c, filt_h, filt_w, loss_c, loss_h, loss_w, st_h, st_w, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
//average
// clear d
for(i=0;i<n4;i++){
d[i] = 0.0;
}
// copy from c to d and compute sum
for(i=0;i<in_n;i++){
for(j=0;j<filt_n;j++){
for(k=0;k<filt_c;k++){
for(l=0;l<filt_h;l++){
for(m=0;m<filt_w;m++){
elt = c[IDX5C(i,j,k,l,m,filt_n,filt_c,filt_h,filt_w)];
d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] = d[IDX4C(j,k,l,m,filt_c,filt_h,filt_w)] + elt;
}
}
}
}
}
// average
for(i=0;i<n4;i++){
d[i] = d[i] / (float)in_n;
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_b1);
cudaFree(dev_c);
enif_free(b1);
return(d_bin);
}
__global__ void full_kernel(float *a, float *b, int in_n, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j,k;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_c;i++){
for(j=0;j<in_h;j++){
for(k=0;k<in_w;k++){
elt = a[IDX4C(n1,i,j,k,in_c,in_h,in_w)];
b[IDX2C(n1,i*in_h*in_w + j*in_w + k,in_n)] = elt;
}
}
}
}
}
/*
1st arg in_n of input tensor 4DIM
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg binary of input tensor
*/
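/*
  Added note (sketch): full1 flattens each 4-D sample into one row of an
  in_n x (in_c*in_h*in_w) matrix stored column-major via IDX2C, i.e.
    b[IDX2C(n, c*in_h*in_w + h*in_w + w, in_n)] = a[IDX4C(n,c,h,w,in_c,in_h,in_w)]
  e.g. in_n = 2, in_c = 1, in_h = in_w = 2 yields a 2 x 4 matrix whose
  row n is the flattened sample n. unfull1 below is the inverse mapping.
*/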
static ERL_NIF_TERM
full1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
n = in_n;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
full_kernel << <1, n>> >(dev_a, dev_b, in_n, in_c, in_h, in_w, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n1 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void unfull_kernel(float *a, float *b, int in_n, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j,k;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_c;i++){
for(j=0;j<in_h;j++){
for(k=0;k<in_w;k++){
elt = a[IDX2C(n1,i*in_h*in_w + j*in_w + k,in_n)];
b[IDX4C(n1,i,j,k,in_c,in_h,in_w)] = elt;
}
}
}
}
}
/*
1st arg in_n of input tensor 4DIM
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg binary of input tensor
*/
static ERL_NIF_TERM
unfull1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
n = in_n;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
unfull_kernel << <1, n>> >(dev_a, dev_b, in_n, in_c, in_h, in_w, n);
// copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n1 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
new1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
ERL_NIF_TERM a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &d)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
a[i] = (float)d;
}
return(a_bin);
}
static ERL_NIF_TERM
new2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int r1,c1,i,j;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, r1 * c1 * sizeof(float), &a_bin);
// Set matrix data
list = argv[2]; /* matrix1 */
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX2C(i,j,r1)] = (float)d;
}
}
return(a_bin);
}
static ERL_NIF_TERM
new3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int c,h,w,i,j,k;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
a = (float *) enif_make_new_binary(env, c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[3]; /* matrix1 */
for(i=0;i<c;i++){
for(j=0;j<h;j++){
for(k=0;k<w;k++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX3C(i,j,k,h,w)] = (float)d;
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
new4(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,c,h,w,i,j,k,l;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
a = (float *) enif_make_new_binary(env, n * c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[4]; /* matrix1 */
for(i=0;i<n;i++){
for(j=0;j<c;j++){
for(k=0;k<h;k++){
for(l=0;l<w;l++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX4C(i,j,k,l,c,h,w)] = (float)d;
}
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
rand1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
float x,y,val;
float *result_data;
ERL_NIF_TERM result;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
result_data = (float *) enif_make_new_binary(env, n * sizeof(float), &result);
srand((unsigned) time(NULL));
for(i=0;i<n;i++){
//box_muller
x = (float)rand()/(float)RAND_MAX;
y = (float)rand()/(float)RAND_MAX;
val = sqrt(-2.0 * log(x)) * cos(2.0 * PI * y);
result_data[i] = val;
}
return(result);
}
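/*
  Added note (sketch): rand1 above draws standard-normal samples with
  the Box-Muller transform
    z = sqrt(-2 * ln(u1)) * cos(2 * PI * u2),   u1, u2 ~ Uniform(0,1),
  which is what the loop computes with x = u1 and y = u2. (If u1 is
  exactly 0, log(u1) is undefined; the original code ignores that edge
  case and this note does too.)
*/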
static ERL_NIF_TERM
mult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int r1, c1, r2, c2, n, i, j;
float *a,*b,*c;
float* devPtrA;
float* devPtrB;
float* devPtrC;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin)) return enif_make_int(env,6);
n = r1*c2;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
for(j=0;j<c2;j++)
for(i=0;i<r1;i++)
c[IDX2C(i,j,r1)] = 0.0;
// Initialize CUBLAS
cublasInit();
CUBLAS(cublasAlloc (r1*c1, sizeof(*a), (void**)&devPtrA));
CUBLAS(cublasAlloc (r2*c2, sizeof(*b), (void**)&devPtrB));
CUBLAS(cublasAlloc (r1*c2, sizeof(*c), (void**)&devPtrC));
CUBLAS(cublasSetMatrix (r1, c1, sizeof(*a), a, r1, devPtrA, r1));
CUBLAS(cublasSetMatrix (r2, c2, sizeof(*b), b, r2, devPtrB, r2));
CUBLAS(cublasSetMatrix (r1, c2, sizeof(*c), c, r1, devPtrC, r1));
//Sgemm
cublasSgemm('N', 'N', r1, c2, c1, 1.0, devPtrA, r1, devPtrB, r2, 0.0, devPtrC, r1);
CUBLAS(cublasGetMatrix (r1, c2, sizeof(*c), devPtrC, r1, c, r1));
// Shutdown CUBLAS
cublasFree(devPtrA);
cublasFree(devPtrB);
cublasFree(devPtrC);
cublasShutdown();
return(c_bin);
}
__global__ void add1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
add1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
add1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void sub1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
sub1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sub1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void emult1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
emult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
emult1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
static ERL_NIF_TERM
transpose1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j;
float *a,*b;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
b[IDX2C(j,i,c1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
ident1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i,j;
ERL_NIF_TERM a_bin;
float *a;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
a = (float *) enif_make_new_binary(env, n * n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
for(j=0;j<n;j++){
if(i==j)
a[IDX2C(i,j,n)] = 1.0;
else
a[IDX2C(i,j,n)] = 0.0;
}
}
return(a_bin);
}
__global__ void sigmoid_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = SIGMOID(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sigmoid_kernel << <128, 128 >> >(dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void tanh_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = tanh(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
tanh_kernel << <128, 128 >> >(dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void relu_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(a[tid] >= 0)
b[tid] = a[tid];
else
b[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
relu_kernel << <128, 128 >> >(dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
activate_softmax(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, k;
float *a,*b;
float max,sum,delta;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
//calculate softmax
delta = 0.01;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
max = -3.402823e38;
for(k=0;k<c1;k++){
if(a[IDX2C(i,k,r1)] > max)
max = a[IDX2C(i,k,r1)];
}
sum = 0.0;
for(k=0;k<c1;k++){
sum = sum + exp(a[IDX2C(i,k,r1)] - max);
}
b[IDX2C(i,j,r1)] = exp(a[IDX2C(i,j,r1)] - max) / (sum+delta);
}
}
return(b_bin);
}
__global__ void differ_sigmoid_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * ((1 - SIGMOID(b[tid])) * SIGMOID(b[tid]));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_sigmoid_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void differ_tanh_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * (1/(cosh(b[tid]) * cosh(b[tid])));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_tanh_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void differ_relu_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(b[tid] >= 0)
c[tid] = a[tid];
else
c[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_relu_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void smult_kernel(float d, float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = d * a[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
smult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
double s;
if (!enif_get_double(env, argv[0], &s)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &n)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
smult_kernel << <128, 128 >> >((float)s,dev_a, dev_b, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
trace1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float trace;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
trace = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==j)
trace = trace + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,trace);
return(result);
}
static ERL_NIF_TERM
mean_square(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] - b[IDX2C(i,j,r1)];
s = s + d*d;
}
}
s = s / (2.0*(float(r1)));
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
cross_entropy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s,delta;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
delta = 1e-7;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] + delta;
s = s + b[IDX2C(i,j,r1)] * log(d);
}
}
s = -1.0 * s / (float)r1;
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
elt1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &i)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &j)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
a = (float *) a_bin.data;
result = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
return(result);
}
static ERL_NIF_TERM
set1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)] + (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n1, c1, h1, w1, n, i, j, k, l, n2, c2, h2, w2;
float *a,*b;
double val;
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &n2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &h2)) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &w2)) return enif_make_int(env,9);
if (!enif_get_double(env, argv[9], &val)) return enif_make_int(env,10);
n = n1*c1*h1*w1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<n1;i++){
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
if(i==n2 && j==c2 && k==h2 && l==w2){
b[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(i,j,k,l,c1,h1,w1)] + (float)val;
}
else {
b[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(i,j,k,l,c1,h1,w1)];
}
}
}
}
}
return(b_bin);
}
static ERL_NIF_TERM
average1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, i, j;
float *a,*b;
float sum;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, c1 * sizeof(float), &b_bin);
for(j=0;j<c1;j++){
sum = 0.0;
for(i=0;i<r1;i++){
sum = sum + a[IDX2C(i,j,r1)];
}
b[j] = sum / (float)r1;
}
return(b_bin);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg matrix data binary
*/
static ERL_NIF_TERM
sum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float sum;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
sum = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
sum = sum + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,sum);
return(result);
}
/*
transfer a 2-dim matrix (column-major binary) to a flat list in row-major order
*/
static ERL_NIF_TERM
to_list1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int r1, c1, i, j;
float *a;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=r1-1;i>=0;i--){
for(j=c1-1;j>=0;j--){
head = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
list = enif_make_list_cell(env,head,list);
}
}
return(list);
}
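/*
Illustrative note (an assumption, not taken from this library): the IDX2C
macro used throughout this file appears to follow the usual cuBLAS
column-major convention, IDX2C(i,j,ld) = (j)*(ld) + (i) with ld = number of
rows. Under that convention to_list1 above walks the column-major binary in
row-major order while building the Erlang list; e.g. in a 2x3 matrix the
element (i=1, j=2) sits at flat offset 2*2 + 1 = 5.
*/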
/*
transfer a 3-dim tensor to a flat list
*/
static ERL_NIF_TERM
to_list2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int c, h, w, i, j, k;
float *a;
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=c-1;i>=0;i--){
for(j=h-1;j>=0;j--){
for(k=w-1;k>=0;k--){
head = enif_make_double(env,(double)a[IDX3C(i,j,k,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
return(list);
}
/*
transfer a 4-dim tensor to a flat list
*/
static ERL_NIF_TERM
to_list3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int n, c, h, w, i, j, k, l;
float *a;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_badarg(env);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=n-1;i>=0;i--){
for(j=c-1;j>=0;j--){
for(k=h-1;k>=0;k--){
for(l=w-1;l>=0;l--){
head = enif_make_double(env,(double)a[IDX4C(i,j,k,l,c,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
}
return(list);
}
__global__ void dropout1_kernel(float *a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
a[tid] = 1.0;
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of mask tensor
2nd arg dropout rate
returns a mask tensor whose elements are 1.0, except that roughly
(size * rate) randomly chosen positions are set to 0.0
during the forward and backward pass the mask is applied as a
Hadamard product with the activations (see the sketch after dropout1 below)
*/
static ERL_NIF_TERM
dropout1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ERL_NIF_TERM a_bin;
int n,count,i,j;
float *a,*dev_a;
double dropout_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &dropout_rate)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
dropout1_kernel << <128, 128 >> >(dev_a, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(a, dev_a, n * sizeof(float), cudaMemcpyDeviceToHost));
// dropout
count = (int)(double(n)*dropout_rate);
for(i=0;i<count;i++){
j = rand() % n;
a[j] = 0.0;
}
// free
cudaFree(dev_a);
return(a_bin);
}
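/*
Illustrative sketch (not registered in nif_funcs): the mask returned by
dropout1 is meant to be applied as a Hadamard product during the forward and
backward pass, which is what the library's element-wise multiply already
provides. A minimal standalone kernel for that step, assuming the same flat
float layout, looks like this.
*/
__global__ void apply_dropout_mask_kernel(const float *x, const float *mask, float *y, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
y[tid] = x[tid] * mask[tid]; // positions where mask is 0.0 drop the activation
tid += blockDim.x * gridDim.x;
}
}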
__global__ void sgd1_kernel(float *a, float *b, float *c, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid]*lr;
tid += blockDim.x * gridDim.x;
}
}
/*
w1 = w - g*lr
w is the weight matrix, g is the gradient matrix.
The intent is that an element of w that is exactly zero stays zero (so that
dropped-out weights remain dropped); note that sgd1_kernel above applies the
plain update without that check (a zero-preserving variant is sketched after
sgd1 below).
return updated weight matrix.
1st arg is size of vectorized matrix
2nd arg is weight matrix or tensor
3rd arg is gradient matrix or tensor
4th arg is learning rate
*/
static ERL_NIF_TERM
sgd1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c,*dev_a, *dev_b, *dev_c;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_get_double(env, argv[3], &learning_rate)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sgd1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, lr, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
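/*
Illustrative sketch (not registered in nif_funcs): a zero-preserving SGD step
matching the intent described in the comment above sgd1 -- weights that are
exactly 0.0 stay 0.0 so that dropped connections remain dropped. This is an
assumption about the intended semantics, not code from the original library.
*/
__global__ void sgd1_dropout_kernel(float *w, float *g, float *w1, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if (w[tid] != 0.0)
w1[tid] = w[tid] - g[tid]*lr; // ordinary SGD step
else
w1[tid] = 0.0; // keep the dropped weight at zero
tid += blockDim.x * gridDim.x;
}
}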
/*
Reference (CPU/Matrex) version of the velocity update:
def momentum(v, g, lr) do
  Matrex.apply(v, g, fn v, g -> 0.5 * v - lr * g end)
end
Note: momentum_kernel below uses a momentum coefficient of 0.9 and also
returns the updated weight, i.e. v1 = 0.9*v - lr*g and w1 = w + v1.
*/
__global__ void momentum_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
d[tid] = ((0.9 * b[tid]) - (lr * c[tid]));
e[tid] = a[tid] + d[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight-matrix (a)
3rd arg v-matrix (b)
4th arg gradient-matrix (c)
5th arg learning rate
return tuple {next_v-matrix, weight-matrix}
*/
static ERL_NIF_TERM
momentum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c ,*dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin )) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
momentum_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d from GPU dev_d
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/* ADAGRAD
h1 = h + grad*grad
lr1 = lr/(sqrt(h1))
w1 = w - lr1 * grad
a[] = w
b[] = h
c[] = grad
d[] = h1
e[] = w1
*/
__global__ void adagrad_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float lr1;
while (tid < n)
{
d[tid] = b[tid] + c[tid]*c[tid];
if(d[tid] != 0.0)
lr1 = lr/(sqrt(d[tid]));
else
lr1 = lr;
e[tid] = a[tid] - lr1 * c[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight-matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
adagrad1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
adagrad_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/* RMSprop
h1 = alpha * h + (1 - alpha) * grad*grad
lr1 = lr /(sqrt(h) + epsilon)
w1 = w - lr1 * grad
a[] = w
b[] = h
c[] = grad
d[] = h1
e[] = w1
*/
__global__ void rms_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float lr1,alpha,epsilon;
alpha = 0.99;
epsilon = 10.0e-7;
while (tid < n)
{
d[tid] = alpha * b[tid] + (1-alpha)*c[tid]*c[tid];
lr1 = lr/(sqrt(d[tid])+epsilon);
e[tid] = a[tid] - lr1*c[tid];
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight-matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
rms1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
rms_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/* ADAM
beta1 = 0.9
beta2 = 0.999
epsilon = 10.0e-7
alpha = 0.001
m1 = beta1 * m + (1 - beta1) * grad
v1 = beta2 * v + (1 - beta2) * grad^2
m2 = m1/(1 - beta1)
v2 = v1/(1 - beta2)
w1 = w - alpha * m2/(sqrt(v2)+epsilon)
a[] is w
b[] is m
c[] is v
d[] is grad
e[] is m1
f[] is v1
g[] is w1
*/
__global__ void adam_kernel(float *a, float *b, float *c, float *d, float *e, float *f, float *g, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float beta1,beta2,epsilon,m2,v2;
beta1 = 0.9;
beta2 = 0.999;
epsilon = 10.0e-7;
//alpha = 0.001;
while (tid < n){
e[tid] = beta1 * b[tid] + (1 - beta1) * d[tid];
f[tid] = beta2 * c[tid] + (1 - beta2) * d[tid]*d[tid];
m2 = e[tid]/(1-beta1);
v2 = f[tid]/(1-beta2);
g[tid] = a[tid] - lr * (m2/(sqrt(v2)+epsilon));
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg w-matrix (a_bin)
3rd arg m-matrix (b_bin)
4th arg v-matrix (c_bin)
5th arg grad-matrix (d_bin)
6th arg learning rate
return tuple {m1,v1,w1}
*/
static ERL_NIF_TERM
adam1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin,d_bin;
ERL_NIF_TERM e_bin,f_bin,g_bin,tuple;
int n;
float *a,*b,*c,*d,*e,*f,*g;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e, *dev_f, *dev_g;
float lr;
double learning_rate;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &d_bin)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &learning_rate)) return enif_make_int(env,6);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) d_bin.data;
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
f = (float *) enif_make_new_binary(env, n * sizeof(float), &f_bin);
g = (float *) enif_make_new_binary(env, n * sizeof(float), &g_bin);
lr = (float) learning_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_f, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_g, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_f, f, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_g, g, n * sizeof(float), cudaMemcpyHostToDevice));
adam_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, dev_f, dev_g, lr, n);
// copy to host d,e from GPU dev_d,dev_e
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(f, dev_f, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(g, dev_g, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
cudaFree(dev_f);
cudaFree(dev_g);
tuple = enif_make_tuple3(env,e_bin,f_bin,g_bin);
return(tuple);
}
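/*
Illustrative sketch (host-side, not registered in nif_funcs): a CPU reference
of the same simplified Adam step as adam_kernel, convenient for checking a few
elements by hand. Note that, like the kernel above, the bias correction divides
by the constants (1 - beta1) and (1 - beta2) rather than by (1 - beta1^t) and
(1 - beta2^t) as in the textbook formulation.
*/
static void adam_step_ref(const float *w, const float *m, const float *v, const float *grad,
float *m1, float *v1, float *w1, float lr, int n)
{
const float beta1 = 0.9;
const float beta2 = 0.999;
const float epsilon = 10.0e-7;
int i;
float m2, v2;
for(i=0;i<n;i++){
m1[i] = beta1 * m[i] + (1 - beta1) * grad[i];
v1[i] = beta2 * v[i] + (1 - beta2) * grad[i]*grad[i];
m2 = m1[i]/(1 - beta1);
v2 = v1[i]/(1 - beta2);
w1[i] = w[i] - lr * (m2/(sqrt(v2) + epsilon));
}
}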
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of label. each element is integer
return accuracy rate
*/
static ERL_NIF_TERM
accuracy1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
double max,rate;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate accuracy
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
max = 0.0;
index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
rate = (double)sum / (double)r1;
result = enif_make_double(env,rate);
return(result);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of label. each element is integer
return correct number
*/
static ERL_NIF_TERM
correct1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
float max;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate correct number
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
max = 0.0;
index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
result = enif_make_double(env,(double)sum);
return(result);
}
/*
random_select for matrix data
*/
static ERL_NIF_TERM
random_select1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int r1, c1, r2, c2, i, j, n, r;
float *a, *b, *c, *d;
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &n)) return enif_make_int(env,7);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % r1;
for(j=0;j<c1;j++){
c[IDX2C(i,j,n)] = a[IDX2C(r,j,r1)];
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
/*
random_select for 4D-tensor data
*/
static ERL_NIF_TERM
random_select2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int n1,c1,h1,w1,r2,c2, i, j, k, l, n, r;
float *a, *b, *c, *d;
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &r2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &n)) return enif_make_int(env,9);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1*h1*w1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*r2*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % n1;
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
c[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(r,j,k,l,c1,h1,w1)];
}
}
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
/*
random_select for 3D-tensor data
*/
static ERL_NIF_TERM
random_select3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int n1,h1,w1,r2,c2, i, j, k, n, r;
float *a, *b, *c, *d;
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w1)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &r2)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &c2)) return enif_make_int(env,6);
if (!enif_inspect_binary(env, argv[6], &b_bin )) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &n)) return enif_make_int(env,8);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*h1*w1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*r2*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % n1;
for(j=0;j<h1;j++){
for(k=0;k<w1;k++){
c[IDX3C(i,j,k,h1,w1)] = a[IDX3C(r,j,k,h1,w1)];
}
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
static ERL_NIF_TERM
is_near1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
int i, n, sw;
float *a, *b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
// near check
sw = 0;
for(i=0;i<n;i++){
if(fabsf(a[i]) > fabsf(b[i])*1.15 || fabsf(a[i]) < fabsf(b[i])*0.85){
printf("%f %f \r\n", a[i], b[i]);
sw = 1;
}
}
if(sw == 0)
return enif_make_int(env,1); //true
else
return enif_make_int(env,0); //false
}
static ERL_NIF_TERM
is_equal1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
int i, n;
float *a, *b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
// equal check
for(i=0;i<n;i++){
if(a[i] != b[i]){
return enif_make_int(env,0); //false
}
}
return enif_make_int(env,1); //true
}
static ERL_NIF_TERM
analizer1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
int i, n, id;
float *a;
float max,min,sum;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &id)) return enif_make_int(env,3);
a = (float *) a_bin.data;
// NaN/Inf check
for(i=0;i<n;i++){
if(isnan(a[i])){
return enif_make_int(env,9999);
}
if(isinf(a[i])){
return enif_make_int(env,9998);
}
}
// find max, min, and average
max = -999999999;
min = 999999999;
sum = 0;
for(i=0;i<n;i++){
if(a[i] > max)
max = a[i];
if(a[i] < min)
min = a[i];
sum = sum+a[i];
}
printf("id max min average\r\n");
printf("%d %f %f %f \r\n", id, max, min, sum/(float)n);
return enif_make_int(env,1);
}
/*
subtract the per-sample mean over (c,h,w) from every element
(mean-centering only; no division by the standard deviation)
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
*/
static ERL_NIF_TERM
standardize1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_c,in_h,in_w,n1,i,c1,h1,w1,count;
float *a,*b;
float sum,average;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
n1 = in_n * in_c * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
for(i=0;i<in_n;i++){
sum = 0.0;
for(c1=0;c1<in_c;c1++){
for(h1=0;h1<in_h;h1++){
for(w1=0;w1<in_w;w1++){
sum = sum + a[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)];
}
}
}
count = in_c * in_h * in_w;
average = sum / (float)count;
for(c1=0;c1<in_c;c1++){
for(h1=0;h1<in_h;h1++){
for(w1=0;w1<in_w;w1++){
b[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)] = a[IDX4C(i,c1,h1,w1,in_c,in_h,in_w)] - average;
}
}
}
}
return(b_bin);
}
/*
pick the nth row of each matrix in a 3D tensor, returning an in_n x in_c matrix
1st arg in_n of 3D tensor
2nd arg in_r of 3D tensor
3rd arg in_c of 3D tensor
4th arg binary of tensor
5th arg nth in_r of 3D tensor
*/
static ERL_NIF_TERM
pickup1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_row,in_col,nth,n1,i,j;
float *a,*b;
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_row)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_col)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &nth)) return enif_make_int(env,5);
n1 = in_n * in_col;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
for(i=0;i<in_n;i++){
for(j=0;j<in_col;j++){
b[IDX2C(i,j,in_n)] = a[IDX3C(i,nth,j,in_row,in_col)];
}
}
return(b_bin);
}
/*
1st arg size of tensor or matrix
2nd arg binary of tensor or matrix
*/
static ERL_NIF_TERM
copy1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n,i;
float *a,*b;
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<n;i++){
b[i] = a[i];
}
return(b_bin);
}
static ERL_NIF_TERM
slice1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin,c_bin,d_bin,e_bin,tuple;
int in_r,in_c,in_c1,i,j,n,bias;
float *a,*b,*c,*d,*e;
if (!enif_get_int(env, argv[0], &in_r)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
in_c1 = in_c / 4;
n = in_r * (in_c / 4);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
for(i=0;i<in_r;i++){
for(j=0;j<in_c1;j++){
b[IDX2C(i,j,in_r)] = a[IDX2C(i,j,in_r)];
}
}
bias = in_c / 4;
for(i=0;i<in_r;i++){
for(j=0;j<in_c1;j++){
c[IDX2C(i,j,in_r)] = a[IDX2C(i,j+bias,in_r)];
}
}
bias = 2 * (in_c / 4);
for(i=0;i<in_r;i++){
for(j=0;j<in_c1;j++){
d[IDX2C(i,j,in_r)] = a[IDX2C(i,j+bias,in_r)];
}
}
bias = 3 * (in_c / 4);
for(i=0;i<in_r;i++){
for(j=0;j<in_c1;j++){
e[IDX2C(i,j,in_r)] = a[IDX2C(i,j+bias,in_r)];
}
}
tuple = enif_make_tuple4(env,b_bin,c_bin,d_bin,e_bin);
return(tuple);
}
static ERL_NIF_TERM
unslice1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary b_bin,c_bin,d_bin,e_bin;
ERL_NIF_TERM a_bin;
int in_r,in_c,i,j,n,bias;
float *a,*b,*c,*d,*e;
if (!enif_get_int(env, argv[0], &in_r)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin )) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &d_bin )) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &e_bin )) return enif_make_int(env,6);
n = in_r * in_c * 4;
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) d_bin.data;
e = (float *) e_bin.data;
for(i=0;i<in_r;i++){
for(j=0;j<in_c;j++){
a[IDX2C(i,j,in_r)] = b[IDX2C(i,j,in_r)];
}
}
bias = in_c;
for(i=0;i<in_r;i++){
for(j=0;j<in_c;j++){
a[IDX2C(i,j+bias,in_r)] = c[IDX2C(i,j,in_r)];
}
}
bias = 2 * in_c;
for(i=0;i<in_r;i++){
for(j=0;j<in_c;j++){
a[IDX2C(i,j+bias,in_r)] = d[IDX2C(i,j,in_r)] ;
}
}
bias = 3 * in_c;
for(i=0;i<in_r;i++){
for(j=0;j<in_c;j++){
a[IDX2C(i,j+bias,in_r)] = e[IDX2C(i,j,in_r)];
}
}
return(a_bin);
}
// define the array of ErlNifFunc
static ErlNifFunc nif_funcs[] = {
// {erl_function_name, erl_function_arity, c_function}
{"mult1", 6, mult1},
{"new1", 2, new1},
{"new2", 3, new2},
{"new3", 4, new3},
{"new4", 5, new4},
{"rand1", 1, rand1},
{"add1", 3, add1},
{"sub1", 3, sub1},
{"emult1", 3, emult1},
{"transpose1", 3, transpose1},
{"ident1", 1, ident1},
{"activate_sigmoid", 2 ,activate_sigmoid},
{"activate_tanh", 2 , activate_tanh},
{"activate_relu", 2, activate_relu},
{"activate_softmax", 3, activate_softmax},
{"differ_sigmoid", 3, differ_sigmoid},
{"differ_tanh", 3, differ_tanh},
{"differ_relu", 3, differ_relu},
{"smult1", 3, smult1},
{"trace1", 3, trace1},
{"mean_square", 4, mean_square},
{"cross_entropy", 4, cross_entropy},
{"elt1", 5, elt1},
{"set1", 6, set1},
{"add_diff1", 6, add_diff1},
{"add_diff2", 10, add_diff2},
{"average1", 3, average1},
{"sum1", 3, sum1},
{"to_list1", 3, to_list1},
{"to_list2", 4, to_list2},
{"to_list3", 5, to_list3},
{"dropout1", 2 , dropout1},
{"sgd1", 4, sgd1},
{"momentum1", 5, momentum1},
{"adagrad1", 5, adagrad1},
{"rms1", 5, rms1},
{"adam1", 6, adam1},
{"accuracy1", 4, accuracy1},
{"correct1", 4, correct1},
{"pooling1", 7, pooling1},
{"unpooling1", 8, unpooling1},
{"convolute1", 13, convolute1},
{"deconvolute1", 13, deconvolute1},
{"deconvolute2", 13, deconvolute2},
{"gradfilter1", 16, gradfilter1},
{"gradfilter2", 16, gradfilter2},
{"full1", 5, full1},
{"unfull1", 5, unfull1},
{"random_select1", 7, random_select1},
{"random_select2", 9, random_select2},
{"random_select3", 8, random_select3},
{"is_near1", 3, is_near1},
{"is_equal1", 3, is_equal1},
{"analizer1", 3, analizer1},
{"standardize1", 5, standardize1},
{"pickup1", 5, pickup1},
{"copy1", 2, copy1},
{"slice1", 3, slice1},
{"unslice1", 6, unslice1}
};
ERL_NIF_INIT(Elixir.Cumatrix, nif_funcs, NULL, NULL, NULL, NULL)
#include "cuda_helper.h"
#include "cuda_vector.h"
#include "cuda_x11_aes.cu"
static uint2 *d_nonce[MAX_GPUS];
static uint32_t *d_found[MAX_GPUS];
// Multiply by 27 (0x1b, the AES reduction polynomial). The argument holds a
// 0x01 flag in each byte whose high bit was set, so the product places 0x1b
// in exactly those bytes; used by the MixColumns-style steps below.
__device__ __forceinline__ uint32_t mul27(const uint32_t x)
{
// uint32_t result = (x << 5) - (x + x + x + x + x);
uint32_t result = (x * 27);
// uint32_t result;
// asm("mul24.lo.u32 %0,%1,%2; \n\t" : "=r"(result): "r"(x) , "r"(y));
return result;
}
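/*
Illustrative sketch (not part of the original miner code): in the rounds below,
for a packed word x with t = x & 0x80808080, the expression
mul27(t >> 7) ^ ((x ^ t) << 1) doubles every byte of x in GF(2^8), i.e. it is a
four-byte-wide AES "xtime". A scalar reference for a single byte:
*/
__device__ __forceinline__ uint8_t xtime_ref(const uint8_t x)
{
return (uint8_t)((x << 1) ^ ((x & 0x80) ? 0x1b : 0x00));
}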
__device__ __forceinline__ void AES_2ROUND(
const uint32_t*const __restrict__ sharedMemory,
uint32_t &x0, uint32_t &x1, uint32_t &x2, uint32_t &x3,
const uint32_t k0)
{
uint32_t f[4]=
{
x0,x1,x2,x3
};
aes_round(sharedMemory,k0,(uint8_t *) &f[0]);
aes_round(sharedMemory, (uint8_t *)&f[0]);
x0 = f[0];
x1 = f[1];
x2 = f[2];
x3 = f[3];
// aes_round(sharedMemory,f[0], f[1], f[2], f[3],x0, x1, x2, x3);
}
__device__ __forceinline__ void cuda_echo_round(
const uint32_t *const __restrict__ sharedMemory, uint32_t *const __restrict__ hash)
{
const __align__(16) uint32_t P[48] = {
0xe7e9f5f5,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0xa4213d7e,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//8-12
0x01425eb8,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x65978b09,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//21-25
0x2cb6b661,
0x6b23b3b3,
0xcf93a7cf,
0x9d9d3751,
0x9ac2dea3,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//34-38
0x579f9f33,
0xfbfbfbfb,
0xfbfbfbfb,
0xefefd3c7,
0xdbfde1dd,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x34514d9e,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0xb134347e,
0xea6f7e7e,
0xbd7731bd,
0x8a8a1968,
0x14b8a457,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x265f4382,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af
//58-61
};
uint32_t k0;
uint32_t a, b, c, d, ab, bc, cd, t, t2, t3,abx,bcx,cdx;
uint32_t h[16];
uint28 *phash = (uint28*)hash;
uint28 *outpt = (uint28*)h;
outpt[0] = phash[0];
outpt[1] = phash[1];
k0 = 512 + 8;
#pragma unroll
for (int idx = 0; idx < 16; idx+= 4)
{
AES_2ROUND(sharedMemory,
h[idx + 0], h[idx + 1], h[idx + 2], h[idx + 3], k0++);
}
k0 += 4;
uint32_t W[64];
#pragma unroll
for (int i = 0; i < 4; i++)
{
a = P[i];
b = P[i + 4];
c = h[i + 8];
d = P[i + 8];
ab = a ^ b;
bc = b ^ c;
cd = c ^ d;
t = (ab & 0x80808080);
t2 = (bc & 0x80808080);
t3 = (cd & 0x80808080);
abx = mul27(t >> 7) ^ ((ab^t) << 1);
bcx = mul27(t2 >> 7) ^ ((bc^t2) << 1);
cdx = mul27(t3 >> 7) ^ ((cd^t3) << 1);
W[0 + i] = abx ^ bc ^ d;
W[0 + i + 4] = bcx ^ a ^ cd;
W[0 + i + 8] = cdx ^ ab ^ d;
W[0 + i + 12] = abx ^ bcx ^ cdx ^ ab ^ c;
a = P[12 + i];
b = h[i + 4];
c = P[12 + i + 4];
d = P[12 + i + 8];
ab = a ^ b;
bc = b ^ c;
cd = c ^ d;
t = (ab & 0x80808080);
t2 = (bc & 0x80808080);
t3 = (cd & 0x80808080);
abx = mul27(t >> 7) ^ ((ab^t) << 1);
bcx = mul27(t2 >> 7) ^ ((bc^t2) << 1);
cdx = mul27(t3 >> 7) ^ ((cd^t3) << 1);
W[16 + i] = abx ^ bc ^ d;
W[16 + i + 4] = bcx ^ a ^ cd;
W[16 + i + 8] = cdx ^ ab ^ d;
W[16 + i + 12] = abx ^ bcx ^ cdx ^ ab ^ c;
a = h[i];
b = P[24 + i + 0];
c = P[24 + i + 4];
d = P[24 + i + 8];
ab = a ^ b;
bc = b ^ c;
cd = c ^ d;
t = (ab & 0x80808080);
t2 = (bc & 0x80808080);
t3 = (cd & 0x80808080);
abx = mul27(t >> 7) ^ ((ab^t) << 1);
bcx = mul27(t2 >> 7) ^ ((bc^t2) << 1);
cdx = mul27(t3 >> 7) ^ ((cd^t3) << 1);
W[32 + i] = abx ^ bc ^ d;
W[32 + i + 4] = bcx ^ a ^ cd;
W[32 + i + 8] = cdx ^ ab ^ d;
W[32 + i + 12] = abx ^ bcx ^ cdx ^ ab ^ c;
a = P[36 + i ];
b = P[36 + i +4 ];
c = P[36 + i + 8];
d = h[i + 12];
ab = a ^ b;
bc = b ^ c;
cd = c ^ d;
t = (ab & 0x80808080);
t2 = (bc & 0x80808080);
t3 = (cd & 0x80808080);
abx = mul27(t >> 7) ^ ((ab^t) << 1);
bcx = mul27(t2 >> 7) ^ ((bc^t2) << 1);
cdx = mul27(t3 >> 7) ^ ((cd^t3) << 1);
W[48 + i] = abx ^ bc ^ d;
W[48 + i + 4] = bcx ^ a ^ cd;
W[48 + i + 8] = cdx ^ ab ^ d;
W[48 + i + 12] = abx ^ bcx ^ cdx ^ ab ^ c;
}
for (int k = 1; k < 10; k++)
{
// Big Sub Words
#pragma unroll
for (int idx = 0; idx < 64; idx+=16)
{
AES_2ROUND(sharedMemory,
W[idx + 0], W[idx + 1], W[idx + 2], W[idx + 3],
k0++);
AES_2ROUND(sharedMemory,
W[idx + 4], W[idx + 5], W[idx + 6], W[idx + 7],
k0++);
AES_2ROUND(sharedMemory,
W[idx + 8], W[idx + 9], W[idx + 10], W[idx + 11],
k0++);
AES_2ROUND(sharedMemory,
W[idx + 12], W[idx + 13], W[idx + 14], W[idx + 15],
k0++);
__syncthreads();
}
// Shift Rows
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
/// 1, 5, 9, 13
t = W[4 + i];
W[4 + i] = W[20 + i];
W[20 + i] = W[36 + i];
W[36 + i] = W[52 + i];
W[52 + i] = t;
// 2, 6, 10, 14
t = W[8 + i];
W[8 + i] = W[40 + i];
W[40 + i] = t;
t = W[24 + i];
W[24 + i] = W[56 + i];
W[56 + i] = t;
// 15, 11, 7, 3
t = W[60 + i];
W[60 + i] = W[44 + i];
W[44 + i] = W[28 + i];
W[28 + i] = W[12 + i];
W[12 + i] = t;
}
// Mix Columns
#pragma unroll
for (int i = 0; i < 4; i++) // loop over the four columns
{
#pragma unroll
for (int idx = 0; idx < 64; idx += 16) // loop over the elements
{
a = W[idx + i];
b = W[idx + i + 4];
c = W[idx + i + 8];
d = W[idx + i + 12];
ab = a ^ b;
bc = b ^ c;
cd = c ^ d;
t = (ab & 0x80808080);
t2 = (bc & 0x80808080);
t3 = (cd & 0x80808080);
abx = (mul27(t >> 7) ^ ((ab^t) << 1));
bcx = (mul27(t2 >> 7) ^ ((bc^t2) << 1));
cdx = (mul27(t3 >> 7) ^ ((cd^t3) << 1));
W[idx + i] = xor3x(abx, bc, d);
W[idx + i + 4] = xor3x(bcx, a, cd);
W[idx + i + 8] = xor3x(cdx, ab, d);
W[idx + i + 12] = xor3x(xor3x(abx, bcx, cdx), ab, c);
}
}
}
#pragma unroll
for (int i = 0; i<16; i += 4)
{
W[i] ^= W[32 + i] ^ 512;
W[i + 1] ^= W[32 + i + 1];
W[i + 2] ^= W[32 + i + 2];
W[i + 3] ^= W[32 + i + 3];
}
#pragma unroll
for (int i = 0; i<16; i++)
hash[i] ^= W[i];
}
// Load the 256-entry AES T-table d_AES0 into shared memory as four
// byte-rotated copies (1024 words, 4 KiB); 128 threads each load two
// entries per copy.
__device__ __forceinline__
void echo_gpu_init_128(uint32_t *const __restrict__ sharedMemory)
{
if (threadIdx.x < 128)
{
sharedMemory[threadIdx.x] = d_AES0[threadIdx.x];
sharedMemory[threadIdx.x + 128] = d_AES0[threadIdx.x + 128];
sharedMemory[threadIdx.x + 256] = ROL8(sharedMemory[threadIdx.x]);
sharedMemory[threadIdx.x + 256 + 128] = ROL8(sharedMemory[threadIdx.x + 128]);
sharedMemory[threadIdx.x + 512] = ROL16(sharedMemory[threadIdx.x]);
sharedMemory[threadIdx.x + 512 + 128] = ROL16(sharedMemory[threadIdx.x + 128]);
sharedMemory[threadIdx.x + 768] = ROL24(sharedMemory[threadIdx.x]);
sharedMemory[threadIdx.x + 768 + 128] = ROL24(sharedMemory[threadIdx.x + 128]);
}
}
/*__device__ __forceinline__
void echo_gpu_init(uint32_t *const __restrict__ sharedMemory)
{
if (threadIdx.x < 256)
{
sharedMemory[threadIdx.x] = d_AES0[threadIdx.x];
sharedMemory[threadIdx.x + 256] = ROL8(sharedMemory[threadIdx.x]);
sharedMemory[threadIdx.x + 512] = ROL16(sharedMemory[threadIdx.x]);
sharedMemory[threadIdx.x + 768] = ROL24(sharedMemory[threadIdx.x]);
}
}
*/
__global__
__launch_bounds__(128,5)
void x11_echo512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint64_t *const __restrict__ g_hash)
{
__shared__ __align__(128) uint32_t sharedMemory[1024];
echo_gpu_init_128(sharedMemory);
uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
uint32_t nounce = (startNounce + thread);
int hashPosition = nounce - startNounce;
uint32_t *Hash = (uint32_t*)&g_hash[hashPosition<<3];
cuda_echo_round(sharedMemory, Hash);
}
}
// Setup functions
__host__ void x11_echo512_cpu_init(int thr_id, uint32_t threads)
{
cudaMalloc(&d_nonce[thr_id], sizeof(uint2));
CUDA_SAFE_CALL(cudaMalloc(&(d_found[thr_id]), 2 * sizeof(uint32_t)));
}
__host__ void x11_echo512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash)
{
uint32_t threadsperblock = 128;
// compute how many thread blocks we need
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
x11_echo512_gpu_hash_64<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash);
}
__host__ void x11_echo512_cpu_free(int32_t thr_id)
{
// d_nonce and d_found were allocated with cudaMalloc, so release them with cudaFree
cudaFree(d_nonce[thr_id]);
cudaFree(d_found[thr_id]);
}
/*
__constant__ uint32_t P[48] = {
0xe7e9f5f5,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0xa4213d7e,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//8-12
0x01425eb8,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x65978b09,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//21-25
0x2cb6b661,
0x6b23b3b3,
0xcf93a7cf,
0x9d9d3751,
0x9ac2dea3,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//34-38
0x579f9f33,
0xfbfbfbfb,
0xfbfbfbfb,
0xefefd3c7,
0xdbfde1dd,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x34514d9e,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0xb134347e,
0xea6f7e7e,
0xbd7731bd,
0x8a8a1968,
0x14b8a457,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x265f4382,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af
//58-61
};
*/
__global__
__launch_bounds__(128, 5)
void x11_echo512_gpu_hash_64_final(uint32_t threads, uint32_t startNounce, const uint64_t *const __restrict__ g_hash, uint32_t *const __restrict__ d_found, uint32_t target)
{
const __align__(16) uint32_t P[48] = {
0xe7e9f5f5,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0xa4213d7e,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//8-12
0x01425eb8,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x65978b09,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//21-25
0x2cb6b661,
0x6b23b3b3,
0xcf93a7cf,
0x9d9d3751,
0x9ac2dea3,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
//34-38
0x579f9f33,
0xfbfbfbfb,
0xfbfbfbfb,
0xefefd3c7,
0xdbfde1dd,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x34514d9e,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0xb134347e,
0xea6f7e7e,
0xbd7731bd,
0x8a8a1968,
0x14b8a457,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af,
0x265f4382,
0xf5e7e9f5,
0xb3b36b23,
0xb3dbe7af
//58-61
};
uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
__shared__ __align__(128) uint32_t sharedMemory[1024];
echo_gpu_init_128(sharedMemory);
uint32_t nounce = (startNounce + thread);
int hashPosition = nounce - startNounce;
uint32_t *Hash = (uint32_t*)&g_hash[hashPosition * 8];
uint32_t h[16];
uint28 *phash = (uint28*)Hash;
uint28 *outpt = (uint28*)h;
outpt[0] = phash[0];
outpt[1] = phash[1];
uint32_t backup = h[7];
AES_2ROUND(sharedMemory,
h[0 + 0], h[0 + 1], h[0 + 2], h[0 + 3], 512 + 8);
AES_2ROUND(sharedMemory,
h[4 + 0], h[4 + 1], h[4 + 2], h[4 + 3], 512 + 9);
AES_2ROUND(sharedMemory,
h[8 + 0], h[8 + 1], h[8 + 2], h[8 + 3], 512 + 10);
AES_2ROUND(sharedMemory,
h[12 + 0], h[12 + 1], h[12 + 2], h[12 + 3], 512 + 11);
uint32_t W[64];
uint32_t abx, abx2, abx3, abx4,bcx,bcx2,bcx3,bcx4,cdx,cdx2,cdx3,cdx4;
// #pragma unroll
// for (int i = 0; i < 4; i++)
// {
uint32_t i = 0;
abx = mul27(((P[i] ^ P[i + 4]) & 0x80808080) >> 7) ^ ((P[i] ^ P[i + 4] ^ ((P[i] ^ P[i + 4]) & 0x80808080)) << 1);
abx2 = mul27(((P[12 + i] ^ h[i + 4]) & 0x80808080) >> 7) ^ (((P[12 + i] ^ h[i + 4]) ^ ((P[12 + i] ^ h[i + 4]) & 0x80808080)) << 1);
abx3 = mul27(((h[i] ^ P[24 + i + 0]) & 0x80808080) >> 7) ^ (((h[i] ^ P[24 + i + 0]) ^ ((h[i] ^ P[24 + i + 0]) & 0x80808080)) << 1);
abx4 = mul27(((P[36 + i] ^ P[36 + i + 4]) & 0x80808080) >> 7) ^ (((P[36 + i] ^ P[36 + i + 4]) ^ ((P[36 + i] ^ P[36 + i + 4]) & 0x80808080)) << 1);
bcx = mul27(((P[i + 4] ^ h[i + 8]) & 0x80808080) >> 7) ^ ((P[i + 4] ^ h[i + 8] ^ ((P[i + 4] ^ h[i + 8]) & 0x80808080)) << 1);
bcx2 = mul27(((h[i + 4] ^ P[12 + i + 4]) & 0x80808080) >> 7) ^ ((h[i + 4] ^ P[12 + i + 4] ^ ((h[i + 4] ^ P[12 + i + 4]) & 0x80808080)) << 1);
bcx3 = mul27(((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080) >> 7) ^ (((P[24 + i + 0] ^ P[24 + i + 4]) ^ ((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080)) << 1);
bcx4 = mul27(((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080) >> 7) ^ (((P[36 + i + 4] ^ P[36 + i + 8]) ^ ((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080)) << 1);
cdx = mul27(((h[i + 8] ^ P[i + 8]) & 0x80808080) >> 7) ^ (((h[i + 8] ^ P[i + 8]) ^ ((h[i + 8] ^ P[i + 8]) & 0x80808080)) << 1);
cdx2 = mul27(((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080) >> 7) ^ (((P[12 + i + 4] ^ P[12 + i + 8]) ^ ((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080)) << 1);
cdx3 = mul27(((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080) >> 7) ^ (((P[24 + i + 4] ^ P[24 + i + 8]) ^ ((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080)) << 1);
cdx4 = mul27(((P[36 + i + 8] ^ h[i + 12]) & 0x80808080) >> 7) ^ (((P[36 + i + 8] ^ h[i + 12]) ^ ((P[36 + i + 8] ^ h[i + 12]) & 0x80808080)) << 1);
W[0 + i] = abx ^ P[i + 4] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 4] = bcx ^ P[i] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 8] = cdx ^ P[i] ^ P[i + 4] ^ P[i + 8];
W[0 + i + 12] = abx ^ bcx ^ cdx ^ P[i] ^ P[i + 4] ^ h[i + 8];
W[16 + i] = abx2 ^ h[i + 4] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 4] = bcx2 ^ P[12 + i] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 8] = cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 8];
W[16 + i + 12] = abx2 ^ bcx2 ^ cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 4];
W[32 + i] = abx3 ^ P[24 + i + 0] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 4] = bcx3 ^ h[i] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 8] = cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 8];
W[32 + i + 12] = abx3 ^ bcx3 ^ cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 4];
W[48 + i] = abx4 ^ P[36 + i + 4] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 4] = bcx4 ^ P[36 + i] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 8] = cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ h[i + 12];
W[48 + i + 12] = abx4 ^ bcx4 ^ cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ P[36 + i + 8];
i = 1;
abx = mul27(((P[i] ^ P[i + 4]) & 0x80808080) >> 7) ^ ((P[i] ^ P[i + 4] ^ ((P[i] ^ P[i + 4]) & 0x80808080)) << 1);
abx2 = mul27(((P[12 + i] ^ h[i + 4]) & 0x80808080) >> 7) ^ (((P[12 + i] ^ h[i + 4]) ^ ((P[12 + i] ^ h[i + 4]) & 0x80808080)) << 1);
abx3 = mul27(((h[i] ^ P[24 + i + 0]) & 0x80808080) >> 7) ^ (((h[i] ^ P[24 + i + 0]) ^ ((h[i] ^ P[24 + i + 0]) & 0x80808080)) << 1);
abx4 = mul27(((P[36 + i] ^ P[36 + i + 4]) & 0x80808080) >> 7) ^ (((P[36 + i] ^ P[36 + i + 4]) ^ ((P[36 + i] ^ P[36 + i + 4]) & 0x80808080)) << 1);
bcx = mul27(((P[i + 4] ^ h[i + 8]) & 0x80808080) >> 7) ^ ((P[i + 4] ^ h[i + 8] ^ ((P[i + 4] ^ h[i + 8]) & 0x80808080)) << 1);
bcx2 = mul27(((h[i + 4] ^ P[12 + i + 4]) & 0x80808080) >> 7) ^ ((h[i + 4] ^ P[12 + i + 4] ^ ((h[i + 4] ^ P[12 + i + 4]) & 0x80808080)) << 1);
bcx3 = mul27(((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080) >> 7) ^ (((P[24 + i + 0] ^ P[24 + i + 4]) ^ ((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080)) << 1);
bcx4 = mul27(((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080) >> 7) ^ (((P[36 + i + 4] ^ P[36 + i + 8]) ^ ((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080)) << 1);
cdx = mul27(((h[i + 8] ^ P[i + 8]) & 0x80808080) >> 7) ^ (((h[i + 8] ^ P[i + 8]) ^ ((h[i + 8] ^ P[i + 8]) & 0x80808080)) << 1);
cdx2 = mul27(((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080) >> 7) ^ (((P[12 + i + 4] ^ P[12 + i + 8]) ^ ((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080)) << 1);
cdx3 = mul27(((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080) >> 7) ^ (((P[24 + i + 4] ^ P[24 + i + 8]) ^ ((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080)) << 1);
cdx4 = mul27(((P[36 + i + 8] ^ h[i + 12]) & 0x80808080) >> 7) ^ (((P[36 + i + 8] ^ h[i + 12]) ^ ((P[36 + i + 8] ^ h[i + 12]) & 0x80808080)) << 1);
W[0 + i] = abx ^ P[i + 4] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 4] = bcx ^ P[i] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 8] = cdx ^ P[i] ^ P[i + 4] ^ P[i + 8];
W[0 + i + 12] = abx ^ bcx ^ cdx ^ P[i] ^ P[i + 4] ^ h[i + 8];
W[16 + i] = abx2 ^ h[i + 4] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 4] = bcx2 ^ P[12 + i] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 8] = cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 8];
W[16 + i + 12] = abx2 ^ bcx2 ^ cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 4];
W[32 + i] = abx3 ^ P[24 + i + 0] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 4] = bcx3 ^ h[i] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 8] = cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 8];
W[32 + i + 12] = abx3 ^ bcx3 ^ cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 4];
W[48 + i] = abx4 ^ P[36 + i + 4] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 4] = bcx4 ^ P[36 + i] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 8] = cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ h[i + 12];
W[48 + i + 12] = abx4 ^ bcx4 ^ cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ P[36 + i + 8];
i = 2;
abx = mul27(((P[i] ^ P[i + 4]) & 0x80808080) >> 7) ^ ((P[i] ^ P[i + 4] ^ ((P[i] ^ P[i + 4]) & 0x80808080)) << 1);
abx2 = mul27(((P[12 + i] ^ h[i + 4]) & 0x80808080) >> 7) ^ (((P[12 + i] ^ h[i + 4]) ^ ((P[12 + i] ^ h[i + 4]) & 0x80808080)) << 1);
abx3 = mul27(((h[i] ^ P[24 + i + 0]) & 0x80808080) >> 7) ^ (((h[i] ^ P[24 + i + 0]) ^ ((h[i] ^ P[24 + i + 0]) & 0x80808080)) << 1);
abx4 = mul27(((P[36 + i] ^ P[36 + i + 4]) & 0x80808080) >> 7) ^ (((P[36 + i] ^ P[36 + i + 4]) ^ ((P[36 + i] ^ P[36 + i + 4]) & 0x80808080)) << 1);
bcx = mul27(((P[i + 4] ^ h[i + 8]) & 0x80808080) >> 7) ^ ((P[i + 4] ^ h[i + 8] ^ ((P[i + 4] ^ h[i + 8]) & 0x80808080)) << 1);
bcx2 = mul27(((h[i + 4] ^ P[12 + i + 4]) & 0x80808080) >> 7) ^ ((h[i + 4] ^ P[12 + i + 4] ^ ((h[i + 4] ^ P[12 + i + 4]) & 0x80808080)) << 1);
bcx3 = mul27(((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080) >> 7) ^ (((P[24 + i + 0] ^ P[24 + i + 4]) ^ ((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080)) << 1);
bcx4 = mul27(((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080) >> 7) ^ (((P[36 + i + 4] ^ P[36 + i + 8]) ^ ((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080)) << 1);
cdx = mul27(((h[i + 8] ^ P[i + 8]) & 0x80808080) >> 7) ^ (((h[i + 8] ^ P[i + 8]) ^ ((h[i + 8] ^ P[i + 8]) & 0x80808080)) << 1);
cdx2 = mul27(((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080) >> 7) ^ (((P[12 + i + 4] ^ P[12 + i + 8]) ^ ((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080)) << 1);
cdx3 = mul27(((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080) >> 7) ^ (((P[24 + i + 4] ^ P[24 + i + 8]) ^ ((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080)) << 1);
cdx4 = mul27(((P[36 + i + 8] ^ h[i + 12]) & 0x80808080) >> 7) ^ (((P[36 + i + 8] ^ h[i + 12]) ^ ((P[36 + i + 8] ^ h[i + 12]) & 0x80808080)) << 1);
W[0 + i] = abx ^ P[i + 4] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 4] = bcx ^ P[i] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 8] = cdx ^ P[i] ^ P[i + 4] ^ P[i + 8];
W[0 + i + 12] = abx ^ bcx ^ cdx ^ P[i] ^ P[i + 4] ^ h[i + 8];
W[16 + i] = abx2 ^ h[i + 4] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 4] = bcx2 ^ P[12 + i] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 8] = cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 8];
W[16 + i + 12] = abx2 ^ bcx2 ^ cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 4];
W[32 + i] = abx3 ^ P[24 + i + 0] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 4] = bcx3 ^ h[i] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 8] = cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 8];
W[32 + i + 12] = abx3 ^ bcx3 ^ cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 4];
W[48 + i] = abx4 ^ P[36 + i + 4] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 4] = bcx4 ^ P[36 + i] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 8] = cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ h[i + 12];
W[48 + i + 12] = abx4 ^ bcx4 ^ cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ P[36 + i + 8];
i = 3;
abx = mul27(((P[i] ^ P[i + 4]) & 0x80808080) >> 7) ^ ((P[i] ^ P[i + 4] ^ ((P[i] ^ P[i + 4]) & 0x80808080)) << 1);
abx2 = mul27(((P[12 + i] ^ h[i + 4]) & 0x80808080) >> 7) ^ (((P[12 + i] ^ h[i + 4]) ^ ((P[12 + i] ^ h[i + 4]) & 0x80808080)) << 1);
abx3 = mul27(((h[i] ^ P[24 + i + 0]) & 0x80808080) >> 7) ^ (((h[i] ^ P[24 + i + 0]) ^ ((h[i] ^ P[24 + i + 0]) & 0x80808080)) << 1);
abx4 = mul27(((P[36 + i] ^ P[36 + i + 4]) & 0x80808080) >> 7) ^ (((P[36 + i] ^ P[36 + i + 4]) ^ ((P[36 + i] ^ P[36 + i + 4]) & 0x80808080)) << 1);
bcx = mul27(((P[i + 4] ^ h[i + 8]) & 0x80808080) >> 7) ^ ((P[i + 4] ^ h[i + 8] ^ ((P[i + 4] ^ h[i + 8]) & 0x80808080)) << 1);
bcx2 = mul27(((h[i + 4] ^ P[12 + i + 4]) & 0x80808080) >> 7) ^ ((h[i + 4] ^ P[12 + i + 4] ^ ((h[i + 4] ^ P[12 + i + 4]) & 0x80808080)) << 1);
bcx3 = mul27(((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080) >> 7) ^ (((P[24 + i + 0] ^ P[24 + i + 4]) ^ ((P[24 + i + 0] ^ P[24 + i + 4]) & 0x80808080)) << 1);
bcx4 = mul27(((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080) >> 7) ^ (((P[36 + i + 4] ^ P[36 + i + 8]) ^ ((P[36 + i + 4] ^ P[36 + i + 8]) & 0x80808080)) << 1);
cdx = mul27(((h[i + 8] ^ P[i + 8]) & 0x80808080) >> 7) ^ (((h[i + 8] ^ P[i + 8]) ^ ((h[i + 8] ^ P[i + 8]) & 0x80808080)) << 1);
cdx2 = mul27(((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080) >> 7) ^ (((P[12 + i + 4] ^ P[12 + i + 8]) ^ ((P[12 + i + 4] ^ P[12 + i + 8]) & 0x80808080)) << 1);
cdx3 = mul27(((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080) >> 7) ^ (((P[24 + i + 4] ^ P[24 + i + 8]) ^ ((P[24 + i + 4] ^ P[24 + i + 8]) & 0x80808080)) << 1);
cdx4 = mul27(((P[36 + i + 8] ^ h[i + 12]) & 0x80808080) >> 7) ^ (((P[36 + i + 8] ^ h[i + 12]) ^ ((P[36 + i + 8] ^ h[i + 12]) & 0x80808080)) << 1);
W[0 + i] = abx ^ P[i + 4] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 4] = bcx ^ P[i] ^ h[i + 8] ^ P[i + 8];
W[0 + i + 8] = cdx ^ P[i] ^ P[i + 4] ^ P[i + 8];
W[0 + i + 12] = abx ^ bcx ^ cdx ^ P[i] ^ P[i + 4] ^ h[i + 8];
W[16 + i] = abx2 ^ h[i + 4] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 4] = bcx2 ^ P[12 + i] ^ P[12 + i + 4] ^ P[12 + i + 8];
W[16 + i + 8] = cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 8];
W[16 + i + 12] = abx2 ^ bcx2 ^ cdx2 ^ P[12 + i] ^ h[i + 4] ^ P[12 + i + 4];
W[32 + i] = abx3 ^ P[24 + i + 0] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 4] = bcx3 ^ h[i] ^ P[24 + i + 4] ^ P[24 + i + 8];
W[32 + i + 8] = cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 8];
W[32 + i + 12] = abx3 ^ bcx3 ^ cdx3 ^ h[i] ^ P[24 + i + 0] ^ P[24 + i + 4];
W[48 + i] = abx4 ^ P[36 + i + 4] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 4] = bcx4 ^ P[36 + i] ^ P[36 + i + 8] ^ h[i + 12];
W[48 + i + 8] = cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ h[i + 12];
W[48 + i + 12] = abx4 ^ bcx4 ^ cdx4 ^ P[36 + i] ^ P[36 + i + 4] ^ P[36 + i + 8];
// }
uint32_t k0 = 512 + 16;
uint32_t t, t2, t3;
uint32_t a, b, c, d;
uint32_t ab, bc, cd;
// uint32_t abx, bcx, cdx;
for (int k = 1; k < 9; k++)
{
AES_2ROUND(sharedMemory, W[0 + 4], W[0 + 5], W[0 + 6], W[0 + 7], k0 + 1);
AES_2ROUND(sharedMemory, W[16 + 4], W[16 + 5], W[16 + 6], W[16 + 7], k0 + 5);
AES_2ROUND(sharedMemory, W[32 + 4], W[32 + 5], W[32 + 6], W[32 + 7], k0 + 9);
AES_2ROUND(sharedMemory, W[48 + 4], W[48 + 5], W[48 + 6], W[48 + 7], k0 + 13);
__syncthreads();
/// 1, 5, 9, 13
t = W[4 + 0];
W[4 + 0] = W[20 + 0];
W[20 + 0] = W[36 + 0];
W[36 + 0] = W[52 + 0];
W[52 + 0] = t;
AES_2ROUND(sharedMemory, W[0 + 8], W[0 + 9], W[0 + 10], W[0 + 11], k0 + 2);
AES_2ROUND(sharedMemory, W[32 + 8], W[32 + 9], W[32 + 10], W[32 + 11], k0 + 10);
AES_2ROUND(sharedMemory, W[16 + 8], W[16 + 9], W[16 + 10], W[16 + 11], k0 + 6);
AES_2ROUND(sharedMemory, W[48 + 8], W[48 + 9], W[48 + 10], W[48 + 11], k0 + 14);
__syncthreads();
// 2, 6, 10, 14
t = W[8 + 0];
W[8 + 0] = W[40 + 0];
W[40 + 0] = t;
t = W[24 + 0];
W[24 + 0] = W[56 + 0];
W[56 + 0] = t;
AES_2ROUND(sharedMemory, W[48 + 12], W[48 + 13], W[48 + 14], W[48 + 15], k0 + 15);
AES_2ROUND(sharedMemory, W[32 + 12], W[32 + 13], W[32 + 14], W[32 + 15], k0 + 11);
AES_2ROUND(sharedMemory, W[16 + 12], W[16 + 13], W[16 + 14], W[16 + 15], k0 + 7);
AES_2ROUND(sharedMemory, W[0 + 12], W[0 + 13], W[0 + 14], W[0 + 15], k0 + 3);
__syncthreads();
// 15, 11, 7, 3
t = W[60 + 0];
W[60 + 0] = W[44 + 0];
W[44 + 0] = W[28 + 0];
W[28 + 0] = W[12 + 0];
W[12 + 0] = t;
/// 1, 5, 9, 13
t = W[4 + 1];
W[4 + 1] = W[20 + 1];
W[20 + 1] = W[36 + 1];
W[36 + 1] = W[52 + 1];
W[52 + 1] = t;
AES_2ROUND(sharedMemory, W[0 + 0], W[0 + 1], W[0 + 2], W[0 + 3], k0);
// 2, 6, 10, 14
t = W[8 + 1];
W[8 + 1] = W[40 + 1];
W[40 + 1] = t;
t = W[24 + 1];
W[24 + 1] = W[56 + 1];
W[56 + 1] = t;
// 15, 11, 7, 3
t = W[60 + 1];
W[60 + 1] = W[44 + 1];
W[44 + 1] = W[28 + 1];
W[28 + 1] = W[12 + 1];
W[12 + 1] = t;
AES_2ROUND(sharedMemory, W[16 + 0], W[16 + 1], W[16 + 2], W[16 + 3], k0 + 4);
/// 1, 5, 9, 13
t = W[4 + 2];
W[4 + 2] = W[20 + 2];
W[20 + 2] = W[36 + 2];
W[36 + 2] = W[52 + 2];
W[52 + 2] = t;
// 2, 6, 10, 14
t = W[8 + 2];
W[8 + 2] = W[40 + 2];
W[40 + 2] = t;
t = W[24 + 2];
W[24 + 2] = W[56 + 2];
W[56 + 2] = t;
AES_2ROUND(sharedMemory, W[32 + 0], W[32 + 1], W[32 + 2], W[32 + 3], k0 + 8);
// 15, 11, 7, 3
t = W[60 + 2];
W[60 + 2] = W[44 + 2];
W[44 + 2] = W[28 + 2];
W[28 + 2] = W[12 + 2];
W[12 + 2] = t;
/// 1, 5, 9, 13
t = W[4 + 3];
W[4 + 3] = W[20 + 3];
W[20 + 3] = W[36 + 3];
W[36 + 3] = W[52 + 3];
W[52 + 3] = t;
AES_2ROUND(sharedMemory, W[48 + 0], W[48 + 1], W[48 + 2], W[48 + 3], k0 + 12);
// 2, 6, 10, 14
t = W[8 + 3];
W[8 + 3] = W[40 + 3];
W[40 + 3] = t;
t = W[24 + 3];
W[24 + 3] = W[56 + 3];
W[56 + 3] = t;
// 15, 11, 7, 3
t = W[60 + 3];
W[60 + 3] = W[44 + 3];
W[44 + 3] = W[28 + 3];
W[28 + 3] = W[12 + 3];
W[12 + 3] = t;
k0 = k0 + 16;
// Mix Columns
#pragma unroll
for (int i = 0; i < 4; i++) // loop over the four 32-bit columns
{
#pragma unroll
for (int idx = 0; idx < 64; idx += 16) // loop over the elements (the four 16-word blocks of W)
{
a = W[idx + i];
b = W[idx + i + 4];
c = W[idx + i + 8];
d = W[idx + i + 12];
ab = a ^ b;
bc = b ^ c;
cd = c ^ d;
t = (ab & 0x80808080);
t2 = (bc & 0x80808080);
t3 = (cd & 0x80808080);
abx = mul27(t >> 7) ^ ((ab^t) << 1);
bcx = mul27(t2 >> 7) ^ ((bc^t2) << 1);
cdx = mul27(t3 >> 7) ^ ((cd^t3) << 1);
W[idx + i] = xor3x( abx,bc,d);
W[idx + i + 4] = xor3x(bcx , a , cd);
W[idx + i + 8] = xor3x(cdx , ab , d);
W[idx + i + 12] = xor3x(xor3x(abx , bcx , cdx) , ab , c);
}
}
}
//3, 11, 23, 31, 35, 43, 55, 63
AES_2ROUND(sharedMemory,
W[0 + 0], W[0 + 1], W[0 + 2], W[0 + 3],
512 + (9 * 16));
AES_2ROUND(sharedMemory,
W[0 + 8], W[0 + 9], W[0 + 10], W[0 + 11],
512 + (9 * 16) + 2);
AES_2ROUND(sharedMemory,
W[16 + 4], W[16 + 5], W[16 + 6], W[16 + 7],
512 + (9 * 16) + 5);
AES_2ROUND(sharedMemory,
W[16 + 12], W[16 + 13], W[16 + 14], W[16 + 15],
512 + (9 * 16) + 7);
AES_2ROUND(sharedMemory,
W[32 + 0], W[32 + 1], W[32 + 2], W[32 + 3],
512 + (9 * 16) + 8);
AES_2ROUND(sharedMemory,
W[32 + 8], W[32 + 9], W[32 + 10], W[32 + 11],
512 + (9 * 16) + 10);
AES_2ROUND(sharedMemory,
W[48 + 4], W[48 + 5], W[48 + 6], W[48 + 7],
512 + (9 * 16) + 13);
AES_2ROUND(sharedMemory,
W[60], W[61], W[62], W[63],
512 + (9 * 16) + 15);
bc = W[23] ^ W[43];
t2 = (bc & 0x80808080);
uint32_t test = xor3x(xor3x(mul27(t2 >> 7) , ((bc^t2) << 1) , W[3]) , W[43] , W[63]);
bc = W[55] ^ W[11];
t2 = (bc & 0x80808080);
test = test ^ mul27(t2 >> 7) ^ ((bc^t2) << 1) ^ W[35] ^ W[11] ^ W[31] ^ backup;
if (test <= target)
{
uint32_t tmp = atomicCAS(d_found, 0xffffffff, nounce);
if (tmp != 0xffffffff)
d_found[1] = nounce;
}
}
}
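/*
   Illustrative sketch (not part of the kernel above): the MixColumns loops repeat the packed GF(2^8)
   doubling ("xtime") pattern mul27(((x) & 0x80808080) >> 7) ^ ((x ^ (x & 0x80808080)) << 1) on four
   bytes packed into one uint32_t. Assuming mul27() multiplies each byte lane by the AES reduction
   constant 0x1b, the pattern is equivalent to the helper below; it is shown only to make the bit
   manipulation explicit and is not referenced elsewhere in this file.
*/
static __device__ __forceinline__ uint32_t xtime4_sketch(uint32_t x)
{
	const uint32_t hi = x & 0x80808080u;          // high bit of every byte lane
	// shift the low 7 bits of each byte left by one, then reduce the lanes that
	// overflowed by XORing in 0x1b (the AES polynomial x^8 + x^4 + x^3 + x + 1)
	return ((x ^ hi) << 1) ^ ((hi >> 7) * 0x1bu);
}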
const uint32_t threadsperblock = 128;
__host__ void x11_echo512_cpu_hash_64_final(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash, uint32_t target, uint32_t *h_found)
{
// compute how many thread blocks we need
dim3 grid((threads + threadsperblock - 1) / threadsperblock);
dim3 block(threadsperblock);
cudaMemset(d_found[thr_id], 0xffffffff, 2*sizeof(uint32_t));
x11_echo512_gpu_hash_64_final << <grid, block>> >(threads, startNounce, (uint64_t*)d_hash, d_found[thr_id], target);
cudaMemcpy(h_found, d_found[thr_id], 2*sizeof(uint32_t), cudaMemcpyDeviceToHost);
}
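/*
   Minimal host-side sketch of how a caller might drive the launcher above. The surrounding miner
   code is not shown here, so echo512_final_scan_sketch and its parameter names (throughput,
   first_nonce, target_word) are placeholders, not names from this code base.
*/
static uint32_t echo512_final_scan_sketch(int thr_id, uint32_t throughput, uint32_t first_nonce,
                                          uint32_t *d_hash, uint32_t target_word)
{
	uint32_t h_found[2];
	x11_echo512_cpu_hash_64_final(thr_id, throughput, first_nonce, d_hash, target_word, h_found);
	// h_found[0] holds the first nonce whose test word met the target (0xffffffff if none);
	// h_found[1] may hold a second candidate reported by another thread via d_found[1].
	return h_found[0];
}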
extern "C" {
#include <ccv.h>
#include <ccv_internal.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <nnc/ccv_nnc_internal.h>
}
#include <nnc/gpu/ccv_nnc_compat.h>
static inline __device__ __half log(const half v)
{
return hlog(v);
}
template<typename NUM1, typename NUM2>
__global__ void _ccv_nnc_categorical_crossentropy_forw_kernel(const int batch_size, const int count, const NUM1* const label, const NUM2* const a, NUM2* const c)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int idx = (int)((float)label[i] + 0.5);
c[i] = -log(a[i * count + idx]);
}
}
template<typename NUM1, typename NUM2>
__global__ void _ccv_nnc_categorical_crossentropy_forw_kernel_trim(const int batch_size, const int count, const float trim0, const float trim1, const NUM1* const label, const NUM2* const a, NUM2* const c)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int idx = (int)((float)label[i] + 0.5);
NUM2 p = (NUM2)trim1 * log(a[i * count + idx]);
for (int j = 0; j < idx; j++)
p += (NUM2)trim0 * log(a[i * count + j]);
for (int j = idx + 1; j < count; j++)
p += (NUM2)trim0 * log(a[i * count + j]);
c[i] = -p;
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_one_hot_forw_kernel(const int batch_size, const int count, const NUM* const label, const NUM* const a, NUM* const c)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
NUM p = label[i * count] * log(a[i * count]);
for (int j = 1; j < count; j++)
p += label[i * count + j] * log(a[i * count + j]);
c[i] = -p;
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_forw_kernel(const int batch_size, const int count, const int* const label, const NUM* const a, NUM* const c)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
c[i] = -log(a[i * count + label[i]]);
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_forw_kernel_trim(const int batch_size, const int count, const float trim0, const float trim1, const int* const label, const NUM* const a, NUM* const c)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int idx = label[i];
NUM p = (NUM)trim1 * log(a[i * count + idx]);
for (int j = 0; j < idx; j++)
p += (NUM)trim0 * log(a[i * count + j]);
for (int j = idx + 1; j < count; j++)
p += (NUM)trim0 * log(a[i * count + j]);
c[i] = -p;
}
}
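/*
   CPU reference sketch of what the *_trim kernels above compute for one row: a label-smoothed
   cross entropy where the true class is weighted by trim1 and every other class by trim0.
   This helper is only a readability aid (it assumes <math.h> for logf); the actual backend
   entry point is _ccv_nnc_categorical_crossentropy_forw below.
*/
static inline float crossentropy_trim_reference(const float* prob, int count, int label,
                                                float trim0, float trim1)
{
	float p = trim1 * logf(prob[label]);
	for (int j = 0; j < count; j++)
		if (j != label)
			p += trim0 * logf(prob[j]);
	return -p; // matches c[i] = -p in the kernels
}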
static int _ccv_nnc_categorical_crossentropy_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size == 2);
const ccv_nnc_tensor_t* a = inputs[0];
assert(!CCV_IS_TENSOR_VIEW(a));
const ccv_nnc_tensor_t* b = inputs[1];
assert(!CCV_IS_TENSOR_VIEW(b));
assert(output_size == 1);
ccv_nnc_tensor_t* c = outputs[0];
assert(!CCV_IS_TENSOR_VIEW(c));
const int axis_count = ccv_nnc_tensor_nd(a->info.dim);
const int batch_size = axis_count < 2 ? 1 : a->info.dim[0];
const int count = ccv_nnc_tensor_count(a->info) / batch_size;
int i;
cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
assert(a->info.datatype == c->info.datatype);
if (b->info.datatype == CCV_32F || b->info.datatype == CCV_16F)
{
// If the label tensor has more than one axis, the range is its channel count. Otherwise, if the batch
// size is 1, the range is still the channel count; otherwise the range is 1 (the only axis is the batch size).
const int range = ccv_nnc_tensor_nd(b->info.dim) > 1 ? ccv_nnc_tensor_get_c(b->info) : (batch_size == 1 ? b->info.dim[0] : 1);
if (range == 1)
{
for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && b->info.dim[i] > 0; i++)
{ assert(b->info.dim[i] == c->info.dim[i]); }
const float trim0 = cmd.info.label_smoothing.trim0;
const float trim1 = cmd.info.label_smoothing.trim1;
if (trim0 == 0 && trim1 == 1)
{
if (b->info.datatype == CCV_32F)
{
if (a->info.datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_forw_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.f32, (__half*)a->data.f16, (__half*)c->data.f16);
else
_ccv_nnc_categorical_crossentropy_forw_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.f32, a->data.f32, c->data.f32);
} else {
assert(b->info.datatype == CCV_16F);
assert(a->info.datatype == CCV_16F);
_ccv_nnc_categorical_crossentropy_forw_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)c->data.f16);
}
} else {
if (b->info.datatype == CCV_32F)
{
if (a->info.datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_forw_kernel_trim<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, trim0, trim1, b->data.f32, (__half*)a->data.f16, (__half*)c->data.f16);
else
_ccv_nnc_categorical_crossentropy_forw_kernel_trim<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, trim0, trim1, b->data.f32, a->data.f32, c->data.f32);
} else {
assert(b->info.datatype == CCV_16F);
assert(a->info.datatype == CCV_16F);
_ccv_nnc_categorical_crossentropy_forw_kernel_trim<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, trim0, trim1, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)c->data.f16);
}
}
} else {
assert(range == count);
assert(a->info.datatype == b->info.datatype);
if (a->info.datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_one_hot_forw_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)c->data.f16);
else
_ccv_nnc_categorical_crossentropy_one_hot_forw_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.f32, a->data.f32, c->data.f32);
}
} else if (b->info.datatype == CCV_32S) {
for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && b->info.dim[i] > 0; i++)
{ assert(b->info.dim[i] == c->info.dim[i]); }
const float trim0 = cmd.info.label_smoothing.trim0;
const float trim1 = cmd.info.label_smoothing.trim1;
if (trim0 == 0 && trim1 == 1)
{
if (a->info.datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_forw_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.i32, (__half*)a->data.f16, (__half*)c->data.f16);
else
_ccv_nnc_categorical_crossentropy_forw_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.i32, a->data.f32, c->data.f32);
} else {
if (a->info.datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_forw_kernel_trim<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, trim0, trim1, b->data.i32, (__half*)a->data.f16, (__half*)c->data.f16);
else
_ccv_nnc_categorical_crossentropy_forw_kernel_trim<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, trim0, trim1, b->data.i32, a->data.f32, c->data.f32);
}
}
return CCV_NNC_EXEC_SUCCESS;
}
template<typename NUM>
__global__ void _ccv_nnc_set_zero_kernel(const int n, NUM* const a)
{
CUDA_1D_KERNEL_LOOP(i, n) {
a[i] = 0;
}
}
template<typename NUM1, typename NUM2>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel(const int batch_size, const int count, const NUM2* const g, const NUM1* const label, const NUM2* const a, NUM2* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int idx = (int)((float)label[i] + 0.5);
h[i * count + idx] = -g[i] / a[i * count + idx];
}
}
template<typename NUM1, typename NUM2>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel_trim(const int batch_size_count, const int count, const float trim0, const float trim1, const NUM2* const g, const NUM1* const label, const NUM2* const a, NUM2* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size_count) {
const int idx = i / count;
const int batch_idx = i % count;
const int lbl = (int)((float)label[idx] + 0.5);
if (batch_idx == lbl)
h[i] = -g[idx] * (NUM2)trim1 / a[i];
else
h[i] = -g[idx] * (NUM2)trim0 / a[i];
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_one_hot_back_kernel(const int batch_size_count, const int count, const NUM* const g, const NUM* const label, const NUM* const a, NUM* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size_count) {
const int idx = i / count;
h[i] = -g[idx] * label[i] / a[i];
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel(const int batch_size, const int count, const NUM* const g, const int* const label, const NUM* const a, NUM* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int idx = label[i];
h[i * count + idx] = -g[i] / a[i * count + idx];
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel_trim(const int batch_size_count, const int count, const float trim0, const float trim1, const NUM* const g, const int* const label, const NUM* const a, NUM* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size_count) {
const int idx = i / count;
const int batch_idx = i % count;
const int lbl = label[idx];
if (batch_idx == lbl)
h[i] = -g[idx] * (NUM)trim1 / a[i];
else
h[i] = -g[idx] * (NUM)trim0 / a[i];
}
}
template<typename NUM1, typename NUM2>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel(const int batch_size, const int count, const NUM1* const label, const NUM2* const a, NUM2* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int idx = (int)((float)label[i] + 0.5);
h[i * count + idx] = (NUM2)-1. / a[i * count + idx];
}
}
template<typename NUM1, typename NUM2>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel_trim(const int batch_size_count, const int count, const float trim0, const float trim1, const NUM1* const label, const NUM2* const a, NUM2* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size_count) {
const int idx = i / count;
const int batch_idx = i % count;
const int lbl = (int)((float)label[idx] + 0.5);
if (batch_idx == lbl)
h[i] = (NUM2)-trim1 / a[i];
else
h[i] = (NUM2)-trim0 / a[i];
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_one_hot_back_kernel(const int batch_size_count, const int count, const NUM* const label, const NUM* const a, NUM* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size_count) {
h[i] = -label[i] / a[i];
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel(const int batch_size, const int count, const int* const label, const NUM* const a, NUM* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size) {
const int idx = label[i];
h[i * count + idx] = (NUM)-1. / a[i * count + idx];
}
}
template<typename NUM>
__global__ void _ccv_nnc_categorical_crossentropy_back_kernel_trim(const int batch_size_count, const int count, const float trim0, const float trim1, const int* const label, const NUM* const a, NUM* const h)
{
CUDA_1D_KERNEL_LOOP(i, batch_size_count) {
const int idx = i / count;
const int batch_idx = i % count;
const int lbl = label[idx];
if (batch_idx == lbl)
h[i] = (NUM)-trim1 / a[i];
else
h[i] = (NUM)-trim0 / a[i];
}
}
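/*
   CPU reference sketch for the *_back_kernel_trim variants above: the gradient of the label-smoothed
   loss w.r.t. each probability is -g * trim_j / a_j, with trim_j = trim1 for the labeled class and
   trim0 otherwise (g is effectively 1 when no incoming gradient tensor is given). Shown only to
   summarize the kernels; the backend entry point is _ccv_nnc_categorical_crossentropy_back below.
*/
static inline void crossentropy_trim_grad_reference(const float* prob, float g, int count,
                                                    int label, float trim0, float trim1, float* grad)
{
	for (int j = 0; j < count; j++)
		grad[j] = -g * (j == label ? trim1 : trim0) / prob[j];
}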
static int _ccv_nnc_categorical_crossentropy_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size >= 3);
assert(output_size >= 1);
const ccv_nnc_tensor_t* g = inputs[0];
assert(!g || !CCV_IS_TENSOR_VIEW(g));
const ccv_nnc_tensor_t* a = inputs[1];
assert(!CCV_IS_TENSOR_VIEW(a));
const ccv_nnc_tensor_t* b = inputs[2];
assert(!CCV_IS_TENSOR_VIEW(b));
ccv_nnc_tensor_t* h = outputs[0];
assert(!CCV_IS_TENSOR_VIEW(h));
const int axis_count = ccv_nnc_tensor_nd(a->info.dim);
const int batch_size = axis_count < 2 ? 1 : a->info.dim[0];
const int bcount = ccv_nnc_tensor_count(a->info);
const int count = bcount / batch_size;
int i;
cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
assert(a->info.datatype == h->info.datatype);
const int datatype = a->info.datatype;
if (g)
{
assert(g->info.datatype == datatype);
if (b->info.datatype == CCV_32F || b->info.datatype == CCV_16F)
{
// If the label tensor has more than one axis, the range is its channel count. Otherwise, if the batch
// size is 1, the range is still the channel count; otherwise the range is 1 (the only axis is the batch size).
const int range = ccv_nnc_tensor_nd(b->info.dim) > 1 ? ccv_nnc_tensor_get_c(b->info) : (batch_size == 1 ? b->info.dim[0] : 1);
if (range == 1)
{
for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++)
{ assert(a->info.dim[i] == h->info.dim[i]); }
const float trim0 = cmd.info.label_smoothing.trim0;
const float trim1 = cmd.info.label_smoothing.trim1;
if (trim0 == 0 && trim1 == 1)
{
if (datatype == CCV_16F)
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, (__half *)h->data.f16);
else
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, h->data.f32);
if (b->info.datatype == CCV_32F)
{
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, (__half*)g->data.f16, b->data.f32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, g->data.f32, b->data.f32, a->data.f32, h->data.f32);
} else {
assert(b->info.datatype == CCV_16F);
assert(datatype == CCV_16F);
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, (__half*)g->data.f16, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)h->data.f16);
}
} else {
if (b->info.datatype == CCV_32F)
{
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, (__half*)g->data.f16, b->data.f32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, g->data.f32, b->data.f32, a->data.f32, h->data.f32);
} else {
assert(b->info.datatype == CCV_16F);
assert(datatype == CCV_16F);
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, (__half*)g->data.f16, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)h->data.f16);
}
}
} else {
assert(range == count);
assert(b->info.datatype == datatype);
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_one_hot_back_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, (__half*)g->data.f16, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_one_hot_back_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, g->data.f32, b->data.f32, a->data.f32, h->data.f32);
}
} else if (b->info.datatype == CCV_32S) {
for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++)
{ assert(a->info.dim[i] == h->info.dim[i]); }
const float trim0 = cmd.info.label_smoothing.trim0;
const float trim1 = cmd.info.label_smoothing.trim1;
if (trim0 == 0 && trim1 == 1)
{
if (datatype == CCV_16F)
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, (__half *)h->data.f16);
else
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, h->data.f32);
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, (__half*)g->data.f16, b->data.i32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, g->data.f32, b->data.i32, a->data.f32, h->data.f32);
} else {
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, (__half*)g->data.f16, b->data.i32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, g->data.f32, b->data.i32, a->data.f32, h->data.f32);
}
}
} else {
if (b->info.datatype == CCV_32F || b->info.datatype == CCV_16F)
{
// If the label tensor has more than one axis, the range is its channel count. Otherwise, if the batch
// size is 1, the range is still the channel count; otherwise the range is 1 (the only axis is the batch size).
const int range = ccv_nnc_tensor_nd(b->info.dim) > 1 ? ccv_nnc_tensor_get_c(b->info) : (batch_size == 1 ? b->info.dim[0] : 1);
if (range == 1)
{
for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++)
{ assert(a->info.dim[i] == h->info.dim[i]); }
const float trim0 = cmd.info.label_smoothing.trim0;
const float trim1 = cmd.info.label_smoothing.trim1;
if (trim0 == 0 && trim1 == 1)
{
if (datatype == CCV_16F)
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, (__half *)h->data.f16);
else
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, h->data.f32);
if (b->info.datatype == CCV_32F)
{
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.f32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.f32, a->data.f32, h->data.f32);
} else {
assert(b->info.datatype == CCV_16F);
assert(datatype == CCV_16F);
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)h->data.f16);
}
} else {
if (b->info.datatype == CCV_32F)
{
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, b->data.f32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, b->data.f32, a->data.f32, h->data.f32);
} else {
assert(b->info.datatype == CCV_16F);
assert(datatype == CCV_16F);
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)h->data.f16);
}
}
} else {
assert(range == count);
assert(b->info.datatype == datatype);
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_one_hot_back_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, (__half*)b->data.f16, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_one_hot_back_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, b->data.f32, a->data.f32, h->data.f32);
}
} else if (b->info.datatype == CCV_32S) {
for (i = 0; i < CCV_NNC_MAX_DIM_ALLOC && a->info.dim[i] > 0; i++)
{ assert(a->info.dim[i] == h->info.dim[i]); }
const float trim0 = cmd.info.label_smoothing.trim0;
const float trim1 = cmd.info.label_smoothing.trim1;
if (trim0 == 0 && trim1 == 1)
{
if (datatype == CCV_16F)
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, (__half *)h->data.f16);
else
_ccv_nnc_set_zero_kernel<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, h->data.f32);
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.i32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel<<<CUDA_GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, stream>>>(batch_size, count, b->data.i32, a->data.f32, h->data.f32);
} else {
if (datatype == CCV_16F)
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, b->data.i32, (__half*)a->data.f16, (__half*)h->data.f16);
else
_ccv_nnc_categorical_crossentropy_back_kernel_trim<<<CUDA_GET_BLOCKS(bcount), CUDA_NUM_THREADS, 0, stream>>>(bcount, count, trim0, trim1, b->data.i32, a->data.f32, h->data.f32);
}
}
}
return CCV_NNC_EXEC_SUCCESS;
}
REGISTER_COMMAND_BACKEND(CCV_NNC_CATEGORICAL_CROSSENTROPY_FORWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_CHWN;
registry->tensor_datatypes = CCV_32F | CCV_32S | CCV_16F;
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = 1;
registry->exec = _ccv_nnc_categorical_crossentropy_forw;
}
REGISTER_COMMAND_BACKEND(CCV_NNC_CATEGORICAL_CROSSENTROPY_BACKWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_CHWN;
registry->tensor_datatypes = CCV_32F | CCV_32S | CCV_16F;
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = 1;
registry->exec = _ccv_nnc_categorical_crossentropy_back;
}
#include "vector_pool_gpu.h"
#include "cuda_utils.h"
__global__ void query_three_nn_by_stacked_local_idxs_kernel(
const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
const int *stack_neighbor_idxs, const int *start_len,
int M, int num_total_grids){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
// new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
// new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
int grid_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M || grid_idx >= num_total_grids) return;
new_xyz += pt_idx * 3;
new_xyz_grid_centers += pt_idx * num_total_grids * 3 + grid_idx * 3;
new_xyz_grid_idxs += pt_idx * num_total_grids * 3 + grid_idx * 3;
new_xyz_grid_dist2 += pt_idx * num_total_grids * 3 + grid_idx * 3;
start_len += pt_idx * 2;
stack_neighbor_idxs += start_len[0];
int neighbor_length = start_len[1];
float center_x = new_xyz_grid_centers[0];
float center_y = new_xyz_grid_centers[1];
float center_z = new_xyz_grid_centers[2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = -1, besti2 = -1, besti3 = -1;
for (int k = 0; k < neighbor_length; k++){
int cur_neighbor_idx = stack_neighbor_idxs[k];
float x = support_xyz[cur_neighbor_idx * 3 + 0];
float y = support_xyz[cur_neighbor_idx * 3 + 1];
float z = support_xyz[cur_neighbor_idx * 3 + 2];
float d = (center_x - x) * (center_x - x) + (center_y - y) * (center_y - y) + (center_z - z) * (center_z - z);
if (d < best1) {
best3 = best2; besti3 = besti2;
best2 = best1; besti2 = besti1;
best1 = d; besti1 = cur_neighbor_idx;
}
else if (d < best2) {
best3 = best2; besti3 = besti2;
best2 = d; besti2 = cur_neighbor_idx;
}
else if (d < best3) {
best3 = d; besti3 = cur_neighbor_idx;
}
}
if (besti2 == -1){
besti2 = besti1; best2 = best1;
}
if (besti3 == -1){
besti3 = besti1; best3 = best1;
}
new_xyz_grid_dist2[0] = best1;
new_xyz_grid_dist2[1] = best2;
new_xyz_grid_dist2[2] = best3;
new_xyz_grid_idxs[0] = besti1;
new_xyz_grid_idxs[1] = besti2;
new_xyz_grid_idxs[2] = besti3;
}
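/*
   Host-side sketch of the three-nearest selection cascade used in the kernel above: each new squared
   distance is inserted into (best[0] <= best[1] <= best[2]) by shifting the larger entries down.
   Hypothetical helper for illustration only; it is not called by the launcher below.
*/
static inline void insert_three_nn_sketch(float d, int idx, double best[3], int besti[3])
{
	if (d < best[0]) {
		best[2] = best[1]; besti[2] = besti[1];
		best[1] = best[0]; besti[1] = besti[0];
		best[0] = d;       besti[0] = idx;
	} else if (d < best[1]) {
		best[2] = best[1]; besti[2] = besti[1];
		best[1] = d;       besti[1] = idx;
	} else if (d < best[2]) {
		best[2] = d;       besti[2] = idx;
	}
}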
int query_three_nn_by_stacked_local_idxs_kernel_launcher_stack(
const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
const int *stack_neighbor_idxs, const int *start_len,
int M, int num_total_grids){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
// new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
// new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
cudaError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), num_total_grids); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
query_three_nn_by_stacked_local_idxs_kernel<<<blocks, threads>>>(
support_xyz, new_xyz, new_xyz_grid_centers,
new_xyz_grid_idxs, new_xyz_grid_dist2, stack_neighbor_idxs, start_len,
M, num_total_grids
);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
return 0;
}
__global__ void query_stacked_local_neighbor_idxs_kernel(
const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
// cumsum: (1), max offset of current data in stack_neighbor_idxs
// max_neighbour_distance: float
// nsample: find all (-1), find limited number(>0)
// neighbor_type: 1: ball, others: cube
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
for (int k = 1; k < batch_size; k++){
if (pt_idx < pt_cnt) break;
pt_cnt += new_xyz_batch_cnt[k];
bs_idx = k;
}
int xyz_batch_start_idx = 0;
for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
support_xyz += xyz_batch_start_idx * 3;
new_xyz += pt_idx * 3;
start_len += pt_idx * 2;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int n = xyz_batch_cnt[bs_idx];
float local_x, local_y, local_z;
float radius2 = max_neighbour_distance * max_neighbour_distance;
int temp_idxs[1000];
int sample_cnt = 0;
for (int k = 0; k < n; ++k) {
local_x = support_xyz[k * 3 + 0] - new_x;
local_y = support_xyz[k * 3 + 1] - new_y;
local_z = support_xyz[k * 3 + 2] - new_z;
if (neighbor_type == 1){
// ball
if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
continue;
}
}
else{
// voxel
if ((fabs(local_x) > max_neighbour_distance) |
(fabs(local_y) > max_neighbour_distance) |
(fabs(local_z) > max_neighbour_distance)){
continue;
}
}
if (sample_cnt < 1000){
temp_idxs[sample_cnt] = k;
}
else{
break;
}
sample_cnt++;
if (nsample > 0 && sample_cnt >= nsample) break;
}
start_len[0] = atomicAdd(cumsum, sample_cnt);
start_len[1] = sample_cnt;
int max_thresh = avg_length_of_neighbor_idxs * M;
if (start_len[0] >= max_thresh) return;
stack_neighbor_idxs += start_len[0];
if (start_len[0] + sample_cnt >= max_thresh) sample_cnt = max_thresh - start_len[0];
for (int k = 0; k < sample_cnt; k++){
stack_neighbor_idxs[k] = temp_idxs[k] + xyz_batch_start_idx;
}
}
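/*
   Sketch of the batch bookkeeping the stacked kernels in this file repeat: for a flat query index,
   walk the per-batch query counts to find which batch it belongs to, then accumulate the support
   point counts of the preceding batches to get the offset into support_xyz. Hypothetical helper
   shown only to document the pattern.
*/
static inline void locate_batch_sketch(int pt_idx, int batch_size,
                                       const int* new_xyz_batch_cnt, const int* xyz_batch_cnt,
                                       int* bs_idx_out, int* xyz_start_out)
{
	int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
	for (int k = 1; k < batch_size; k++) {
		if (pt_idx < pt_cnt) break;
		pt_cnt += new_xyz_batch_cnt[k];
		bs_idx = k;
	}
	int start = 0;
	for (int k = 0; k < bs_idx; k++) start += xyz_batch_cnt[k];
	*bs_idx_out = bs_idx;
	*xyz_start_out = start;
}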
int query_stacked_local_neighbor_idxs_kernel_launcher_stack(
const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// stack_neighbor_idxs: (max_length_of_neighbor_idxs)
// start_len: (M1 + M2, 2) [start_offset, neighbor_length]
// cumsum: (1), max offset of current data in stack_neighbor_idxs
// max_neighbour_distance: float
// nsample: find all (-1), find limited number(>0)
// neighbor_type: 1: ball, others: cube
cudaError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
query_stacked_local_neighbor_idxs_kernel<<<blocks, threads>>>(
support_xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt,
stack_neighbor_idxs, start_len, cumsum, avg_length_of_neighbor_idxs,
max_neighbour_distance, batch_size, M, nsample, neighbor_type
);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
return 0;
}
__global__ void vector_pool_kernel_stack(
const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
int batch_size, int M, int num_c_in, int num_c_out,
int num_c_each_grid, int num_total_grids, int *point_cnt_of_grid, int *grouped_idxs,
int use_xyz, float grid_size_x, float grid_size_y,
float grid_size_z, int *cum_sum, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// support_features: (N1 + N2 ..., C)
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_features: (M1 + M2 ..., C), C = num_total_grids * num_c_each_grid
// new_local_xyz: (M1 + M2 ..., 3 * num_total_grids)
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3)[idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// use_xyz: whether to calculate new_local_xyz
// neighbor_type: 1: ball, others: cube
// pooling_type: 0: avg_pool, 1: random choice
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
for (int k = 1; k < batch_size; k++){
if (pt_idx < pt_cnt) break;
pt_cnt += new_xyz_batch_cnt[k];
bs_idx = k;
}
int xyz_batch_start_idx = 0;
for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
support_xyz += xyz_batch_start_idx * 3;
support_features += xyz_batch_start_idx * num_c_in;
new_xyz += pt_idx * 3;
new_features += pt_idx * num_c_out;
point_cnt_of_grid += pt_idx * num_total_grids;
new_local_xyz += pt_idx * 3 * num_total_grids;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int n = xyz_batch_cnt[bs_idx], grid_idx_x, grid_idx_y, grid_idx_z, grid_idx;
float local_x, local_y, local_z;
float radius2 = max_neighbour_distance * max_neighbour_distance;
int sample_cnt = 0;
for (int k = 0; k < n; ++k) {
local_x = support_xyz[k * 3 + 0] - new_x;
local_y = support_xyz[k * 3 + 1] - new_y;
local_z = support_xyz[k * 3 + 2] - new_z;
if (neighbor_type == 1){
// ball
if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
continue;
}
}
else{
// voxel
if ((fabs(local_x) > max_neighbour_distance) |
(fabs(local_y) > max_neighbour_distance) |
(fabs(local_z) > max_neighbour_distance)){
continue;
}
}
grid_idx_x = floorf((local_x + max_neighbour_distance) / grid_size_x);
grid_idx_y = floorf((local_y + max_neighbour_distance) / grid_size_y);
grid_idx_z = floorf((local_z + max_neighbour_distance) / grid_size_z);
grid_idx = grid_idx_x * num_grid_y * num_grid_z + grid_idx_y * num_grid_z + grid_idx_z;
grid_idx = min(max(grid_idx, 0), num_total_grids - 1);
if (pooling_type == 0){
// avg pooling
point_cnt_of_grid[grid_idx] ++;
for (int i = 0; i < num_c_in; i++){
new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] += support_features[k * num_c_in + i];
}
if (use_xyz){
new_local_xyz[grid_idx * 3 + 0] += local_x;
new_local_xyz[grid_idx * 3 + 1] += local_y;
new_local_xyz[grid_idx * 3 + 2] += local_z;
}
int cnt = atomicAdd(cum_sum, 1);
if (cnt >= num_max_sum_points) continue; // keep counting so the caller can learn the required maximum number of points
grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
grouped_idxs[cnt * 3 + 1] = pt_idx;
grouped_idxs[cnt * 3 + 2] = grid_idx;
sample_cnt++;
if(nsample > 0 && sample_cnt >= nsample) break;
}
else if (pooling_type == 1){
// random choose one within sub-voxel
// printf("new_xyz=(%.2f, %.2f, %.2f, ), find neighbor k=%d: support_xyz=(%.2f, %.2f, %.2f), local_xyz=(%.2f, %.2f, %.2f), neighbor=%.2f, grid_idx=%d, point_cnt_of_grid_idx=%d\n",
// new_x, new_y, new_z, k, support_xyz[k * 3 + 0], support_xyz[k * 3 + 1], support_xyz[k * 3 + 2], local_x, local_y, local_z, max_neighbour_distance, grid_idx, point_cnt_of_grid[grid_idx]);
if (point_cnt_of_grid[grid_idx] == 0){
point_cnt_of_grid[grid_idx] ++;
for (int i = 0; i < num_c_in; i++){
new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] = support_features[k * num_c_in + i];
}
if (use_xyz){
new_local_xyz[grid_idx * 3 + 0] = local_x;
new_local_xyz[grid_idx * 3 + 1] = local_y;
new_local_xyz[grid_idx * 3 + 2] = local_z;
}
int cnt = atomicAdd(cum_sum, 1);
if (cnt >= num_max_sum_points) continue; // keep counting so the caller can learn the required maximum number of points
grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
grouped_idxs[cnt * 3 + 1] = pt_idx;
grouped_idxs[cnt * 3 + 2] = grid_idx;
sample_cnt++;
if((nsample > 0 && sample_cnt >= nsample) || sample_cnt >= num_total_grids) break;
}
}
}
}
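/*
   Sketch of the sub-voxel indexing in vector_pool_kernel_stack above: a local offset in [-R, R]^3 is
   quantized into (num_grid_x, num_grid_y, num_grid_z) cells and flattened, with clamping to stay
   inside num_total_grids. Illustrative helper only; parameter names mirror the kernel, and floorf
   is assumed to come from the standard math headers.
*/
static inline int local_to_grid_idx_sketch(float local_x, float local_y, float local_z, float R,
                                           float grid_size_x, float grid_size_y, float grid_size_z,
                                           int num_grid_y, int num_grid_z, int num_total_grids)
{
	int gx = (int)floorf((local_x + R) / grid_size_x);
	int gy = (int)floorf((local_y + R) / grid_size_y);
	int gz = (int)floorf((local_z + R) / grid_size_z);
	int grid_idx = gx * num_grid_y * num_grid_z + gy * num_grid_z + gz;
	// clamp, exactly as the kernel does with min(max(grid_idx, 0), num_total_grids - 1)
	if (grid_idx < 0) grid_idx = 0;
	if (grid_idx > num_total_grids - 1) grid_idx = num_total_grids - 1;
	return grid_idx;
}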
int vector_pool_kernel_launcher_stack(
const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
int *point_cnt_of_grid, int *grouped_idxs,
int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
int batch_size, int N, int M, int num_c_in, int num_c_out, int num_total_grids,
int use_xyz, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
// support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
// support_features: (N1 + N2 ..., C)
// xyz_batch_cnt: (batch_size), [N1, N2, ...]
// new_xyz: (M1 + M2 ..., 3) centers of the ball query
// new_features: (M1 + M2 ..., C)
// new_local_xyz: (M1 + M2 ..., 3)
// new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
// num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
// use_xyz: whether to calculate new_local_xyz
// grouped_idxs: (num_max_sum_points, 3)[idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// neighbor_type: 1: ball, others: cube
// pooling_type: 0: avg_pool, 1: random choice
cudaError_t err;
int num_c_each_grid = num_c_out / num_total_grids;
float grid_size_x = max_neighbour_distance * 2 / num_grid_x;
float grid_size_y = max_neighbour_distance * 2 / num_grid_y;
float grid_size_z = max_neighbour_distance * 2 / num_grid_z;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
int cum_sum = 0;
int *p_cum_sum;
cudaMalloc((void**)&p_cum_sum, sizeof(int));
cudaMemcpy(p_cum_sum, &cum_sum, sizeof(int), cudaMemcpyHostToDevice);
vector_pool_kernel_stack<<<blocks, threads>>>(
support_xyz, support_features, xyz_batch_cnt,
new_xyz, new_features, new_local_xyz, new_xyz_batch_cnt,
num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance,
batch_size, M, num_c_in, num_c_out,
num_c_each_grid, num_total_grids, point_cnt_of_grid, grouped_idxs,
use_xyz, grid_size_x, grid_size_y, grid_size_z, p_cum_sum, num_max_sum_points,
nsample, neighbor_type, pooling_type
);
cudaMemcpy(&cum_sum, p_cum_sum, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(p_cum_sum); // free the temporary device counter so repeated calls do not leak memory
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
return cum_sum;
}
__global__ void vector_pool_grad_kernel_stack(const float *grad_new_features,
const int *point_cnt_of_grid, const int *grouped_idxs,
float *grad_support_features, int N, int M, int num_c_out, int num_c_in,
int num_c_each_grid, int num_total_grids, int num_max_sum_points){
// grad_new_features: (M1 + M2 ..., C_out)
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// grad_support_features: (N1 + N2 ..., C_in)
int channel_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_max_sum_points || channel_idx >= num_c_in) return;
int idx_of_support_xyz = grouped_idxs[index * 3 + 0];
int idx_of_new_xyz = grouped_idxs[index * 3 + 1];
int idx_of_grid_idx = grouped_idxs[index * 3 + 2];
int num_total_pts = point_cnt_of_grid[idx_of_new_xyz * num_total_grids + idx_of_grid_idx];
grad_support_features += idx_of_support_xyz * num_c_in + channel_idx;
grad_new_features += idx_of_new_xyz * num_c_out + idx_of_grid_idx * num_c_each_grid;
int channel_idx_of_cin = channel_idx % num_c_each_grid;
float cur_grad = 1 / fmaxf(float(num_total_pts), 1.0);
atomicAdd(grad_support_features, grad_new_features[channel_idx_of_cin] * cur_grad);
}
void vector_pool_grad_kernel_launcher_stack(
const float *grad_new_features, const int *point_cnt_of_grid, const int *grouped_idxs,
float *grad_support_features, int N, int M, int num_c_out, int num_c_in, int num_total_grids,
int num_max_sum_points){
// grad_new_features: (M1 + M2 ..., C_out)
// point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
// grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
// grad_support_features: (N1 + N2 ..., C_in)
int num_c_each_grid = num_c_out / num_total_grids;
cudaError_t err;
dim3 blocks(DIVUP(num_max_sum_points, THREADS_PER_BLOCK), num_c_in); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
vector_pool_grad_kernel_stack<<<blocks, threads>>>(
grad_new_features, point_cnt_of_grid, grouped_idxs, grad_support_features,
N, M, num_c_out, num_c_in, num_c_each_grid, num_total_grids, num_max_sum_points
);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
#include "support_kernels.cu"
#include <stdio.h>
//////////////////////////////
//////////////////////////////
//////////////////////////////
#define LEVEL_MIN 3
extern "C" __global__ void boundaryReduction(const int n_particles,
real4 *positions,
float3 *output_min,
float3 *output_max)
{
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
//const uint idx = bid * blockDim.x + tid;
volatile __shared__ float3 shmem[512];
float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[256];
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
// unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
real4 pos;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
//based on reduce6 example
while (i < n_particles) {
if (i < n_particles)
{
pos = positions[i];
r_min.x = fminf(pos.x, r_min.x);
r_min.y = fminf(pos.y, r_min.y);
r_min.z = fminf(pos.z, r_min.z);
r_max.x = fmaxf(pos.x, r_max.x);
r_max.y = fmaxf(pos.y, r_max.y);
r_max.z = fmaxf(pos.z, r_max.z);
}
if (i + blockSize < n_particles)
{
pos = positions[i + blockSize];
r_min.x = fminf(pos.x, r_min.x);
r_min.y = fminf(pos.y, r_min.y);
r_min.z = fminf(pos.z, r_min.z);
r_max.x = fmaxf(pos.x, r_max.x);
r_max.y = fmaxf(pos.y, r_max.y);
r_max.z = fmaxf(pos.z, r_max.z);
}
i += gridSize;
}
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
__syncthreads();
// do reduction in shared mem
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if (tid < 32)
{
sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax);
}
// write result for this block to global mem
if (tid == 0)
{
//The compiler doesn't allow: volatile float3 = float3
output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z;
output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z;
}
}
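/*
   Sketch of the host-side final pass the kernel above implies: every block writes one partial bound
   into output_min/output_max, so after copying those arrays back the caller still has to fold them
   into a single bounding box. combine_block_bounds_sketch, n_blocks and the h_min/h_max buffers are
   placeholder names, not part of this code base.
*/
static void combine_block_bounds_sketch(const float3* h_min, const float3* h_max, int n_blocks,
                                        float3* r_min, float3* r_max)
{
	*r_min = make_float3(+1e10f, +1e10f, +1e10f);
	*r_max = make_float3(-1e10f, -1e10f, -1e10f);
	for (int b = 0; b < n_blocks; b++) {
		r_min->x = fminf(r_min->x, h_min[b].x);
		r_min->y = fminf(r_min->y, h_min[b].y);
		r_min->z = fminf(r_min->z, h_min[b].z);
		r_max->x = fmaxf(r_max->x, h_max[b].x);
		r_max->y = fmaxf(r_max->y, h_max[b].y);
		r_max->z = fmaxf(r_max->z, h_max[b].z);
	}
}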
//Get the domain size, taking the group size into account
extern "C" __global__ void boundaryReductionGroups(const int n_groups,
real4 *positions,
real4 *sizes,
float3 *output_min,
float3 *output_max)
{
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
//const uint idx = bid * blockDim.x + tid;
volatile __shared__ float3 shmem[512];
float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[256];
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
// perform first level of reduction,
// reading from global memory, writing to shared memory
const int blockSize = blockDim.x;
// unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
real4 pos;
real4 size;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridSize). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
//based on reduce6 example
while (i < n_groups) {
if (i < n_groups)
{
pos = positions[i];
size = sizes[i];
r_min.x = fminf(pos.x-size.x, r_min.x);
r_min.y = fminf(pos.y-size.y, r_min.y);
r_min.z = fminf(pos.z-size.z, r_min.z);
r_max.x = fmaxf(pos.x+size.x, r_max.x);
r_max.y = fmaxf(pos.y+size.y, r_max.y);
r_max.z = fmaxf(pos.z+size.z, r_max.z);
}
if (i + blockSize < n_groups)
{
pos = positions[i + blockSize];
size = sizes[i + blockSize];
r_min.x = fminf(pos.x-size.x, r_min.x);
r_min.y = fminf(pos.y-size.y, r_min.y);
r_min.z = fminf(pos.z-size.z, r_min.z);
r_max.x = fmaxf(pos.x+size.x, r_max.x);
r_max.y = fmaxf(pos.y+size.y, r_max.y);
r_max.z = fmaxf(pos.z+size.z, r_max.z);
}
i += gridSize;
}
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
__syncthreads();
// do reduction in shared mem
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if (tid < 32)
{
sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax);
sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax);
}
// write result for this block to global mem
if (tid == 0)
{
//The compiler doesn't allow: volatile float3 = float3
output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z;
output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z;
}
}
//#define EXACT_KEY
extern "C" __global__ void cl_build_key_list(uint4 *body_key,
real4 *body_pos,
int n_bodies,
real4 corner) {
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id >= n_bodies) return;
real4 pos = body_pos[id];
int4 crd;
real domain_fac = corner.w;
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
// crd.x = (int)((pos.x - corner.x) / domain_fac + 0.5);
// crd.y = (int)((pos.y - corner.y) / domain_fac + 0.5);
// crd.z = (int)((pos.z - corner.z) / domain_fac + 0.5);
// uint4 key = get_key(crd);
// if (id == n_bodies) key = (uint4){0xFFFFFFFF, 0xFFFFFFFF, 0, 0};
body_key[id] = get_key(crd);
}
#if 0
This might be useful to speed up group creation by
building only the first 10 bits of the key instead of a full key
extern "C" __global__ void build_phkey_list(uint2 *body_key,
real4 *body_pos,
int n_bodies,
real4 corner) {
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id > n_bodies) return;
real4 pos = body_pos[id];
int4 crd;
real domain_fac = corner.w;
//Get the integer position, will be used for the key calculation
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
const int bits = 18;
int i,xi, yi, zi;
int mask;
long key;
//0= 000, 1=001, 2=011, 3=010, 4=110, 5=111, 6=101, 7=100
//000=0=0, 001=1=1, 011=3=2, 010=2=3, 110=6=4, 111=7=5, 101=5=6, 100=4=7
const int C[8] = {0, 1, 7, 6, 3, 2, 4, 5};
int temp;
mask = 1 << (bits - 1);
key = 0;
for(i = 0; i < bits; i++, mask >>= 1)
{
xi = (crd.x & mask) ? 1 : 0;
yi = (crd.y & mask) ? 1 : 0;
zi = (crd.z & mask) ? 1 : 0;
if(xi == 0 && yi == 0 && zi == 0)
{
temp = crd.z; crd.z = crd.y; crd.y = temp;
}
else if(xi == 0 && yi == 0 && zi == 1)
{
temp = crd.x; crd.x = crd.y; crd.y = temp;
}
else if(xi == 1 && yi == 0 && zi == 1)
{
temp = crd.x; crd.x = crd.y; crd.y = temp;
}
else if(xi == 1 && yi == 0 && zi == 0)
{
crd.x = (crd.x) ^ (-1);
crd.z = (crd.z) ^ (-1);
}
else if(xi == 1 && yi == 1 && zi == 0)
{
crd.x = (crd.x) ^ (-1);
crd.z = (crd.z) ^ (-1);
}
else if(xi == 1 && yi == 1 && zi == 1)
{
temp = (crd.x) ^ (-1);
crd.x = (crd.y) ^ (-1);
crd.y = temp;
}
else if(xi == 0 && yi == 1 && zi == 1)
{
temp = (crd.x) ^ (-1);
crd.x = (crd.y) ^ (-1);
crd.y = temp;
}
else
{
temp = (crd.z) ^ (-1);
crd.z = (crd.y) ^ (-1);
crd.y = temp;
}
int index = (xi << 2) + (yi << 1) + zi;
key = (key << 3) + C[index];
}
uint2 key_new;
key_new.x = key & 0xFFFFFFFF;
key_new.y = (key >> 32) & 0xFFFFFFFF;
if (id == n_bodies) key_new = (uint2){0xFFFFFFFF, 0xFFFFFFFF};
body_key[id] = key_new;
}
#endif
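// Worked example (illustration only) of the octant -> curve-digit lookup used in
// the disabled kernel above: for octant bits xi=1, yi=0, zi=1 the table index is
// (1<<2) + (0<<1) + 1 = 5, and C[5] = 2 with the C[] array as written, so that
// octant contributes the digit 2 of the key at the current refinement level.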
extern "C" __global__ void cl_build_valid_list(int n_bodies,
int level,
uint4 *body_key,
uint *valid_list){
// uint2 *test_key_data) {
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint id = bid * blockDim.x + tid;
const uint4 key_F = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
const uint4 key_B = {0xFFFFFFF1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //A border, valid0 will become 1
const uint4 key_I = {0xFFFFFFF2, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Ignore
const uint4 key_E = {0xFFFFFFF3, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //End
const uint4 key_A = {0xFFFFFFF4, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Start and End
// const uint2 key_TEST = {0x0, 0x0}; //Start and End
  //TODO: clean this up if we don't use it
  if (id >= n_bodies) return;   // >= since the last particle is an extra boundary particle
uint4 mask = get_mask(level);
mask.x = mask.x | ((uint)1 << 30) | ((uint)1 << 31);
uint4 key_m;
uint4 key_c = body_key[id];
uint4 key_p;
if (id == 0)
{
key_m = key_F;
}
else
{
key_m = body_key[id-1];
}
if((id+1) < n_bodies) //The last particle gets a different key to compare with
{
key_p = body_key[id+1];
}
else
key_p = (uint4){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
int valid0 = 0;
int valid1 = 0;
if (cmp_uint4(key_c, key_A) == 0) {
valid0 = 1; //Set a border
valid1 = 1; //Set a border
}
else if (cmp_uint4(key_c, key_B) == 0) {
valid0 = 1; //Set a border
}
else if (cmp_uint4(key_c, key_E) == 0) {
valid1 = 1; //Set a border
}
else if (cmp_uint4(key_c, key_I) == 0) {
//Do nothing
}
else if (cmp_uint4(key_c, key_F) != 0) {
key_c.x = key_c.x & mask.x;
key_c.y = key_c.y & mask.y;
key_c.z = key_c.z & mask.z;
key_p.x = key_p.x & mask.x;
key_p.y = key_p.y & mask.y;
key_p.z = key_p.z & mask.z;
key_m.x = key_m.x & mask.x;
key_m.y = key_m.y & mask.y;
key_m.z = key_m.z & mask.z;
valid0 = abs(cmp_uint4(key_c, key_m));
valid1 = abs(cmp_uint4(key_c, key_p));
}
valid_list[id*2] = id | ((valid0) << 31);
valid_list[id*2+1] = id | ((valid1) << 31);
}
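// Illustration (not part of the original source) of how the validity flag is
// packed into the top bit of the particle index, as done just above:
//   uint packed = id | (valid << 31);      // flag in bit 31, id in bits 0..30
//   uint flag   = packed >> 31;            // recover the flag
//   uint index  = packed & 0x7FFFFFFFu;    // recover the particle id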
//////////////////////////////
//////////////////////////////
//////////////////////////////
extern "C" __global__ void cl_build_nodes(uint level,
uint compact_list_len,
uint offset,
uint *compact_list,
// uint *compact_list_end,
uint4 *bodies_key,
uint4 *node_key,
uint *n_children,
uint2 *node_bodies){
// uint *testValidList) {
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id >= compact_list_len) return;
uint bi = compact_list[id*2];
uint bj = compact_list[id*2+1] + 1;
uint4 key = bodies_key[bi];
uint4 mask = get_mask(level);
key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
node_bodies[offset+id] = (uint2){bi | (level << BITLEVELS), bj};
node_key [offset+id] = key;
n_children [offset+id] = 0;
if ((int)level > (int)(LEVEL_MIN - 1))
if (bj - bi <= NLEAF) //Leaf can only have NLEAF particles, if its more there will be a split
for (int i = bi; i < bj; i++)
bodies_key[i] = (uint4){0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; //sets the key to FF to indicate the body is used
}
//////////////////////////////
//////////////////////////////
//////////////////////////////
extern "C" __global__ void cl_link_tree(int n_nodes,
uint *n_children,
uint2 *node_bodies,
real4 *bodies_pos,
real4 corner,
                                        uint2 *level_list,    //TODO could make this constant if it proves useful
// uint* parent_id_list,
uint* valid_list,
uint4 *node_keys,
uint4 *bodies_key,
int maxLevel) {
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id >= n_nodes) return;
uint2 bij = node_bodies[id];
uint level = (bij.x & LEVELMASK) >> BITLEVELS;
uint bi = bij.x & ILEVELMASK;
uint bj = bij.y;
real4 pos = bodies_pos[bi];
int4 crd;
real domain_fac = corner.w;
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
uint4 key = get_key(crd);
/********* accumulate children *****/
uint4 mask = get_mask(level - 1);
key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
uint2 cij;
if(id > 0)
cij = level_list[level-1];
int ci;
  //Jeroen: modified this since we don't use textures in find_key;
  //the function would fail with an out-of-bounds memory access when id==0
if(id > 0)
ci = find_key(key, cij, node_keys);
else
ci = 0;
  //ci now points to the parent node; this was used in the previous group method
// parent_id_list[id] = ci;
mask = get_imask(mask);
key = (uint4) {key.x | mask.x, key.y | mask.y, key.z | mask.z, 0 };
if (id > 0)
atomicAdd(&n_children[ci], (1 << 28));
key = get_key(crd);
mask = get_mask(level);
key = (uint4) {key.x & mask.x, key.y & mask.y, key.z & mask.z, 0};
/********* store the 1st child *****/
cij = level_list[level+1];
int cj = -1;
cj = find_key(key, cij, node_keys);
atomicOr(&n_children[id], cj); //Atomic since multiple threads can work on this
uint valid = id | (uint)(0 << 31);
if ((int)level > (int)(LEVEL_MIN - 1))
if ((bj - bi) <= NLEAF)
valid = id | (uint)(1 << 31); //Distinguish leaves and nodes
valid_list[id] = valid;
}
//Determines which level of node starts at which offset
extern "C" __global__ void build_level_list(const int n_nodes,
const int n_leafs,
uint *leafsIdxs,
uint2 *node_bodies,
uint* valid_list){
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint id = bid * blockDim.x + tid;
if (id >= n_nodes-n_leafs) return;
const int nodeID = leafsIdxs[id+n_leafs]; //Get the idx into the node_bodies array
int level_c, level_m, level_p;
uint2 bij = node_bodies[leafsIdxs[id+n_leafs]]; //current non-leaf
level_c = (bij.x & LEVELMASK) >> BITLEVELS;
if((id+1) < (n_nodes-n_leafs)){
    //The last node gets a default level (see the else branch below)
bij = node_bodies[leafsIdxs[id+1+n_leafs]]; //next non-leaf
level_p = (bij.x & LEVELMASK) >> BITLEVELS;
}
else{
//Last is always an end
level_p = MAXLEVELS+5;
}
//Compare level with the node before and node after
if(nodeID == 0)
{
level_m = -1;
}
else
{
bij = node_bodies[ leafsIdxs[id-1+n_leafs]]; //Get info of previous non-leaf node
level_m = (bij.x & LEVELMASK) >> BITLEVELS;
}
int valid0 = 0;
int valid1 = 0;
valid0 = (level_c != level_m) << 31 | (id+n_leafs);
valid1 = (level_c != level_p) << 31 | (id+n_leafs);
valid_list[id*2] = valid0;
valid_list[id*2+1] = valid1;
} //end build_level_list
//Finds nodes/leaves that will become groups.
//After execution, valid_list contains the
//valid nodes/leaves that form groups.
extern "C" __global__ void build_group_list(int n_nodes,
uint* parent_id_list,
uint2 *node_bodies,
uint* valid_list)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id >= n_nodes) return;
uint2 bij = node_bodies[id];
int ownChildren = bij.y - (bij.x & ILEVELMASK);
bij = node_bodies[parent_id_list[id]];
int parentChildren = bij.y - (bij.x & ILEVELMASK);
//group if nchild <= NCRIT AND parent_nchild > NCRIT
//if((ownChildren <= NCRIT) && (parentChildren > NCRIT))
if((ownChildren <= NCRIT) && (parentChildren > NCRIT))
valid_list[id] = id | (uint)(1 << 31); //Group
else
valid_list[id] = id | (0 << 31); //Not a group
}
//Finds nodes/leaves that will become groups.
//After execution, valid_list contains the
//valid nodes/leaves that form groups.
extern "C" __global__ void build_group_list2(int n_particles,
uint *validList,
real4 *bodies_pos,
const float DIST)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint idx = bid * blockDim.x + tid;
  //TODO: use shared memory for the positions
  //since we use them multiple times?
  //Note that we do not include the final particle
  //since there is no reason to check it
if (idx >= n_particles) return;
//Get the current
float4 curPos, nexPos, prevPos;
curPos = bodies_pos[idx];
//Have to check the first and last to prevent out of bound access
if(idx+1 == n_particles)
nexPos = curPos;
else
nexPos = bodies_pos[idx+1];
if(idx == 0)
prevPos = curPos;
else
prevPos = bodies_pos[idx-1];
//Compute geometrical distance
float dsPlus = ((curPos.x-nexPos.x)*(curPos.x-nexPos.x)) +
((curPos.y-nexPos.y)*(curPos.y-nexPos.y)) +
((curPos.z-nexPos.z)*(curPos.z-nexPos.z));
float dsMin = ((curPos.x-prevPos.x)*(curPos.x-prevPos.x)) +
((curPos.y-prevPos.y)*(curPos.y-prevPos.y)) +
((curPos.z-prevPos.z)*(curPos.z-prevPos.z));
//Multiples of the preferred group size are _always_ valid
int validStart = ((idx % NCRIT) == 0);
int validEnd = (((idx+1) % NCRIT) == 0);
// const int DIST = 1;
// const float DIST = 44;
//The extra possible split(s) if the distance between two particles is too large
if(dsPlus > DIST) validEnd = 1;
if(dsMin > DIST) validStart = 1;
  //The last particle is always an end; n_particles doesn't have to be a multiple of NCRIT,
  //so this is required
if(idx+1 == n_particles) validEnd = 1;
//Set valid
validList[2*idx + 0] = (idx) | (uint)(validStart << 31);
validList[2*idx + 1] = (idx+1) | (uint)(validEnd << 31);
}
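// CPU reference of the start-splitting rule above (a sketch, for illustration
// only): a group boundary is placed at every NCRIT-th particle, and an extra
// boundary is inserted wherever the squared distance between neighbouring
// particles along the curve order exceeds DIST.
static inline bool startsNewGroup(int idx, float ds_to_prev, int ncrit, float dist)
{
  return (idx % ncrit) == 0 || ds_to_prev > dist;
}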
extern "C" __global__ void store_group_list(int n_groups,
uint *validList,
uint *body2group_list,
uint2 *group_list)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
// uint idx = bid * blockDim.x + tid;
if(bid >= n_groups) return;
int start = validList[2*bid];
int end = validList[2*bid+1];
if((start + tid) <= end)
{
body2group_list[start + tid] = bid;
}
if(tid == 0)
{
group_list[bid] = (uint2){start,end};
}
}
extern "C" __global__ void expandLeafList(int n_leafs,
uint *leaf2NodeIdx,
uint2 *node_bodies,
uint *leafPart2Body)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint idx = bid * blockDim.x + tid;
if(bid >= n_leafs) return;
uint2 bij = node_bodies[leaf2NodeIdx[bid]];
uint bi = bij.x & ILEVELMASK;
uint bj = bij.y;
//Write the particle id at the correct location, only if we are
//below the end particle id
if(bi+tid < bj)
{
leafPart2Body[idx] = idx;
}
}
//Assign a grp id to each particle of that grp to
//create particle -> group relation using the
//group -> particle relation
extern "C" __global__ void build_body2group_list(const int n_groups,
uint *group_list,
uint2 *node_bodies,
uint *body2group_list)
{
const int bid = gridDim.x * blockIdx.y + blockIdx.x;
const int tid = threadIdx.x;
if (bid >= n_groups) return;
const int nodeID = group_list[bid];
uint2 bij = node_bodies[nodeID];
const uint firstChild = bij.x & ILEVELMASK;
const uint nChildren = bij.y - (bij.x & ILEVELMASK);
int idx = firstChild+tid;
//Save the group id for this particle
if (tid < nChildren)
body2group_list[idx] = bid;
}
#if 1
//Finds nodes/leaves that will become groups.
//After execution, valid_list contains the
//valid nodes/leaves that form groups.
extern "C" __global__ void build_group_list_new(int n_particles,
uint *validList)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint idx = bid * blockDim.x + tid;
  //TODO: use shared memory for the positions
  //since we use them multiple times?
  //Note that we do not include the final particle
  //since there is no reason to check it
if (idx >= n_particles) return;
//Multiples of the preferred group size are _always_ valid
int validStart = ((idx % NCRIT) == 0);
int validEnd = (((idx+1) % NCRIT) == 0);
  //The last particle is always an end; n_particles doesn't have to be a multiple of NCRIT,
  //so this is required
if(idx+1 == n_particles) validEnd = 1;
//Set valid
if(validStart)
validList[2*idx + 0] = (idx) | (uint)(validStart << 31);
if(validEnd)
validList[2*idx + 1] = (idx) | (uint)(validEnd << 31);
}
#endif
#include <stdio.h>
#include <math.h>
#include "common.h"
///////////////////////////////////////////////////////////////
// Construct the particle boundary box
// Setting particle normals require the explicit construction
///////////////////////////////////////////////////////////////
void constructBoundaryBox(boundary_particle *boundary_particles, AABB* boundary, param *params)
{
double spacing = params->spacing_particle;
// Create boundary particles with spacing h
int num_x = ceil((boundary->max_x - boundary->min_x)/spacing);
int num_y = ceil((boundary->max_y - boundary->min_y)/spacing);
int num_z = ceil((boundary->max_z - boundary->min_z)/spacing);
double min_x = boundary->min_x;
double min_y = boundary->min_y;
double min_z = boundary->min_z;
double max_x = min_x + (num_x-1)*spacing;
double max_y = min_y + (num_y-1)*spacing;
double max_z = min_z + (num_z-1)*spacing;
boundary->max_x = max_x;
boundary->max_y = max_y;
boundary->max_z = max_z;
int i,nx,ny,nz;
double recip_root_three = 1.0/sqrt(3.0);
double recip_root_two = 1.0/sqrt(2.0);
i = 0;
// Corner front bottom left
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = recip_root_three;
boundary_particles[i].n.y = -recip_root_three;
boundary_particles[i].n.z = recip_root_three;
i++;
// Corner front bottom right
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = -recip_root_three;
boundary_particles[i].n.y = -recip_root_three;
boundary_particles[i].n.z = recip_root_three;
i++;
// Corner front top left
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = recip_root_three;
boundary_particles[i].n.y = -recip_root_three;
boundary_particles[i].n.z = -recip_root_three;
i++;
// Corner front top right
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = -recip_root_three;
boundary_particles[i].n.y = -recip_root_three;
boundary_particles[i].n.z = -recip_root_three;
i++;
// Corner back bottom left
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = recip_root_three;
boundary_particles[i].n.y = recip_root_three;
boundary_particles[i].n.z = recip_root_three;
i++;
// Corner back bottom right
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = -recip_root_three;
boundary_particles[i].n.y = recip_root_three;
boundary_particles[i].n.z = recip_root_three;
i++;
// Corner back top left
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = recip_root_three;
boundary_particles[i].n.y = recip_root_three;
boundary_particles[i].n.z = -recip_root_three;
i++;
// Corner back top right
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = -recip_root_three;
boundary_particles[i].n.y = recip_root_three;
boundary_particles[i].n.z = -recip_root_three;
i++;
for (nx=0; nx<num_x-2; nx++) {
// Bottom right row
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = -recip_root_two;
boundary_particles[i].n.z = recip_root_two;
i++;
// Top right row
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = -recip_root_two;
boundary_particles[i].n.z = -recip_root_two;
i++;
// Bottom left row
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = recip_root_two;
boundary_particles[i].n.z = recip_root_two;
i++;
// Top left row
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = recip_root_two;
boundary_particles[i].n.z = -recip_root_two;
i++;
}
for (ny=0; ny<num_y-2; ny++) {
// Bottom front row
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = -recip_root_two;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = recip_root_two;
i++;
// Top front row
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = -recip_root_two;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = -recip_root_two;
i++;
// Bottom back row
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = recip_root_two;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = recip_root_two;
i++;
// Top back row
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = recip_root_two;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = -recip_root_two;
i++;
for (nx=0; nx<num_x-2; nx++) {
// Top face
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = max_z;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = -1.0;
i++;
// Bottom face
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = min_z;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = 1.0;
i++;
}
}
for (nz=0; nz<num_z-2; nz++) {
// left front column
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = recip_root_two;
boundary_particles[i].n.y = -recip_root_two;
boundary_particles[i].n.z = 0.0;
i++;
// right front column
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = -recip_root_two;
boundary_particles[i].n.y = -recip_root_two;
boundary_particles[i].n.z = 0.0;
i++;
// left back column
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = recip_root_two;
boundary_particles[i].n.y = recip_root_two;
boundary_particles[i].n.z = 0.0;
i++;
// right back column
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = -recip_root_two;
boundary_particles[i].n.y = recip_root_two;
boundary_particles[i].n.z = 0.0;
i++;
for (nx=0; nx<num_x-2; nx++) {
// Front face
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = max_y;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = -1.0;
boundary_particles[i].n.z = 0.0;
i++;
// Back face
boundary_particles[i].pos.x = min_x + spacing + nx*spacing;
boundary_particles[i].pos.y = min_y;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = 0.0;
boundary_particles[i].n.y = 1.0;
boundary_particles[i].n.z = 0.0;
i++;
}
for (ny=0; ny<num_y-2; ny++) {
// Left face
boundary_particles[i].pos.x = min_x;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = 1.0;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = 0.0;
i++;
// Right face
boundary_particles[i].pos.x = max_x;
boundary_particles[i].pos.y = min_y + spacing + ny*spacing;
boundary_particles[i].pos.z = min_z + spacing + nz*spacing;
boundary_particles[i].n.x = -1.0;
boundary_particles[i].n.y = 0.0;
boundary_particles[i].n.z = 0.0;
i++;
}
}
params->number_boundary_particles = i;
params->number_particles = params->number_fluid_particles + params->number_boundary_particles;
}
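// Sizing sketch (an assumption, not part of the original source): the caller
// must allocate enough boundary particles before calling constructBoundaryBox().
// With num_x, num_y, num_z lattice points per axis (as computed from the AABB
// and spacing above), the function emits exactly the surface points of the
// lattice, i.e. corners + edges + faces:
//   count = 2*(num_x*num_y + num_y*num_z + num_z*num_x)
//           - 4*(num_x + num_y + num_z) + 8
static int boundaryParticleCount(int num_x, int num_y, int num_z)
{
    return 2*(num_x*num_y + num_y*num_z + num_z*num_x)
           - 4*(num_x + num_y + num_z) + 8;
}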
#define DATA_TYPE float
//#define DATA_TYPE __half
namespace std {
template <typename _CharT, typename _Traits>
inline basic_ostream<_CharT, _Traits> &
tab(basic_ostream<_CharT, _Traits> &__os) {
return __os.put(__os.widen('\t'));
}
}
void print_usage(){
//b:s:q:m:e:g:j:i:p:r:w:t:h
std::cout<<"runTest -m mode -b batch_size -s seq_len -g gpu_id -e gemm_file -j json_file -i input_file -p parameter_file -r result_file" <<std::endl;
std::cout<<"# Two important input parameters:"<<std::endl;
std::cout<<std::tab<<"-b(--batch) : batch_size(INT).Batchsize for inferencei. Default:8"<<std::endl;
std::cout<<std::tab<<"-s(--seq) : seq_len(INT). Length of input sequence. Default:128"<<std::endl;
std::cout<<std::endl<<"# Model Parameters:"<<std::endl;
std::cout<<std::tab<<"-q(--epsilon) : epsilon(FLOAT). Small float added to variance to avoid dividing by zero.Default:0"<<std::endl;
std::cout<<std::tab<<"-g(--gpu) : gpu id (INT). Id of GPU card Default: 0"<<std::endl;
std::cout<<std::tab<<"-e(--gemm) : gemm_file(STRING). Filename of the gemm function configuraton file."<<std::endl;
std::cout<<std::tab<<"-j(--json) : json_file(STRING). Filename of the json configuraton file."<<std::endl;
std::cout<<std::endl<<"# Run this code in different mode "<<std::endl;
std::cout<<std::tab<<"-m(--mode) : mode(INT{0,1,2,3}). "<<std::endl;
std::cout<<std::tab<<std::tab<<"0 (FP16_TIME_TEST): Run perf test in fp16 mode "<<std::endl;
std::cout<<std::tab<<std::tab<<"1 (FP16_CORRECTNESS_TEST): Run correctness test in fp16 mode "<<std::endl;
std::cout<<std::tab<<std::tab<<"2 (FP32_TIME_TEST): Run perf test in fp32 mode "<<std::endl;
std::cout<<std::tab<<std::tab<<"3 (FP32_CORRECTNESS_TEST): Run correctness test in fp32 mode "<<std::endl;
std::cout<<std::endl<<"## VERIFICATION mode:"<<std::endl;
std::cout<<std::tab<<"-i(--input) : input_file(STRING). Filename of input."<<std::endl;
std::cout<<std::tab<<"-p(--para) : parameter_file(STRING). Filename of input."<<std::endl;
std::cout<<std::tab<<"-r(--result) : result_file(STRING). Filename of the result file generated from tensorflow."<<std::endl;
std::cout<<std::endl<<"## TIMING mode:"<<std::endl;
std::cout<<std::tab<<"-w(--warm) : warm_up_ite(INT). Warm up iterations. Default: 100"<<std::endl;
std::cout<<std::tab<<"-t(--ite) : profile_ite(INT). Profile iterations. Default: 200"<<std::endl;
std::cout<<"Example for correctness check: runTest -m 0 -b 8 -s 128 -g 0 -e gemm.in -j json.xml -i data.npz -p para.npz -r output.npz"<<std::endl;
std::cout<<"Example for timing: runTest -m 1 -b 8 -g 0 -e gemm.in -j json.xml"<<std::endl;
}
struct option opts[] = {
{"batch", 1, NULL, 'b'},
{"seq", 1, NULL, 's'},
{"epsilon", 1, NULL, 'q'},
{"gpu", 1, NULL, 'g'},
{"gemm", 1, NULL, 'e'},
{"json", 1, NULL, 'j'},
{"mode", 1, NULL, 'm'},
{"input", 1, NULL, 'i'},
{"para", 1, NULL, 'p'},
{"result", 1, NULL, 'r'},
{"warm", 1, NULL, 'w'},
{"ite", 1, NULL, 't'},
{"help", 0, NULL, 'h'}
};
template<typename T>
void layerRes(cudaStream_t stream, cublasHandle_t cublas_handle,
int num_layers, int batch_size, int seq_len,
int head_num, int size_per_head, int hidden_dim,
int hidden_dim_ff,int num_token,float epsilon,
std::string input_file,std::string para_file,
std::string gemm_file, std::string output_file, bool use_float16){
//Load Host Input Data
InputDataHost input_data_host(batch_size,seq_len);
input_data_host.fillInputData(input_file);
//Load Host Weight Data
PreWeightHost<T> pre_weight_host(hidden_dim,num_token);
pre_weight_host.fillPreWeight(para_file);
std::vector<LayerWeightHost<T> > arr_layer_weight_host(num_layers,
LayerWeightHost<T>(hidden_dim,hidden_dim_ff));
for(int i=0;i<num_layers;i++){
arr_layer_weight_host[i].fillLayerWeight(i,para_file);
}
//Construct Debug Class
XlnetDebug<T> xlnet_debug(stream,cublas_handle,num_layers,batch_size,seq_len,
head_num,size_per_head,hidden_dim,hidden_dim_ff,num_token,epsilon,
pre_weight_host,arr_layer_weight_host,gemm_file);
//Run Debug Class
bool ifCorrect=xlnet_debug.verifyLayerRes(input_data_host,output_file);
if(ifCorrect==1){
std::cout<<"Result Correct"<<std::endl;
}else{
std::cout<<"Result Wrong"<<std::endl;
}
}
template<typename T>
void profile(cudaStream_t stream, cublasHandle_t cublas_handle,
int num_layers, int batch_size, int seq_len, int head_num, int size_per_head,
int hidden_dim,int hidden_dim_ff,int num_token, float epsilon,
std::string input_file,std::string para_file,
std::string gemm_file,int warm_up_ite, int profile_ite){
//Construct Debug Class
InputDataHost input_data_host(batch_size,seq_len);
PreWeightHost<T> pre_weight_host(hidden_dim, num_token);
std::vector<LayerWeightHost<T> > arr_layer_weight_host(num_layers,
LayerWeightHost<T>(hidden_dim,hidden_dim_ff));
XlnetDebug<T> xlnet_debug(stream,cublas_handle,num_layers,batch_size,seq_len,
head_num,size_per_head,hidden_dim,hidden_dim_ff,num_token, epsilon,
pre_weight_host,arr_layer_weight_host,gemm_file);
//Run Debug Class
float run_time=xlnet_debug.profileOneLayer(warm_up_ite, profile_ite);
std::cout<<"RUN_TIME: batch_size= "<<batch_size<<" seq_len= "<<seq_len<<" run_time= "<<run_time<<" MS"<<std::endl<<std::endl;
}
void readJson(std::string json_file, int &d_head, int &d_inner, int &d_model, int& n_head, int &n_layer, int& n_token){
std::string item_list[6]={"d_head", "d_inner","d_model", "n_head", "n_layer", "n_token"};
std::ifstream json_stream;
json_stream.open(json_file.c_str());
std::string line;
while(std::getline(json_stream, line))
{
int n=line.length();
//std::cout<<line<<std::endl;
char sentence[n+1];
strcpy(sentence,line.c_str());
char str[20];
int value=0;
sscanf(sentence, "%s %d,", str, &value);
if(strstr(str, item_list[0].c_str())){
d_head=value;
}else if(strstr(str, item_list[1].c_str())){
d_inner=value;
}else if(strstr(str, item_list[2].c_str())){
d_model=value;
}else if(strstr(str, item_list[3].c_str())){
n_head=value;
}else if(strstr(str, item_list[4].c_str())){
n_layer=value;
}else if(strstr(str, item_list[5].c_str())){
n_token=value;
}
}
}
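// Example (an assumption about the expected file format, not taken from the
// original source): readJson() does a line-by-line sscanf("%s %d,") and then a
// substring match, so a config file with one "name value," pair per line is
// parsed correctly, e.g. the relevant lines of an XLNet-style json config:
//   "d_head": 64,
//   "d_inner": 3072,
//   "d_model": 768,
//   "n_head": 12,
//   "n_layer": 12,
//   "n_token": 32000,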
int main(int argc, char* argv[]) {
//Process input data
int batch_size =0;
int num_layers =12;
int seq_len =128;
int head_num =12;
int num_token=32000;
int size_per_head =64;
int hidden_dim=head_num*size_per_head;
int hidden_dim_ff = 3072;
float epsilon =0.0f;
RUN_MODE mode=FP32_TIME_TEST;
int gpu_id=0;
int warm_up_ite=100;
int profile_ite=100;
std::string input_file="";
std::string para_file="";
std::string gemm_file="./gemm.in";
std::string result_file="";
std::string json_file="";
const char *opt_string = "b:s:q:m:e:g:j:i:p:r:w:t:h";
int option=1;
while((option = getopt_long(argc, argv,opt_string,opts,NULL)) != -1){
switch (option) {
case 'b' :
batch_size = atoi(optarg);
break;
case 's' :
seq_len = atoi(optarg);
break;
case 'q' :
epsilon = atof(optarg);
break;
case 'g' :
gpu_id = atoi(optarg);
break;
case 'm' :
mode =(RUN_MODE) atoi(optarg);
break;
case 'e' :
gemm_file =optarg;
break;
case 'j' :
json_file =optarg;
break;
case 'i' :
input_file =optarg;
break;
case 'p' :
para_file =optarg;
break;
case 'r' :
result_file =optarg;
break;
case 'w' :
warm_up_ite = atoi(optarg);
break;
case 't' :
profile_ite = atoi(optarg);
break;
case 'h' :
print_usage();
exit(EXIT_FAILURE);
default:
print_usage();
exit(EXIT_FAILURE);
}
}
if(batch_size==0){
print_usage();
exit(EXIT_FAILURE);
}
readJson(json_file, size_per_head,hidden_dim_ff, hidden_dim, head_num, num_layers, num_token);
std::cout<<"Read Json file, got the meta parameters:"<<std::endl;
std::cout<<"batch_size="<<batch_size<<", seq_len="<<seq_len<<std::endl;
std::cout<<"size_per_head(d_head)= "<<size_per_head<<std::endl;
std::cout<<"hidden_dim_ff(d_inner)="<<hidden_dim_ff<<std::endl;
std::cout<<"hidden_dim(d_model)="<<hidden_dim<<std::endl;
std::cout<<"head_num(n_head)="<<head_num<<std::endl;
std::cout<<"num_layers(n_layer)="<<num_layers<<std::endl;
std::cout<<"num_token(n_token)="<<num_token<<std::endl<<std::endl;
//Prepare device environment
cudaSetDevice(gpu_id);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, gpu_id);
printf("Using Device %s\n\n", prop.name);
cudaStream_t stream;
cudaStreamCreate(&stream);
cublasHandle_t cublas_handle;
cublasCreate(&cublas_handle);
cublasSetStream(cublas_handle, stream);
switch(mode){
case FP16_TIME_TEST:
std::cout<<"Run in mode FP16_TIME_TEST: gemm_file="<<gemm_file<<std::endl;
profile<__half>(stream,cublas_handle,
num_layers,batch_size,seq_len,head_num,size_per_head,hidden_dim,hidden_dim_ff,num_token,epsilon,
input_file,para_file,gemm_file,warm_up_ite, profile_ite);
break;
case FP16_CORRECTNESS_TEST:
std::cout<<"Run in mode FP16_CORRECTNESS_TEST: gemm_file="<<gemm_file<<std::endl;
layerRes<__half>(stream,cublas_handle,
num_layers,batch_size,seq_len,head_num,size_per_head,hidden_dim,hidden_dim_ff,num_token, epsilon,
input_file,para_file,gemm_file,result_file, true);
break;
case FP32_TIME_TEST:
std::cout<<"Run in mode FP32_TIME_TEST: gemm_file="<<gemm_file<<std::endl;
profile<float>(stream,cublas_handle,
num_layers,batch_size,seq_len,head_num,size_per_head,hidden_dim,hidden_dim_ff,num_token,epsilon,
input_file,para_file,gemm_file,warm_up_ite, profile_ite);
break;
case FP32_CORRECTNESS_TEST:
std::cout<<"Run in mode FP32_CORRECTNESS_TEST gemm_file="<<gemm_file<<std::endl;
layerRes<float>(stream,cublas_handle,
num_layers,batch_size,seq_len,head_num,size_per_head,hidden_dim,hidden_dim_ff,num_token, epsilon,
input_file,para_file,gemm_file,result_file,false);
break;
}
return 0;
}
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
__device__ __constant__ float4 reference_node_coordinates[d_max_num_nodes];
	/* This kernel performs skinning of both vertices and nodes given the
	 * node and vertex coordinates; vertex.w cannot be used.
	 */
__global__ void skinningVertexAndNodeBruteForceKernel(
const DeviceArrayView<float4> vertex_confid_array,
const int node_num,
ushort4* vertex_knn_array, float4* vertex_knn_weight,
ushort4* node_knn_array, float4* node_knn_weight
) {
		// Out-of-bound check: both the vertex and node KNN are updated by this kernel
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= vertex_confid_array.Size() + node_num) return;
// Load the vertex from memory
float4 vertex;
if (idx < vertex_confid_array.Size()) {
const float4 vertex_confid = vertex_confid_array[idx];
vertex = make_float4(vertex_confid.x, vertex_confid.y, vertex_confid.z, 1.0);
}
else if (idx >= vertex_confid_array.Size() && idx < vertex_confid_array.Size() + node_num) {
const int offset = idx - vertex_confid_array.Size();
const float4 node = reference_node_coordinates[offset];
vertex = make_float4(node.x, node.y, node.z, 1.0);
}
//Keep priority queue using heap
float4 distance = make_float4(1e6f, 1e6f, 1e6f, 1e6f);
ushort4 node_idx = make_ushort4(0, 0, 0, 0);
		//Brute force
bruteForceSearch4Padded(vertex, reference_node_coordinates, node_num, distance, node_idx);
		//The fourth component of vertex might be the confidence
const float3 v = make_float3(vertex.x, vertex.y, vertex.z);
//Compute the knn weight given knn
const float4 node0_v4 = reference_node_coordinates[node_idx.x];
const float3 node0_v = make_float3(node0_v4.x, node0_v4.y, node0_v4.z);
const float vn_dist0 = squared_norm(v - node0_v);
const float4 node1_v4 = reference_node_coordinates[node_idx.y];
const float3 node1_v = make_float3(node1_v4.x, node1_v4.y, node1_v4.z);
const float vn_dist1 = squared_norm(v - node1_v);
const float4 node2_v4 = reference_node_coordinates[node_idx.z];
const float3 node2_v = make_float3(node2_v4.x, node2_v4.y, node2_v4.z);
const float vn_dist2 = squared_norm(v - node2_v);
const float4 node3_v4 = reference_node_coordinates[node_idx.w];
const float3 node3_v = make_float3(node3_v4.x, node3_v4.y, node3_v4.z);
const float vn_dist3 = squared_norm(v - node3_v);
// Compute the weight of this node
float4 weight;
weight.x = __expf(-vn_dist0 / (2 * d_node_radius_square));
weight.y = __expf(-vn_dist1 / (2 * d_node_radius_square));
weight.z = __expf(-vn_dist2 / (2 * d_node_radius_square));
weight.w = __expf(-vn_dist3 / (2 * d_node_radius_square));
#if defined(USE_INTERPOLATE_WEIGHT_NORMALIZATION) //Do a normalization on the weights?
const float inv_weight_sum = 1.0f / fabsf_sum(weight);
weight.x *= inv_weight_sum;
weight.y *= inv_weight_sum;
weight.z *= inv_weight_sum;
weight.w *= inv_weight_sum;
#endif
// Store the result to global memory
if (idx < vertex_confid_array.Size())
{
vertex_knn_array[idx] = node_idx;
vertex_knn_weight[idx] = weight;
}
else if (idx >= vertex_confid_array.Size()
&& idx < vertex_confid_array.Size() + node_num)
{
const int offset = idx - vertex_confid_array.Size();
node_knn_array[offset] = node_idx;
node_knn_weight[offset] = weight;
}
}
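	// Reference sketch (an assumption, mirrors the weight formula used above):
	// each of the 4 nearest nodes gets a Gaussian weight
	//   w_i = exp(-||v - n_i||^2 / (2 * r^2)),
	// where r^2 is d_node_radius_square, optionally normalized over the 4 weights.
	__host__ __device__ __forceinline__ float skinningWeight(
		const float3& v, const float3& n, float node_radius_square
	) {
		const float dx = v.x - n.x, dy = v.y - n.y, dz = v.z - n.z;
		const float d2 = dx * dx + dy * dy + dz * dz;
		return expf(-d2 / (2.0f * node_radius_square));
	}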
__global__ void updateVertexNodeKnnWeightKernel(
const DeviceArrayView<float4> vertex_confid_array,
ushort4* vertex_knn_array, float4* vertex_knn_weight,
DeviceArraySlice<ushort4> node_knn_array, float4* node_knn_weight,
// The offset and number of added nodes
const int node_offset, const int padded_node_num
) {
		// Out-of-bound check: both the vertex and node KNN are updated by this kernel
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= vertex_confid_array.Size() + node_knn_array.Size()) return;
		// Collect information from global memory
float3 v;
ushort4 knn;
if (idx < vertex_confid_array.Size()) {
const float4 vertex_confid = vertex_confid_array[idx];
v = make_float3(vertex_confid.x, vertex_confid.y, vertex_confid.z);
knn = vertex_knn_array[idx];
}
else if (idx >= vertex_confid_array.Size() && idx < vertex_confid_array.Size() + node_knn_array.Size()) {
const auto offset = idx - vertex_confid_array.Size();
const float4 node = reference_node_coordinates[offset];
v = make_float3(node.x, node.y, node.z);
knn = node_knn_array[offset];
}
else {
return;
}
// load knn for each thread
const ushort4 knn_prev = knn;
float4 n0 = reference_node_coordinates[knn.x];
float tmp0 = v.x - n0.x;
float tmp1 = v.y - n0.y;
float tmp2 = v.z - n0.z;
float4 n1 = reference_node_coordinates[knn.y];
float tmp6 = v.x - n1.x;
float tmp7 = v.y - n1.y;
float tmp8 = v.z - n1.z;
float4 n2 = reference_node_coordinates[knn.z];
float tmp12 = v.x - n2.x;
float tmp13 = v.y - n2.y;
float tmp14 = v.z - n2.z;
float4 n3 = reference_node_coordinates[knn.w];
float tmp18 = v.x - n3.x;
float tmp19 = v.y - n3.y;
float tmp20 = v.z - n3.z;
float tmp3 = __fmul_rn(tmp0, tmp0);
float tmp9 = __fmul_rn(tmp6, tmp6);
float tmp15 = __fmul_rn(tmp12, tmp12);
float tmp21 = __fmul_rn(tmp18, tmp18);
float tmp4 = __fmaf_rn(tmp1, tmp1, tmp3);
float tmp10 = __fmaf_rn(tmp7, tmp7, tmp9);
float tmp16 = __fmaf_rn(tmp13, tmp13, tmp15);
float tmp22 = __fmaf_rn(tmp19, tmp19, tmp21);
float tmp5 = __fmaf_rn(tmp2, tmp2, tmp4);
float tmp11 = __fmaf_rn(tmp8, tmp8, tmp10);
float tmp17 = __fmaf_rn(tmp14, tmp14, tmp16);
float tmp23 = __fmaf_rn(tmp20, tmp20, tmp22);
//keep priority queue using heap
float4 distance = make_float4(tmp5, tmp11, tmp17, tmp23);
KnnHeapDevice heap(distance, knn);
//The update loop
for (auto k = node_offset; k < padded_node_num + node_offset; k += 4) {
n0 = reference_node_coordinates[k + 0];
tmp0 = v.x - n0.x;
tmp1 = v.y - n0.y;
tmp2 = v.z - n0.z;
n1 = reference_node_coordinates[k + 1];
tmp6 = v.x - n1.x;
tmp7 = v.y - n1.y;
tmp8 = v.z - n1.z;
n2 = reference_node_coordinates[k + 2];
tmp12 = v.x - n2.x;
tmp13 = v.y - n2.y;
tmp14 = v.z - n2.z;
n3 = reference_node_coordinates[k + 3];
tmp18 = v.x - n3.x;
tmp19 = v.y - n3.y;
tmp20 = v.z - n3.z;
tmp3 = __fmul_rn(tmp0, tmp0);
tmp9 = __fmul_rn(tmp6, tmp6);
tmp15 = __fmul_rn(tmp12, tmp12);
tmp21 = __fmul_rn(tmp18, tmp18);
tmp4 = __fmaf_rn(tmp1, tmp1, tmp3);
tmp10 = __fmaf_rn(tmp7, tmp7, tmp9);
tmp16 = __fmaf_rn(tmp13, tmp13, tmp15);
tmp22 = __fmaf_rn(tmp19, tmp19, tmp21);
tmp5 = __fmaf_rn(tmp2, tmp2, tmp4);
tmp11 = __fmaf_rn(tmp8, tmp8, tmp10);
tmp17 = __fmaf_rn(tmp14, tmp14, tmp16);
tmp23 = __fmaf_rn(tmp20, tmp20, tmp22);
//Update it
heap.update(k + 0, tmp5);
heap.update(k + 1, tmp11);
heap.update(k + 2, tmp17);
heap.update(k + 3, tmp23);
}//End of the update loop
// If the knn doesn't change
if (knn.x == knn_prev.x && knn.y == knn_prev.y && knn.z == knn_prev.z && knn.w == knn_prev.w) return;
// If changed, update the weight
const float4 node0_v4 = reference_node_coordinates[knn.x];
const float3 node0_v = make_float3(node0_v4.x, node0_v4.y, node0_v4.z);
const float vn_dist0 = squared_norm(v - node0_v);
const float4 node1_v4 = reference_node_coordinates[knn.y];
const float3 node1_v = make_float3(node1_v4.x, node1_v4.y, node1_v4.z);
const float vn_dist1 = squared_norm(v - node1_v);
const float4 node2_v4 = reference_node_coordinates[knn.z];
const float3 node2_v = make_float3(node2_v4.x, node2_v4.y, node2_v4.z);
const float vn_dist2 = squared_norm(v - node2_v);
const float4 node3_v4 = reference_node_coordinates[knn.w];
const float3 node3_v = make_float3(node3_v4.x, node3_v4.y, node3_v4.z);
const float vn_dist3 = squared_norm(v - node3_v);
// Compute the weight of this node
float4 weight;
weight.x = __expf(-vn_dist0 / (2 * d_node_radius_square));
weight.y = __expf(-vn_dist1 / (2 * d_node_radius_square));
weight.z = __expf(-vn_dist2 / (2 * d_node_radius_square));
weight.w = __expf(-vn_dist3 / (2 * d_node_radius_square));
//Do a normalization?
#if defined(USE_INTERPOLATE_WEIGHT_NORMALIZATION)
const float weight_sum = weight.x + weight.y + weight.z + weight.w;
const float inv_weight_sum = 1.0f / weight_sum;
weight.x *= inv_weight_sum;
weight.y *= inv_weight_sum;
weight.z *= inv_weight_sum;
weight.w *= inv_weight_sum;
#endif
// Store the result to global memory
if (idx < vertex_confid_array.Size()) {
vertex_knn_array[idx] = knn;
vertex_knn_weight[idx] = weight;
}
else if (idx >= vertex_confid_array.Size() && idx < vertex_confid_array.Size() + node_knn_array.Size()) {
const int offset = idx - vertex_confid_array.Size();
node_knn_array[offset] = knn;
node_knn_weight[offset] = weight;
}
} // End of kernel
} // device
} // surfelwarp
surfelwarp::ReferenceNodeSkinner::ReferenceNodeSkinner() {
m_init_skinner = nullptr; //Just use brute force at first
m_num_bruteforce_nodes = 0;
	//Allocate the buffer of invalid nodes
m_invalid_nodes.create(Constants::kMaxNumNodes);
//The other part of the constant memory should be filled with invalid points
std::vector<float4> h_invalid_nodes;
h_invalid_nodes.resize(Constants::kMaxNumNodes);
float* begin = (float*)h_invalid_nodes.data();
float* end = begin + 4 * Constants::kMaxNumNodes;
std::fill(begin, end, 1e6f);
m_invalid_nodes.upload(h_invalid_nodes);
//Fill the constant memory with invalid values at first
fillInvalidConstantPoints();
}
/* The method for initial skinning
*/
void surfelwarp::ReferenceNodeSkinner::BuildInitialSkinningIndex(const SynchronizeArray<float4>& nodes, cudaStream_t stream)
{
//Build the index for brute force searcher
buildBruteForceIndex(nodes.DeviceArrayReadOnly(), stream);
SURFELWARP_CHECK(m_num_bruteforce_nodes == nodes.DeviceArraySize());
//If there is a customized searcher
if(m_init_skinner != nullptr) {
m_init_skinner->BuildIndexHostNodes(nodes.HostArray(), stream);
}
}
void surfelwarp::ReferenceNodeSkinner::fillInvalidConstantPoints(cudaStream_t stream) {
cudaSafeCall(cudaMemcpyToSymbolAsync(
device::reference_node_coordinates,
m_invalid_nodes.ptr(),
sizeof(float4) * Constants::kMaxNumNodes,
0, // no offset
cudaMemcpyDeviceToDevice,
stream
));
}
void surfelwarp::ReferenceNodeSkinner::replaceWithMorePoints(
const DeviceArrayView<float4> &nodes,
cudaStream_t stream
) {
SURFELWARP_CHECK_GE(nodes.Size(), m_num_bruteforce_nodes) << "Please use BuildIndex() instead!";
cudaSafeCall(cudaMemcpyToSymbolAsync(
device::reference_node_coordinates,
nodes.RawPtr(),
nodes.Size() * sizeof(float4),
0, // no offset
cudaMemcpyDeviceToDevice,
stream
));
m_num_bruteforce_nodes = nodes.Size();
}
void surfelwarp::ReferenceNodeSkinner::buildBruteForceIndex(
const DeviceArrayView<float4> &nodes,
cudaStream_t stream
) {
	//If there are more new nodes than previously stored
if(nodes.Size() >= m_num_bruteforce_nodes) {
replaceWithMorePoints(nodes, stream);
return;
}
//Check the size
SURFELWARP_CHECK(nodes.Size() <= Constants::kMaxNumNodes) << "Too many nodes";
//First clear the buffer
fillInvalidConstantPoints(stream);
//Copy the value to device
cudaSafeCall(cudaMemcpyToSymbolAsync(
device::reference_node_coordinates,
nodes.RawPtr(),
nodes.Size() * sizeof(float4),
0, // no offset
cudaMemcpyDeviceToDevice,
stream
));
//Update size
m_num_bruteforce_nodes = nodes.Size();
}
void surfelwarp::ReferenceNodeSkinner::performBruteForceSkinning(
const DeviceArrayView<float4>& reference_vertex,
const DeviceArrayView<float4>& reference_node,
DeviceArraySlice<ushort4> vertex_knn,
DeviceArraySlice<ushort4> node_knn,
DeviceArraySlice<float4> vertex_knn_weight,
DeviceArraySlice<float4> node_knn_weight,
cudaStream_t stream
) const {
//Check the size
SURFELWARP_CHECK_EQ(reference_node.Size(), m_num_bruteforce_nodes);
dim3 blk(256);
dim3 grid(divUp(reference_vertex.Size() + m_num_bruteforce_nodes, blk.x));
device::skinningVertexAndNodeBruteForceKernel<<<grid, blk, 0, stream>>>(
reference_vertex,
m_num_bruteforce_nodes,
vertex_knn, vertex_knn_weight,
node_knn, node_knn_weight
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::ReferenceNodeSkinner::PerformSkinning(
SurfelGeometry::SkinnerInput geometry,
WarpField::SkinnerInput warp_field,
cudaStream_t stream
) {
if(m_init_skinner != nullptr) { //Using the customized version
} else {
		//Using brute-force skinning here
performBruteForceSkinning(
geometry.reference_vertex_confid,
warp_field.reference_node_coords,
geometry.surfel_knn, warp_field.node_knn,
geometry.surfel_knn_weight, warp_field.node_knn_weight,
stream
);
}
//Check it
/*KNNSearch::CheckKNNSearch(
warp_field.reference_node_coords,
warp_field.reference_node_coords,
warp_field.node_knn.ArrayView()
);
KNNSearch::CheckKNNSearch(
warp_field.reference_node_coords,
geometry.reference_vertex_confid,
geometry.surfel_knn.ArrayView()
);*/
}
/* The method for skinning update. nodes[newnode_offset] should be the first new node
*/
void surfelwarp::ReferenceNodeSkinner::UpdateBruteForceSkinningIndexWithNewNodes(
const DeviceArrayView<float4>& nodes,
unsigned newnode_offset,
cudaStream_t stream
) {
//Check the size
const unsigned prev_nodesize = newnode_offset;
SURFELWARP_CHECK(nodes.Size() >= prev_nodesize); //There should be more nodes now
SURFELWARP_CHECK(nodes.Size() <= Constants::kMaxNumNodes);
//There is no node to append
if(nodes.Size() == prev_nodesize) return;
//Everything seems to be correct, do it
const auto new_node_size = nodes.Size() - newnode_offset;
const float4* node_ptr = nodes.RawPtr() + newnode_offset;
cudaSafeCall(cudaMemcpyToSymbolAsync(
device::reference_node_coordinates,
node_ptr,
new_node_size * sizeof(float4),
newnode_offset * sizeof(float4),
cudaMemcpyDeviceToDevice,
stream
));
//Update the size
m_num_bruteforce_nodes = nodes.Size();
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::ReferenceNodeSkinner::updateSkinning(
unsigned newnode_offset,
const DeviceArrayView<float4>& reference_vertex,
const DeviceArrayView<float4>& reference_node,
DeviceArraySlice<ushort4> vertex_knn,
DeviceArraySlice<ushort4> node_knn,
DeviceArraySlice<float4> vertex_knn_weight,
DeviceArraySlice<float4> node_knn_weight,
cudaStream_t stream
) const {
//Check the size
const unsigned prev_nodesize = newnode_offset;
SURFELWARP_CHECK(reference_node.Size() >= prev_nodesize); //There should be more nodes now
if(reference_node.Size() == prev_nodesize) return;
//The index should be updated
SURFELWARP_CHECK(m_num_bruteforce_nodes == reference_node.Size()) << "The index is not updated";
	//The number of appended nodes
const auto num_appended_node = m_num_bruteforce_nodes - newnode_offset;
const auto padded_newnode_num = divUp(num_appended_node, 4) * 4;
	//Let's do it
dim3 blk(256);
dim3 grid(divUp(reference_vertex.Size() + m_num_bruteforce_nodes, blk.x));
device::updateVertexNodeKnnWeightKernel<<<grid, blk, 0, stream>>>(
reference_vertex,
vertex_knn.RawPtr(), vertex_knn_weight.RawPtr(),
node_knn, node_knn_weight.RawPtr(),
newnode_offset, padded_newnode_num
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::ReferenceNodeSkinner::PerformSkinningUpdate(
SurfelGeometry::SkinnerInput geometry,
WarpField::SkinnerInput warp_field,
unsigned newnode_offset,
cudaStream_t stream
) {
//Check the size
SURFELWARP_CHECK(geometry.surfel_knn.Size() == geometry.surfel_knn_weight.Size());
SURFELWARP_CHECK(geometry.surfel_knn.Size() == geometry.reference_vertex_confid.Size());
SURFELWARP_CHECK(warp_field.reference_node_coords.Size() == warp_field.node_knn.Size());
SURFELWARP_CHECK(warp_field.reference_node_coords.Size() == warp_field.node_knn_weight.Size());
//Hand in to workforce
updateSkinning(
newnode_offset,
geometry.reference_vertex_confid,
warp_field.reference_node_coords,
geometry.surfel_knn, warp_field.node_knn,
geometry.surfel_knn_weight, warp_field.node_knn_weight,
stream
);
//Check it
/*KNNSearch::CheckApproximateKNNSearch(
warp_field.reference_node_coords,
geometry.reference_vertex_confid,
geometry.surfel_knn.ArrayView()
);
KNNSearch::CheckKNNSearch(
warp_field.reference_node_coords,
warp_field.reference_node_coords,
warp_field.node_knn.ArrayView()
);*/
}
const int WARP_SIZE = 32;
// The maximum number of threads in a block
const int MAX_BLOCK_SIZE = 512;
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static int getNumThreads(int nElem) {
int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };
for (int i = 0; i != 5; ++i) {
if (nElem <= threadSizes[i]) {
return threadSizes[i];
}
}
return MAX_BLOCK_SIZE;
}
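// Worked example (illustration only): getNumThreads(20) -> 32,
// getNumThreads(100) -> 128, getNumThreads(600) -> MAX_BLOCK_SIZE (512),
// i.e. the smallest listed block size that covers nElem, capped at 512.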
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
template <typename Dtype, typename Acctype>
struct Float2 {
Acctype v1, v2;
__device__ Float2() {}
__device__ Float2(Dtype v1, Dtype v2) : v1(ScalarConvert<Dtype, Acctype>::to(v1)), v2(ScalarConvert<Dtype, Acctype>::to(v2)) {}
__device__ Float2(Dtype v) : v1(ScalarConvert<Dtype, Acctype>::to(v)), v2(ScalarConvert<Dtype, Acctype>::to(v)) {}
__device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct SumOp {
__device__ SumOp(const DeviceTensor3 t) : tensor(t) {}
__device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
return ScalarConvert<Dtype, Acctype>::to(tensor[batch][plane][n]);
}
const DeviceTensor3 tensor;
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct VarOp {
__device__ VarOp(Acctype m, const DeviceTensor3 t) : mean(m), tensor(t) {}
__device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
Dtype val = tensor[batch][plane][n];
return (val - mean) * (val - mean);
}
const Acctype mean;
const DeviceTensor3 tensor;
};
template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<Dtype, Acctype> operator()(int batch, int plane, int n) {
Dtype g = gradOutput[batch][plane][n];
Dtype c = ScalarConvert<Acctype, Dtype>::to(input[batch][plane][n] - mean);
return Float2<Dtype, Acctype>(g, g * c);
}
const Acctype mean;
const DeviceTensor3 input;
const DeviceTensor3 gradOutput;
};
// Sum across all threads within a warp
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
}
#else
__shared__ T values[MAX_BLOCK_SIZE];
values[threadIdx.x] = val;
__threadfence_block();
const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
for (int i = 1; i < WARP_SIZE; i++) {
val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
}
#endif
return val;
}
template <typename Dtype, typename Acctype>
static __device__ __forceinline__ Float2<Dtype, Acctype> warpSum(Float2<Dtype, Acctype> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
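// A minimal alternative sketch (not part of the original source): on sm_30+ with
// CUDA 9 or later, the same full-warp sum can be written with the synchronizing
// shuffle intrinsic. This assumes all 32 lanes of the warp are active at the call.
template <typename T>
static __device__ __forceinline__ T warpSumSync(T val) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    val += __shfl_xor_sync(0xffffffffu, val, offset, WARP_SIZE);
  }
  return val;
}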
template<typename T, typename Op, typename DeviceTensor3, typename IndexTensor>
__device__ T reduce_vl(Op op, DeviceTensor3 tensor, int plane, IndexTensor input_lengths) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
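// Usage note (an assumption based on the code above, not from the original
// source): reduce_vl expects a 1-D block whose size is a multiple of WARP_SIZE
// and at most 32*WARP_SIZE (=1024), since the partial warp sums are staged in
// the 32-entry `shared` array; every thread of the block receives the final sum
// via the return value.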
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_inference_kernel(
const DeviceTensor3 input,
const IndexTensor input_lengths,
DeviceTensor3 output,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
Acctype epsilon) {
int plane = blockIdx.x;
Acctype invstd = Acctype(1) / sqrt(runningVar[plane].ldg() + epsilon);
Acctype mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane].ldg());
Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane].ldg()) : Acctype(1);
Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane].ldg()) : Acctype(0);
// Write normalized and update the output
for (int batch = 0; batch < input.getSize(0); batch++) {
for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
Dtype inp = input[batch][plane][x].ldg();
output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invstd + beta);
}
}
}
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_mean_kernel(
const DeviceTensor3 input,
const IndexTensor input_lengths,
const int length_sum,
DeviceTensor1 out_mean) {
int plane = blockIdx.x;
Acctype norm = Acctype(1) / length_sum;
Acctype mean = reduce_vl<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane, input_lengths) * norm;
if (threadIdx.x == 0) {
out_mean[plane] = ScalarConvert<Acctype, Dtype>::to(mean);
}
}
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_val_kernel(
const DeviceTensor3 input,
const IndexTensor input_lengths,
const int length_sum,
const DeviceTensor1 in_mean,
DeviceTensor1 out_var) {
int plane = blockIdx.x;
Acctype norm = Acctype(1) / length_sum;
Acctype mean = ScalarConvert<Dtype, Acctype>::to(in_mean[plane]);
Acctype var = reduce_vl<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane, input_lengths) * norm;
if (threadIdx.x == 0) {
out_var[plane] = ScalarConvert<Acctype, Dtype>::to(var);
}
}
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_output_kernel(
const DeviceTensor3 input,
const IndexTensor input_lengths,
const int length_sum,
DeviceTensor3 output,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
const Acctype epsilon,
const Acctype momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveVar) {
int plane = blockIdx.x;
int N = length_sum;
Acctype mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
Acctype var = ScalarConvert<Dtype, Acctype>::to(saveVar[plane]);
Acctype invStd = 1 / sqrt(var + epsilon);
// Save the mean, variance, and moving averages
if (threadIdx.x == 0) {
// Momentum based writeback
Acctype unbiasedVar = var * N / (N - 1);
runningMean[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningMean[plane] + momentum * mean);
runningVar[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningVar[plane] + momentum * unbiasedVar);
}
// Write normalized and update the output
Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : ScalarConvert<int, Acctype>::to(1);
Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane]) : ScalarConvert<int, Acctype>::to(0);
for (int batch = 0; batch < input.getSize(0); ++batch) {
for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
//for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
// int t = input_lengths[batch];
// printf("block: %d, batch: %d, input_length: %d, x:%d\n", blockIdx.x, batch, t, x);
Dtype inp = input[batch][plane][x].ldg();
output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invStd + beta);
}
}
}
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_grad_stats_kernel(
const DeviceTensor3 input,
const IndexTensor input_lengths,
const int length_sum,
const DeviceTensor3 gradOutput,
const DeviceTensor1 runningMean,
const DeviceTensor1 saveMean,
DeviceTensor1 gradOutputMean_all,
DeviceTensor1 dotP_all,
bool train) {
int plane = blockIdx.x;
int N = length_sum;
Acctype mean;
if (train) {
mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
} else {
mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
}
Acctype norm = Acctype(1) / N;
GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput);
Float2<Dtype, Acctype> res = reduce_vl<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>
(g, gradOutput, plane, input_lengths);
Acctype gradOutputMean = res.v1 * norm;
Acctype dotP = res.v2 * norm;
if (threadIdx.x == 0) {
gradOutputMean_all[plane] = ScalarConvert<Acctype, Dtype>::to(gradOutputMean);
dotP_all[plane] = ScalarConvert<Acctype, Dtype>::to(dotP);
}
}
template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_backward_kernel(
const DeviceTensor3 input,
const IndexTensor input_lengths,
const int length_sum,
const DeviceTensor3 gradOutput,
const DeviceTensor1 gradOutputMean,
const DeviceTensor1 dotP_all,
DeviceTensor3 gradInput,
DeviceTensor1 gradWeight,
DeviceTensor1 gradBias,
const DeviceTensor1 weight,
const DeviceTensor1 runningMean,
const DeviceTensor1 runningVar,
const DeviceTensor1 saveMean,
const DeviceTensor1 saveVar,
bool train,
Acctype scale,
double eps) {
int plane = blockIdx.x;
int N = length_sum;
Acctype mean, stdVal;
if (train) {
mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
stdVal = 1 / sqrt(ScalarConvert<Dtype, Acctype>::to(saveVar[plane]) + eps);
} else {
mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
stdVal = 1 / sqrt(runningVar[plane] + eps);
}
Acctype weightVal = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : Acctype(1);
// gradOutputMean and dotP_all were precomputed per plane by
// batchnorm_grad_stats_kernel; reconstruct the gradOutput sum from the mean.
Acctype gradOutputSum = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]) * N;
Acctype dotP = ScalarConvert<Dtype, Acctype>::to(dotP_all[plane]);
// Acctype gradMean = gradOutputSum * norm;
Acctype gradMean = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]);
// Acctype projScale = dotP * norm * stdVal * stdVal;
Acctype projScale = dotP * stdVal * stdVal;
Acctype gradScale = stdVal * weightVal;
if (gradInput.numElements() > 0) {
for (int batch = 0; batch < gradOutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
Dtype gradOut = gradOutput[batch][plane][x];
if (train) {
Dtype inp = input[batch][plane][x];
Acctype proj = (inp - mean) * projScale;
gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to((gradOut - proj - gradMean) * gradScale);
} else {
gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gradOut * gradScale);
}
}
}
}
if (gradWeight.numElements() > 0) {
if (threadIdx.x == 0) {
gradWeight[plane] += ScalarConvert<Acctype, Dtype>::to(scale * dotP * stdVal);
}
}
if (gradBias.numElements() > 0) {
if (threadIdx.x == 0) {
gradBias[plane] += ScalarConvert<Acctype, Dtype>::to(scale * gradOutputSum);
}
}
}
#define FloatTensor3 THCDeviceTensor<float, 3>
#define FloatTensor1 THCDeviceTensor<float, 1>
#define IntTensor1 THCDeviceTensor<int, 1>
template <typename Dtype, int Dim>
static THCDeviceTensor<Dtype, Dim> devicetensor(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<Dtype, Dim>();
}
int inDim = t->dim();
THAssert(inDim == Dim);
return toDeviceTensor<Dtype, Dim>(state, t);
}
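/*
 * Host-side entry points. Each wrapper converts raw THCTensor arguments into
 * THCDeviceTensor views and launches one CUDA block per feature plane
 * (gridDim.x = input.getSize(1)), with the block size chosen from the time
 * dimension via getNumThreads().
 */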
extern "C" void THNN_CudaBatchnormUpdateOutput(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
int length_sum, int train, double momentum, double eps);
extern "C" void THNN_CudaBatchnormMean(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_, int length_sum);
extern "C" void THNN_CudaBatchnormVar(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_, THCTensor *saveVar_, int length_sum);
void THNN_CudaBatchnormMean(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_, int length_sum) {
FloatTensor3 input = devicetensor<float, 3>(state, input_);
FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
batchnorm_mean_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1> <<<blocks, threads, 0, s>>>(
input, input_lengths, length_sum, saveMean);
THCudaCheck(cudaGetLastError());
}
void THNN_CudaBatchnormVar(
THCState *state, THCTensor *input_, THCTensor *input_lengths_,
THCTensor *saveMean_, THCTensor *saveVar_, int length_sum) {
FloatTensor3 input = devicetensor<float, 3>(state, input_);
FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
FloatTensor1 saveVar = devicetensor<float, 1>(state, saveVar_);
IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
batchnorm_val_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1> <<<blocks, threads, 0, s>>>(
input, input_lengths, length_sum, saveMean, saveVar);
THCudaCheck(cudaGetLastError());
}
void THNN_CudaBatchnormUpdateOutput(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
int length_sum, int train, double momentum, double eps) {
THCTensor_resizeAs(state, output_, input_);
FloatTensor3 input = devicetensor<float, 3>(state, input_);
FloatTensor3 output = devicetensor<float, 3>(state, output_);
FloatTensor1 weight = devicetensor<float, 1>(state, weight_);
FloatTensor1 bias = devicetensor<float, 1>(state, bias_);
FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
FloatTensor1 runningVar = devicetensor<float, 1>(state, runningVar_);
FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
FloatTensor1 saveStd = devicetensor<float, 1>(state, saveStd_);
IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
batchnorm_inference_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1> <<<blocks, threads, 0, s>>>(
input, input_lengths, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
batchnorm_output_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1> <<<blocks, threads, 0, s>>>(
input, input_lengths, length_sum, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(cudaGetLastError());
}
extern "C" void THNN_CudaBatchnormBackward(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
THCTensor *gradOutputMean_, THCTensor *dotP_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double scale, double eps);
extern "C" void THNN_CudaBatchnormGradStats(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
THCTensor *runningMean_, THCTensor *saveMean_,
THCTensor *gradOutputMean_, THCTensor *dotP_, int length_sum, int train);
void THNN_CudaBatchnormGradStats(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
THCTensor *runningMean_, THCTensor *saveMean_,
THCTensor *gradOutputMean_, THCTensor *dotP_, int length_sum, int train) {
// THCUNN_check_shape(state, input_, gradOutput_);
FloatTensor3 input = devicetensor<float, 3>(state, input_);
FloatTensor3 gradOutput = devicetensor<float, 3>(state, gradOutput_);
FloatTensor1 gradOutputMean = devicetensor<float, 1>(state, gradOutputMean_);
FloatTensor1 dotP = devicetensor<float, 1>(state, dotP_);
FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);
cudaStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
batchnorm_grad_stats_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1> <<<blocks, threads, 0, s>>>(
input, input_lengths, length_sum, gradOutput, runningMean, saveMean, gradOutputMean, dotP, train);
THCudaCheck(cudaGetLastError());
}
void THNN_CudaBatchnormBackward(
THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
THCTensor *gradOutputMean_, THCTensor *dotP_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double scale, double eps) {
// THCUNN_check_shape(state, input_, gradOutput_);
FloatTensor3 input = devicetensor<float, 3>(state, input_);
FloatTensor3 gradOutput = devicetensor<float, 3>(state, gradOutput_);
FloatTensor1 gradOutputMean = devicetensor<float, 1>(state, gradOutputMean_);
FloatTensor1 dotP = devicetensor<float, 1>(state, dotP_);
FloatTensor3 gradInput = devicetensor<float, 3>(state, gradInput_);
FloatTensor1 gradWeight = devicetensor<float, 1>(state, gradWeight_);
FloatTensor1 gradBias = devicetensor<float, 1>(state, gradBias_);
FloatTensor1 weight = devicetensor<float, 1>(state, weight_);
FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
FloatTensor1 runningVar = devicetensor<float, 1>(state, runningVar_);
FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
FloatTensor1 saveStd = devicetensor<float, 1>(state, saveStd_);
IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);
cudaStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
batchnorm_backward_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1> <<<blocks, threads, 0, s>>>(
input, input_lengths, length_sum, gradOutput, gradOutputMean, dotP, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(cudaGetLastError());
}
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include "cuda/cuda_check.h"
#include "ssids/gpu/kernels/datatypes.h"
#define HOGG_ASSEMBLE_TX 128 // Block height
#define HOGG_ASSEMBLE_TY 8 // Block width
#define HOGG_ASSEMBLE_NTX 32 // Number of threads x
#define HOGG_ASSEMBLE_NTY 4 // Number of threads y
#define ADD_DELAYS_TX 32
#define ADD_DELAYS_TY 4
namespace /* anon */ {
struct load_nodes_type {
long nnz; // Number of entries to map
int lda; // Leading dimension of A
int ldl; // Leading dimension of L
double *lcol; // Pointer to non-delay part of L
long offn; // Offset into nlist
long offr; // Offset into rlist
};
/*
* Perform assembly according to nlist:
* lval( nlist(2,i) ) = val( nlist(1,i) ) (in Fortran)
*
* Each block handles one node (regardless of size!!!)
* Note: modified value lval is passed in via pointer in lndata, not as argument
*/
__global__ void
cu_load_nodes(
const struct load_nodes_type *lndata,
const long *nlist,
const double *aval
) {
lndata += blockIdx.x;
const long nnz = lndata->nnz;
const int lda = lndata->lda;
const int ldl = lndata->ldl;
nlist += 2*lndata->offn;
double *const lval = lndata->lcol;
for (int i = threadIdx.x; i < nnz; i += blockDim.x) {
// Note: nlist is 1-indexed, not 0 indexed, so we have to adjust
const int r = (nlist[2*i+1] - 1) % lda; // row index
const int c = (nlist[2*i+1] - 1) / lda; // col index
const long sidx = nlist[2*i+0] - 1; // source index
lval[r + c*ldl] = aval[sidx];
}
}
/*
* Perform assembly according to nlist:
* lval( nlist(2,i) ) = val( nlist(1,i) ) (in Fortran)
* with the added twist of needing to perform a scaling at the same time
*
* Each block handles one node (regardless of size!!!)
* Note: modified value lval is passed in via pointer in lndata, not as argument
*/
__global__ void
cu_load_nodes_sc(
const struct load_nodes_type *lndata,
const long *nlist,
const int *rlist,
const double *scale,
const double *aval
) {
lndata += blockIdx.x;
const long nnz = lndata->nnz;
const int lda = lndata->lda;
const int ldl = lndata->ldl;
nlist += 2*lndata->offn;
double *const lval = lndata->lcol;
rlist += lndata->offr;
for (int i = threadIdx.x; i < nnz; i += blockDim.x) {
// Note: nlist and rlist are 1-indexed, not 0 indexed, so we adjust
const int r = (nlist[2*i+1] - 1) % lda; // row index
const int c = (nlist[2*i+1] - 1) / lda; // col index
const long sidx = nlist[2*i+0] - 1; // source index
const double rs = scale[rlist[r] - 1]; // row scaling
const double cs = scale[rlist[c] - 1]; // col scaling
lval[r + c*ldl] = rs * aval[sidx] * cs;
}
}
// Per-block max-abs reduction: each block scans its strided slice of u and
// writes the block's maximum absolute value to maxabs[blockIdx.x].
// BLOCK_SIZE must equal blockDim.x and maxabs must be initialized to zeros.
template< typename ELEMENT_TYPE, unsigned int BLOCK_SIZE >
__global__ void
cu_max_abs( const long n, const ELEMENT_TYPE *const u, ELEMENT_TYPE *const maxabs )
{
__shared__ volatile ELEMENT_TYPE tmax[BLOCK_SIZE];
tmax[threadIdx.x] = 0.0;
for ( long i = threadIdx.x + blockDim.x*blockIdx.x; i < n;
i += blockDim.x*gridDim.x ) {
const ELEMENT_TYPE v = fabs(u[i]);
if ( v > tmax[threadIdx.x] )
tmax[threadIdx.x] = v;
}
__syncthreads();
for ( int inc = 1; inc < BLOCK_SIZE; inc *= 2 ) {
if ( 2*inc*threadIdx.x + inc < BLOCK_SIZE
&& tmax[2*inc*threadIdx.x + inc] > tmax[2*inc*threadIdx.x] )
tmax[2*inc*threadIdx.x] = tmax[2*inc*threadIdx.x + inc];
__syncthreads();
}
if ( threadIdx.x == 0 && tmax[0] > 0.0 )
maxabs[blockIdx.x] = tmax[0];
}
/* Following data type describes a single child-parent assembly */
struct assemble_cp_type {
// Parent data
int pvoffset; // Offset to start of parent node values
double *pval; // Pointer to non-delay part of parent L
int ldp; // Leading dimension of parent
// Child data
int cm; // Number of rows in child
int cn; // Number of columns in child
int ldc; // Leading dimension of child
long cvoffset; // Offset to start of child node values
double *cv; // Pointer to start of child node values
// Alignment data
int *rlist_direct; // Pointer to start of child's rlist
int *ind; // Pointer to start of child's contribution index
// Sync data
int sync_offset; // we watch sync[sync_offset]
int sync_wait_for; // and wait for it to have value >= sync_wait_for
};
/* The following data type describes the actions of a single CUDA block */
struct assemble_blk_type {
int cp; // node we're assembling into
int blk; // block number of that node
};
/* Used to force volatile load of a declared non-volatile variable */
template <typename T_ELEM>
__inline__ __device__ T_ELEM loadVolatile(volatile T_ELEM *const vptr) {
return *vptr;
}
/* Performs sparse assembly of a m x n child into a parent as dictated by
* rlist_direct (supplied as part of cpdata).
*
* A lookup is performed in blkdata to determine which child-parent assembly
* is to be performed next, and which block of that assembly this is.
*
* next_blk is used to ensure all blocks run in the exact desired order.
* sync[] is used to ensure dependencies are completed in the correct order.
*/
template <unsigned int blk_sz_x, unsigned int blk_sz_y,
unsigned int ntx, unsigned nty>
void __global__ assemble(
const struct assemble_blk_type *blkdata, // block mapping
const struct assemble_cp_type *cpdata, // child-parent data
const double *const children, // pointer to array containing children
double *const parents, // pointer to array containing parents
unsigned int *const next_blk, // gmem location used to determine next block
volatile unsigned int *const sync // sync[cp] is #blocks completed so far for cp
) {
// Get block number
__shared__ volatile unsigned int mynext_blk;
if(threadIdx.x==0 && threadIdx.y==0)
mynext_blk = atomicAdd(next_blk, 1);
__syncthreads();
// Determine global information
blkdata += mynext_blk;
cpdata += blkdata->cp;
int blk = blkdata->blk;
int nx = (cpdata->cm-1) / blk_sz_x + 1; // number of blocks high child is
int bx = blk % nx; // coordinate of block in x direction
int by = blk / nx; // coordinate of block in y direction
int ldc = cpdata->ldc;
int ldp = cpdata->ldp;
// Initialize local information
int m = min(blk_sz_x, cpdata->cm - bx*blk_sz_x);
int n = min(blk_sz_y, cpdata->cn - by*blk_sz_y);
const double *src =
cpdata->cv + ldc*by*blk_sz_y + bx*blk_sz_x;
double *dest = cpdata->pval;
int *rows = cpdata->rlist_direct + bx*blk_sz_x;
int *cols = cpdata->rlist_direct + by*blk_sz_y;
// Wait for previous child of this parent to complete
if(threadIdx.x==0 && threadIdx.y==0) {
while(sync[cpdata->sync_offset] < cpdata->sync_wait_for) /**/;
}
__syncthreads();
// Perform assembly
for(int j=0; j<blk_sz_y/nty; j++) {
if( threadIdx.y+j*nty < n ) {
int col = cols[threadIdx.y+j*nty]-1;
for(int i=0; i<blk_sz_x/ntx; i++) {
if( threadIdx.x+i*ntx < m ) {
int row = rows[threadIdx.x+i*ntx]-1;
dest[row + col*ldp] +=
src[threadIdx.x+i*ntx + (threadIdx.y+j*nty)*ldc];
}
}
}
}
// Record that we're done
__syncthreads();
if(threadIdx.x==0 && threadIdx.y==0) {
atomicAdd((int*)&(sync[blkdata->cp]), 1);
}
}
struct assemble_delay_type {
int dskip; // Number of rows to skip for delays from later children
int m; // Number of rows in child to copy
int n; // Number of cols in child to copy
int ldd; // Leading dimension of dest (parent)
int lds; // Leading dimension of src (child)
double *dval; // Pointer to dest (parent)
double *sval; // Pointer to src (child)
long roffset; // Offset to rlist_direct
};
/* Copies delays from child to parent using one block per parent
* Note: src and dest pointers both contained in dinfo
*/
void __global__ add_delays(
struct assemble_delay_type *dinfo, // information on each block
const int *rlist_direct // children's rows indices in parents
) {
dinfo += blockIdx.x;
const int dskip = dinfo->dskip; // number of delays
const int m = dinfo->m; // number of rows
const int n = dinfo->n; // number of cols
const int ldd = dinfo->ldd; // leading dimension of dest
const int lds = dinfo->lds; // leading dimension of src
double *const dest = dinfo->dval;
const double *const src = dinfo->sval;
rlist_direct += dinfo->roffset;
for ( int y = threadIdx.y; y < n; y += blockDim.y ) {
for ( int x = threadIdx.x; x < m; x += blockDim.x ) {
if ( x < n ) {
dest[x + y*ldd] = src[x + y*lds];
}
else {
int xt = dskip + rlist_direct[x - n] - 1;
dest[xt + y*ldd] = src[x + y*lds];
}
}
}
}
} /* anon namespace */
/*******************************************************************************
* The following routines are exported with C binding so they can be called from Fortran
******************************************************************************/
extern "C" {
/* Invokes the add_delays<<<>>>() kernel */
void spral_ssids_add_delays( const cudaStream_t *stream, int ndblk,
struct assemble_delay_type *gpu_dinfo, int *rlist_direct ) {
if ( ndblk == 0 ) return; // Nothing to see here
dim3 threads(ADD_DELAYS_TX, ADD_DELAYS_TY);
for ( int i = 0; i < ndblk; i += MAX_CUDA_BLOCKS ) {
int nb = min(MAX_CUDA_BLOCKS, ndblk - i);
add_delays
<<< nb, threads, 0, *stream >>>
( gpu_dinfo + i, rlist_direct );
CudaCheckError();
}
}
/* Runs the kernel assemble<<<>>>() after setting up memory correctly. */
/* Requires gpu_next_sync[] to be of size >= (1+ncp)*sizeof(unsigned int) */
void spral_ssids_assemble(const cudaStream_t *stream, int nblk, int blkoffset,
struct assemble_blk_type *blkdata, int ncp,
struct assemble_cp_type *cpdata, double *children,
double *parents, unsigned int *gpu_next_sync) {
/* Create and initialize synchronization objects using a single call:
next_blk[1]
sync[ncp]
*/
CudaSafeCall(
cudaMemsetAsync(gpu_next_sync,0,(1+ncp)*sizeof(unsigned int),*stream)
);
/* Note that we can have at most 65535 blocks per dimension.
* For some problems, nblk can exceed this, so we use more than one launch.
* Since the next block to process is chosen via next_blk, this works fine.
*/
dim3 threads(HOGG_ASSEMBLE_NTX, HOGG_ASSEMBLE_NTY);
for(int i=0; i<nblk; i+=MAX_CUDA_BLOCKS) {
int blocks = min(MAX_CUDA_BLOCKS, nblk-i);
assemble
<HOGG_ASSEMBLE_TX, HOGG_ASSEMBLE_TY,
HOGG_ASSEMBLE_NTX, HOGG_ASSEMBLE_NTY>
<<<blocks, threads, 0, *stream>>>
(&blkdata[blkoffset], cpdata, children, parents, &gpu_next_sync[0],
&gpu_next_sync[1]);
CudaCheckError();
}
}
// Note: modified value lval is passed in via pointer in lndata, not as argument
void spral_ssids_load_nodes( const cudaStream_t *stream, int nblocks,
const struct load_nodes_type *lndata, const long* list,
const double* mval ) {
for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) {
int nb = min(MAX_CUDA_BLOCKS, nblocks - i);
cu_load_nodes <<< nb, 128, 0, *stream >>> ( lndata + i, list, mval );
CudaCheckError();
}
}
// Note: modified value lval is passed in via pointer in lndata, not as argument
void spral_ssids_load_nodes_sc( const cudaStream_t *stream, int nblocks,
const struct load_nodes_type *lndata, const long* list, const int* rlist,
const double* scale, const double* mval ) {
for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) {
int nb = min(MAX_CUDA_BLOCKS, nblocks - i);
cu_load_nodes_sc <<< nb, 128, 0, *stream >>> ( lndata + i, list, rlist, scale, mval );
CudaCheckError();
}
}
void spral_ssids_max_abs( const cudaStream_t *stream,
int nb, long n, double* u, double* buff, double* maxabs )
{
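/* Two-pass reduction: the first kernel launch writes one partial maximum per
 * block into buff (which must be zero-initialised), and the second launch
 * reduces those nb partial results into the single value maxabs. The block
 * size of the first pass is chosen according to the amount of work per block. */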
cudaMemsetAsync(buff, 0, nb*sizeof(double), *stream);
cudaStreamSynchronize(*stream);
if ( n > 1024*nb )
cu_max_abs< double, 256 ><<< nb, 256, 0, *stream >>>( n, u, buff );
else
cu_max_abs< double, 32 ><<< nb, 32, 0, *stream >>>( n, u, buff );
CudaCheckError();
cu_max_abs< double, 1024 ><<< 1, 1024, 0, *stream >>>( nb, buff, maxabs );
CudaCheckError();
}
} // end extern "C"
namespace timemachine {
Context::Context(
int N,
const double *x_0,
const double *v_0,
const double *box_0,
Integrator *intg,
std::vector<BoundPotential *> bps,
MonteCarloBarostat *barostat)
: N_(N), intg_(intg), bps_(bps), step_(0), d_sum_storage_(nullptr), d_sum_storage_bytes_(0), barostat_(barostat) {
d_x_t_ = gpuErrchkCudaMallocAndCopy(x_0, N * 3);
d_v_t_ = gpuErrchkCudaMallocAndCopy(v_0, N * 3);
d_box_t_ = gpuErrchkCudaMallocAndCopy(box_0, 3 * 3);
gpuErrchk(cudaMalloc(&d_du_dx_t_, N * 3 * sizeof(*d_du_dx_t_)));
gpuErrchk(cudaMalloc(&d_du_dl_buffer_, N * sizeof(*d_du_dl_buffer_)));
gpuErrchk(cudaMalloc(&d_u_buffer_, N * sizeof(*d_u_buffer_)));
unsigned long long *d_in_tmp = nullptr; // dummy
unsigned long long *d_out_tmp = nullptr; // dummy
// Compute the storage size necessary to reduce du_dl
cub::DeviceReduce::Sum(d_sum_storage_, d_sum_storage_bytes_, d_in_tmp, d_out_tmp, N_);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMalloc(&d_sum_storage_, d_sum_storage_bytes_));
// for(int i=0; i < bps.size(); i++) {
// cudaStream_t stream;
// gpuErrchk(cudaStreamCreate(&stream));
// streams_.push_back(stream);
// }
};
Context::~Context() {
gpuErrchk(cudaFree(d_x_t_));
gpuErrchk(cudaFree(d_v_t_));
gpuErrchk(cudaFree(d_box_t_));
gpuErrchk(cudaFree(d_du_dx_t_));
gpuErrchk(cudaFree(d_du_dl_buffer_));
gpuErrchk(cudaFree(d_u_buffer_));
gpuErrchk(cudaFree(d_sum_storage_));
// for(int i=0; i < streams_.size(); i++) {
// gpuErrchk(cudaStreamDestroy(streams_[i]));
// }
};
void Context::add_observable(Observable *obs) { this->observables_.push_back(obs); }
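/*
 * Runs the integrator over the supplied lambda schedule, storing du/dl every
 * store_du_dl_interval steps and coordinates/boxes every store_x_interval
 * steps. Returns {du_dl samples, flattened coordinate frames, box frames}.
 *
 * Usage sketch (host-side input names below are assumptions, not part of this file):
 *   timemachine::Context ctx(N, x0, v0, box0, intg, bps, barostat);
 *   auto traj = ctx.multiple_steps(lambda_schedule, 100, 100);
 *   // traj[0] = du/dl, traj[1] = coordinates, traj[2] = boxes
 */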
std::array<std::vector<double>, 3>
Context::multiple_steps(const std::vector<double> &lambda_schedule, int store_du_dl_interval, int store_x_interval) {
unsigned long long *d_du_dl_buffer = nullptr;
double *d_box_buffer = nullptr;
// the try/catch block below frees d_du_dl_buffer and d_box_buffer if anything throws, avoiding device-memory leaks
if (store_du_dl_interval <= 0) {
throw std::runtime_error("store_du_dl_interval <= 0");
}
if (store_x_interval <= 0) {
throw std::runtime_error("store_x_interval <= 0");
}
int du_dl_buffer_size = (lambda_schedule.size() + store_du_dl_interval - 1) / store_du_dl_interval;
int x_buffer_size = (lambda_schedule.size() + store_x_interval - 1) / store_x_interval;
int box_buffer_size = x_buffer_size * 3 * 3;
std::vector<double> h_x_buffer(x_buffer_size * N_ * 3);
try {
gpuErrchk(cudaMalloc(&d_box_buffer, box_buffer_size * sizeof(*d_box_buffer)));
// indicator so we can set it to a default arg.
gpuErrchk(cudaMalloc(&d_du_dl_buffer, du_dl_buffer_size * sizeof(*d_du_dl_buffer)));
gpuErrchk(cudaMemset(d_du_dl_buffer, 0, du_dl_buffer_size * sizeof(*d_du_dl_buffer)));
for (int i = 0; i < lambda_schedule.size(); i++) {
// decide if we need to store the du_dl for this step
unsigned long long *du_dl_ptr = nullptr;
if (i % store_du_dl_interval == 0) {
// parentheses are not strictly needed, but make the pointer arithmetic explicit
du_dl_ptr = d_du_dl_buffer + (i / store_du_dl_interval);
}
if (i % store_x_interval == 0) {
gpuErrchk(cudaMemcpy(
&h_x_buffer[0] + (i / store_x_interval) * N_ * 3,
d_x_t_,
N_ * 3 * sizeof(double),
cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(
&d_box_buffer[0] + (i / store_x_interval) * 3 * 3,
d_box_t_,
3 * 3 * sizeof(*d_box_buffer),
cudaMemcpyDeviceToDevice));
}
double lambda = lambda_schedule[i];
this->_step(lambda, du_dl_ptr);
}
cudaDeviceSynchronize();
std::vector<unsigned long long> h_du_dl_buffer_ull(du_dl_buffer_size);
gpuErrchk(cudaMemcpy(
&h_du_dl_buffer_ull[0],
d_du_dl_buffer,
du_dl_buffer_size * sizeof(*d_du_dl_buffer),
cudaMemcpyDeviceToHost));
std::vector<double> h_du_dl_buffer_double(du_dl_buffer_size);
for (int i = 0; i < h_du_dl_buffer_ull.size(); i++) {
h_du_dl_buffer_double[i] = FIXED_TO_FLOAT<double>(h_du_dl_buffer_ull[i]);
}
std::vector<double> h_box_buffer(box_buffer_size);
gpuErrchk(cudaMemcpy(
&h_box_buffer[0], d_box_buffer, box_buffer_size * sizeof(*d_box_buffer), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_du_dl_buffer));
gpuErrchk(cudaFree(d_box_buffer));
return std::array<std::vector<double>, 3>({h_du_dl_buffer_double, h_x_buffer, h_box_buffer});
} catch (...) {
gpuErrchk(cudaFree(d_du_dl_buffer));
gpuErrchk(cudaFree(d_box_buffer));
throw;
}
}
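/*
 * Like multiple_steps, but integrates at a single fixed lambda while
 * periodically re-evaluating the potential energy U at every window in
 * lambda_windows. Energies are accumulated in fixed point on the device and
 * converted to double on copy-back.
 */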
std::array<std::vector<double>, 3> Context::multiple_steps_U(
const double lambda, // which lambda window we run the integrator over
const int n_steps,
const std::vector<double> &lambda_windows, // which lambda windows we wish to evaluate U at
int store_u_interval,
int store_x_interval) {
unsigned long long *d_u_traj = nullptr;
double *d_box_traj = nullptr;
// the try/catch block below frees d_u_traj and d_box_traj if anything throws, avoiding device-memory leaks
if (store_u_interval <= 0) {
throw std::runtime_error("store_u_interval <= 0");
}
if (store_x_interval <= 0) {
throw std::runtime_error("store_x_interval <= 0");
}
int n_windows = lambda_windows.size();
int u_traj_size = ((n_steps + store_u_interval - 1) / store_u_interval) * n_windows;
int x_traj_size = (n_steps + store_x_interval - 1) / store_x_interval;
int box_traj_size = x_traj_size * 3 * 3;
std::vector<double> h_x_traj(x_traj_size * N_ * 3);
try {
gpuErrchk(cudaMalloc(&d_box_traj, box_traj_size * sizeof(*d_box_traj)));
gpuErrchk(cudaMalloc(&d_u_traj, u_traj_size * sizeof(*d_u_traj)));
gpuErrchk(cudaMemset(d_u_traj, 0, u_traj_size * sizeof(*d_u_traj)));
for (int step = 0; step < n_steps; step++) {
if (step % store_x_interval == 0) {
gpuErrchk(cudaMemcpy(
&h_x_traj[0] + (step / store_x_interval) * N_ * 3,
d_x_t_,
N_ * 3 * sizeof(double),
cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(
&d_box_traj[0] + (step / store_x_interval) * 3 * 3,
d_box_t_,
3 * 3 * sizeof(*d_box_traj),
cudaMemcpyDeviceToDevice));
}
cudaStream_t stream = static_cast<cudaStream_t>(0);
for (int i = 0; i < observables_.size(); i++) {
observables_[i]->observe(step, N_, d_x_t_, d_box_t_, lambda);
}
gpuErrchk(cudaMemsetAsync(d_du_dx_t_, 0, N_ * 3 * sizeof(*d_du_dx_t_), stream));
// first pass generate the forces
for (int i = 0; i < bps_.size(); i++) {
bps_[i]->execute_device(
N_,
d_x_t_,
d_box_t_,
lambda,
d_du_dx_t_, // we only need the forces
nullptr,
nullptr,
nullptr,
stream);
}
// we need to compute aggregate energies on this step
if (step % store_u_interval == 0) {
unsigned long long *u_ptr = d_u_traj + (step / store_u_interval) * n_windows;
for (int w = 0; w < n_windows; w++) {
// reset buffers on each pass.
gpuErrchk(cudaMemsetAsync(d_u_buffer_, 0, N_ * sizeof(*d_u_buffer_), stream));
for (int i = 0; i < bps_.size(); i++) {
bps_[i]->execute_device(
N_, d_x_t_, d_box_t_, lambda_windows[w], nullptr, nullptr, nullptr, d_u_buffer_, stream);
}
cub::DeviceReduce::Sum(d_sum_storage_, d_sum_storage_bytes_, d_u_buffer_, u_ptr + w, N_, stream);
gpuErrchk(cudaPeekAtLastError());
}
}
intg_->step_fwd(d_x_t_, d_v_t_, d_du_dx_t_, d_box_t_, stream);
if (barostat_) {
// May modify coords, du_dx and box size
barostat_->inplace_move(d_x_t_, d_box_t_, lambda, stream);
}
}
cudaDeviceSynchronize();
std::vector<unsigned long long> h_u_traj_ull(u_traj_size);
gpuErrchk(cudaMemcpy(&h_u_traj_ull[0], d_u_traj, u_traj_size * sizeof(*d_u_traj), cudaMemcpyDeviceToHost));
std::vector<double> h_u_traj_double(u_traj_size);
for (int i = 0; i < h_u_traj_ull.size(); i++) {
h_u_traj_double[i] = FIXED_TO_FLOAT<double>(h_u_traj_ull[i]);
}
std::vector<double> h_box_traj(box_traj_size);
gpuErrchk(cudaMemcpy(&h_box_traj[0], d_box_traj, box_traj_size * sizeof(*d_box_traj), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_u_traj));
gpuErrchk(cudaFree(d_box_traj));
return std::array<std::vector<double>, 3>({h_u_traj_double, h_x_traj, h_box_traj});
} catch (...) {
gpuErrchk(cudaFree(d_u_traj));
gpuErrchk(cudaFree(d_box_traj));
throw;
}
}
void Context::step(double lambda) {
this->_step(lambda, nullptr);
cudaDeviceSynchronize();
}
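/*
 * One integration step: notify observables, zero the force buffer, accumulate
 * forces (and optionally per-atom du/dl) from every bound potential, reduce
 * du/dl with CUB if requested, advance the integrator, and finally let the
 * barostat attempt a box move. All work is issued on the default stream.
 */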
void Context::_step(double lambda, unsigned long long *du_dl_out) {
// the observables decide on whether or not to act on given
// data (cheap pointers in any case)
cudaStream_t stream = static_cast<cudaStream_t>(0);
for (int i = 0; i < observables_.size(); i++) {
observables_[i]->observe(step_, N_, d_x_t_, d_box_t_, lambda);
}
gpuErrchk(cudaMemsetAsync(d_du_dx_t_, 0, N_ * 3 * sizeof(*d_du_dx_t_), stream));
if (du_dl_out) {
gpuErrchk(cudaMemsetAsync(d_du_dl_buffer_, 0, N_ * sizeof(*d_du_dl_buffer_), stream));
}
for (int i = 0; i < bps_.size(); i++) {
bps_[i]->execute_device(
N_,
d_x_t_,
d_box_t_,
lambda,
d_du_dx_t_, // we only need the forces
nullptr,
du_dl_out ? d_du_dl_buffer_ : nullptr,
nullptr,
stream);
}
// compute du_dl
if (du_dl_out) {
cub::DeviceReduce::Sum(d_sum_storage_, d_sum_storage_bytes_, d_du_dl_buffer_, du_dl_out, N_, stream);
gpuErrchk(cudaPeekAtLastError());
}
// for(int i=0; i < streams_.size(); i++) {
// gpuErrchk(cudaStreamSynchronize(streams_[i]));
// }
intg_->step_fwd(d_x_t_, d_v_t_, d_du_dx_t_, d_box_t_, stream);
if (barostat_) {
// May modify coords, du_dx and box size
barostat_->inplace_move(d_x_t_, d_box_t_, lambda, stream);
}
step_ += 1;
};
int Context::num_atoms() const { return N_; }
void Context::set_x_t(const double *in_buffer) {
gpuErrchk(cudaMemcpy(d_x_t_, in_buffer, N_ * 3 * sizeof(*in_buffer), cudaMemcpyHostToDevice));
}
void Context::get_du_dx_t_minus_1(unsigned long long *out_buffer) const {
gpuErrchk(cudaMemcpy(out_buffer, d_du_dx_t_, N_ * 3 * sizeof(*out_buffer), cudaMemcpyDeviceToHost));
}
void Context::get_x_t(double *out_buffer) const {
gpuErrchk(cudaMemcpy(out_buffer, d_x_t_, N_ * 3 * sizeof(*out_buffer), cudaMemcpyDeviceToHost));
}
void Context::get_v_t(double *out_buffer) const {
gpuErrchk(cudaMemcpy(out_buffer, d_v_t_, N_ * 3 * sizeof(*out_buffer), cudaMemcpyDeviceToHost));
}
void Context::get_box(double *out_buffer) const {
gpuErrchk(cudaMemcpy(out_buffer, d_box_t_, 3 * 3 * sizeof(*out_buffer), cudaMemcpyDeviceToHost));
}
} // namespace timemachine
/**
* \test Tests the individual building blocks of the eigenvalue routines based on the QR method.
**/
/*
*
* Test file for qr-method
*
*/
// include necessary system headers
#include <iostream>
#ifndef NDEBUG
#define NDEBUG
#endif
#define VIENNACL_WITH_UBLAS
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/prod.hpp"
#include <fstream>
#include <iomanip>
#include "viennacl/linalg/qr-method.hpp"
#include "viennacl/linalg/qr-method-common.hpp"
#include "viennacl/linalg/matrix_operations.hpp"
#include "viennacl/tools/random.hpp"
#define EPS 10.0e-3
namespace ublas = boost::numeric::ublas;
typedef float ScalarType;
void read_matrix_size(std::fstream& f, std::size_t& sz);
void read_matrix_size(std::fstream& f, std::size_t& sz)
{
if(!f.is_open())
{
throw std::invalid_argument("File is not opened");
}
f >> sz;
}
template <typename MatrixLayout>
void read_matrix_body(std::fstream& f, viennacl::matrix<ScalarType, MatrixLayout>& A)
{
if(!f.is_open())
{
throw std::invalid_argument("File is not opened");
}
boost::numeric::ublas::matrix<ScalarType> h_A(A.size1(), A.size2());
for(std::size_t i = 0; i < h_A.size1(); i++) {
for(std::size_t j = 0; j < h_A.size2(); j++) {
ScalarType val = 0.0;
f >> val;
h_A(i, j) = val;
}
}
viennacl::copy(h_A, A);
}
void matrix_print(viennacl::matrix<ScalarType>& A_orig);
void matrix_print(viennacl::matrix<ScalarType>& A_orig)
{
ublas::matrix<ScalarType> A(A_orig.size1(), A_orig.size2());
viennacl::copy(A_orig, A);
for (unsigned int i = 0; i < A.size1(); i++) {
for (unsigned int j = 0; j < A.size2(); j++)
std::cout << std::setprecision(6) << std::fixed << A(i, j) << "\t";
std::cout << std::endl;
}
std::cout << std::endl;
}
void matrix_print(ublas::matrix<ScalarType>& A);
void matrix_print(ublas::matrix<ScalarType>& A)
{
for (unsigned int i = 0; i < A.size1(); i++) {
for (unsigned int j = 0; j < A.size2(); j++)
std::cout << std::setprecision(6) << std::fixed << A(i, j) << "\t";
std::cout << std::endl;
}
std::cout << std::endl;
}
void vector_print(std::vector<ScalarType>& v );
void vector_print(std::vector<ScalarType>& v )
{
for (unsigned int i = 0; i < v.size(); i++)
std::cout << std::setprecision(6) << std::fixed << v[i] << ",\t";
std::cout << "\n";
}
template <typename MatrixType, typename VCLMatrixType>
bool check_for_equality(MatrixType const & ublas_A, VCLMatrixType const & vcl_A)
{
typedef typename MatrixType::value_type value_type;
ublas::matrix<value_type> vcl_A_cpu(vcl_A.size1(), vcl_A.size2());
viennacl::backend::finish(); //workaround for a bug in APP SDK 2.7 on Trinity APUs (with Catalyst 12.8)
viennacl::copy(vcl_A, vcl_A_cpu);
for (std::size_t i=0; i<ublas_A.size1(); ++i)
{
for (std::size_t j=0; j<ublas_A.size2(); ++j)
{
if (std::abs(ublas_A(i,j) - vcl_A_cpu(i,j)) > EPS * std::max(std::abs(ublas_A(i, j)), std::abs(vcl_A_cpu(i, j))))
{
std::cout << "Error at index (" << i << ", " << j << "): " << ublas_A(i,j) << " vs. " << vcl_A_cpu(i,j) << std::endl;
std::cout << std::endl << "TEST failed!" << std::endl;
return false;
}
}
}
std::cout << "PASSED!" << std::endl;
return true;
}
template <typename VectorType>
bool check_for_equality(VectorType const & vec_A, VectorType const & vec_B)
{
for (std::size_t i=0; i<vec_A.size(); ++i)
{
if (std::abs(vec_A[i] - vec_B[i]) > EPS)
{
std::cout << "Error at index (" << i << "): " << vec_A[i] << " vs " <<vec_B[i] << std::endl;
std::cout << std::endl << "TEST failed!" << std::endl;
return false;
}
}
std::cout << "PASSED!" << std::endl;
return true;
}
void fill_vector(std::vector<ScalarType>& v);
void fill_vector(std::vector<ScalarType>& v)
{
viennacl::tools::uniform_random_numbers<ScalarType> randomNumber;
for (unsigned int i = 0; i < v.size(); ++i)
v[i] = randomNumber();
}
/*
*
* ------------Functions to be tested---------------
*
*/
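// Reference implementation: applies the Householder reflector (I - 2 v v^T)
// from the left to A, restricted to rows start+1 .. A.size1()-1, with the
// reflector vector v stored in D.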
template <typename NumericT>
void house_update_A_left(ublas::matrix<NumericT> & A,
std::vector<NumericT> D,
unsigned int start)
{
NumericT ss = 0;
std::size_t row_start = start + 1;
for(std::size_t i = 0; i < A.size2(); i++)
{
ss = 0;
for(std::size_t j = row_start; j < A.size1(); j++)
ss = ss +(D[j] * A(j, i));
for(std::size_t j = row_start; j < A.size1(); j++)
A(j, i) = A(j, i) - (2 * D[j] * ss);
}
}
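// Reference implementation: applies the Householder reflector from the right,
// i.e. A := A * (I - 2 v v^T), with the reflector vector v stored in D.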
template <typename NumericT>
void house_update_A_right(ublas::matrix<NumericT> & A,
std::vector<NumericT> D)
{
NumericT ss = 0;
for(std::size_t i = 0; i < A.size1(); i++)
{
ss = 0;
for(std::size_t j = 0; j < A.size2(); j++)
ss = ss + (D[j] * A(i, j));
NumericT sum_Av = ss;
for(std::size_t j = 0; j < A.size2(); j++)
A(i, j) = A(i, j) - (2 * D[j] * sum_Av);
}
}
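// Reference implementation: accumulates the reflector into Q by forming
// P = I - beta * v v^T (beta = 2, v stored in D) and computing Q := Q * P.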
template <typename NumericT>
void house_update_QL(ublas::matrix<NumericT> & Q,
std::vector<NumericT> D,
std::size_t A_size1)
{
NumericT beta = 2;
ublas::matrix<NumericT> ubl_P(A_size1, A_size1);
ublas::matrix<ScalarType> I = ublas::identity_matrix<ScalarType>(Q.size1());
ublas::matrix<NumericT> Q_temp(Q.size1(), Q.size2());
for(std::size_t i = 0; i < Q.size1(); i++)
{
for(std::size_t j = 0; j < Q.size2(); j++)
{
Q_temp(i, j) = Q(i, j);
}
}
ubl_P = ublas::identity_matrix<NumericT>(A_size1);
//scaled_rank_1 update
for(std::size_t i = 0; i < A_size1; i++)
{
for(std::size_t j = 0; j < A_size1; j++)
{
ubl_P(i, j) = I(i, j) - beta * (D[i] * D[j]);
}
}
Q = ublas::prod(Q_temp, ubl_P);
}
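// Reference implementation: applies a sequence of plane (Givens) rotations,
// with coefficients taken from tmp1/tmp2, to columns l .. m of Q.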
template <typename NumericT>
void givens_next(ublas::matrix<NumericT> & Q,
std::vector<NumericT> & tmp1,
std::vector<NumericT> & tmp2,
int l,
int m)
{
for(int i2 = m - 1; i2 >= l; i2--)
{
std::size_t i = static_cast<std::size_t>(i2);
for(std::size_t k = 0; k < Q.size1(); k++)
{
NumericT h = Q(k, i+1);
Q(k, i+1) = tmp2[i] * Q(k, i) + tmp1[i]*h;
Q(k, i) = tmp1[i] * Q(k, i) - tmp2[i]*h;
}
}
}
template <typename NumericT>
void copy_vec(ublas::matrix<NumericT>& A,
std::vector<NumericT> & V,
std::size_t row_start,
std::size_t col_start,
bool copy_col)
{
if(copy_col)
{
for(std::size_t i = row_start; i < A.size1(); i++)
{
V[i - row_start] = A(i, col_start);
}
}
else
{
for(std::size_t i = col_start; i < A.size1(); i++)
{
V[i - col_start] = A(row_start, i);
}
}
}
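// Reference implementation: extracts the main diagonal of A into D and the
// superdiagonal into S (shifted by one, so S[0] is left untouched).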
template <typename NumericT>
void bidiag_pack(ublas::matrix<NumericT> & A,
std::vector<NumericT> & D,
std::vector<NumericT> & S)
{
std::size_t size = std::min(D.size(), S.size());
std::size_t i = 0;
for(i = 0; i < size - 1; i++)
{
D[i] = A(i, i);
S[i + 1] = A(i, i + 1);
}
D[size - 1] = A(size - 1, size - 1);
}
template <typename MatrixLayout>
void test_qr_method_sym(const std::string& fn)
{
std::cout << "Reading..." << std::endl;
std::size_t sz;
// read file
std::fstream f(fn.c_str(), std::fstream::in);
//read size of input matrix
read_matrix_size(f, sz);
viennacl::matrix<ScalarType, MatrixLayout> vcl_A(sz, sz), vcl_Q(sz, sz);
viennacl::vector<ScalarType> vcl_D(sz), vcl_E(sz), vcl_F(sz), vcl_G(sz), vcl_H(sz);
std::vector<ScalarType> std_D(sz), std_E(sz), std_F(sz), std_G(sz), std_H(sz);
ublas::matrix<ScalarType> ubl_A(sz, sz), ubl_Q(sz, sz);
std::cout << "Testing matrix of size " << sz << "-by-" << sz << std::endl << std::endl;
read_matrix_body(f, vcl_A);
f.close();
viennacl::copy(vcl_A, ubl_A);
fill_vector(std_D);
copy(std_D, vcl_D);
//--------------------------------------------------------
std::cout << std::endl << "Testing house_update_left..." << std::endl;
viennacl::linalg::house_update_A_left(vcl_A, vcl_D, 0);
house_update_A_left(ubl_A, std_D, 0);
if(!check_for_equality(ubl_A, vcl_A))
exit(EXIT_FAILURE);
//--------------------------------------------------------
std::cout << std::endl << "Testing house_update_right..." << std::endl;
copy(ubl_A, vcl_A);
copy(std_D, vcl_D);
viennacl::linalg::house_update_A_right(vcl_A, vcl_D);
house_update_A_right(ubl_A, std_D);
if(!check_for_equality(ubl_A, vcl_A))
exit(EXIT_FAILURE);
//--------------------------------------------------------
std::cout << std::endl << "Testing house_update_QL..." << std::endl;
ubl_Q = ublas::identity_matrix<ScalarType>(ubl_Q.size1());
copy(ubl_Q, vcl_Q);
copy(ubl_A, vcl_A);
copy(std_D, vcl_D);
viennacl::linalg::house_update_QL(vcl_Q, vcl_D, vcl_A.size1());
house_update_QL(ubl_Q, std_D, ubl_A.size1());
if(!check_for_equality(ubl_Q, vcl_Q))
exit(EXIT_FAILURE);
//--------------------------------------------------------
std::cout << std::endl << "Testing givens next..." << std::endl;
fill_vector(std_E);
fill_vector(std_F);
copy(std_E, vcl_E);
copy(std_F, vcl_F);
copy(ubl_Q, vcl_Q);
copy(ubl_A, vcl_A);
viennacl::linalg::givens_next(vcl_Q, vcl_E, vcl_F, 2, 5);
givens_next(ubl_Q, std_E, std_F, 2, 5);
if(!check_for_equality(ubl_Q, vcl_Q))
exit(EXIT_FAILURE);
//--------------------------------------------------------
std::cout << std::endl << "Testing copy vec..." << std::endl;
viennacl::linalg::copy_vec(vcl_A, vcl_D, 0, 2, 1);
copy_vec(ubl_A, std_D, 0, 2, 1);
copy(vcl_D, std_E); //check for equality only for same vector types
if(!check_for_equality(std_D, std_E))
exit(EXIT_FAILURE);
//--------------------------------------------------------
std::cout << std::endl << "Testing bidiag pack..." << std::endl;
viennacl::linalg::bidiag_pack(vcl_A, vcl_D, vcl_F);
vcl_F[0] = 0; // first element in superdiagonal is irrelevant.
bidiag_pack(ubl_A, std_G, std_H);
std_H[0] = 0;
copy(std_G, vcl_G);
copy(std_H, vcl_H);
if(!check_for_equality(vcl_D, vcl_G))
exit(EXIT_FAILURE);
if(!check_for_equality(vcl_F, vcl_H))
exit(EXIT_FAILURE);
//--------------------------------------------------------
}
int main()
{
std::cout << std::endl << "Test qr_method_sym for row_major matrix" << std::endl;
test_qr_method_sym<viennacl::row_major>("../examples/testdata/eigen/symm5.example");
std::cout << std::endl << "Test qr_method_sym for column_major matrix" << std::endl;
test_qr_method_sym<viennacl::column_major>("../examples/testdata/eigen/symm5.example");
std::cout << std::endl <<"--------TEST SUCCESSFULLY COMPLETED----------" << std::endl;
}
/**
* \file
* cub::DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of data samples residing within device-accessible memory.
*/
#pragma once
#include <stdio.h>
#include <iterator>
#include <limits>
#include "dispatch/dispatch_histogram.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \brief DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of data samples residing within device-accessible memory.
* \ingroup SingleModule
*
* \par Overview
* A <a href="http://en.wikipedia.org/wiki/Histogram"><em>histogram</em></a>
* counts the number of observations that fall into each of the disjoint categories (known as <em>bins</em>).
*
* \par Usage Considerations
* \cdp_class{DeviceHistogram}
*
*/
struct DeviceHistogram
{
/******************************************************************//**
* \name Evenly-segmented bin ranges
*********************************************************************/
//@{
/**
* \brief Computes an intensity histogram from a sequence of data samples using equal-width bins.
*
* \par
* - The number of histogram bins is (\p num_levels - 1)
* - All bins comprise the same width of sample values: (\p upper_level - \p lower_level) / (\p num_levels - 1)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of a six-bin histogram
* from a sequence of float samples
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples and
* // output histogram
* int num_samples; // e.g., 10
* float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, 0.3, 2.9, 2.0, 6.1, 999.5]
* int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -]
* int num_levels; // e.g., 7 (seven level boundaries for six bins)
* float lower_level; // e.g., 0.0 (lower sample value boundary of lowest bin)
* float upper_level; // e.g., 12.0 (upper sample value boundary of upper bin)
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples);
*
* // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0];
*
* \endcode
*
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t HistogramEven(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples.
CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1.
int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1.
LevelT lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin.
LevelT upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin.
OffsetT num_samples, ///< [in] The number of input samples (i.e., the length of \p d_samples)
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
/// The sample value type of the input iterator
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
CounterT* d_histogram1[1] = {d_histogram};
int num_levels1[1] = {num_levels};
LevelT lower_level1[1] = {lower_level};
LevelT upper_level1[1] = {upper_level};
return MultiHistogramEven<1, 1>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram1,
num_levels1,
lower_level1,
upper_level1,
num_samples,
1,
sizeof(SampleT) * num_samples,
stream,
debug_synchronous);
}
/**
* \brief Computes an intensity histogram from a sequence of data samples using equal-width bins.
*
* \par
* - A two-dimensional <em>region of interest</em> within \p d_samples can be specified
* using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters.
* - The row stride must be a whole multiple of the sample data type
* size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>.
* - The number of histogram bins is (\p num_levels - 1)
* - All bins comprise the same width of sample values: (\p upper_level - \p lower_level) / (\p num_levels - 1)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of a six-bin histogram
* from a 2x5 region of interest within a flattened 2x7 array of float samples.
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples and
* // output histogram
* int num_row_samples; // e.g., 5
* int num_rows; // e.g., 2;
* size_t row_stride_bytes; // e.g., 7 * sizeof(float)
* float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, -, -,
* // 0.3, 2.9, 2.0, 6.1, 999.5, -, -]
* int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -]
* int num_levels; // e.g., 7 (seven level boundaries for six bins)
* float lower_level; // e.g., 0.0 (lower sample value boundary of lowest bin)
* float upper_level; // e.g., 12.0 (upper sample value boundary of upper bin)
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level,
* num_row_samples, num_rows, row_stride_bytes);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level,
* num_row_samples, num_rows, row_stride_bytes);
*
* // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0];
*
* \endcode
*
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t HistogramEven(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples.
CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1.
int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1.
LevelT lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin.
LevelT upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin.
OffsetT num_row_samples, ///< [in] The number of data samples per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
CounterT* d_histogram1[1] = {d_histogram};
int num_levels1[1] = {num_levels};
LevelT lower_level1[1] = {lower_level};
LevelT upper_level1[1] = {upper_level};
return MultiHistogramEven<1, 1>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram1,
num_levels1,
lower_level1,
upper_level1,
num_row_samples,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
/**
* \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using equal-width bins.
*
* \par
* - The input is a sequence of <em>pixel</em> structures, where each pixel comprises
* a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel).
* - Of the \p NUM_CHANNELS specified, the function will only compute histograms
* for the first \p NUM_ACTIVE_CHANNELS (e.g., only <em>RGB</em> histograms from <em>RGBA</em>
* pixel samples).
* - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
* - For channel<sub><em>i</em></sub>, the range of values for all histogram bins
* have the same width: (<tt>upper_level[i]</tt> - <tt>lower_level[i]</tt>) / (<tt> num_levels[i]</tt> - 1)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of three 256-bin <em>RGB</em> histograms
* from a quad-channel sequence of <em>RGBA</em> pixels (8 bits per channel per pixel)
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples
* // and output histograms
* int num_pixels; // e.g., 5
* unsigned char* d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2),
* // (0, 6, 7, 5), (3, 0, 2, 6)]
* int* d_histogram[3]; // e.g., three device pointers to three device buffers,
* // each allocated with 256 integer counters
* int num_levels[3]; // e.g., {257, 257, 257};
* unsigned int lower_level[3]; // e.g., {0, 0, 0};
* unsigned int upper_level[3]; // e.g., {256, 256, 256};
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level, num_pixels);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level, num_pixels);
*
* // d_histogram <-- [ [1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 0, ..., 0],
* // [0, 3, 0, 0, 0, 0, 2, 0, 0, 0, 0, ..., 0],
* // [0, 0, 2, 0, 0, 0, 1, 2, 0, 0, 0, ..., 0] ]
*
* \endcode
*
* \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t MultiHistogramEven(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples).
CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_pixels, ///< [in] The number of multi-channel pixels (i.e., the length of \p d_samples / NUM_CHANNELS)
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
/// The sample value type of the input iterator
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
return MultiHistogramEven<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
lower_level,
upper_level,
num_pixels,
1,
sizeof(SampleT) * NUM_CHANNELS * num_pixels,
stream,
debug_synchronous);
}
/**
* \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using equal-width bins.
*
* \par
* - The input is a sequence of <em>pixel</em> structures, where each pixel comprises
* a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel).
* - Of the \p NUM_CHANNELS specified, the function will only compute histograms
* for the first \p NUM_ACTIVE_CHANNELS (e.g., only <em>RGB</em> histograms from <em>RGBA</em>
* pixel samples).
* - A two-dimensional <em>region of interest</em> within \p d_samples can be specified
* using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters.
* - The row stride must be a whole multiple of the sample data type
* size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>.
* - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
* - For channel<sub><em>i</em></sub>, all histogram bins have the same
* width: (<tt>upper_level[i]</tt> - <tt>lower_level[i]</tt>) / (<tt>num_levels[i]</tt> - 1)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of three 256-bin <em>RGB</em> histograms from a 2x3 region of
* interest within a flattened 2x4 array of quad-channel <em>RGBA</em> pixels (8 bits per channel per pixel).
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples
* // and output histograms
* int num_row_pixels; // e.g., 3
* int num_rows; // e.g., 2
* size_t row_stride_bytes; // e.g., 4 * sizeof(unsigned char) * NUM_CHANNELS
* unsigned char* d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), (-, -, -, -),
* // (0, 6, 7, 5), (3, 0, 2, 6), (1, 1, 1, 1), (-, -, -, -)]
* int* d_histogram[3]; // e.g., three device pointers to three device buffers,
* // each allocated with 256 integer counters
* int num_levels[3]; // e.g., {257, 257, 257};
* unsigned int lower_level[3]; // e.g., {0, 0, 0};
* unsigned int upper_level[3]; // e.g., {256, 256, 256};
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level,
* num_row_pixels, num_rows, row_stride_bytes);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, lower_level, upper_level,
* num_row_pixels, num_rows, row_stride_bytes);
*
* // d_histogram <-- [ [1, 1, 1, 2, 0, 0, 0, 1, 0, 0, 0, ..., 0],
* // [3, 1, 0, 0, 0, 0, 2, 0, 0, 0, 0, ..., 0],
* // [0, 1, 2, 0, 0, 0, 1, 2, 0, 0, 0, ..., 0] ]
*
* \endcode
*
* \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t MultiHistogramEven(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples).
CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
/// The sample value type of the input iterator
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
Int2Type<sizeof(SampleT) == 1> is_byte_sample;
if ((sizeof(OffsetT) > sizeof(int)) &&
((unsigned long long) (num_rows * row_stride_bytes) < (unsigned long long) std::numeric_limits<int>::max()))
{
// Down-convert OffsetT data type
return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, int>::DispatchEven(
d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, lower_level, upper_level,
(int) num_row_pixels, (int) num_rows, (int) (row_stride_bytes / sizeof(SampleT)),
stream, debug_synchronous, is_byte_sample);
}
return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, OffsetT>::DispatchEven(
d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, lower_level, upper_level,
num_row_pixels, num_rows, (OffsetT) (row_stride_bytes / sizeof(SampleT)),
stream, debug_synchronous, is_byte_sample);
}
//@} end member group
/******************************************************************//**
* \name Custom bin ranges
*********************************************************************/
//@{
/**
* \brief Computes an intensity histogram from a sequence of data samples using the specified bin boundary levels.
*
* \par
* - The number of histogram bins is (\p num_levels - 1)
* - The value range for bin<sub><em>i</em></sub> is [<tt>level[i]</tt>, <tt>level[i+1]</tt>)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of a six-bin histogram
* from a sequence of float samples.
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples and
* // output histogram
* int num_samples; // e.g., 10
* float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, 0.3, 2.9, 2.0, 6.1, 999.5]
* int* d_histogram; // e.g., [ -, -, -, -, -, -]
* int num_levels; // e.g., 7 (seven level boundaries for six bins)
* float* d_levels; // e.g., [0.0, 2.0, 4.0, 6.0, 8.0, 12.0, 16.0]
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels, num_samples);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels, num_samples);
*
* // d_histogram <-- [1, 5, 0, 3, 0, 0];
*
* \endcode
*
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t HistogramRange(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples.
CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1.
int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1.
LevelT* d_levels, ///< [in] The pointer to the array of boundaries (levels). Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_samples, ///< [in] The number of data samples per row in the region of interest
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
/// The sample value type of the input iterator
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
CounterT* d_histogram1[1] = {d_histogram};
int num_levels1[1] = {num_levels};
LevelT* d_levels1[1] = {d_levels};
return MultiHistogramRange<1, 1>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram1,
num_levels1,
d_levels1,
num_samples,
1,
sizeof(SampleT) * num_samples,
stream,
debug_synchronous);
}
/**
* \brief Computes an intensity histogram from a sequence of data samples using the specified bin boundary levels.
*
* \par
* - A two-dimensional <em>region of interest</em> within \p d_samples can be specified
* using the \p num_row_samples, \p num_rows, and \p row_stride_bytes parameters.
* - The row stride must be a whole multiple of the sample data type
* size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>.
* - The number of histogram bins is (\p num_levels - 1)
* - The value range for bin<sub><em>i</em></sub> is [<tt>level[i]</tt>, <tt>level[i+1]</tt>)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of a six-bin histogram
* from a 2x5 region of interest within a flattened 2x7 array of float samples.
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples and
* // output histogram
* int num_row_samples; // e.g., 5
* int num_rows; // e.g., 2;
* int row_stride_bytes; // e.g., 7 * sizeof(float)
* float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, -, -,
* // 0.3, 2.9, 2.0, 6.1, 999.5, -, -]
* int* d_histogram; // e.g., [ , , , , , ]
* int num_levels; // e.g., 7 (seven level boundaries for six bins)
* float *d_levels; // e.g., [0.0, 2.0, 4.0, 6.0, 8.0, 12.0, 16.0]
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels,
* num_row_samples, num_rows, row_stride_bytes);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels,
* num_row_samples, num_rows, row_stride_bytes);
*
* // d_histogram <-- [1, 5, 0, 3, 0, 0];
*
* \endcode
*
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t HistogramRange(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples.
CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1.
int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1.
LevelT* d_levels, ///< [in] The pointer to the array of boundaries (levels). Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_samples, ///< [in] The number of data samples per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
CounterT* d_histogram1[1] = {d_histogram};
int num_levels1[1] = {num_levels};
LevelT* d_levels1[1] = {d_levels};
return MultiHistogramRange<1, 1>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram1,
num_levels1,
d_levels1,
num_row_samples,
num_rows,
row_stride_bytes,
stream,
debug_synchronous);
}
/**
* \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using the specified bin boundary levels.
*
* \par
* - The input is a sequence of <em>pixel</em> structures, where each pixel comprises
* a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel).
* - Of the \p NUM_CHANNELS specified, the function will only compute histograms
* for the first \p NUM_ACTIVE_CHANNELS (e.g., <em>RGB</em> histograms from <em>RGBA</em>
* pixel samples).
* - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
* - The value range for bin<sub><em>j</em></sub> in channel<sub><em>i</em></sub> is
* [<tt>d_levels[i][j]</tt>, <tt>d_levels[i][j+1]</tt>)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of three 4-bin <em>RGB</em> histograms
* from a quad-channel sequence of <em>RGBA</em> pixels (8 bits per channel per pixel)
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples
* // and output histograms
* int num_pixels; // e.g., 5
* unsigned char *d_samples; // e.g., [(2, 6, 7, 5),(3, 0, 2, 1),(7, 0, 6, 2),
* // (0, 6, 7, 5),(3, 0, 2, 6)]
* unsigned int *d_histogram[3]; // e.g., [[ -, -, -, -],[ -, -, -, -],[ -, -, -, -]];
* int num_levels[3]; // e.g., {5, 5, 5};
* unsigned int *d_levels[3]; // e.g., [ [0, 2, 4, 6, 8],
* // [0, 2, 4, 6, 8],
* // [0, 2, 4, 6, 8] ];
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels, num_pixels);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels, num_pixels);
*
* // d_histogram <-- [ [1, 3, 0, 1],
* // [3, 0, 0, 2],
* // [0, 2, 0, 3] ]
*
* \endcode
*
* \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t MultiHistogramRange(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples).
CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT* d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_pixels, ///< [in] The number of multi-channel pixels (i.e., the length of \p d_samples / NUM_CHANNELS)
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
/// The sample value type of the input iterator
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
return MultiHistogramRange<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
d_temp_storage,
temp_storage_bytes,
d_samples,
d_histogram,
num_levels,
d_levels,
num_pixels,
1,
sizeof(SampleT) * NUM_CHANNELS * num_pixels,
stream,
debug_synchronous);
}
/**
* \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using the specified bin boundary levels.
*
* \par
* - The input is a sequence of <em>pixel</em> structures, where each pixel comprises
* a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel).
* - Of the \p NUM_CHANNELS specified, the function will only compute histograms
* for the first \p NUM_ACTIVE_CHANNELS (e.g., <em>RGB</em> histograms from <em>RGBA</em>
* pixel samples).
* - A two-dimensional <em>region of interest</em> within \p d_samples can be specified
* using the \p num_row_pixels, \p num_rows, and \p row_stride_bytes parameters.
* - The row stride must be a whole multiple of the sample data type
* size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>.
* - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
* - The value range for bin<sub><em>j</em></sub> in channel<sub><em>i</em></sub> is
* [<tt>d_levels[i][j]</tt>, <tt>d_levels[i][j+1]</tt>)
* - \devicestorage
*
* \par Snippet
* The code snippet below illustrates the computation of three 4-bin <em>RGB</em> histograms from a 2x3 region of
* interest within a flattened 2x4 array of quad-channel <em>RGBA</em> pixels (8 bits per channel per pixel).
*
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input samples
* // and output histograms
* int num_row_pixels; // e.g., 3
* int num_rows; // e.g., 2
* size_t row_stride_bytes; // e.g., 4 * sizeof(unsigned char) * NUM_CHANNELS
* unsigned char* d_samples; // e.g., [(2, 6, 7, 5),(3, 0, 2, 1),(1, 1, 1, 1),(-, -, -, -),
* // (7, 0, 6, 2),(0, 6, 7, 5),(3, 0, 2, 6),(-, -, -, -)]
* int* d_histogram[3]; // e.g., [[ -, -, -, -],[ -, -, -, -],[ -, -, -, -]];
* int num_levels[3]; // e.g., {5, 5, 5};
* unsigned int* d_levels[3]; // e.g., [ [0, 2, 4, 6, 8],
* // [0, 2, 4, 6, 8],
* // [0, 2, 4, 6, 8] ];
* ...
*
* // Determine temporary device storage requirements
* void* d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels, num_row_pixels, num_rows, row_stride_bytes);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Compute histograms
* cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes,
* d_samples, d_histogram, num_levels, d_levels, num_row_pixels, num_rows, row_stride_bytes);
*
* // d_histogram <-- [ [2, 3, 0, 1],
* // [4, 0, 0, 2],
* // [1, 2, 0, 3] ]
*
* \endcode
*
* \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator
* \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters
* \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels)
* \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1
*/
template <
int NUM_CHANNELS,
int NUM_ACTIVE_CHANNELS,
typename SampleIteratorT,
typename CounterT,
typename LevelT,
typename OffsetT>
CUB_RUNTIME_FUNCTION
static cudaError_t MultiHistogramRange(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples).
CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1.
int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1.
LevelT* d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
OffsetT num_rows, ///< [in] The number of rows in the region of interest
size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
/// The sample value type of the input iterator
typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
Int2Type<sizeof(SampleT) == 1> is_byte_sample;
if ((sizeof(OffsetT) > sizeof(int)) &&
((unsigned long long) (num_rows * row_stride_bytes) < (unsigned long long) std::numeric_limits<int>::max()))
{
// Down-convert OffsetT data type
return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, int>::DispatchRange(
d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, d_levels,
(int) num_row_pixels, (int) num_rows, (int) (row_stride_bytes / sizeof(SampleT)),
stream, debug_synchronous, is_byte_sample);
}
return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, OffsetT>::DispatchRange(
d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, d_levels,
num_row_pixels, num_rows, (OffsetT) (row_stride_bytes / sizeof(SampleT)),
stream, debug_synchronous, is_byte_sample);
}
//@} end member group
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
// Headers needed by the host helpers and kernels below.
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <random>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cudnn.h>
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
long fsize(int fd) {
  struct stat st;
  if (fstat(fd, &st) != 0) return -1;
  return st.st_size;
}
int printll(char *s) {
  // Stop at the usual field delimiters, and also at the end of the string.
  while (*s != '\0' && *s != '\n' && *s != ',' && *s != '\t') {
    putchar(*s++);
  }
  return 0;
}
long hash(char *str0, int len) {
unsigned char *str = (unsigned char *)str0;
unsigned long hash = 5381;
int c;
while ((c = *str++) && len--)
hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
return hash;
}
long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; //
void *mallocBase = calloc(HEAP_SIZE_CPU, 1);
void *mallocAddr = mallocBase;
void *waterMark = mallocBase;
void *myMalloc(size_t bytes) {
void *res = mallocAddr;
mallocAddr = (void *)((char *)mallocAddr + bytes);
if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU)
fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n");
return res;
}
long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) {
long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec);
result->tv_sec = diff / 1000000;
result->tv_usec = diff % 1000000;
return (diff < 0);
}
#define CUDA_CALL(f) { \
cudaError_t err = (f); \
if (err != cudaSuccess) { \
fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \
cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define CUBLAS_CALL(f) { \
cublasStatus_t stat = (f); \
if (stat != CUBLAS_STATUS_SUCCESS) { \
fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \
stat, __FILE__, __LINE__); \
exit(stat); \
} \
}
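// Hedged usage note (not part of the original source): both macros wrap a
// runtime call and abort on failure, e.g.
//   CUDA_CALL(cudaSetDevice(0));
//   CUBLAS_CALL(cublasCreate(&handle));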
void *gpuMallocBase;
void *gpuMallocAddr;
// Log2 of the GPU allocation alignment: requests are rounded up to 2^N = 16 bytes.
constexpr int N = 4;
void *myGpuMalloc(size_t bytes) {
bytes = ((bytes + (1 << N) - 1) >> N) << N;
void *res = gpuMallocAddr;
gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes);
if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE)
fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n");
return res;
}
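// Worked example of the rounding in myGpuMalloc (added annotation): with
// N == 4, every request is bumped to the next multiple of 16 bytes:
//   1  -> ((1  + 15) >> 4) << 4 == 16
//   17 -> ((17 + 15) >> 4) << 4 == 32
//   32 -> ((32 + 15) >> 4) << 4 == 32
// Minimal sketch of the same computation as a stand-alone helper (hypothetical
// name, not used elsewhere in this file):
static inline size_t roundUpToGpuAlignment(size_t bytes) {
  return ((bytes + (1 << N) - 1) >> N) << N;
}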
template <typename T>
__global__ void arrayUpdate(T *data, int index, T value) {
data[index] = value;
}
__global__ void arrayFill(float* data, float value, int size) {
int stride = gridDim.x * blockDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < size; i += stride) data[i] = value;
}
__global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < size; i += stride) {
out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? max_val : in[i]);
}
}
__global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < size; i += stride) {
if (inplace) {
if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0;
} else {
if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i];
}
}
}
__global__ void nllLoss(float *x, int x_stride, float *y, int* target) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
y[tid] = -1 * x[offset];
}
__global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
xGrad[offset] += -1 * yGrad[tid];
}
// only for 4D tensor in and 3D tensor out
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement,
float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
}
//following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49
static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3,
int outputStride0, int outputStride1, int outputStride2, int outputStride3,
const int dimSize, const int concatDim, int linearIndex) {
int offset = 0;
int curDimSize = 3 == concatDim ? dimSize : outputSize3;
int nextDimIndex = linearIndex / curDimSize;
int curDimIndex = linearIndex - curDimSize * nextDimIndex;
int curDimOffset = curDimIndex * outputStride3;
offset += curDimOffset;
linearIndex = nextDimIndex;
curDimSize = 2 == concatDim ? dimSize : outputSize2;
nextDimIndex = linearIndex / curDimSize;
curDimIndex = linearIndex - curDimSize * nextDimIndex;
curDimOffset = curDimIndex * outputStride2;
offset += curDimOffset;
linearIndex = nextDimIndex;
curDimSize = 1 == concatDim ? dimSize : outputSize1;
nextDimIndex = linearIndex / curDimSize;
curDimIndex = linearIndex - curDimSize * nextDimIndex;
curDimOffset = curDimIndex * outputStride1;
offset += curDimOffset;
linearIndex = nextDimIndex;
return offset + linearIndex * outputStride0;
// for (int i = 3; i >= 1; i--) {
// int curDimSize = i == concatDim ? dimSize : outputSize[i];
// int nextDimIndex = linearIndex / curDimSize;
// int curDimIndex = linearIndex - curDimSize * nextDimIndex;
// int curDimOffset = curDimIndex * outputStride[i];
// offset += curDimOffset;
// linearIndex = nextDimIndex;
// }
// return offset + linearIndex * outputStride[0];
}
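// Hedged host-side reference for compute() above (our addition): the same
// index decomposition written as the loop sketched in the commented-out code,
// handy for unit-testing the unrolled device version on the CPU.
static inline int compute_host_reference(const int outSize[4], const int outStride[4],
                                         int dimSize, int concatDim, int linearIndex) {
  int offset = 0;
  for (int i = 3; i >= 1; i--) {
    int curDimSize = (i == concatDim) ? dimSize : outSize[i];
    int curDimIndex = linearIndex % curDimSize;
    offset += curDimIndex * outStride[i];
    linearIndex /= curDimSize;
  }
  return offset + linearIndex * outStride[0];
}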
// TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1
__global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1,
float* in2, int dimSize2, int nElement2,
float* out, int concatDim,
int outSize0, int outSize1, int outSize2, int outSize3,
int outStride0, int outStride1, int outStride2, int outStride3) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nElement = blockIdx.y == 0 ? nElement1 : nElement2;
if (tid >= nElement) return;
float* data = blockIdx.y == 0 ? in1 : in2;
int offset = blockIdx.y == 0 ? 0 : dimSize1;
int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2;
int dataOffset = offset * outStride1;
int stride = gridDim.x * blockDim.x;
while (tid < nElement) {
int elementOffset = compute(outSize0, outSize1, outSize2, outSize3,
outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid);
out[dataOffset + elementOffset] = data[tid];
tid += stride;
}
}
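// Hedged launch sketch for concat2D_1D_greg (our own wrapper; the block size
// and function name are illustrative): blockIdx.y selects the input tensor
// (0 -> in1, 1 -> in2), so the kernel needs a 2-D grid with gridDim.y == 2.
static void launchConcat2D_1D(float* in1, int dimSize1, int nElement1,
                              float* in2, int dimSize2, int nElement2,
                              float* out,
                              int outSize0, int outSize1, int outSize2, int outSize3,
                              int outStride0, int outStride1, int outStride2, int outStride3) {
  int maxElems = nElement1 > nElement2 ? nElement1 : nElement2;
  dim3 block(512);
  dim3 grid((maxElems + block.x - 1) / block.x, 2);  // grid.y == number of inputs
  concat2D_1D_greg<<<grid, block>>>(in1, dimSize1, nElement1,
                                    in2, dimSize2, nElement2,
                                    out, /*concatDim=*/1,
                                    outSize0, outSize1, outSize2, outSize3,
                                    outStride0, outStride1, outStride2, outStride3);
}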
// TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1
__global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1,
float* in2, int dimSize2, int nElement2,
float* out, int concatDim,
int outSize0, int outSize1, int outSize2, int outSize3,
int outStride0, int outStride1, int outStride2, int outStride3) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nElement = blockIdx.y == 0 ? nElement1 : nElement2;
if (tid >= nElement) return;
float* data = blockIdx.y == 0 ? in1 : in2;
int offset = blockIdx.y == 0 ? 0 : dimSize1;
int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2;
int dataOffset = offset * outStride1;
int stride = gridDim.x * blockDim.x;
while (tid < nElement) {
int elementOffset = compute(outSize0, outSize1, outSize2, outSize3,
outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid);
data[tid] += out[dataOffset + elementOffset];
tid += stride;
}
}
__global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < outScalarCount; tid += stride) {
int linearIndex = tid;
int outIndex0 = linearIndex / outStride0;
linearIndex = linearIndex - outIndex0 * outStride0;
int outIndex1 = linearIndex / outStride1;
int outIndex2 = linearIndex - outIndex1 * outStride1;
int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1;
out[tid] = in[inIndex];
}
}
__global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < inScalarCount; tid += stride) {
int linearIndex = tid;
int inIndex0 = linearIndex / inStride0;
linearIndex = linearIndex - inIndex0 * inStride0;
int inIndex1 = linearIndex / inStride1;
if (inIndex0 + inIndex1 >= inDim0) return;
out[tid + inIndex1 * inStride0] = in[tid];
}
}
__global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride) {
if (d[tid] > clip) d[tid] = clip;
if (d[tid] < -clip) d[tid] = -clip;
m[tid] += d[tid] * d[tid];
x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001);
d[tid] = 0;
}
}
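// Update rule implemented above (descriptive comment added for clarity):
// per element, the gradient is first clipped to [-clip, clip], then
//   m += d * d
//   x -= lr * d / sqrt(m + 1e-8)
// and the gradient buffer d is zeroed for the next step.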
__global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride) {
float temp = d[tid];
if (temp > gradClip) temp = gradClip;
if (temp < -gradClip) temp = -gradClip;
m[tid] *= momentum;
m[tid] += temp;
if (nesterov) { temp += momentum * m[tid]; }
else { temp = m[tid]; }
x[tid] -= learning_rate * temp;
d[tid] = 0;
}
}
__global__ void addScalar(float* in, float* out, float add, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in[tid] + add;
}
__global__ void minusScalar(float* in, float* out, float minus, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in[tid] - minus;
}
__global__ void multScalar(float* in, float* out, float mult, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in[tid] * mult;
}
__global__ void divScalar(float* in, float* out, float div, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in[tid] / div;
}
__global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] * in2[tid];
}
__global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] += in1[tid] * in2[tid];
}
__global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] + in2[tid];
}
__global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] - in2[tid];
}
__global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] / in2[tid];
}
__global__ void elementwise_1D_1D_exp(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = exp(in[tid]);
}
__global__ void elementwise_1D_1D_log(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = log(in[tid]);
}
__global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = sqrt(in[tid]);
}
__global__ void elementwise_1D_1D_square(float* in, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in[tid] * in[tid];
}
__global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] * out_x[tid];
}
__global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] / in_x[tid];
}
__global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2;
}
__global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid];
}
__global__ void clipAt(float* in, float bound, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) {
if (in[tid] > bound) in[tid] = bound;
if (in[tid] < -bound) in[tid] = -bound;
}
}
__global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < scalarCount; tid += stride) {
int linearIndex = tid;
int xindex0 = linearIndex / xstrides0;
linearIndex = linearIndex - xstrides0 * xindex0;
int xindex1 = linearIndex / xstrides1;
linearIndex = linearIndex - xstrides1 * xindex1;
int xindex2 = linearIndex / xstrides2;
int xindex3 = linearIndex - xstrides2 * xindex2;
if (xindex3 >= mask[xindex0]) in[tid] = 0;
}
}
__global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < in1ScalarCount; tid += stride) {
out[tid] = in1[tid] * in2[tid % in2ScalarCount];
}
}
__global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < in1ScalarCount; tid += stride) {
int index = tid % in2ScalarCount;
in1_d[tid] += out[tid] * in2_x[index];
in2_d[tid] = in1_x[tid] * out[tid]; // temporary per-element product; still needs to be reduced over the broadcast dimension
}
}
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh
// Result of div/mod operation stored together.
template <typename Value>
struct DivMod {
Value div, mod;
__host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { }
};
// Base case: we only have an implementation for uint32_t for now. For
// everything else, we use plain division.
template <typename Value>
struct IntDivider {
IntDivider() { } // Dummy constructor for arrays.
IntDivider(Value d) : divisor(d) { }
__host__ __device__ inline Value div(Value n) const { return n / divisor; }
__host__ __device__ inline Value mod(Value n) const { return n % divisor; }
__host__ __device__ inline DivMod<Value> divmod(Value n) const {
return DivMod<Value>(n / divisor, n % divisor);
}
Value divisor;
};
// Implement fast integer division.
template <>
struct IntDivider<unsigned int> {
static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
IntDivider() { } // Dummy constructor for arrays.
IntDivider(unsigned int d) : divisor(d) {
assert(divisor >= 1 && divisor <= INT32_MAX);
// TODO: gcc/clang has __builtin_clz() but it's not portable.
for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
uint64_t one = 1;
uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
m1 = magic;
assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits.
}
__host__ __device__ inline unsigned int div(unsigned int n) const {
#ifdef __CUDA_ARCH__
// 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and
// 'm1'.
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
#else
// Using uint64_t so that the addition does not overflow.
uint64_t t = ((uint64_t) n * m1) >> 32;
return (t + n) >> shift;
#endif
}
__host__ __device__ inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
__host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor; // d above.
unsigned int m1; // Magic number: m' above.
unsigned int shift; // Shift amounts.
};
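// Hedged sanity check for the fast divider above (our addition, hypothetical
// helper name): for any divisor d in [1, INT32_MAX], div()/mod() must agree
// with plain integer division, e.g. IntDivider<unsigned int>(7).div(100) == 14
// and .mod(100) == 2.
static inline bool intDividerMatchesPlainDivision(unsigned int d, unsigned int n) {
  IntDivider<unsigned int> fast(d);
  return fast.div(n) == n / d && fast.mod(n) == n % d;
}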
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh
/// OffsetCalculator calculates the offset in bytes of a linear index for NARGS
/// operands that share the same shape, but may have different strides.
template <int NARGS>
struct OffsetCalculator {
static constexpr int MAX_DIMS = 25;
// The offset for each argument (in bytes). Wrapper around fixed-size array.
struct offsets_t {
__host__ __device__ uint32_t& operator[](int idx) {
return values[idx];
}
uint32_t values[NARGS];
};
// OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) {
OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) {
for (int i = 0; i < MAX_DIMS; ++i) {
if (i < dims) {
sizes_[i] = IntDivider<uint32_t>(sizes[i]);
} else {
sizes_[i] = IntDivider<uint32_t>(1);
}
for (int arg = 0; arg < NARGS; arg++) {
strides_[i][arg] = i < dims ? strides[arg][i] : 0;
}
}
}
__host__ __device__ offsets_t get(uint32_t linear_idx) const {
offsets_t offsets;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] = 0;
}
#pragma unroll
for (int dim = 0; dim < MAX_DIMS; ++dim) {
if (dim == dims) {
break;
}
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] += divmod.mod * strides_[dim][arg];
}
}
return offsets;
}
void print() {
for (auto i = 1; i < 128; i++) {
auto offsets = get(i);
printf("offsets[%d]: ", i);
for (auto arg = 0; arg < NARGS; arg++) {
printf("%d ", offsets[arg]);
}
printf("\n");
}
}
int dims;
IntDivider<uint32_t> sizes_[MAX_DIMS];
uint32_t strides_[MAX_DIMS][NARGS];
};
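// Hedged usage sketch for OffsetCalculator (our addition): sizes/strides are
// ordered fastest-varying dimension first, and the strides used here are
// element strides, matching how gpu_unary_kernel / gpu_binary_kernel below
// index float arrays directly.
static inline void offsetCalculatorExample() {
  int32_t shape[2]      = {8, 4};   // a 4 x 8 result, columns fastest
  int32_t outStrides[2] = {1, 8};   // contiguous output
  int32_t inStrides[2]  = {1, 0};   // input broadcast along the slow dimension
  const int32_t* strides[2] = {outStrides, inStrides};
  OffsetCalculator<2> calc(2, shape, strides);
  auto offs = calc.get(13);         // linear index 13 == (row 1, col 5)
  assert(offs[0] == 13 && offs[1] == 5);
}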
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh
template<int nt, int vt, typename func_t>
__launch_bounds__(nt, 4)
__global__ void elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
int idx = nv * blockIdx.x + tid;
#pragma unroll
for (int i = 0; i < vt; i++) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
elementwise_kernel<nt, vt, func_t><<<grid, block, 0>>>(N, f);
}
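// Grid-sizing note (added comment): each block of nt threads handles nt * vt
// elements, so with the <128, 4> instantiation used below a tensor of
// N = 1000 scalars launches ceil(1000 / 512) = 2 blocks of 128 threads.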
template<typename func_t>
void gpu_unary_kernel(float *res, float *x,
int32_t resRank, const int32_t resScalarCount,
const int32_t* resShape, const int32_t* const* strides,
const func_t& f) {
OffsetCalculator<2> calc(resRank, resShape, strides);
launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
auto offsets = calc.get(idx);
float* out = &res[offsets[0]];
float* in = &x[offsets[1]];
*out = f(*in);
});
}
template<typename func_t>
void gpu_binary_kernel(float *res, float *x, float *y,
int32_t resRank, const int32_t resScalarCount,
const int32_t* resShape, const int32_t* const* strides,
const func_t& f) {
OffsetCalculator<3> calc(resRank, resShape, strides);
launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
auto offsets = calc.get(idx);
float* out = &res[offsets[0]];
float* in1 = &x[offsets[1]];
float* in2 = &y[offsets[2]];
*out = f(*in1, *in2);
});
}
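// Hedged usage sketch (our addition; like the helpers above it relies on
// nvcc's --extended-lambda for the __device__ lambda): element-wise exp over
// a contiguous 1-D tensor of n floats.
void gpuUnaryExpExample(float* d_res, float* d_x, int32_t n) {
  int32_t shape[1]     = {n};
  int32_t resStride[1] = {1};
  int32_t xStride[1]   = {1};
  const int32_t* strides[2] = {resStride, xStride};
  gpu_unary_kernel(d_res, d_x, /*resRank=*/1, /*resScalarCount=*/n,
                   shape, strides, [=]__device__(float v) { return expf(v); });
}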
#define CUDNN_CALL(f) { \
cudnnStatus_t stat = (f); \
if (stat != CUDNN_STATUS_SUCCESS) { \
fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \
stat, __FILE__, __LINE__); \
exit(stat); \
} \
}
void Snippet(char *);
std::random_device rd{};
std::mt19937 gen{rd()};
std::normal_distribution<> d{0, 0.01};
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: query <filename>\n");
return 0;
}
Snippet(argv[1]);
return 0;
}
/*****************************************
Emitting C Generated Code
*******************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
void Snippet(char* x0) {
// Backend setup.
cublasHandle_t cublasHandle;
CUBLAS_CALL(cublasCreate(&cublasHandle));
CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE));
CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE));
gpuMallocAddr = gpuMallocBase;
cudnnHandle_t cudnnHandle;
CUDNN_CALL(cudnnCreate(&cudnnHandle));
srand(42);
struct timeval begin_0, end_0, diff_0;
gettimeofday(&begin_0, NULL);
int32_t x7 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0);
int64_t x8 = fsize(x7);
int64_t x10 = x8 / 3073LL;
int32_t x11 = (int32_t)x10;
int32_t x12 = x11 * 3072;
float* x13 = (float*)myMalloc(x12 * sizeof(float));;
int* x14 = (int32_t*)myMalloc(x11 * sizeof(int32_t));;
char* x9 = (char*)mmap(0, x8, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x7, 0);
for(int x16=0; x16 < x11; x16++) {
int32_t x17 = x16 * 3073;
char x18 = x9[x17];
int32_t x19 = (int32_t)(unsigned char)x18;
x14[x16] = x19;
int32_t x25 = x17 + 1;
int32_t x23 = x16 * 3072;
for(int x22=0; x22 < 3072; x22++) {
int32_t x26 = x25 + x22;
char x27 = x9[x26];
int32_t x24 = x23 + x22;
float x28 = (float)(unsigned char)x27;
float x29 = x28 / 255.0f;
x13[x24] = x29;
}
}
gettimeofday(&end_0, NULL);
timeval_subtract(&diff_0, &end_0, &begin_0);;
int64_t x37 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec));
float x38 = (float)x37;
float x39 = x38 / 1000000.0f;
printf("Data normalized (all prepare time) in %lf sec\n",x39);
// Tensor 'toGPU' invocation.
float* x313 = (float*)myGpuMalloc(262144 * sizeof(float));
int32_t x42 = open("/home/fei/bitbucket/Lantern/src/out/PLDI19evaluation/resnet50/resnet50.onnx.bin",0);
int64_t x43 = fsize(x42);
float* x44 = (float*)mmap(0, x43, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x42, 0);
float* x45 = x44+5205440;
CUDA_CALL(cudaMemcpy(x313, x45, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x316 = (float*)myGpuMalloc(256 * sizeof(float));
float* x46 = x44+148672;
CUDA_CALL(cudaMemcpy(x316, x46, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x319 = (float*)myGpuMalloc(128 * sizeof(float));
float* x47 = x44+816064;
CUDA_CALL(cudaMemcpy(x319, x47, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x322 = (float*)myGpuMalloc(128 * sizeof(float));
float* x48 = x44+950080;
CUDA_CALL(cudaMemcpy(x322, x48, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x325 = (float*)myGpuMalloc(64 * sizeof(float));
float* x49 = x44+94784;
CUDA_CALL(cudaMemcpy(x325, x49, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x328 = (float*)myGpuMalloc(32768 * sizeof(float));
float* x50 = x44+220608;
CUDA_CALL(cudaMemcpy(x328, x50, 32768 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x331 = (float*)myGpuMalloc(512 * sizeof(float));
float* x51 = x44+22495680;
CUDA_CALL(cudaMemcpy(x331, x51, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x334 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x52 = x44+2964928;
CUDA_CALL(cudaMemcpy(x334, x52, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x337 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x53 = x44+4348352;
CUDA_CALL(cudaMemcpy(x337, x53, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x340 = (float*)myGpuMalloc(512 * sizeof(float));
float* x54 = x44+20133312;
CUDA_CALL(cudaMemcpy(x340, x54, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x343 = (float*)myGpuMalloc(256 * sizeof(float));
float* x55 = x44+2169536;
CUDA_CALL(cudaMemcpy(x343, x55, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x346 = (float*)myGpuMalloc(128 * sizeof(float));
float* x56 = x44+668224;
CUDA_CALL(cudaMemcpy(x346, x56, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x349 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x57 = x44+2432448;
CUDA_CALL(cudaMemcpy(x349, x57, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x352 = (float*)myGpuMalloc(512 * sizeof(float));
float* x58 = x44+1446336;
CUDA_CALL(cudaMemcpy(x352, x58, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x355 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x59 = x44+4081088;
CUDA_CALL(cudaMemcpy(x355, x59, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x358 = (float*)myGpuMalloc(256 * sizeof(float));
float* x60 = x44+1578688;
CUDA_CALL(cudaMemcpy(x358, x60, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x361 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x61 = x44+6325696;
CUDA_CALL(cudaMemcpy(x361, x61, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x364 = (float*)myGpuMalloc(512 * sizeof(float));
float* x62 = x44+602048;
CUDA_CALL(cudaMemcpy(x364, x62, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x367 = (float*)myGpuMalloc(64 * sizeof(float));
float* x63 = x44+165888;
CUDA_CALL(cudaMemcpy(x367, x63, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x370 = (float*)myGpuMalloc(512 * sizeof(float));
float* x64 = x44+1164736;
CUDA_CALL(cudaMemcpy(x370, x64, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x373 = (float*)myGpuMalloc(64 * sizeof(float));
float* x65 = x44+6080;
CUDA_CALL(cudaMemcpy(x373, x65, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x376 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x66 = x44+253888;
CUDA_CALL(cudaMemcpy(x376, x66, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x379 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x67 = x44+20135360;
CUDA_CALL(cudaMemcpy(x379, x67, 2359296 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x382 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x68 = x44+2960832;
CUDA_CALL(cudaMemcpy(x382, x68, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x385 = (float*)myGpuMalloc(256 * sizeof(float));
float* x69 = x44+3227072;
CUDA_CALL(cudaMemcpy(x385, x69, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x388 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x70 = x44+3228096;
CUDA_CALL(cudaMemcpy(x388, x70, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x391 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x71 = x44+43456;
CUDA_CALL(cudaMemcpy(x391, x71, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x394 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x72 = x44+22496704;
CUDA_CALL(cudaMemcpy(x394, x72, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x397 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x73 = x44+9092544;
CUDA_CALL(cudaMemcpy(x397, x73, 2359296 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x400 = (float*)myGpuMalloc(128 * sizeof(float));
float* x74 = x44+816320;
CUDA_CALL(cudaMemcpy(x400, x74, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x403 = (float*)myGpuMalloc(256 * sizeof(float));
float* x75 = x44+60608;
CUDA_CALL(cudaMemcpy(x403, x75, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x406 = (float*)myGpuMalloc(256 * sizeof(float));
float* x76 = x44+219584;
CUDA_CALL(cudaMemcpy(x406, x76, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x409 = (float*)myGpuMalloc(128 * sizeof(float));
float* x77 = x44+1379392;
CUDA_CALL(cudaMemcpy(x409, x77, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x412 = (float*)myGpuMalloc(128 * sizeof(float));
float* x78 = x44+1231296;
CUDA_CALL(cudaMemcpy(x412, x78, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x415 = (float*)myGpuMalloc(64 * sizeof(float));
float* x79 = x44+1856;
CUDA_CALL(cudaMemcpy(x415, x79, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x418 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x80 = x44+1098176;
CUDA_CALL(cudaMemcpy(x418, x80, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x421 = (float*)myGpuMalloc(512 * sizeof(float));
float* x81 = x44+601536;
CUDA_CALL(cudaMemcpy(x421, x81, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x424 = (float*)myGpuMalloc(128 * sizeof(float));
float* x82 = x44+401728;
CUDA_CALL(cudaMemcpy(x424, x82, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x427 = (float*)myGpuMalloc(64 * sizeof(float));
float* x83 = x44+131904;
CUDA_CALL(cudaMemcpy(x427, x83, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x430 = (float*)myGpuMalloc(128 * sizeof(float));
float* x84 = x44+949696;
CUDA_CALL(cudaMemcpy(x430, x84, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x433 = (float*)myGpuMalloc(512 * sizeof(float));
float* x85 = x44+15664576;
CUDA_CALL(cudaMemcpy(x433, x85, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x436 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x86 = x44+18027968;
CUDA_CALL(cudaMemcpy(x436, x86, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x439 = (float*)myGpuMalloc(10 * sizeof(float));
float* x87 = x44+23573952;
CUDA_CALL(cudaMemcpy(x439, x87, 10 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x442 = (float*)myGpuMalloc(64 * sizeof(float));
float* x88 = x44+43264;
CUDA_CALL(cudaMemcpy(x442, x88, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x445 = (float*)myGpuMalloc(512 * sizeof(float));
float* x89 = x44+11453376;
CUDA_CALL(cudaMemcpy(x445, x89, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x448 = (float*)myGpuMalloc(64 * sizeof(float));
float* x90 = x44+6272;
CUDA_CALL(cudaMemcpy(x448, x90, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x451 = (float*)myGpuMalloc(512 * sizeof(float));
float* x91 = x44+882112;
CUDA_CALL(cudaMemcpy(x451, x91, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x454 = (float*)myGpuMalloc(64 * sizeof(float));
float* x92 = x44+6144;
CUDA_CALL(cudaMemcpy(x454, x92, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x457 = (float*)myGpuMalloc(512 * sizeof(float));
float* x93 = x44+1445824;
CUDA_CALL(cudaMemcpy(x457, x93, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x460 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x94 = x44+1379776;
CUDA_CALL(cudaMemcpy(x460, x94, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x463 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x95 = x44+3818944;
CUDA_CALL(cudaMemcpy(x463, x95, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x466 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x96 = x44+5202368;
CUDA_CALL(cudaMemcpy(x466, x96, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x469 = (float*)myGpuMalloc(256 * sizeof(float));
float* x97 = x44+148416;
CUDA_CALL(cudaMemcpy(x469, x97, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x472 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x98 = x44+7441856;
CUDA_CALL(cudaMemcpy(x472, x98, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x475 = (float*)myGpuMalloc(64 * sizeof(float));
float* x99 = x44+94720;
CUDA_CALL(cudaMemcpy(x475, x99, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x478 = (float*)myGpuMalloc(128 * sizeof(float));
float* x100 = x44+1097792;
CUDA_CALL(cudaMemcpy(x478, x100, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x481 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x101 = x44+12504512;
CUDA_CALL(cudaMemcpy(x481, x101, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x484 = (float*)myGpuMalloc(256 * sizeof(float));
float* x102 = x44+4938944;
CUDA_CALL(cudaMemcpy(x484, x102, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x487 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x103 = x44+14611904;
CUDA_CALL(cudaMemcpy(x487, x103, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x490 = (float*)myGpuMalloc(512 * sizeof(float));
float* x104 = x44+15666112;
CUDA_CALL(cudaMemcpy(x490, x104, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x493 = (float*)myGpuMalloc(512 * sizeof(float));
float* x105 = x44+18026432;
CUDA_CALL(cudaMemcpy(x493, x105, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x496 = (float*)myGpuMalloc(512 * sizeof(float));
float* x106 = x44+9091520;
CUDA_CALL(cudaMemcpy(x496, x106, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x499 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x107 = x44+19080640;
CUDA_CALL(cudaMemcpy(x499, x107, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x502 = (float*)myGpuMalloc(256 * sizeof(float));
float* x108 = x44+6588608;
CUDA_CALL(cudaMemcpy(x502, x108, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x505 = (float*)myGpuMalloc(256 * sizeof(float));
float* x109 = x44+8299456;
CUDA_CALL(cudaMemcpy(x505, x109, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x508 = (float*)myGpuMalloc(256 * sizeof(float));
float* x110 = x44+60352;
CUDA_CALL(cudaMemcpy(x508, x110, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x511 = (float*)myGpuMalloc(64 * sizeof(float));
float* x111 = x44+202944;
CUDA_CALL(cudaMemcpy(x511, x111, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x514 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x112 = x44+166080;
CUDA_CALL(cudaMemcpy(x514, x112, 36864 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x517 = (float*)myGpuMalloc(256 * sizeof(float));
float* x113 = x44+6058432;
CUDA_CALL(cudaMemcpy(x517, x113, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x520 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x114 = x44+2436544;
CUDA_CALL(cudaMemcpy(x520, x114, 524288 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x523 = (float*)myGpuMalloc(256 * sizeof(float));
float* x115 = x44+77248;
CUDA_CALL(cudaMemcpy(x523, x115, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x526 = (float*)myGpuMalloc(256 * sizeof(float));
float* x116 = x44+6587840;
CUDA_CALL(cudaMemcpy(x526, x116, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x529 = (float*)myGpuMalloc(512 * sizeof(float));
float* x117 = x44+20133824;
CUDA_CALL(cudaMemcpy(x529, x117, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x532 = (float*)myGpuMalloc(128 * sizeof(float));
float* x118 = x44+1379264;
CUDA_CALL(cudaMemcpy(x532, x118, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x535 = (float*)myGpuMalloc(256 * sizeof(float));
float* x119 = x44+7708608;
CUDA_CALL(cudaMemcpy(x535, x119, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x538 = (float*)myGpuMalloc(64 * sizeof(float));
float* x120 = x44+165824;
CUDA_CALL(cudaMemcpy(x538, x120, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x541 = (float*)myGpuMalloc(512 * sizeof(float));
float* x121 = x44+1164224;
CUDA_CALL(cudaMemcpy(x541, x121, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x544 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x122 = x44+94912;
CUDA_CALL(cudaMemcpy(x544, x122, 36864 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x547 = (float*)myGpuMalloc(128 * sizeof(float));
float* x123 = x44+253376;
CUDA_CALL(cudaMemcpy(x547, x123, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x550 = (float*)myGpuMalloc(256 * sizeof(float));
float* x124 = x44+7708096;
CUDA_CALL(cudaMemcpy(x550, x124, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x553 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x125 = x44+2962880;
CUDA_CALL(cudaMemcpy(x553, x125, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x556 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x126 = x44+203200;
CUDA_CALL(cudaMemcpy(x556, x126, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x559 = (float*)myGpuMalloc(512 * sizeof(float));
float* x127 = x44+883648;
CUDA_CALL(cudaMemcpy(x559, x127, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x562 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x128 = x44+6059456;
CUDA_CALL(cudaMemcpy(x562, x128, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x565 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x129 = x44+6336;
CUDA_CALL(cudaMemcpy(x565, x129, 36864 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x568 = (float*)myGpuMalloc(256 * sizeof(float));
float* x130 = x44+148928;
CUDA_CALL(cudaMemcpy(x568, x130, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x571 = (float*)myGpuMalloc(256 * sizeof(float));
float* x131 = x44+5467584;
CUDA_CALL(cudaMemcpy(x571, x131, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x574 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x132 = x44+8563136;
CUDA_CALL(cudaMemcpy(x574, x132, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x577 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x133 = x44+19076544;
CUDA_CALL(cudaMemcpy(x577, x133, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x580 = (float*)myGpuMalloc(128 * sizeof(float));
float* x134 = x44+816192;
CUDA_CALL(cudaMemcpy(x580, x134, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x583 = (float*)myGpuMalloc(256 * sizeof(float));
float* x135 = x44+3818176;
CUDA_CALL(cudaMemcpy(x583, x135, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x586 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x136 = x44+8299968;
CUDA_CALL(cudaMemcpy(x586, x136, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x589 = (float*)myGpuMalloc(256 * sizeof(float));
float* x137 = x44+5468352;
CUDA_CALL(cudaMemcpy(x589, x137, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x592 = (float*)myGpuMalloc(256 * sizeof(float));
float* x138 = x44+2170048;
CUDA_CALL(cudaMemcpy(x592, x138, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x595 = (float*)myGpuMalloc(128 * sizeof(float));
float* x139 = x44+668352;
CUDA_CALL(cudaMemcpy(x595, x139, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x598 = (float*)myGpuMalloc(512 * sizeof(float));
float* x140 = x44+468928;
CUDA_CALL(cudaMemcpy(x598, x140, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x601 = (float*)myGpuMalloc(64 * sizeof(float));
float* x141 = x44+94848;
CUDA_CALL(cudaMemcpy(x601, x141, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x604 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x142 = x44+23545280;
CUDA_CALL(cudaMemcpy(x604, x142, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x607 = (float*)myGpuMalloc(256 * sizeof(float));
float* x143 = x44+7179456;
CUDA_CALL(cudaMemcpy(x607, x143, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x610 = (float*)myGpuMalloc(64 * sizeof(float));
float* x144 = x44+43328;
CUDA_CALL(cudaMemcpy(x610, x144, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x613 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x145 = x44+401856;
CUDA_CALL(cudaMemcpy(x613, x145, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x616 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x146 = x44+14609856;
CUDA_CALL(cudaMemcpy(x616, x146, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x619 = (float*)myGpuMalloc(256 * sizeof(float));
float* x147 = x44+2169280;
CUDA_CALL(cudaMemcpy(x619, x147, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x622 = (float*)myGpuMalloc(256 * sizeof(float));
float* x148 = x44+7178944;
CUDA_CALL(cudaMemcpy(x622, x148, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x625 = (float*)myGpuMalloc(64 * sizeof(float));
float* x149 = x44+1920;
CUDA_CALL(cudaMemcpy(x625, x149, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x628 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x150 = x44+816576;
CUDA_CALL(cudaMemcpy(x628, x150, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x631 = (float*)myGpuMalloc(128 * sizeof(float));
float* x151 = x44+949952;
CUDA_CALL(cudaMemcpy(x631, x151, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x634 = (float*)myGpuMalloc(512 * sizeof(float));
float* x152 = x44+11452864;
CUDA_CALL(cudaMemcpy(x634, x152, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x637 = (float*)myGpuMalloc(64 * sizeof(float));
float* x153 = x44+6208;
CUDA_CALL(cudaMemcpy(x637, x153, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x640 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x154 = x44+12506560;
CUDA_CALL(cudaMemcpy(x640, x154, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x643 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x155 = x44+4939200;
CUDA_CALL(cudaMemcpy(x643, x155, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x646 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x156 = x44+2433472;
CUDA_CALL(cudaMemcpy(x646, x156, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x649 = (float*)myGpuMalloc(64 * sizeof(float));
float* x157 = x44+203136;
CUDA_CALL(cudaMemcpy(x649, x157, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x652 = (float*)myGpuMalloc(512 * sizeof(float));
float* x158 = x44+601024;
CUDA_CALL(cudaMemcpy(x652, x158, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x655 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x159 = x44+7442880;
CUDA_CALL(cudaMemcpy(x655, x159, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x658 = (float*)myGpuMalloc(512 * sizeof(float));
float* x160 = x44+9092032;
CUDA_CALL(cudaMemcpy(x658, x160, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x661 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x161 = x44+8564160;
CUDA_CALL(cudaMemcpy(x661, x161, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x664 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x162 = x44+23551424;
CUDA_CALL(cudaMemcpy(x664, x162, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x667 = (float*)myGpuMalloc(256 * sizeof(float));
float* x163 = x44+4938688;
CUDA_CALL(cudaMemcpy(x667, x163, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x670 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x164 = x44+14613952;
CUDA_CALL(cudaMemcpy(x670, x164, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x673 = (float*)myGpuMalloc(256 * sizeof(float));
float* x165 = x44+60096;
CUDA_CALL(cudaMemcpy(x673, x165, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x676 = (float*)myGpuMalloc(128 * sizeof(float));
float* x166 = x44+1097664;
CUDA_CALL(cudaMemcpy(x676, x166, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x679 = (float*)myGpuMalloc(128 * sizeof(float));
float* x167 = x44+401600;
CUDA_CALL(cudaMemcpy(x679, x167, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x682 = (float*)myGpuMalloc(256 * sizeof(float));
float* x168 = x44+4347328;
CUDA_CALL(cudaMemcpy(x682, x168, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x685 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x169 = x44+132032;
CUDA_CALL(cudaMemcpy(x685, x169, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x688 = (float*)myGpuMalloc(256 * sizeof(float));
float* x170 = x44+1578944;
CUDA_CALL(cudaMemcpy(x688, x170, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x691 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x171 = x44+1165760;
CUDA_CALL(cudaMemcpy(x691, x171, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x694 = (float*)myGpuMalloc(256 * sizeof(float));
float* x172 = x44+220352;
CUDA_CALL(cudaMemcpy(x694, x172, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x697 = (float*)myGpuMalloc(128 * sizeof(float));
float* x173 = x44+253760;
CUDA_CALL(cudaMemcpy(x697, x173, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x700 = (float*)myGpuMalloc(64 * sizeof(float));
float* x174 = x44+203008;
CUDA_CALL(cudaMemcpy(x700, x174, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x703 = (float*)myGpuMalloc(256 * sizeof(float));
float* x175 = x44+6058688;
CUDA_CALL(cudaMemcpy(x703, x175, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x706 = (float*)myGpuMalloc(512 * sizeof(float));
float* x176 = x44+15665088;
CUDA_CALL(cudaMemcpy(x706, x176, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x709 = (float*)myGpuMalloc(512 * sizeof(float));
float* x177 = x44+18026944;
CUDA_CALL(cudaMemcpy(x709, x177, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x712 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x178 = x44+8566208;
CUDA_CALL(cudaMemcpy(x712, x178, 524288 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x715 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x179 = x44+5203392;
CUDA_CALL(cudaMemcpy(x715, x179, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x718 = (float*)myGpuMalloc(256 * sizeof(float));
float* x180 = x44+8298944;
CUDA_CALL(cudaMemcpy(x718, x180, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x721 = (float*)myGpuMalloc(64 * sizeof(float));
float* x181 = x44+94656;
CUDA_CALL(cudaMemcpy(x721, x181, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x724 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x182 = x44+4084160;
CUDA_CALL(cudaMemcpy(x724, x182, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x727 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x183 = x44+19078592;
CUDA_CALL(cudaMemcpy(x727, x183, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x730 = (float*)myGpuMalloc(512 * sizeof(float));
float* x184 = x44+467392;
CUDA_CALL(cudaMemcpy(x730, x184, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x733 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x185 = x44+6322624;
CUDA_CALL(cudaMemcpy(x733, x185, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x736 = (float*)myGpuMalloc(512 * sizeof(float));
float* x186 = x44+883136;
CUDA_CALL(cudaMemcpy(x736, x186, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x739 = (float*)myGpuMalloc(128 * sizeof(float));
float* x187 = x44+1379648;
CUDA_CALL(cudaMemcpy(x739, x187, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x742 = (float*)myGpuMalloc(512 * sizeof(float));
float* x188 = x44+468416;
CUDA_CALL(cudaMemcpy(x742, x188, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x745 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x189 = x44+149440;
CUDA_CALL(cudaMemcpy(x745, x189, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x748 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x190 = x44+7445952;
CUDA_CALL(cudaMemcpy(x748, x190, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x751 = (float*)myGpuMalloc(1728 * sizeof(float));
float* x191 = x44+0;
CUDA_CALL(cudaMemcpy(x751, x191, 1728 * sizeof(float), cudaMemcpyHostToDevice));
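// Note: the 1728-float tensor just copied (x751, offset 0 in x44) matches
// 64 * 3 * 3 * 3, i.e. a 3x3 convolution over a 3-channel input producing 64
// feature maps, which suggests this is the network's stem convolution. This
// is an inference from the size alone; the generator does not label tensors.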
// Tensor 'toGPU' invocation.
float* x754 = (float*)myGpuMalloc(64 * sizeof(float));
float* x192 = x44+131840;
CUDA_CALL(cudaMemcpy(x754, x192, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x757 = (float*)myGpuMalloc(512 * sizeof(float));
float* x193 = x44+15665600;
CUDA_CALL(cudaMemcpy(x757, x193, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x760 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x194 = x44+15666624;
CUDA_CALL(cudaMemcpy(x760, x194, 2359296 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x763 = (float*)myGpuMalloc(512 * sizeof(float));
float* x195 = x44+1445312;
CUDA_CALL(cudaMemcpy(x763, x195, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x766 = (float*)myGpuMalloc(256 * sizeof(float));
float* x196 = x44+3227840;
CUDA_CALL(cudaMemcpy(x766, x196, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x769 = (float*)myGpuMalloc(64 * sizeof(float));
float* x197 = x44+43392;
CUDA_CALL(cudaMemcpy(x769, x197, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x772 = (float*)myGpuMalloc(512 * sizeof(float));
float* x198 = x44+11452352;
CUDA_CALL(cudaMemcpy(x772, x198, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x775 = (float*)myGpuMalloc(512 * sizeof(float));
float* x199 = x44+18025920;
CUDA_CALL(cudaMemcpy(x775, x199, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x778 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x200 = x44+6324672;
CUDA_CALL(cudaMemcpy(x778, x200, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x781 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x201 = x44+60864;
CUDA_CALL(cudaMemcpy(x781, x201, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x784 = (float*)myGpuMalloc(256 * sizeof(float));
float* x202 = x44+5468096;
CUDA_CALL(cudaMemcpy(x784, x202, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x787 = (float*)myGpuMalloc(64 * sizeof(float));
float* x203 = x44+43200;
CUDA_CALL(cudaMemcpy(x787, x203, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x790 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x204 = x44+1231808;
CUDA_CALL(cudaMemcpy(x790, x204, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x793 = (float*)myGpuMalloc(256 * sizeof(float));
float* x205 = x44+149184;
CUDA_CALL(cudaMemcpy(x793, x205, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x796 = (float*)myGpuMalloc(512 * sizeof(float));
float* x206 = x44+1163712;
CUDA_CALL(cudaMemcpy(x796, x206, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x799 = (float*)myGpuMalloc(256 * sizeof(float));
float* x207 = x44+7178688;
CUDA_CALL(cudaMemcpy(x799, x207, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x802 = (float*)myGpuMalloc(512 * sizeof(float));
float* x208 = x44+22495168;
CUDA_CALL(cudaMemcpy(x802, x208, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x805 = (float*)myGpuMalloc(128 * sizeof(float));
float* x209 = x44+949824;
CUDA_CALL(cudaMemcpy(x805, x209, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x808 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x210 = x44+78272;
CUDA_CALL(cudaMemcpy(x808, x210, 16384 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x811 = (float*)myGpuMalloc(128 * sizeof(float));
float* x211 = x44+253504;
CUDA_CALL(cudaMemcpy(x811, x211, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x814 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x212 = x44+14607808;
CUDA_CALL(cudaMemcpy(x814, x212, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x817 = (float*)myGpuMalloc(256 * sizeof(float));
float* x213 = x44+4348096;
CUDA_CALL(cudaMemcpy(x817, x213, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x820 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x214 = x44+1579456;
CUDA_CALL(cudaMemcpy(x820, x214, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x823 = (float*)myGpuMalloc(256 * sizeof(float));
float* x215 = x44+7708864;
CUDA_CALL(cudaMemcpy(x823, x215, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x826 = (float*)myGpuMalloc(128 * sizeof(float));
float* x216 = x44+668480;
CUDA_CALL(cudaMemcpy(x826, x216, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x829 = (float*)myGpuMalloc(256 * sizeof(float));
float* x217 = x44+4347840;
CUDA_CALL(cudaMemcpy(x829, x217, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x832 = (float*)myGpuMalloc(64 * sizeof(float));
float* x218 = x44+203072;
CUDA_CALL(cudaMemcpy(x832, x218, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x835 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x219 = x44+1447360;
CUDA_CALL(cudaMemcpy(x835, x219, 131072 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x838 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x220 = x44+23547328;
CUDA_CALL(cudaMemcpy(x838, x220, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x841 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x221 = x44+4083136;
CUDA_CALL(cudaMemcpy(x841, x221, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x844 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x222 = x44+8565184;
CUDA_CALL(cudaMemcpy(x844, x222, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x847 = (float*)myGpuMalloc(256 * sizeof(float));
float* x223 = x44+220096;
CUDA_CALL(cudaMemcpy(x847, x223, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x850 = (float*)myGpuMalloc(256 * sizeof(float));
float* x224 = x44+6588096;
CUDA_CALL(cudaMemcpy(x850, x224, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x853 = (float*)myGpuMalloc(256 * sizeof(float));
float* x225 = x44+6058944;
CUDA_CALL(cudaMemcpy(x853, x225, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x856 = (float*)myGpuMalloc(64 * sizeof(float));
float* x226 = x44+166016;
CUDA_CALL(cudaMemcpy(x856, x226, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x859 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x227 = x44+5204416;
CUDA_CALL(cudaMemcpy(x859, x227, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x862 = (float*)myGpuMalloc(256 * sizeof(float));
float* x228 = x44+8299200;
CUDA_CALL(cudaMemcpy(x862, x228, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x865 = (float*)myGpuMalloc(128 * sizeof(float));
float* x229 = x44+401472;
CUDA_CALL(cudaMemcpy(x865, x229, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x868 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x230 = x44+950208;
CUDA_CALL(cudaMemcpy(x868, x230, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x871 = (float*)myGpuMalloc(256 * sizeof(float));
float* x231 = x44+4938432;
CUDA_CALL(cudaMemcpy(x871, x231, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x874 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x232 = x44+12508608;
CUDA_CALL(cudaMemcpy(x874, x232, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x877 = (float*)myGpuMalloc(512 * sizeof(float));
float* x233 = x44+22494656;
CUDA_CALL(cudaMemcpy(x877, x233, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x880 = (float*)myGpuMalloc(512 * sizeof(float));
float* x234 = x44+18027456;
CUDA_CALL(cudaMemcpy(x880, x234, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x883 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x235 = x44+884160;
CUDA_CALL(cudaMemcpy(x883, x235, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x886 = (float*)myGpuMalloc(256 * sizeof(float));
float* x236 = x44+4347584;
CUDA_CALL(cudaMemcpy(x886, x236, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x889 = (float*)myGpuMalloc(256 * sizeof(float));
float* x237 = x44+1579200;
CUDA_CALL(cudaMemcpy(x889, x237, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x892 = (float*)myGpuMalloc(256 * sizeof(float));
float* x238 = x44+59840;
CUDA_CALL(cudaMemcpy(x892, x238, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x895 = (float*)myGpuMalloc(256 * sizeof(float));
float* x239 = x44+3818432;
CUDA_CALL(cudaMemcpy(x895, x239, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x898 = (float*)myGpuMalloc(512 * sizeof(float));
float* x240 = x44+9090496;
CUDA_CALL(cudaMemcpy(x898, x240, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x901 = (float*)myGpuMalloc(512 * sizeof(float));
float* x241 = x44+22496192;
CUDA_CALL(cudaMemcpy(x901, x241, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x904 = (float*)myGpuMalloc(256 * sizeof(float));
float* x242 = x44+77504;
CUDA_CALL(cudaMemcpy(x904, x242, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x907 = (float*)myGpuMalloc(128 * sizeof(float));
float* x243 = x44+253632;
CUDA_CALL(cudaMemcpy(x907, x243, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x910 = (float*)myGpuMalloc(512 * sizeof(float));
float* x244 = x44+11451840;
CUDA_CALL(cudaMemcpy(x910, x244, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x913 = (float*)myGpuMalloc(64 * sizeof(float));
float* x245 = x44+1728;
CUDA_CALL(cudaMemcpy(x913, x245, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x916 = (float*)myGpuMalloc(512 * sizeof(float));
float* x246 = x44+600512;
CUDA_CALL(cudaMemcpy(x916, x246, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x919 = (float*)myGpuMalloc(64 * sizeof(float));
float* x247 = x44+131776;
CUDA_CALL(cudaMemcpy(x919, x247, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x922 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x248 = x44+7443904;
CUDA_CALL(cudaMemcpy(x922, x248, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x925 = (float*)myGpuMalloc(512 * sizeof(float));
float* x249 = x44+467904;
CUDA_CALL(cudaMemcpy(x925, x249, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x928 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x250 = x44+2963904;
CUDA_CALL(cudaMemcpy(x928, x250, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x931 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x251 = x44+11453888;
CUDA_CALL(cudaMemcpy(x931, x251, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x934 = (float*)myGpuMalloc(512 * sizeof(float));
float* x252 = x44+20134336;
CUDA_CALL(cudaMemcpy(x934, x252, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x937 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x253 = x44+12510656;
CUDA_CALL(cudaMemcpy(x937, x253, 2097152 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x940 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x254 = x44+14616000;
CUDA_CALL(cudaMemcpy(x940, x254, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x943 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x255 = x44+2434496;
CUDA_CALL(cudaMemcpy(x943, x255, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x946 = (float*)myGpuMalloc(128 * sizeof(float));
float* x256 = x44+1097920;
CUDA_CALL(cudaMemcpy(x946, x256, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x949 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x257 = x44+4085184;
CUDA_CALL(cudaMemcpy(x949, x257, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x952 = (float*)myGpuMalloc(256 * sizeof(float));
float* x258 = x44+3227328;
CUDA_CALL(cudaMemcpy(x952, x258, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x955 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x259 = x44+2961856;
CUDA_CALL(cudaMemcpy(x955, x259, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x958 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x260 = x44+7179712;
CUDA_CALL(cudaMemcpy(x958, x260, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x961 = (float*)myGpuMalloc(128 * sizeof(float));
float* x261 = x44+668096;
CUDA_CALL(cudaMemcpy(x961, x261, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x964 = (float*)myGpuMalloc(512 * sizeof(float));
float* x262 = x44+1165248;
CUDA_CALL(cudaMemcpy(x964, x262, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x967 = (float*)myGpuMalloc(512 * sizeof(float));
float* x263 = x44+9091008;
CUDA_CALL(cudaMemcpy(x967, x263, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x970 = (float*)myGpuMalloc(128 * sizeof(float));
float* x264 = x44+816448;
CUDA_CALL(cudaMemcpy(x970, x264, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x973 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x265 = x44+7709120;
CUDA_CALL(cudaMemcpy(x973, x265, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x976 = (float*)myGpuMalloc(20480 * sizeof(float));
float* x266 = x44+23553472;
CUDA_CALL(cudaMemcpy(x976, x266, 20480 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x979 = (float*)myGpuMalloc(256 * sizeof(float));
float* x267 = x44+4938176;
CUDA_CALL(cudaMemcpy(x979, x267, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x982 = (float*)myGpuMalloc(256 * sizeof(float));
float* x268 = x44+2169792;
CUDA_CALL(cudaMemcpy(x982, x268, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x985 = (float*)myGpuMalloc(256 * sizeof(float));
float* x269 = x44+6059200;
CUDA_CALL(cudaMemcpy(x985, x269, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x988 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x270 = x44+6323648;
CUDA_CALL(cudaMemcpy(x988, x270, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x991 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x271 = x44+4082112;
CUDA_CALL(cudaMemcpy(x991, x271, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x994 = (float*)myGpuMalloc(4096 * sizeof(float));
float* x272 = x44+1984;
CUDA_CALL(cudaMemcpy(x994, x272, 4096 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x997 = (float*)myGpuMalloc(512 * sizeof(float));
float* x273 = x44+1446848;
CUDA_CALL(cudaMemcpy(x997, x273, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1000 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x274 = x44+668608;
CUDA_CALL(cudaMemcpy(x1000, x274, 147456 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1003 = (float*)myGpuMalloc(128 * sizeof(float));
float* x275 = x44+1231552;
CUDA_CALL(cudaMemcpy(x1003, x275, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1006 = (float*)myGpuMalloc(256 * sizeof(float));
float* x276 = x44+3818688;
CUDA_CALL(cudaMemcpy(x1006, x276, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1009 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x277 = x44+6321600;
CUDA_CALL(cudaMemcpy(x1009, x277, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1012 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x278 = x44+12502464;
CUDA_CALL(cudaMemcpy(x1012, x278, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1015 = (float*)myGpuMalloc(256 * sizeof(float));
float* x279 = x44+8299712;
CUDA_CALL(cudaMemcpy(x1015, x279, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1018 = (float*)myGpuMalloc(256 * sizeof(float));
float* x280 = x44+5467840;
CUDA_CALL(cudaMemcpy(x1018, x280, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1021 = (float*)myGpuMalloc(128 * sizeof(float));
float* x281 = x44+1231424;
CUDA_CALL(cudaMemcpy(x1021, x281, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1024 = (float*)myGpuMalloc(256 * sizeof(float));
float* x282 = x44+78016;
CUDA_CALL(cudaMemcpy(x1024, x282, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1027 = (float*)myGpuMalloc(64 * sizeof(float));
float* x283 = x44+131968;
CUDA_CALL(cudaMemcpy(x1027, x283, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1030 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x284 = x44+19082688;
CUDA_CALL(cudaMemcpy(x1030, x284, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1033 = (float*)myGpuMalloc(512 * sizeof(float));
float* x285 = x44+882624;
CUDA_CALL(cudaMemcpy(x1033, x285, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1036 = (float*)myGpuMalloc(256 * sizeof(float));
float* x286 = x44+219840;
CUDA_CALL(cudaMemcpy(x1036, x286, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1039 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x287 = x44+8562112;
CUDA_CALL(cudaMemcpy(x1039, x287, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1042 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x288 = x44+5468608;
CUDA_CALL(cudaMemcpy(x1042, x288, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1045 = (float*)myGpuMalloc(256 * sizeof(float));
float* x289 = x44+7179200;
CUDA_CALL(cudaMemcpy(x1045, x289, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1048 = (float*)myGpuMalloc(64 * sizeof(float));
float* x290 = x44+1792;
CUDA_CALL(cudaMemcpy(x1048, x290, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1051 = (float*)myGpuMalloc(128 * sizeof(float));
float* x291 = x44+401344;
CUDA_CALL(cudaMemcpy(x1051, x291, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1054 = (float*)myGpuMalloc(256 * sizeof(float));
float* x292 = x44+7708352;
CUDA_CALL(cudaMemcpy(x1054, x292, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1057 = (float*)myGpuMalloc(256 * sizeof(float));
float* x293 = x44+6588352;
CUDA_CALL(cudaMemcpy(x1057, x293, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1060 = (float*)myGpuMalloc(512 * sizeof(float));
float* x294 = x44+20134848;
CUDA_CALL(cudaMemcpy(x1060, x294, 512 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1063 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x295 = x44+602560;
CUDA_CALL(cudaMemcpy(x1063, x295, 65536 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1066 = (float*)myGpuMalloc(64 * sizeof(float));
float* x296 = x44+165952;
CUDA_CALL(cudaMemcpy(x1066, x296, 64 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1069 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x297 = x44+469440;
CUDA_CALL(cudaMemcpy(x1069, x297, 131072 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1072 = (float*)myGpuMalloc(256 * sizeof(float));
float* x298 = x44+3227584;
CUDA_CALL(cudaMemcpy(x1072, x298, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1075 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x299 = x44+23549376;
CUDA_CALL(cudaMemcpy(x1075, x299, 2048 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1078 = (float*)myGpuMalloc(128 * sizeof(float));
float* x300 = x44+1231680;
CUDA_CALL(cudaMemcpy(x1078, x300, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1081 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x301 = x44+6588864;
CUDA_CALL(cudaMemcpy(x1081, x301, 589824 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1084 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x302 = x44+5201344;
CUDA_CALL(cudaMemcpy(x1084, x302, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1087 = (float*)myGpuMalloc(256 * sizeof(float));
float* x303 = x44+77760;
CUDA_CALL(cudaMemcpy(x1087, x303, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1090 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x304 = x44+19084736;
CUDA_CALL(cudaMemcpy(x1090, x304, 1048576 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1093 = (float*)myGpuMalloc(128 * sizeof(float));
float* x305 = x44+1098048;
CUDA_CALL(cudaMemcpy(x1093, x305, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1096 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x306 = x44+2435520;
CUDA_CALL(cudaMemcpy(x1096, x306, 1024 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1099 = (float*)myGpuMalloc(128 * sizeof(float));
float* x307 = x44+1379520;
CUDA_CALL(cudaMemcpy(x1099, x307, 128 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1102 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x308 = x44+2170304;
CUDA_CALL(cudaMemcpy(x1102, x308, 262144 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1105 = (float*)myGpuMalloc(256 * sizeof(float));
float* x309 = x44+1578432;
CUDA_CALL(cudaMemcpy(x1105, x309, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1108 = (float*)myGpuMalloc(256 * sizeof(float));
float* x310 = x44+3817920;
CUDA_CALL(cudaMemcpy(x1108, x310, 256 * sizeof(float), cudaMemcpyHostToDevice));
// Tensor 'toGPU' invocation.
float* x1111 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x311 = x44+7444928;
CUDA_CALL(cudaMemcpy(x1111, x311, 1024 * sizeof(float), cudaMemcpyHostToDevice));
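// The allocations below mirror the parameter sizes copied above but receive
// no host-to-device copy; they most likely serve as the corresponding
// gradient (or other per-parameter scratch) buffers. This reading is an
// inference from the matching sizes, not something the generated code states.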
float* x1113 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1114 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1115 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1116 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1117 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1118 = (float*)myGpuMalloc(32768 * sizeof(float));
float* x1119 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1120 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1121 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x1122 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1123 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1124 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1125 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1126 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1127 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1128 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1129 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1130 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1131 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1132 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1133 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1134 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x1135 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x1136 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1137 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1138 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x1139 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x1140 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1141 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x1142 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1143 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1144 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1145 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1146 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1147 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1148 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x1149 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1150 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1151 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1152 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1153 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1154 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1155 = (float*)myGpuMalloc(10 * sizeof(float));
float* x1156 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1157 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1158 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1159 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1160 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1161 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1162 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x1163 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1164 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1165 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1166 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1167 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1168 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1169 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1170 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1171 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1172 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1173 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1174 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1175 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1176 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1177 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1178 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1179 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1180 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x1181 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1182 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1183 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1184 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1185 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1186 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1187 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1188 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1189 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1190 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x1191 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1192 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1193 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1194 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x1195 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1196 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1197 = (float*)myGpuMalloc(36864 * sizeof(float));
float* x1198 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1199 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1200 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1201 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1202 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1203 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1204 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1205 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1206 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1207 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1208 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1209 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1210 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1211 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1212 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1213 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x1214 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1215 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1216 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1217 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1218 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x1219 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1220 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1221 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1222 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1223 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1224 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1225 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1226 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1227 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1228 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1229 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1230 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1231 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1232 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1233 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1234 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1235 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1236 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1237 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x1238 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1239 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x1240 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1241 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1242 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1243 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1244 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1245 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1246 = (float*)myGpuMalloc(524288 * sizeof(float));
float* x1247 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1248 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1249 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1250 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1251 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1252 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1253 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1254 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1255 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1256 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1257 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x1258 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1259 = (float*)myGpuMalloc(1728 * sizeof(float));
float* x1260 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1261 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1262 = (float*)myGpuMalloc(2359296 * sizeof(float));
float* x1263 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1264 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1265 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1266 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1267 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1268 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1269 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x1270 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1271 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1272 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x1273 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1274 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1275 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1276 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1277 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1278 = (float*)myGpuMalloc(16384 * sizeof(float));
float* x1279 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1280 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1281 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1282 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x1283 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1284 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1285 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1286 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1287 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x1288 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1289 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1290 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1291 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1292 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1293 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1294 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1295 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1296 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1297 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1298 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x1299 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1300 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1301 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1302 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1303 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x1304 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1305 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1306 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1307 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1308 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1309 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1310 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1311 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1312 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1313 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1314 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1315 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1316 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1317 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1318 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1319 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1320 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1321 = (float*)myGpuMalloc(2097152 * sizeof(float));
float* x1322 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1323 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1324 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1325 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1326 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1327 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1328 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1329 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1330 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1331 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1332 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1333 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x1334 = (float*)myGpuMalloc(20480 * sizeof(float));
float* x1335 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1336 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1337 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1338 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1339 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1340 = (float*)myGpuMalloc(4096 * sizeof(float));
float* x1341 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1342 = (float*)myGpuMalloc(147456 * sizeof(float));
float* x1343 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1344 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1345 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1346 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1347 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1348 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1349 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1350 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1351 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1352 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1353 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1354 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1355 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1356 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x1357 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1358 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1359 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1360 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1361 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1362 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1363 = (float*)myGpuMalloc(65536 * sizeof(float));
float* x1364 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1365 = (float*)myGpuMalloc(131072 * sizeof(float));
float* x1366 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1367 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x1368 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1369 = (float*)myGpuMalloc(589824 * sizeof(float));
float* x1370 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1371 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1372 = (float*)myGpuMalloc(1048576 * sizeof(float));
float* x1373 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1374 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x1375 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1376 = (float*)myGpuMalloc(262144 * sizeof(float));
float* x1377 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1378 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1379 = (float*)myGpuMalloc(1024 * sizeof(float));
double* x1380 = (double*)myMalloc(4 * sizeof(double));;
double* x1381 = (double*)myMalloc(4 * sizeof(double));;
int64_t x1382 = (long)mallocAddr;
int64_t x1383 = (long)gpuMallocAddr;
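// x1380/x1381 are small host-side buffers (one double per epoch; the loop below runs 4 epochs), and
// x1382/x1383 snapshot the current CPU and GPU arena pointers. Generated code of this style typically
// resets the arenas back to these marks so that temporary buffers allocated inside the training loop
// do not accumulate; that reset is assumed here and not shown in this excerpt.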
// training loop starts here
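// The integer arithmetic below appears to be loop-invariant shape computation hoisted out of the
// per-batch loop: each conv/pool output size follows out = (in + 2*pad - kernel)/stride + 1.
// For example, the stem 3x3 conv (pad 1) on a 32x32 input gives (32 + 2 - 3)/1 + 1 = 32 (x1412),
// and the following 2x2 max pool with stride 2 gives (32 - 2)/2 + 1 = 16 (x1445).
// (Equivalently, cudnnGetConvolution2dForwardOutputDim(conv_desc, in_desc, filt_desc, &n, &c, &h, &w)
// could compute these sizes at runtime; the generator bakes them in as integers instead.)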
int32_t x1394 = x11 / 64;
int32_t x1411 = 31 / 1;
int32_t x1412 = x1411 + 1;
int32_t x1416 = 4096 * x1412;
int32_t x1417 = x1416 * x1412;
int32_t x1413 = x1412 * x1412;
int32_t x1414 = 64 * x1413;
int32_t x1415 = 64 * x1414;
int32_t x1443 = x1412 - 2;
int32_t x1444 = x1443 / 2;
int32_t x1445 = x1444 + 1;
int32_t x1449 = 4096 * x1445;
int32_t x1450 = x1449 * x1445;
bool x1454 = x1445 >= 1;
bool x1455;
if (x1454) {
x1455 = x1454;
} else {
x1455 = false;
}
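// x1454/x1455 and the similar boolean pairs below are generated shape guards: they record whether each
// computed feature-map size is valid (e.g. >= 1, or >= 3 before a 3x3 convolution) and whether tensor
// shapes are broadcast-compatible for elementwise adds. The flags are only asserted later, inside the
// batch loop.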
int32_t x1460 = x1444 / 1;
int32_t x1461 = x1460 + 1;
int32_t x1465 = 4096 * x1461;
int32_t x1466 = x1465 * x1461;
int32_t x1462 = x1461 * x1461;
int32_t x1463 = 64 * x1462;
int32_t x1464 = 64 * x1463;
int32_t x1488 = x1461 + 2;
bool x1489 = x1488 >= 3;
bool x1490;
if (x1489) {
x1490 = x1489;
} else {
x1490 = false;
}
int32_t x1495 = x1488 - 3;
int32_t x1496 = x1495 / 1;
int32_t x1497 = x1496 + 1;
int32_t x1501 = 4096 * x1497;
int32_t x1502 = x1501 * x1497;
int32_t x1498 = x1497 * x1497;
int32_t x1499 = 64 * x1498;
int32_t x1500 = 64 * x1499;
bool x1524 = x1497 >= 1;
bool x1525;
if (x1524) {
x1525 = x1524;
} else {
x1525 = false;
}
int32_t x1530 = x1496 / 1;
int32_t x1531 = x1530 + 1;
int32_t x1535 = 16384 * x1531;
int32_t x1536 = x1535 * x1531;
int32_t x1532 = x1531 * x1531;
int32_t x1533 = 256 * x1532;
int32_t x1534 = 64 * x1533;
int32_t x1558 = 16384 * x1461;
int32_t x1559 = x1558 * x1461;
int32_t x1556 = 256 * x1462;
int32_t x1557 = 64 * x1556;
bool x1576 = x1461 == 1;
bool x1577 = x1461 == x1531;
bool x1578 = x1576 || x1577;
bool x1579;
if (x1578) {
x1579 = x1578;
} else {
x1579 = false;
}
bool x1594 = x1531 >= 1;
bool x1595;
if (x1594) {
x1595 = x1594;
} else {
x1595 = false;
}
int32_t x1600 = x1530 / 1;
int32_t x1601 = x1600 + 1;
int32_t x1605 = 4096 * x1601;
int32_t x1606 = x1605 * x1601;
int32_t x1602 = x1601 * x1601;
int32_t x1603 = 64 * x1602;
int32_t x1604 = 64 * x1603;
int32_t x1628 = x1601 + 2;
bool x1629 = x1628 >= 3;
bool x1630;
if (x1629) {
x1630 = x1629;
} else {
x1630 = false;
}
int32_t x1635 = x1628 - 3;
int32_t x1636 = x1635 / 1;
int32_t x1637 = x1636 + 1;
int32_t x1641 = 4096 * x1637;
int32_t x1642 = x1641 * x1637;
int32_t x1638 = x1637 * x1637;
int32_t x1639 = 64 * x1638;
int32_t x1640 = 64 * x1639;
bool x1664 = x1637 >= 1;
bool x1665;
if (x1664) {
x1665 = x1664;
} else {
x1665 = false;
}
int32_t x1670 = x1636 / 1;
int32_t x1671 = x1670 + 1;
int32_t x1675 = 16384 * x1671;
int32_t x1676 = x1675 * x1671;
int32_t x1672 = x1671 * x1671;
int32_t x1673 = 256 * x1672;
int32_t x1674 = 64 * x1673;
bool x1693 = x1531 == 1;
bool x1694 = x1531 == x1671;
bool x1695 = x1693 || x1694;
bool x1696;
if (x1695) {
x1696 = x1695;
} else {
x1696 = false;
}
bool x1711 = x1671 >= 1;
bool x1712;
if (x1711) {
x1712 = x1711;
} else {
x1712 = false;
}
int32_t x1717 = x1670 / 1;
int32_t x1718 = x1717 + 1;
int32_t x1722 = 4096 * x1718;
int32_t x1723 = x1722 * x1718;
int32_t x1719 = x1718 * x1718;
int32_t x1720 = 64 * x1719;
int32_t x1721 = 64 * x1720;
int32_t x1745 = x1718 + 2;
bool x1746 = x1745 >= 3;
bool x1747;
if (x1746) {
x1747 = x1746;
} else {
x1747 = false;
}
int32_t x1752 = x1745 - 3;
int32_t x1753 = x1752 / 1;
int32_t x1754 = x1753 + 1;
int32_t x1758 = 4096 * x1754;
int32_t x1759 = x1758 * x1754;
int32_t x1755 = x1754 * x1754;
int32_t x1756 = 64 * x1755;
int32_t x1757 = 64 * x1756;
bool x1781 = x1754 >= 1;
bool x1782;
if (x1781) {
x1782 = x1781;
} else {
x1782 = false;
}
int32_t x1787 = x1753 / 1;
int32_t x1788 = x1787 + 1;
int32_t x1792 = 16384 * x1788;
int32_t x1793 = x1792 * x1788;
int32_t x1789 = x1788 * x1788;
int32_t x1790 = 256 * x1789;
int32_t x1791 = 64 * x1790;
bool x1810 = x1671 == 1;
bool x1811 = x1671 == x1788;
bool x1812 = x1810 || x1811;
bool x1813;
if (x1812) {
x1813 = x1812;
} else {
x1813 = false;
}
bool x1828 = x1788 >= 1;
bool x1829;
if (x1828) {
x1829 = x1828;
} else {
x1829 = false;
}
int32_t x1834 = x1787 / 1;
int32_t x1835 = x1834 + 1;
int32_t x1839 = 8192 * x1835;
int32_t x1840 = x1839 * x1835;
int32_t x1836 = x1835 * x1835;
int32_t x1837 = 128 * x1836;
int32_t x1838 = 64 * x1837;
int32_t x1862 = x1835 + 2;
bool x1863 = x1862 >= 3;
bool x1864;
if (x1863) {
x1864 = x1863;
} else {
x1864 = false;
}
int32_t x1869 = x1862 - 3;
int32_t x1870 = x1869 / 2;
int32_t x1871 = x1870 + 1;
int32_t x1875 = 8192 * x1871;
int32_t x1876 = x1875 * x1871;
int32_t x1872 = x1871 * x1871;
int32_t x1873 = 128 * x1872;
int32_t x1874 = 64 * x1873;
bool x1898 = x1871 >= 1;
bool x1899;
if (x1898) {
x1899 = x1898;
} else {
x1899 = false;
}
int32_t x1904 = x1870 / 1;
int32_t x1905 = x1904 + 1;
int32_t x1909 = 32768 * x1905;
int32_t x1910 = x1909 * x1905;
int32_t x1906 = x1905 * x1905;
int32_t x1907 = 512 * x1906;
int32_t x1908 = 64 * x1907;
int32_t x1930 = x1787 / 2;
int32_t x1931 = x1930 + 1;
int32_t x1935 = 32768 * x1931;
int32_t x1936 = x1935 * x1931;
int32_t x1932 = x1931 * x1931;
int32_t x1933 = 512 * x1932;
int32_t x1934 = 64 * x1933;
bool x1953 = x1931 == 1;
bool x1954 = x1931 == x1905;
bool x1955 = x1953 || x1954;
bool x1956;
if (x1955) {
x1956 = x1955;
} else {
x1956 = false;
}
bool x1971 = x1905 >= 1;
bool x1972;
if (x1971) {
x1972 = x1971;
} else {
x1972 = false;
}
int32_t x1977 = x1904 / 1;
int32_t x1978 = x1977 + 1;
int32_t x1982 = 8192 * x1978;
int32_t x1983 = x1982 * x1978;
int32_t x1979 = x1978 * x1978;
int32_t x1980 = 128 * x1979;
int32_t x1981 = 64 * x1980;
int32_t x2005 = x1978 + 2;
bool x2006 = x2005 >= 3;
bool x2007;
if (x2006) {
x2007 = x2006;
} else {
x2007 = false;
}
int32_t x2012 = x2005 - 3;
int32_t x2013 = x2012 / 1;
int32_t x2014 = x2013 + 1;
int32_t x2018 = 8192 * x2014;
int32_t x2019 = x2018 * x2014;
int32_t x2015 = x2014 * x2014;
int32_t x2016 = 128 * x2015;
int32_t x2017 = 64 * x2016;
bool x2041 = x2014 >= 1;
bool x2042;
if (x2041) {
x2042 = x2041;
} else {
x2042 = false;
}
int32_t x2047 = x2013 / 1;
int32_t x2048 = x2047 + 1;
int32_t x2052 = 32768 * x2048;
int32_t x2053 = x2052 * x2048;
int32_t x2049 = x2048 * x2048;
int32_t x2050 = 512 * x2049;
int32_t x2051 = 64 * x2050;
bool x2070 = x1905 == 1;
bool x2071 = x1905 == x2048;
bool x2072 = x2070 || x2071;
bool x2073;
if (x2072) {
x2073 = x2072;
} else {
x2073 = false;
}
bool x2088 = x2048 >= 1;
bool x2089;
if (x2088) {
x2089 = x2088;
} else {
x2089 = false;
}
int32_t x2094 = x2047 / 1;
int32_t x2095 = x2094 + 1;
int32_t x2099 = 8192 * x2095;
int32_t x2100 = x2099 * x2095;
int32_t x2096 = x2095 * x2095;
int32_t x2097 = 128 * x2096;
int32_t x2098 = 64 * x2097;
int32_t x2122 = x2095 + 2;
bool x2123 = x2122 >= 3;
bool x2124;
if (x2123) {
x2124 = x2123;
} else {
x2124 = false;
}
int32_t x2129 = x2122 - 3;
int32_t x2130 = x2129 / 1;
int32_t x2131 = x2130 + 1;
int32_t x2135 = 8192 * x2131;
int32_t x2136 = x2135 * x2131;
int32_t x2132 = x2131 * x2131;
int32_t x2133 = 128 * x2132;
int32_t x2134 = 64 * x2133;
bool x2158 = x2131 >= 1;
bool x2159;
if (x2158) {
x2159 = x2158;
} else {
x2159 = false;
}
int32_t x2164 = x2130 / 1;
int32_t x2165 = x2164 + 1;
int32_t x2169 = 32768 * x2165;
int32_t x2170 = x2169 * x2165;
int32_t x2166 = x2165 * x2165;
int32_t x2167 = 512 * x2166;
int32_t x2168 = 64 * x2167;
bool x2187 = x2048 == 1;
bool x2188 = x2048 == x2165;
bool x2189 = x2187 || x2188;
bool x2190;
if (x2189) {
x2190 = x2189;
} else {
x2190 = false;
}
bool x2205 = x2165 >= 1;
bool x2206;
if (x2205) {
x2206 = x2205;
} else {
x2206 = false;
}
int32_t x2211 = x2164 / 1;
int32_t x2212 = x2211 + 1;
int32_t x2216 = 8192 * x2212;
int32_t x2217 = x2216 * x2212;
int32_t x2213 = x2212 * x2212;
int32_t x2214 = 128 * x2213;
int32_t x2215 = 64 * x2214;
int32_t x2239 = x2212 + 2;
bool x2240 = x2239 >= 3;
bool x2241;
if (x2240) {
x2241 = x2240;
} else {
x2241 = false;
}
int32_t x2246 = x2239 - 3;
int32_t x2247 = x2246 / 1;
int32_t x2248 = x2247 + 1;
int32_t x2252 = 8192 * x2248;
int32_t x2253 = x2252 * x2248;
int32_t x2249 = x2248 * x2248;
int32_t x2250 = 128 * x2249;
int32_t x2251 = 64 * x2250;
bool x2275 = x2248 >= 1;
bool x2276;
if (x2275) {
x2276 = x2275;
} else {
x2276 = false;
}
int32_t x2281 = x2247 / 1;
int32_t x2282 = x2281 + 1;
int32_t x2286 = 32768 * x2282;
int32_t x2287 = x2286 * x2282;
int32_t x2283 = x2282 * x2282;
int32_t x2284 = 512 * x2283;
int32_t x2285 = 64 * x2284;
bool x2304 = x2165 == 1;
bool x2305 = x2165 == x2282;
bool x2306 = x2304 || x2305;
bool x2307;
if (x2306) {
x2307 = x2306;
} else {
x2307 = false;
}
bool x2322 = x2282 >= 1;
bool x2323;
if (x2322) {
x2323 = x2322;
} else {
x2323 = false;
}
int32_t x2328 = x2281 / 1;
int32_t x2329 = x2328 + 1;
int32_t x2333 = 16384 * x2329;
int32_t x2334 = x2333 * x2329;
int32_t x2330 = x2329 * x2329;
int32_t x2331 = 256 * x2330;
int32_t x2332 = 64 * x2331;
int32_t x2356 = x2329 + 2;
bool x2357 = x2356 >= 3;
bool x2358;
if (x2357) {
x2358 = x2357;
} else {
x2358 = false;
}
int32_t x2363 = x2356 - 3;
int32_t x2364 = x2363 / 2;
int32_t x2365 = x2364 + 1;
int32_t x2369 = 16384 * x2365;
int32_t x2370 = x2369 * x2365;
int32_t x2366 = x2365 * x2365;
int32_t x2367 = 256 * x2366;
int32_t x2368 = 64 * x2367;
bool x2392 = x2365 >= 1;
bool x2393;
if (x2392) {
x2393 = x2392;
} else {
x2393 = false;
}
int32_t x2398 = x2364 / 1;
int32_t x2399 = x2398 + 1;
int32_t x2403 = 65536 * x2399;
int32_t x2404 = x2403 * x2399;
int32_t x2400 = x2399 * x2399;
int32_t x2401 = 1024 * x2400;
int32_t x2402 = 64 * x2401;
int32_t x2424 = x2281 / 2;
int32_t x2425 = x2424 + 1;
int32_t x2429 = 65536 * x2425;
int32_t x2430 = x2429 * x2425;
int32_t x2426 = x2425 * x2425;
int32_t x2427 = 1024 * x2426;
int32_t x2428 = 64 * x2427;
bool x2447 = x2425 == 1;
bool x2448 = x2425 == x2399;
bool x2449 = x2447 || x2448;
bool x2450;
if (x2449) {
x2450 = x2449;
} else {
x2450 = false;
}
bool x2465 = x2399 >= 1;
bool x2466;
if (x2465) {
x2466 = x2465;
} else {
x2466 = false;
}
int32_t x2471 = x2398 / 1;
int32_t x2472 = x2471 + 1;
int32_t x2476 = 16384 * x2472;
int32_t x2477 = x2476 * x2472;
int32_t x2473 = x2472 * x2472;
int32_t x2474 = 256 * x2473;
int32_t x2475 = 64 * x2474;
int32_t x2499 = x2472 + 2;
bool x2500 = x2499 >= 3;
bool x2501;
if (x2500) {
x2501 = x2500;
} else {
x2501 = false;
}
int32_t x2506 = x2499 - 3;
int32_t x2507 = x2506 / 1;
int32_t x2508 = x2507 + 1;
int32_t x2512 = 16384 * x2508;
int32_t x2513 = x2512 * x2508;
int32_t x2509 = x2508 * x2508;
int32_t x2510 = 256 * x2509;
int32_t x2511 = 64 * x2510;
bool x2535 = x2508 >= 1;
bool x2536;
if (x2535) {
x2536 = x2535;
} else {
x2536 = false;
}
int32_t x2541 = x2507 / 1;
int32_t x2542 = x2541 + 1;
int32_t x2546 = 65536 * x2542;
int32_t x2547 = x2546 * x2542;
int32_t x2543 = x2542 * x2542;
int32_t x2544 = 1024 * x2543;
int32_t x2545 = 64 * x2544;
bool x2564 = x2399 == 1;
bool x2565 = x2399 == x2542;
bool x2566 = x2564 || x2565;
bool x2567;
if (x2566) {
x2567 = x2566;
} else {
x2567 = false;
}
bool x2582 = x2542 >= 1;
bool x2583;
if (x2582) {
x2583 = x2582;
} else {
x2583 = false;
}
int32_t x2588 = x2541 / 1;
int32_t x2589 = x2588 + 1;
int32_t x2593 = 16384 * x2589;
int32_t x2594 = x2593 * x2589;
int32_t x2590 = x2589 * x2589;
int32_t x2591 = 256 * x2590;
int32_t x2592 = 64 * x2591;
int32_t x2616 = x2589 + 2;
bool x2617 = x2616 >= 3;
bool x2618;
if (x2617) {
x2618 = x2617;
} else {
x2618 = false;
}
int32_t x2623 = x2616 - 3;
int32_t x2624 = x2623 / 1;
int32_t x2625 = x2624 + 1;
int32_t x2629 = 16384 * x2625;
int32_t x2630 = x2629 * x2625;
int32_t x2626 = x2625 * x2625;
int32_t x2627 = 256 * x2626;
int32_t x2628 = 64 * x2627;
bool x2652 = x2625 >= 1;
bool x2653;
if (x2652) {
x2653 = x2652;
} else {
x2653 = false;
}
int32_t x2658 = x2624 / 1;
int32_t x2659 = x2658 + 1;
int32_t x2663 = 65536 * x2659;
int32_t x2664 = x2663 * x2659;
int32_t x2660 = x2659 * x2659;
int32_t x2661 = 1024 * x2660;
int32_t x2662 = 64 * x2661;
bool x2681 = x2542 == 1;
bool x2682 = x2542 == x2659;
bool x2683 = x2681 || x2682;
bool x2684;
if (x2683) {
x2684 = x2683;
} else {
x2684 = false;
}
bool x2699 = x2659 >= 1;
bool x2700;
if (x2699) {
x2700 = x2699;
} else {
x2700 = false;
}
int32_t x2705 = x2658 / 1;
int32_t x2706 = x2705 + 1;
int32_t x2710 = 16384 * x2706;
int32_t x2711 = x2710 * x2706;
int32_t x2707 = x2706 * x2706;
int32_t x2708 = 256 * x2707;
int32_t x2709 = 64 * x2708;
int32_t x2733 = x2706 + 2;
bool x2734 = x2733 >= 3;
bool x2735;
if (x2734) {
x2735 = x2734;
} else {
x2735 = false;
}
int32_t x2740 = x2733 - 3;
int32_t x2741 = x2740 / 1;
int32_t x2742 = x2741 + 1;
int32_t x2746 = 16384 * x2742;
int32_t x2747 = x2746 * x2742;
int32_t x2743 = x2742 * x2742;
int32_t x2744 = 256 * x2743;
int32_t x2745 = 64 * x2744;
bool x2769 = x2742 >= 1;
bool x2770;
if (x2769) {
x2770 = x2769;
} else {
x2770 = false;
}
int32_t x2775 = x2741 / 1;
int32_t x2776 = x2775 + 1;
int32_t x2780 = 65536 * x2776;
int32_t x2781 = x2780 * x2776;
int32_t x2777 = x2776 * x2776;
int32_t x2778 = 1024 * x2777;
int32_t x2779 = 64 * x2778;
bool x2798 = x2659 == 1;
bool x2799 = x2659 == x2776;
bool x2800 = x2798 || x2799;
bool x2801;
if (x2800) {
x2801 = x2800;
} else {
x2801 = false;
}
bool x2816 = x2776 >= 1;
bool x2817;
if (x2816) {
x2817 = x2816;
} else {
x2817 = false;
}
int32_t x2822 = x2775 / 1;
int32_t x2823 = x2822 + 1;
int32_t x2827 = 16384 * x2823;
int32_t x2828 = x2827 * x2823;
int32_t x2824 = x2823 * x2823;
int32_t x2825 = 256 * x2824;
int32_t x2826 = 64 * x2825;
int32_t x2850 = x2823 + 2;
bool x2851 = x2850 >= 3;
bool x2852;
if (x2851) {
x2852 = x2851;
} else {
x2852 = false;
}
int32_t x2857 = x2850 - 3;
int32_t x2858 = x2857 / 1;
int32_t x2859 = x2858 + 1;
int32_t x2863 = 16384 * x2859;
int32_t x2864 = x2863 * x2859;
int32_t x2860 = x2859 * x2859;
int32_t x2861 = 256 * x2860;
int32_t x2862 = 64 * x2861;
bool x2886 = x2859 >= 1;
bool x2887;
if (x2886) {
x2887 = x2886;
} else {
x2887 = false;
}
int32_t x2892 = x2858 / 1;
int32_t x2893 = x2892 + 1;
int32_t x2897 = 65536 * x2893;
int32_t x2898 = x2897 * x2893;
int32_t x2894 = x2893 * x2893;
int32_t x2895 = 1024 * x2894;
int32_t x2896 = 64 * x2895;
bool x2915 = x2776 == 1;
bool x2916 = x2776 == x2893;
bool x2917 = x2915 || x2916;
bool x2918;
if (x2917) {
x2918 = x2917;
} else {
x2918 = false;
}
bool x2933 = x2893 >= 1;
bool x2934;
if (x2933) {
x2934 = x2933;
} else {
x2934 = false;
}
int32_t x2939 = x2892 / 1;
int32_t x2940 = x2939 + 1;
int32_t x2944 = 16384 * x2940;
int32_t x2945 = x2944 * x2940;
int32_t x2941 = x2940 * x2940;
int32_t x2942 = 256 * x2941;
int32_t x2943 = 64 * x2942;
int32_t x2967 = x2940 + 2;
bool x2968 = x2967 >= 3;
bool x2969;
if (x2968) {
x2969 = x2968;
} else {
x2969 = false;
}
int32_t x2974 = x2967 - 3;
int32_t x2975 = x2974 / 1;
int32_t x2976 = x2975 + 1;
int32_t x2980 = 16384 * x2976;
int32_t x2981 = x2980 * x2976;
int32_t x2977 = x2976 * x2976;
int32_t x2978 = 256 * x2977;
int32_t x2979 = 64 * x2978;
bool x3003 = x2976 >= 1;
bool x3004;
if (x3003) {
x3004 = x3003;
} else {
x3004 = false;
}
int32_t x3009 = x2975 / 1;
int32_t x3010 = x3009 + 1;
int32_t x3014 = 65536 * x3010;
int32_t x3015 = x3014 * x3010;
int32_t x3011 = x3010 * x3010;
int32_t x3012 = 1024 * x3011;
int32_t x3013 = 64 * x3012;
bool x3032 = x2893 == 1;
bool x3033 = x2893 == x3010;
bool x3034 = x3032 || x3033;
bool x3035;
if (x3034) {
x3035 = x3034;
} else {
x3035 = false;
}
bool x3050 = x3010 >= 1;
bool x3051;
if (x3050) {
x3051 = x3050;
} else {
x3051 = false;
}
int32_t x3056 = x3009 / 1;
int32_t x3057 = x3056 + 1;
int32_t x3061 = 32768 * x3057;
int32_t x3062 = x3061 * x3057;
int32_t x3058 = x3057 * x3057;
int32_t x3059 = 512 * x3058;
int32_t x3060 = 64 * x3059;
int32_t x3084 = x3057 + 2;
bool x3085 = x3084 >= 3;
bool x3086;
if (x3085) {
x3086 = x3085;
} else {
x3086 = false;
}
int32_t x3091 = x3084 - 3;
int32_t x3092 = x3091 / 2;
int32_t x3093 = x3092 + 1;
int32_t x3097 = 32768 * x3093;
int32_t x3098 = x3097 * x3093;
int32_t x3094 = x3093 * x3093;
int32_t x3095 = 512 * x3094;
int32_t x3096 = 64 * x3095;
bool x3120 = x3093 >= 1;
bool x3121;
if (x3120) {
x3121 = x3120;
} else {
x3121 = false;
}
int32_t x3126 = x3092 / 1;
int32_t x3127 = x3126 + 1;
int32_t x3131 = 131072 * x3127;
int32_t x3132 = x3131 * x3127;
int32_t x3128 = x3127 * x3127;
int32_t x3129 = 2048 * x3128;
int32_t x3130 = 64 * x3129;
int32_t x3152 = x3009 / 2;
int32_t x3153 = x3152 + 1;
int32_t x3157 = 131072 * x3153;
int32_t x3158 = x3157 * x3153;
int32_t x3154 = x3153 * x3153;
int32_t x3155 = 2048 * x3154;
int32_t x3156 = 64 * x3155;
bool x3175 = x3153 == 1;
bool x3176 = x3153 == x3127;
bool x3177 = x3175 || x3176;
bool x3178;
if (x3177) {
x3178 = x3177;
} else {
x3178 = false;
}
bool x3193 = x3127 >= 1;
bool x3194;
if (x3193) {
x3194 = x3193;
} else {
x3194 = false;
}
int32_t x3199 = x3126 / 1;
int32_t x3200 = x3199 + 1;
int32_t x3204 = 32768 * x3200;
int32_t x3205 = x3204 * x3200;
int32_t x3201 = x3200 * x3200;
int32_t x3202 = 512 * x3201;
int32_t x3203 = 64 * x3202;
int32_t x3227 = x3200 + 2;
bool x3228 = x3227 >= 3;
bool x3229;
if (x3228) {
x3229 = x3228;
} else {
x3229 = false;
}
int32_t x3234 = x3227 - 3;
int32_t x3235 = x3234 / 1;
int32_t x3236 = x3235 + 1;
int32_t x3240 = 32768 * x3236;
int32_t x3241 = x3240 * x3236;
int32_t x3237 = x3236 * x3236;
int32_t x3238 = 512 * x3237;
int32_t x3239 = 64 * x3238;
bool x3263 = x3236 >= 1;
bool x3264;
if (x3263) {
x3264 = x3263;
} else {
x3264 = false;
}
int32_t x3269 = x3235 / 1;
int32_t x3270 = x3269 + 1;
int32_t x3274 = 131072 * x3270;
int32_t x3275 = x3274 * x3270;
int32_t x3271 = x3270 * x3270;
int32_t x3272 = 2048 * x3271;
int32_t x3273 = 64 * x3272;
bool x3292 = x3127 == 1;
bool x3293 = x3127 == x3270;
bool x3294 = x3292 || x3293;
bool x3295;
if (x3294) {
x3295 = x3294;
} else {
x3295 = false;
}
bool x3310 = x3270 >= 1;
bool x3311;
if (x3310) {
x3311 = x3310;
} else {
x3311 = false;
}
int32_t x3316 = x3269 / 1;
int32_t x3317 = x3316 + 1;
int32_t x3321 = 32768 * x3317;
int32_t x3322 = x3321 * x3317;
int32_t x3318 = x3317 * x3317;
int32_t x3319 = 512 * x3318;
int32_t x3320 = 64 * x3319;
int32_t x3344 = x3317 + 2;
bool x3345 = x3344 >= 3;
bool x3346;
if (x3345) {
x3346 = x3345;
} else {
x3346 = false;
}
int32_t x3351 = x3344 - 3;
int32_t x3352 = x3351 / 1;
int32_t x3353 = x3352 + 1;
int32_t x3357 = 32768 * x3353;
int32_t x3358 = x3357 * x3353;
int32_t x3354 = x3353 * x3353;
int32_t x3355 = 512 * x3354;
int32_t x3356 = 64 * x3355;
bool x3380 = x3353 >= 1;
bool x3381;
if (x3380) {
x3381 = x3380;
} else {
x3381 = false;
}
int32_t x3386 = x3352 / 1;
int32_t x3387 = x3386 + 1;
int32_t x3391 = 131072 * x3387;
int32_t x3392 = x3391 * x3387;
int32_t x3388 = x3387 * x3387;
int32_t x3389 = 2048 * x3388;
int32_t x3390 = 64 * x3389;
bool x3409 = x3270 == 1;
bool x3410 = x3270 == x3387;
bool x3411 = x3409 || x3410;
bool x3412;
if (x3411) {
x3412 = x3411;
} else {
x3412 = false;
}
bool x3427 = x3387 >= 2;
bool x3428;
if (x3427) {
x3428 = x3427;
} else {
x3428 = false;
}
int32_t x3437 = x3387 - 2;
int32_t x3438 = x3437 / 1;
int32_t x3439 = x3438 + 1;
int32_t x3443 = 131072 * x3439;
int32_t x3444 = x3443 * x3439;
int32_t x3440 = x3439 * x3439;
int32_t x3441 = 2048 * x3440;
int32_t x3442 = 64 * x3441;
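// The equality flags from x3700 onward mirror the forward-pass shape checks in reverse layer order,
// which suggests they guard the broadcast/accumulation shapes used by the backward pass of the
// residual additions; the backward pass itself is not shown in this excerpt.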
bool x3700 = x3387 == x3270;
bool x3701;
if (x3700) {
x3701 = x3700;
} else {
x3701 = false;
}
bool x3702 = x3387 == 1;
bool x3703 = x3702 || x3700;
bool x3704;
if (x3703) {
x3704 = x3703;
} else {
x3704 = false;
}
bool x3771 = x3270 == x3127;
bool x3772;
if (x3771) {
x3772 = x3771;
} else {
x3772 = false;
}
bool x3773 = x3409 || x3771;
bool x3774;
if (x3773) {
x3774 = x3773;
} else {
x3774 = false;
}
bool x3841 = x3127 == x3153;
bool x3842;
if (x3841) {
x3842 = x3841;
} else {
x3842 = false;
}
bool x3843 = x3292 || x3841;
bool x3844;
if (x3843) {
x3844 = x3843;
} else {
x3844 = false;
}
bool x3923 = x3010 == x2893;
bool x3924;
if (x3923) {
x3924 = x3923;
} else {
x3924 = false;
}
bool x3925 = x3010 == 1;
bool x3926 = x3925 || x3923;
bool x3927;
if (x3926) {
x3927 = x3926;
} else {
x3927 = false;
}
bool x3994 = x2893 == x2776;
bool x3995;
if (x3994) {
x3995 = x3994;
} else {
x3995 = false;
}
bool x3996 = x3032 || x3994;
bool x3997;
if (x3996) {
x3997 = x3996;
} else {
x3997 = false;
}
bool x4064 = x2776 == x2659;
bool x4065;
if (x4064) {
x4065 = x4064;
} else {
x4065 = false;
}
bool x4066 = x2915 || x4064;
bool x4067;
if (x4066) {
x4067 = x4066;
} else {
x4067 = false;
}
bool x4134 = x2659 == x2542;
bool x4135;
if (x4134) {
x4135 = x4134;
} else {
x4135 = false;
}
bool x4136 = x2798 || x4134;
bool x4137;
if (x4136) {
x4137 = x4136;
} else {
x4137 = false;
}
bool x4204 = x2542 == x2399;
bool x4205;
if (x4204) {
x4205 = x4204;
} else {
x4205 = false;
}
bool x4206 = x2681 || x4204;
bool x4207;
if (x4206) {
x4207 = x4206;
} else {
x4207 = false;
}
bool x4274 = x2399 == x2425;
bool x4275;
if (x4274) {
x4275 = x4274;
} else {
x4275 = false;
}
bool x4276 = x2564 || x4274;
bool x4277;
if (x4276) {
x4277 = x4276;
} else {
x4277 = false;
}
bool x4356 = x2282 == x2165;
bool x4357;
if (x4356) {
x4357 = x4356;
} else {
x4357 = false;
}
bool x4358 = x2282 == 1;
bool x4359 = x4358 || x4356;
bool x4360;
if (x4359) {
x4360 = x4359;
} else {
x4360 = false;
}
bool x4427 = x2165 == x2048;
bool x4428;
if (x4427) {
x4428 = x4427;
} else {
x4428 = false;
}
bool x4429 = x2304 || x4427;
bool x4430;
if (x4429) {
x4430 = x4429;
} else {
x4430 = false;
}
bool x4497 = x2048 == x1905;
bool x4498;
if (x4497) {
x4498 = x4497;
} else {
x4498 = false;
}
bool x4499 = x2187 || x4497;
bool x4500;
if (x4499) {
x4500 = x4499;
} else {
x4500 = false;
}
bool x4567 = x1905 == x1931;
bool x4568;
if (x4567) {
x4568 = x4567;
} else {
x4568 = false;
}
bool x4569 = x2070 || x4567;
bool x4570;
if (x4569) {
x4570 = x4569;
} else {
x4570 = false;
}
bool x4649 = x1788 == x1671;
bool x4650;
if (x4649) {
x4650 = x4649;
} else {
x4650 = false;
}
bool x4651 = x1788 == 1;
bool x4652 = x4651 || x4649;
bool x4653;
if (x4652) {
x4653 = x4652;
} else {
x4653 = false;
}
bool x4720 = x1671 == x1531;
bool x4721;
if (x4720) {
x4721 = x4720;
} else {
x4721 = false;
}
bool x4722 = x1810 || x4720;
bool x4723;
if (x4722) {
x4723 = x4722;
} else {
x4723 = false;
}
bool x4790 = x1531 == x1461;
bool x4791;
if (x4790) {
x4791 = x4790;
} else {
x4791 = false;
}
bool x4792 = x1693 || x4790;
bool x4793;
if (x4792) {
x4793 = x4792;
} else {
x4793 = false;
}
int32_t x6494 = x1394 / 10;
double x6499 = (double)x11;
int64_t x6525 = (int64_t)x11;
float x6529 = (float)x11;
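// Outer loop: 4 training epochs. x1394 (= x11 / 64) is the number of 64-image batches per epoch;
// x6494/x6499/x6525/x6529 are loop-invariant values derived from the dataset size x11, presumably
// used for progress reporting and loss averaging further down.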
for(int x1386=0; x1386 < 4; x1386++) {
struct timeval begin_1, end_1, diff_1;
float x1388 = 0.0f;
float x1389 = x1388;
float x1390 = x1389;
int32_t x1391 = x1386 + 1;
printf("Start training epoch %d\n",x1391);
gettimeofday(&begin_1, NULL);
for(int x1396=0; x1396 < x1394; x1396++) {
int32_t x1397 = x1396 * 64;
int32_t x1398 = x1397 * 3072;
float* x1399 = x13+x1398;
int* x1400 = x14+x1397;
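// x13 and x14 appear to hold the full training images and labels on the host; x1399/x1400 point at the
// current batch: 64 images of 3*32*32 = 3072 floats each, plus 64 integer labels.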
// Tensor 'toGPU' invocation.
float* x1402 = (float*)myGpuMalloc(196608 * sizeof(float));
CUDA_CALL(cudaMemcpy(x1402, x1399, 196608 * sizeof(float), cudaMemcpyHostToDevice));
float* x1404 = (float*)myGpuMalloc(2 * sizeof(float));
int* x1405 = (int32_t*)myGpuMalloc(64 * sizeof(int32_t));
CUDA_CALL(cudaMemcpy(x1405, x1400, 64 * sizeof(int32_t), cudaMemcpyHostToDevice));
float* x1407 = (float*)myGpuMalloc(1 * sizeof(float));
float* x1408 = (float*)myGpuMalloc(1 * sizeof(float));
// allocate GPU memory to hold the final loss (later saved back to a CPU tensor)
float* x1410 = (float*)myGpuMalloc(1 * sizeof(float));
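// Stem of the network: a 3x3 convolution (3 -> 64 channels, pad 1, stride 1) over the 64x3x32x32 batch,
// followed by batch norm, ReLU, and 2x2 max pooling in the blocks below.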
float* x1418 = (float*)myGpuMalloc(x1417 * sizeof(float));
float* x1419 = (float*)myMalloc(1 * sizeof(float));;
x1419[0] = 0.0f;
float* x1421 = (float*)myMalloc(1 * sizeof(float));;
x1421[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 3, 32, 32));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 3, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1421, in_desc, x1402, filt_desc, x751,
conv_desc, algo, ws_data, ws_size,
x1419, out_desc, x1418));
};
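// Every convolution in this section follows the same cuDNN idiom: build tensor/filter/convolution
// descriptors, let cudnnGetConvolutionForwardAlgorithm pick the fastest algorithm (this API was removed
// in cuDNN 8, so the code presumably targets cuDNN 7.x), query the workspace size, take workspace from
// the myGpuMalloc arena, and call cudnnConvolutionForward. Descriptors and workspace are never freed
// explicitly; the code appears to rely on resetting the arena (see the gpuMallocAddr snapshot in x1383).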
float* x1424 = (float*)myGpuMalloc(x1417 * sizeof(float));
float* x1425 = (float*)myGpuMalloc(x1415 * sizeof(float));
float* x1426 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1427 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1428 = (float*)myMalloc(1 * sizeof(float));;
x1428[0] = 0.0f;
float* x1430 = (float*)myMalloc(1 * sizeof(float));;
x1430[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1430, x1428, in_desc, x1418, out_desc, x1425, sbmv_desc, x913,
x1048, 0.1, x415, x625, 1.0E-5,
x1426, x1427));
};
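// Batch norm in training mode: following the cudnnBatchNormalizationForwardTraining signature,
// x913/x1048 are the per-channel scale and bias, x415/x625 the running mean and variance (updated with
// exponential-average factor 0.1), epsilon is 1e-5, and x1426/x1427 receive the saved mean and inverse
// variance reused by the backward pass.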
float* x1433 = (float*)myGpuMalloc(x1417 * sizeof(float));
float* x1434 = (float*)myMalloc(1 * sizeof(float));;
x1434[0] = 0.0f;
float* x1436 = (float*)myMalloc(1 * sizeof(float));;
x1436[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1436, x_desc, x1425, x1434, x_desc, x1425));
};
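// The ReLU is applied in place: x1425 serves as both input and output of cudnnActivationForward.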
float* x1439 = (float*)myMalloc(1 * sizeof(float));;
x1439[0] = 0.0f;
float* x1441 = (float*)myMalloc(1 * sizeof(float));;
x1441[0] = 1.0f;
float* x1451 = (float*)myGpuMalloc(x1450 * sizeof(float));
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN,
/*windowH, windowW=*/ 2, 2, /*padH, padW=*/ 0, 0, /*strideH, strideW=*/ 2, 2));
CUDNN_CALL(cudnnPoolingForward(
cudnnHandle,
poolingDesc,
x1441, in_desc, x1425, x1439, out_desc, x1451));
};
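// 2x2 max pooling with stride 2 reduces the 64x64x32x32 activation to 64x64x16x16 (x1445 = 16 for the
// 32x32 input above); x1451 holds the pooled result.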
float* x1453 = (float*)myGpuMalloc(x1450 * sizeof(float));
if (x1455) {
} else {
assert(false && "ERROR not specified");
}
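// This if/else pattern (assert(false && "ERROR not specified")) is how the generator materializes the
// precomputed shape guards: if the guard flag is false, the program aborts before issuing an invalid
// cuDNN call.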
float* x1467 = (float*)myGpuMalloc(x1466 * sizeof(float));
float* x1468 = (float*)myMalloc(1 * sizeof(float));;
x1468[0] = 0.0f;
float* x1470 = (float*)myMalloc(1 * sizeof(float));;
x1470[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1470, in_desc, x1451, filt_desc, x994,
conv_desc, algo, ws_data, ws_size,
x1468, out_desc, x1467));
};
float* x1473 = (float*)myGpuMalloc(x1466 * sizeof(float));
float* x1474 = (float*)myGpuMalloc(x1464 * sizeof(float));
float* x1475 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1476 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1477 = (float*)myMalloc(1 * sizeof(float));;
x1477[0] = 0.0f;
float* x1479 = (float*)myMalloc(1 * sizeof(float));;
x1479[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1479, x1477, in_desc, x1467, out_desc, x1474, sbmv_desc, x373,
x454, 0.1, x637, x448, 1.0E-5,
x1475, x1476));
};
float* x1482 = (float*)myGpuMalloc(x1466 * sizeof(float));
float* x1483 = (float*)myMalloc(1 * sizeof(float));;
x1483[0] = 0.0f;
float* x1485 = (float*)myMalloc(1 * sizeof(float));;
x1485[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1485, x_desc, x1474, x1483, x_desc, x1474));
};
if (x1490) {
} else {
assert(false && "ERROR not specified");
}
float* x1503 = (float*)myGpuMalloc(x1502 * sizeof(float));
float* x1504 = (float*)myMalloc(1 * sizeof(float));;
x1504[0] = 0.0f;
float* x1506 = (float*)myMalloc(1 * sizeof(float));;
x1506[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1506, in_desc, x1474, filt_desc, x565,
conv_desc, algo, ws_data, ws_size,
x1504, out_desc, x1503));
};
float* x1509 = (float*)myGpuMalloc(x1502 * sizeof(float));
float* x1510 = (float*)myGpuMalloc(x1500 * sizeof(float));
float* x1511 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1512 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1513 = (float*)myMalloc(1 * sizeof(float));;
x1513[0] = 0.0f;
float* x1515 = (float*)myMalloc(1 * sizeof(float));;
x1515[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1515, x1513, in_desc, x1503, out_desc, x1510, sbmv_desc, x787,
x442, 0.1, x610, x769, 1.0E-5,
x1511, x1512));
};
float* x1518 = (float*)myGpuMalloc(x1502 * sizeof(float));
float* x1519 = (float*)myMalloc(1 * sizeof(float));;
x1519[0] = 0.0f;
float* x1521 = (float*)myMalloc(1 * sizeof(float));;
x1521[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1521, x_desc, x1510, x1519, x_desc, x1510));
};
if (x1525) {
} else {
assert(false && "ERROR not specified");
}
float* x1537 = (float*)myGpuMalloc(x1536 * sizeof(float));
float* x1538 = (float*)myMalloc(1 * sizeof(float));;
x1538[0] = 0.0f;
float* x1540 = (float*)myMalloc(1 * sizeof(float));;
x1540[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1540, in_desc, x1510, filt_desc, x391,
conv_desc, algo, ws_data, ws_size,
x1538, out_desc, x1537));
};
float* x1543 = (float*)myGpuMalloc(x1536 * sizeof(float));
float* x1544 = (float*)myGpuMalloc(x1534 * sizeof(float));
float* x1545 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1546 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1547 = (float*)myMalloc(1 * sizeof(float));;
x1547[0] = 0.0f;
float* x1549 = (float*)myMalloc(1 * sizeof(float));;
x1549[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1549, x1547, in_desc, x1537, out_desc, x1544, sbmv_desc, x892,
x673, 0.1, x508, x403, 1.0E-5,
x1545, x1546));
};
float* x1552 = (float*)myGpuMalloc(x1536 * sizeof(float));
if (x1455) {
} else {
assert(false && "ERROR not specified");
}
float* x1560 = (float*)myGpuMalloc(x1559 * sizeof(float));
float* x1561 = (float*)myMalloc(1 * sizeof(float));;
x1561[0] = 0.0f;
float* x1563 = (float*)myMalloc(1 * sizeof(float));;
x1563[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1563, in_desc, x1451, filt_desc, x781,
conv_desc, algo, ws_data, ws_size,
x1561, out_desc, x1560));
};
float* x1566 = (float*)myGpuMalloc(x1559 * sizeof(float));
float* x1567 = (float*)myGpuMalloc(x1557 * sizeof(float));
float* x1568 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1569 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1570 = (float*)myMalloc(1 * sizeof(float));;
x1570[0] = 0.0f;
float* x1572 = (float*)myMalloc(1 * sizeof(float));;
x1572[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1572, x1570, in_desc, x1560, out_desc, x1567, sbmv_desc, x523,
x904, 0.1, x1087, x1024, 1.0E-5,
x1568, x1569));
};
float* x1575 = (float*)myGpuMalloc(x1559 * sizeof(float));
if (x1579) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1461) x Sym(1461), res: x Const(64) x Const(256) x Sym(1531) x Sym(1531)");
}
float* x1584 = (float*)myMalloc(1 * sizeof(float));;
x1584[0] = 1.0f;
float* x1586 = (float*)myMalloc(1 * sizeof(float));;
x1586[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1584, bias_desc, x1567, x1586, out_desc, x1544));
};
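// Residual connection of the first bottleneck block: cudnnAddTensor computes x1544 = x1567 + x1544,
// adding the 1x1 projection-shortcut branch (x1567, computed from the pooled stem output x1451) into
// the main branch (x1544); the ReLU below is then applied to the sum.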
float* x1589 = (float*)myMalloc(1 * sizeof(float));;
x1589[0] = 0.0f;
float* x1591 = (float*)myMalloc(1 * sizeof(float));;
x1591[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1591, x_desc, x1544, x1589, x_desc, x1544));
};
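// End of the first bottleneck block. The remaining blocks repeat the same pattern:
// 1x1 conv -> BN -> ReLU -> 3x3 conv -> BN -> ReLU -> 1x1 conv -> BN, plus a shortcut added with
// cudnnAddTensor and a final ReLU; only the channel counts and spatial sizes change.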
if (x1595) {
} else {
assert(false && "ERROR not specified");
}
float* x1607 = (float*)myGpuMalloc(x1606 * sizeof(float));
float* x1608 = (float*)myMalloc(1 * sizeof(float));;
x1608[0] = 0.0f;
float* x1610 = (float*)myMalloc(1 * sizeof(float));;
x1610[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1610, in_desc, x1544, filt_desc, x808,
conv_desc, algo, ws_data, ws_size,
x1608, out_desc, x1607));
};
float* x1613 = (float*)myGpuMalloc(x1606 * sizeof(float));
float* x1614 = (float*)myGpuMalloc(x1604 * sizeof(float));
float* x1615 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1616 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1617 = (float*)myMalloc(1 * sizeof(float));;
x1617[0] = 0.0f;
float* x1619 = (float*)myMalloc(1 * sizeof(float));;
x1619[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1619, x1617, in_desc, x1607, out_desc, x1614, sbmv_desc, x721,
x475, 0.1, x325, x601, 1.0E-5,
x1615, x1616));
};
float* x1622 = (float*)myGpuMalloc(x1606 * sizeof(float));
float* x1623 = (float*)myMalloc(1 * sizeof(float));;
x1623[0] = 0.0f;
float* x1625 = (float*)myMalloc(1 * sizeof(float));;
x1625[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1625, x_desc, x1614, x1623, x_desc, x1614));
};
if (x1630) {
} else {
assert(false && "ERROR not specified");
}
float* x1643 = (float*)myGpuMalloc(x1642 * sizeof(float));
float* x1644 = (float*)myMalloc(1 * sizeof(float));;
x1644[0] = 0.0f;
float* x1646 = (float*)myMalloc(1 * sizeof(float));;
x1646[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1646, in_desc, x1614, filt_desc, x544,
conv_desc, algo, ws_data, ws_size,
x1644, out_desc, x1643));
};
float* x1649 = (float*)myGpuMalloc(x1642 * sizeof(float));
float* x1650 = (float*)myGpuMalloc(x1640 * sizeof(float));
float* x1651 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1652 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1653 = (float*)myMalloc(1 * sizeof(float));;
x1653[0] = 0.0f;
float* x1655 = (float*)myMalloc(1 * sizeof(float));;
x1655[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1655, x1653, in_desc, x1643, out_desc, x1650, sbmv_desc, x919,
x754, 0.1, x427, x1027, 1.0E-5,
x1651, x1652));
};
float* x1658 = (float*)myGpuMalloc(x1642 * sizeof(float));
float* x1659 = (float*)myMalloc(1 * sizeof(float));;
x1659[0] = 0.0f;
float* x1661 = (float*)myMalloc(1 * sizeof(float));;
x1661[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1661, x_desc, x1650, x1659, x_desc, x1650));
};
if (x1665) {
} else {
assert(false && "ERROR not specified");
}
float* x1677 = (float*)myGpuMalloc(x1676 * sizeof(float));
float* x1678 = (float*)myMalloc(1 * sizeof(float));;
x1678[0] = 0.0f;
float* x1680 = (float*)myMalloc(1 * sizeof(float));;
x1680[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1680, in_desc, x1650, filt_desc, x685,
conv_desc, algo, ws_data, ws_size,
x1678, out_desc, x1677));
};
float* x1683 = (float*)myGpuMalloc(x1676 * sizeof(float));
float* x1684 = (float*)myGpuMalloc(x1674 * sizeof(float));
float* x1685 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1686 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1687 = (float*)myMalloc(1 * sizeof(float));;
x1687[0] = 0.0f;
float* x1689 = (float*)myMalloc(1 * sizeof(float));;
x1689[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1689, x1687, in_desc, x1677, out_desc, x1684, sbmv_desc, x469,
x316, 0.1, x568, x793, 1.0E-5,
x1685, x1686));
};
float* x1692 = (float*)myGpuMalloc(x1676 * sizeof(float));
if (x1696) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1531) x Sym(1531), res: x Const(64) x Const(256) x Sym(1671) x Sym(1671)");
}
float* x1701 = (float*)myMalloc(1 * sizeof(float));;
x1701[0] = 1.0f;
float* x1703 = (float*)myMalloc(1 * sizeof(float));;
x1703[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1701, bias_desc, x1544, x1703, out_desc, x1684));
};
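// Shortcut of the second bottleneck block: here the identity path is used, so the previous block's
// output x1544 is added directly into x1684 (x1684 = x1544 + x1684) before the ReLU below.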
float* x1706 = (float*)myMalloc(1 * sizeof(float));;
x1706[0] = 0.0f;
float* x1708 = (float*)myMalloc(1 * sizeof(float));;
x1708[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1708, x_desc, x1684, x1706, x_desc, x1684));
};
if (x1712) {
} else {
assert(false && "ERROR not specified");
}
float* x1724 = (float*)myGpuMalloc(x1723 * sizeof(float));
float* x1725 = (float*)myMalloc(1 * sizeof(float));;
x1725[0] = 0.0f;
float* x1727 = (float*)myMalloc(1 * sizeof(float));;
x1727[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1727, in_desc, x1684, filt_desc, x745,
conv_desc, algo, ws_data, ws_size,
x1725, out_desc, x1724));
};
float* x1730 = (float*)myGpuMalloc(x1723 * sizeof(float));
float* x1731 = (float*)myGpuMalloc(x1721 * sizeof(float));
float* x1732 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1733 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1734 = (float*)myMalloc(1 * sizeof(float));;
x1734[0] = 0.0f;
float* x1736 = (float*)myMalloc(1 * sizeof(float));;
x1736[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1736, x1734, in_desc, x1724, out_desc, x1731, sbmv_desc, x538,
x367, 0.1, x1066, x856, 1.0E-5,
x1732, x1733));
};
float* x1739 = (float*)myGpuMalloc(x1723 * sizeof(float));
float* x1740 = (float*)myMalloc(1 * sizeof(float));;
x1740[0] = 0.0f;
float* x1742 = (float*)myMalloc(1 * sizeof(float));;
x1742[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1742, x_desc, x1731, x1740, x_desc, x1731));
};
if (x1747) {
} else {
assert(false && "ERROR not specified");
}
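// 3x3 convolution, 64 -> 64 channels, padding 1, stride 1.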
float* x1760 = (float*)myGpuMalloc(x1759 * sizeof(float));
float* x1761 = (float*)myMalloc(1 * sizeof(float));;
x1761[0] = 0.0f;
float* x1763 = (float*)myMalloc(1 * sizeof(float));;
x1763[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1763, in_desc, x1731, filt_desc, x514,
conv_desc, algo, ws_data, ws_size,
x1761, out_desc, x1760));
};
float* x1766 = (float*)myGpuMalloc(x1759 * sizeof(float));
float* x1767 = (float*)myGpuMalloc(x1757 * sizeof(float));
float* x1768 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1769 = (float*)myGpuMalloc(64 * sizeof(float));
float* x1770 = (float*)myMalloc(1 * sizeof(float));;
x1770[0] = 0.0f;
float* x1772 = (float*)myMalloc(1 * sizeof(float));;
x1772[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1772, x1770, in_desc, x1760, out_desc, x1767, sbmv_desc, x511,
x700, 0.1, x832, x649, 1.0E-5,
x1768, x1769));
};
float* x1775 = (float*)myGpuMalloc(x1759 * sizeof(float));
float* x1776 = (float*)myMalloc(1 * sizeof(float));;
x1776[0] = 0.0f;
float* x1778 = (float*)myMalloc(1 * sizeof(float));;
x1778[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1778, x_desc, x1767, x1776, x_desc, x1767));
};
if (x1782) {
} else {
assert(false && "ERROR not specified");
}
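// 1x1 convolution expanding 64 -> 256 channels, followed by batch norm, the residual
// add of the block input x1684, and an in-place ReLU.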
float* x1794 = (float*)myGpuMalloc(x1793 * sizeof(float));
float* x1795 = (float*)myMalloc(1 * sizeof(float));;
x1795[0] = 0.0f;
float* x1797 = (float*)myMalloc(1 * sizeof(float));;
x1797[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1797, in_desc, x1767, filt_desc, x556,
conv_desc, algo, ws_data, ws_size,
x1795, out_desc, x1794));
};
float* x1800 = (float*)myGpuMalloc(x1793 * sizeof(float));
float* x1801 = (float*)myGpuMalloc(x1791 * sizeof(float));
float* x1802 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1803 = (float*)myGpuMalloc(256 * sizeof(float));
float* x1804 = (float*)myMalloc(1 * sizeof(float));;
x1804[0] = 0.0f;
float* x1806 = (float*)myMalloc(1 * sizeof(float));;
x1806[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1806, x1804, in_desc, x1794, out_desc, x1801, sbmv_desc, x406,
x1036, 0.1, x847, x694, 1.0E-5,
x1802, x1803));
};
float* x1809 = (float*)myGpuMalloc(x1793 * sizeof(float));
if (x1813) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1671) x Sym(1671), res: x Const(64) x Const(256) x Sym(1788) x Sym(1788)");
}
float* x1818 = (float*)myMalloc(1 * sizeof(float));;
x1818[0] = 1.0f;
float* x1820 = (float*)myMalloc(1 * sizeof(float));;
x1820[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1818, bias_desc, x1684, x1820, out_desc, x1801));
};
float* x1823 = (float*)myMalloc(1 * sizeof(float));;
x1823[0] = 0.0f;
float* x1825 = (float*)myMalloc(1 * sizeof(float));;
x1825[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1825, x_desc, x1801, x1823, x_desc, x1801));
};
if (x1829) {
} else {
assert(false && "ERROR not specified");
}
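// New stage: 1x1 convolution reducing 256 -> 128 channels on the main path.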
float* x1841 = (float*)myGpuMalloc(x1840 * sizeof(float));
float* x1842 = (float*)myMalloc(1 * sizeof(float));;
x1842[0] = 0.0f;
float* x1844 = (float*)myMalloc(1 * sizeof(float));;
x1844[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1844, in_desc, x1801, filt_desc, x328,
conv_desc, algo, ws_data, ws_size,
x1842, out_desc, x1841));
};
float* x1847 = (float*)myGpuMalloc(x1840 * sizeof(float));
float* x1848 = (float*)myGpuMalloc(x1838 * sizeof(float));
float* x1849 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1850 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1851 = (float*)myMalloc(1 * sizeof(float));;
x1851[0] = 0.0f;
float* x1853 = (float*)myMalloc(1 * sizeof(float));;
x1853[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1853, x1851, in_desc, x1841, out_desc, x1848, sbmv_desc, x547,
x811, 0.1, x907, x697, 1.0E-5,
x1849, x1850));
};
float* x1856 = (float*)myGpuMalloc(x1840 * sizeof(float));
float* x1857 = (float*)myMalloc(1 * sizeof(float));;
x1857[0] = 0.0f;
float* x1859 = (float*)myMalloc(1 * sizeof(float));;
x1859[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1859, x_desc, x1848, x1857, x_desc, x1848));
};
if (x1864) {
} else {
assert(false && "ERROR not specified");
}
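// 3x3 convolution, 128 -> 128 channels, padding 1, stride 2 (spatial downsampling).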
float* x1877 = (float*)myGpuMalloc(x1876 * sizeof(float));
float* x1878 = (float*)myMalloc(1 * sizeof(float));;
x1878[0] = 0.0f;
float* x1880 = (float*)myMalloc(1 * sizeof(float));;
x1880[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1880, in_desc, x1848, filt_desc, x376,
conv_desc, algo, ws_data, ws_size,
x1878, out_desc, x1877));
};
float* x1883 = (float*)myGpuMalloc(x1876 * sizeof(float));
float* x1884 = (float*)myGpuMalloc(x1874 * sizeof(float));
float* x1885 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1886 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1887 = (float*)myMalloc(1 * sizeof(float));;
x1887[0] = 0.0f;
float* x1889 = (float*)myMalloc(1 * sizeof(float));;
x1889[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1889, x1887, in_desc, x1877, out_desc, x1884, sbmv_desc, x1051,
x865, 0.1, x679, x424, 1.0E-5,
x1885, x1886));
};
float* x1892 = (float*)myGpuMalloc(x1876 * sizeof(float));
float* x1893 = (float*)myMalloc(1 * sizeof(float));;
x1893[0] = 0.0f;
float* x1895 = (float*)myMalloc(1 * sizeof(float));;
x1895[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1895, x_desc, x1884, x1893, x_desc, x1884));
};
if (x1899) {
} else {
assert(false && "ERROR not specified");
}
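// 1x1 convolution expanding 128 -> 512 channels, followed by batch norm.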
float* x1911 = (float*)myGpuMalloc(x1910 * sizeof(float));
float* x1912 = (float*)myMalloc(1 * sizeof(float));;
x1912[0] = 0.0f;
float* x1914 = (float*)myMalloc(1 * sizeof(float));;
x1914[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1914, in_desc, x1884, filt_desc, x613,
conv_desc, algo, ws_data, ws_size,
x1912, out_desc, x1911));
};
float* x1917 = (float*)myGpuMalloc(x1910 * sizeof(float));
float* x1918 = (float*)myGpuMalloc(x1908 * sizeof(float));
float* x1919 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1920 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1921 = (float*)myMalloc(1 * sizeof(float));;
x1921[0] = 0.0f;
float* x1923 = (float*)myMalloc(1 * sizeof(float));;
x1923[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1923, x1921, in_desc, x1911, out_desc, x1918, sbmv_desc, x730,
x925, 0.1, x742, x598, 1.0E-5,
x1919, x1920));
};
float* x1926 = (float*)myGpuMalloc(x1910 * sizeof(float));
if (x1829) {
} else {
assert(false && "ERROR not specified");
}
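// Projection shortcut: a strided 1x1 convolution (256 -> 512 channels, stride 2) maps the
// stage input x1801 to the same shape as the main path so the two can be added.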
float* x1937 = (float*)myGpuMalloc(x1936 * sizeof(float));
float* x1938 = (float*)myMalloc(1 * sizeof(float));;
x1938[0] = 0.0f;
float* x1940 = (float*)myMalloc(1 * sizeof(float));;
x1940[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1940, in_desc, x1801, filt_desc, x1069,
conv_desc, algo, ws_data, ws_size,
x1938, out_desc, x1937));
};
float* x1943 = (float*)myGpuMalloc(x1936 * sizeof(float));
float* x1944 = (float*)myGpuMalloc(x1934 * sizeof(float));
float* x1945 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1946 = (float*)myGpuMalloc(512 * sizeof(float));
float* x1947 = (float*)myMalloc(1 * sizeof(float));;
x1947[0] = 0.0f;
float* x1949 = (float*)myMalloc(1 * sizeof(float));;
x1949[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1949, x1947, in_desc, x1937, out_desc, x1944, sbmv_desc, x916,
x652, 0.1, x421, x364, 1.0E-5,
x1945, x1946));
};
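// Shape guard, then the projected shortcut x1944 is added into the main-path batch-norm
// output x1918, followed by an in-place ReLU.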
float* x1952 = (float*)myGpuMalloc(x1936 * sizeof(float));
if (x1956) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1931) x Sym(1931), res: x Const(64) x Const(512) x Sym(1905) x Sym(1905)");
}
float* x1961 = (float*)myMalloc(1 * sizeof(float));;
x1961[0] = 1.0f;
float* x1963 = (float*)myMalloc(1 * sizeof(float));;
x1963[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x1961, bias_desc, x1944, x1963, out_desc, x1918));
};
float* x1966 = (float*)myMalloc(1 * sizeof(float));;
x1966[0] = 0.0f;
float* x1968 = (float*)myMalloc(1 * sizeof(float));;
x1968[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x1968, x_desc, x1918, x1966, x_desc, x1918));
};
if (x1972) {
} else {
assert(false && "ERROR not specified");
}
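// Next bottleneck block: 1x1 convolution reducing 512 -> 128 channels.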
float* x1984 = (float*)myGpuMalloc(x1983 * sizeof(float));
float* x1985 = (float*)myMalloc(1 * sizeof(float));;
x1985[0] = 0.0f;
float* x1987 = (float*)myMalloc(1 * sizeof(float));;
x1987[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x1987, in_desc, x1918, filt_desc, x1063,
conv_desc, algo, ws_data, ws_size,
x1985, out_desc, x1984));
};
float* x1990 = (float*)myGpuMalloc(x1983 * sizeof(float));
float* x1991 = (float*)myGpuMalloc(x1981 * sizeof(float));
float* x1992 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1993 = (float*)myGpuMalloc(128 * sizeof(float));
float* x1994 = (float*)myMalloc(1 * sizeof(float));;
x1994[0] = 0.0f;
float* x1996 = (float*)myMalloc(1 * sizeof(float));;
x1996[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x1996, x1994, in_desc, x1984, out_desc, x1991, sbmv_desc, x961,
x346, 0.1, x595, x826, 1.0E-5,
x1992, x1993));
};
float* x1999 = (float*)myGpuMalloc(x1983 * sizeof(float));
float* x2000 = (float*)myMalloc(1 * sizeof(float));;
x2000[0] = 0.0f;
float* x2002 = (float*)myMalloc(1 * sizeof(float));;
x2002[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2002, x_desc, x1991, x2000, x_desc, x1991));
};
if (x2007) {
} else {
assert(false && "ERROR not specified");
}
float* x2020 = (float*)myGpuMalloc(x2019 * sizeof(float));
float* x2021 = (float*)myMalloc(1 * sizeof(float));;
x2021[0] = 0.0f;
float* x2023 = (float*)myMalloc(1 * sizeof(float));;
x2023[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2023, in_desc, x1991, filt_desc, x1000,
conv_desc, algo, ws_data, ws_size,
x2021, out_desc, x2020));
};
float* x2026 = (float*)myGpuMalloc(x2019 * sizeof(float));
float* x2027 = (float*)myGpuMalloc(x2017 * sizeof(float));
float* x2028 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2029 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2030 = (float*)myMalloc(1 * sizeof(float));;
x2030[0] = 0.0f;
float* x2032 = (float*)myMalloc(1 * sizeof(float));;
x2032[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2032, x2030, in_desc, x2020, out_desc, x2027, sbmv_desc, x319,
x580, 0.1, x400, x970, 1.0E-5,
x2028, x2029));
};
float* x2035 = (float*)myGpuMalloc(x2019 * sizeof(float));
float* x2036 = (float*)myMalloc(1 * sizeof(float));;
x2036[0] = 0.0f;
float* x2038 = (float*)myMalloc(1 * sizeof(float));;
x2038[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2038, x_desc, x2027, x2036, x_desc, x2027));
};
if (x2042) {
} else {
assert(false && "ERROR not specified");
}
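// 1x1 convolution expanding 128 -> 512 channels, followed by batch norm, the residual
// add of the block input x1918, and an in-place ReLU.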
float* x2054 = (float*)myGpuMalloc(x2053 * sizeof(float));
float* x2055 = (float*)myMalloc(1 * sizeof(float));;
x2055[0] = 0.0f;
float* x2057 = (float*)myMalloc(1 * sizeof(float));;
x2057[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2057, in_desc, x2027, filt_desc, x628,
conv_desc, algo, ws_data, ws_size,
x2055, out_desc, x2054));
};
float* x2060 = (float*)myGpuMalloc(x2053 * sizeof(float));
float* x2061 = (float*)myGpuMalloc(x2051 * sizeof(float));
float* x2062 = (float*)myGpuMalloc(512 * sizeof(float));
float* x2063 = (float*)myGpuMalloc(512 * sizeof(float));
float* x2064 = (float*)myMalloc(1 * sizeof(float));;
x2064[0] = 0.0f;
float* x2066 = (float*)myMalloc(1 * sizeof(float));;
x2066[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2066, x2064, in_desc, x2054, out_desc, x2061, sbmv_desc, x451,
x1033, 0.1, x736, x559, 1.0E-5,
x2062, x2063));
};
float* x2069 = (float*)myGpuMalloc(x2053 * sizeof(float));
if (x2073) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1905) x Sym(1905), res: x Const(64) x Const(512) x Sym(2048) x Sym(2048)");
}
float* x2078 = (float*)myMalloc(1 * sizeof(float));;
x2078[0] = 1.0f;
float* x2080 = (float*)myMalloc(1 * sizeof(float));;
x2080[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2078, bias_desc, x1918, x2080, out_desc, x2061));
};
float* x2083 = (float*)myMalloc(1 * sizeof(float));;
x2083[0] = 0.0f;
float* x2085 = (float*)myMalloc(1 * sizeof(float));;
x2085[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2085, x_desc, x2061, x2083, x_desc, x2061));
};
if (x2089) {
} else {
assert(false && "ERROR not specified");
}
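// Another bottleneck block of the same shape: 1x1 conv 512 -> 128, 3x3 conv 128 -> 128
// (padding 1, stride 1), 1x1 conv 128 -> 512, each conv followed by batch norm; ReLU
// follows the first two, and the block ends with a residual add of its input x2061
// and a final ReLU.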
float* x2101 = (float*)myGpuMalloc(x2100 * sizeof(float));
float* x2102 = (float*)myMalloc(1 * sizeof(float));;
x2102[0] = 0.0f;
float* x2104 = (float*)myMalloc(1 * sizeof(float));;
x2104[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2104, in_desc, x2061, filt_desc, x883,
conv_desc, algo, ws_data, ws_size,
x2102, out_desc, x2101));
};
float* x2107 = (float*)myGpuMalloc(x2100 * sizeof(float));
float* x2108 = (float*)myGpuMalloc(x2098 * sizeof(float));
float* x2109 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2110 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2111 = (float*)myMalloc(1 * sizeof(float));;
x2111[0] = 0.0f;
float* x2113 = (float*)myMalloc(1 * sizeof(float));;
x2113[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2113, x2111, in_desc, x2101, out_desc, x2108, sbmv_desc, x430,
x805, 0.1, x631, x322, 1.0E-5,
x2109, x2110));
};
float* x2116 = (float*)myGpuMalloc(x2100 * sizeof(float));
float* x2117 = (float*)myMalloc(1 * sizeof(float));;
x2117[0] = 0.0f;
float* x2119 = (float*)myMalloc(1 * sizeof(float));;
x2119[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2119, x_desc, x2108, x2117, x_desc, x2108));
};
if (x2124) {
} else {
assert(false && "ERROR not specified");
}
float* x2137 = (float*)myGpuMalloc(x2136 * sizeof(float));
float* x2138 = (float*)myMalloc(1 * sizeof(float));;
x2138[0] = 0.0f;
float* x2140 = (float*)myMalloc(1 * sizeof(float));;
x2140[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2140, in_desc, x2108, filt_desc, x868,
conv_desc, algo, ws_data, ws_size,
x2138, out_desc, x2137));
};
float* x2143 = (float*)myGpuMalloc(x2136 * sizeof(float));
float* x2144 = (float*)myGpuMalloc(x2134 * sizeof(float));
float* x2145 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2146 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2147 = (float*)myMalloc(1 * sizeof(float));;
x2147[0] = 0.0f;
float* x2149 = (float*)myMalloc(1 * sizeof(float));;
x2149[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2149, x2147, in_desc, x2137, out_desc, x2144, sbmv_desc, x676,
x478, 0.1, x946, x1093, 1.0E-5,
x2145, x2146));
};
float* x2152 = (float*)myGpuMalloc(x2136 * sizeof(float));
float* x2153 = (float*)myMalloc(1 * sizeof(float));;
x2153[0] = 0.0f;
float* x2155 = (float*)myMalloc(1 * sizeof(float));;
x2155[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2155, x_desc, x2144, x2153, x_desc, x2144));
};
if (x2159) {
} else {
assert(false && "ERROR not specified");
}
float* x2171 = (float*)myGpuMalloc(x2170 * sizeof(float));
float* x2172 = (float*)myMalloc(1 * sizeof(float));;
x2172[0] = 0.0f;
float* x2174 = (float*)myMalloc(1 * sizeof(float));;
x2174[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2174, in_desc, x2144, filt_desc, x418,
conv_desc, algo, ws_data, ws_size,
x2172, out_desc, x2171));
};
float* x2177 = (float*)myGpuMalloc(x2170 * sizeof(float));
float* x2178 = (float*)myGpuMalloc(x2168 * sizeof(float));
float* x2179 = (float*)myGpuMalloc(512 * sizeof(float));
float* x2180 = (float*)myGpuMalloc(512 * sizeof(float));
float* x2181 = (float*)myMalloc(1 * sizeof(float));;
x2181[0] = 0.0f;
float* x2183 = (float*)myMalloc(1 * sizeof(float));;
x2183[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2183, x2181, in_desc, x2171, out_desc, x2178, sbmv_desc, x796,
x541, 0.1, x370, x964, 1.0E-5,
x2179, x2180));
};
float* x2186 = (float*)myGpuMalloc(x2170 * sizeof(float));
if (x2190) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2048) x Sym(2048), res: x Const(64) x Const(512) x Sym(2165) x Sym(2165)");
}
float* x2195 = (float*)myMalloc(1 * sizeof(float));;
x2195[0] = 1.0f;
float* x2197 = (float*)myMalloc(1 * sizeof(float));;
x2197[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2195, bias_desc, x2061, x2197, out_desc, x2178));
};
float* x2200 = (float*)myMalloc(1 * sizeof(float));;
x2200[0] = 0.0f;
float* x2202 = (float*)myMalloc(1 * sizeof(float));;
x2202[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2202, x_desc, x2178, x2200, x_desc, x2178));
};
if (x2206) {
} else {
assert(false && "ERROR not specified");
}
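// One more bottleneck block with the same structure (1x1 conv 512 -> 128, 3x3 conv
// 128 -> 128, 1x1 conv 128 -> 512), ending with a residual add of its input x2178.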
float* x2218 = (float*)myGpuMalloc(x2217 * sizeof(float));
float* x2219 = (float*)myMalloc(1 * sizeof(float));;
x2219[0] = 0.0f;
float* x2221 = (float*)myMalloc(1 * sizeof(float));;
x2221[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2221, in_desc, x2178, filt_desc, x691,
conv_desc, algo, ws_data, ws_size,
x2219, out_desc, x2218));
};
float* x2224 = (float*)myGpuMalloc(x2217 * sizeof(float));
float* x2225 = (float*)myGpuMalloc(x2215 * sizeof(float));
float* x2226 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2227 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2228 = (float*)myMalloc(1 * sizeof(float));;
x2228[0] = 0.0f;
float* x2230 = (float*)myMalloc(1 * sizeof(float));;
x2230[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2230, x2228, in_desc, x2218, out_desc, x2225, sbmv_desc, x412,
x1021, 0.1, x1003, x1078, 1.0E-5,
x2226, x2227));
};
float* x2233 = (float*)myGpuMalloc(x2217 * sizeof(float));
float* x2234 = (float*)myMalloc(1 * sizeof(float));;
x2234[0] = 0.0f;
float* x2236 = (float*)myMalloc(1 * sizeof(float));;
x2236[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2236, x_desc, x2225, x2234, x_desc, x2225));
};
if (x2241) {
} else {
assert(false && "ERROR not specified");
}
float* x2254 = (float*)myGpuMalloc(x2253 * sizeof(float));
float* x2255 = (float*)myMalloc(1 * sizeof(float));;
x2255[0] = 0.0f;
float* x2257 = (float*)myMalloc(1 * sizeof(float));;
x2257[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2257, in_desc, x2225, filt_desc, x790,
conv_desc, algo, ws_data, ws_size,
x2255, out_desc, x2254));
};
float* x2260 = (float*)myGpuMalloc(x2253 * sizeof(float));
float* x2261 = (float*)myGpuMalloc(x2251 * sizeof(float));
float* x2262 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2263 = (float*)myGpuMalloc(128 * sizeof(float));
float* x2264 = (float*)myMalloc(1 * sizeof(float));;
x2264[0] = 0.0f;
float* x2266 = (float*)myMalloc(1 * sizeof(float));;
x2266[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2266, x2264, in_desc, x2254, out_desc, x2261, sbmv_desc, x532,
x409, 0.1, x1099, x739, 1.0E-5,
x2262, x2263));
};
float* x2269 = (float*)myGpuMalloc(x2253 * sizeof(float));
float* x2270 = (float*)myMalloc(1 * sizeof(float));;
x2270[0] = 0.0f;
float* x2272 = (float*)myMalloc(1 * sizeof(float));;
x2272[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2272, x_desc, x2261, x2270, x_desc, x2261));
};
if (x2276) {
} else {
assert(false && "ERROR not specified");
}
float* x2288 = (float*)myGpuMalloc(x2287 * sizeof(float));
float* x2289 = (float*)myMalloc(1 * sizeof(float));;
x2289[0] = 0.0f;
float* x2291 = (float*)myMalloc(1 * sizeof(float));;
x2291[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2291, in_desc, x2261, filt_desc, x460,
conv_desc, algo, ws_data, ws_size,
x2289, out_desc, x2288));
};
float* x2294 = (float*)myGpuMalloc(x2287 * sizeof(float));
float* x2295 = (float*)myGpuMalloc(x2285 * sizeof(float));
float* x2296 = (float*)myGpuMalloc(512 * sizeof(float));
float* x2297 = (float*)myGpuMalloc(512 * sizeof(float));
float* x2298 = (float*)myMalloc(1 * sizeof(float));;
x2298[0] = 0.0f;
float* x2300 = (float*)myMalloc(1 * sizeof(float));;
x2300[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2300, x2298, in_desc, x2288, out_desc, x2295, sbmv_desc, x763,
x457, 0.1, x352, x997, 1.0E-5,
x2296, x2297));
};
float* x2303 = (float*)myGpuMalloc(x2287 * sizeof(float));
if (x2307) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2165) x Sym(2165), res: x Const(64) x Const(512) x Sym(2282) x Sym(2282)");
}
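// Residual connection: accumulate the shortcut activation x2178 into the
// main-path output x2295 (alpha = beta = 1), then apply ReLU in place.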
float* x2312 = (float*)myMalloc(1 * sizeof(float));;
x2312[0] = 1.0f;
float* x2314 = (float*)myMalloc(1 * sizeof(float));;
x2314[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2312, bias_desc, x2178, x2314, out_desc, x2295));
};
float* x2317 = (float*)myMalloc(1 * sizeof(float));;
x2317[0] = 0.0f;
float* x2319 = (float*)myMalloc(1 * sizeof(float));;
x2319[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2319, x_desc, x2295, x2317, x_desc, x2295));
};
if (x2323) {
} else {
assert(false && "ERROR not specified");
}
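// Next bottleneck unit: 1x1 convolution reducing 512 -> 256 channels
// (x2295 -> x2335), then batch norm into x2342 and in-place ReLU.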
float* x2335 = (float*)myGpuMalloc(x2334 * sizeof(float));
float* x2336 = (float*)myMalloc(1 * sizeof(float));;
x2336[0] = 0.0f;
float* x2338 = (float*)myMalloc(1 * sizeof(float));;
x2338[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2338, in_desc, x2295, filt_desc, x835,
conv_desc, algo, ws_data, ws_size,
x2336, out_desc, x2335));
};
float* x2341 = (float*)myGpuMalloc(x2334 * sizeof(float));
float* x2342 = (float*)myGpuMalloc(x2332 * sizeof(float));
float* x2343 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2344 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2345 = (float*)myMalloc(1 * sizeof(float));;
x2345[0] = 0.0f;
float* x2347 = (float*)myMalloc(1 * sizeof(float));;
x2347[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2347, x2345, in_desc, x2335, out_desc, x2342, sbmv_desc, x1105,
x358, 0.1, x688, x889, 1.0E-5,
x2343, x2344));
};
float* x2350 = (float*)myGpuMalloc(x2334 * sizeof(float));
float* x2351 = (float*)myMalloc(1 * sizeof(float));;
x2351[0] = 0.0f;
float* x2353 = (float*)myMalloc(1 * sizeof(float));;
x2353[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2353, x_desc, x2342, x2351, x_desc, x2342));
};
if (x2358) {
} else {
assert(false && "ERROR not specified");
}
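// 3x3 convolution, 256 -> 256 channels, pad 1, stride 2 (spatial
// downsampling): x2342 -> x2371, then batch norm into x2378 and in-place ReLU.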
float* x2371 = (float*)myGpuMalloc(x2370 * sizeof(float));
float* x2372 = (float*)myMalloc(1 * sizeof(float));;
x2372[0] = 0.0f;
float* x2374 = (float*)myMalloc(1 * sizeof(float));;
x2374[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2374, in_desc, x2342, filt_desc, x820,
conv_desc, algo, ws_data, ws_size,
x2372, out_desc, x2371));
};
float* x2377 = (float*)myGpuMalloc(x2370 * sizeof(float));
float* x2378 = (float*)myGpuMalloc(x2368 * sizeof(float));
float* x2379 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2380 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2381 = (float*)myMalloc(1 * sizeof(float));;
x2381[0] = 0.0f;
float* x2383 = (float*)myMalloc(1 * sizeof(float));;
x2383[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2383, x2381, in_desc, x2371, out_desc, x2378, sbmv_desc, x619,
x343, 0.1, x982, x592, 1.0E-5,
x2379, x2380));
};
float* x2386 = (float*)myGpuMalloc(x2370 * sizeof(float));
float* x2387 = (float*)myMalloc(1 * sizeof(float));;
x2387[0] = 0.0f;
float* x2389 = (float*)myMalloc(1 * sizeof(float));;
x2389[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2389, x_desc, x2378, x2387, x_desc, x2378));
};
if (x2393) {
} else {
assert(false && "ERROR not specified");
}
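// 1x1 convolution expanding 256 -> 1024 channels: x2378 -> x2405, followed by
// batch norm into x2412.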
float* x2405 = (float*)myGpuMalloc(x2404 * sizeof(float));
float* x2406 = (float*)myMalloc(1 * sizeof(float));;
x2406[0] = 0.0f;
float* x2408 = (float*)myMalloc(1 * sizeof(float));;
x2408[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2408, in_desc, x2378, filt_desc, x1102,
conv_desc, algo, ws_data, ws_size,
x2406, out_desc, x2405));
};
float* x2411 = (float*)myGpuMalloc(x2404 * sizeof(float));
float* x2412 = (float*)myGpuMalloc(x2402 * sizeof(float));
float* x2413 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2414 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2415 = (float*)myMalloc(1 * sizeof(float));;
x2415[0] = 0.0f;
float* x2417 = (float*)myMalloc(1 * sizeof(float));;
x2417[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2417, x2415, in_desc, x2405, out_desc, x2412, sbmv_desc, x349,
x646, 0.1, x943, x1096, 1.0E-5,
x2413, x2414));
};
float* x2420 = (float*)myGpuMalloc(x2404 * sizeof(float));
if (x2323) {
} else {
assert(false && "ERROR not specified");
}
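// Projection shortcut: 1x1 convolution with stride 2 mapping the 512-channel
// block input x2295 to 1024 channels (x2431), with its own batch norm into
// x2438.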
float* x2431 = (float*)myGpuMalloc(x2430 * sizeof(float));
float* x2432 = (float*)myMalloc(1 * sizeof(float));;
x2432[0] = 0.0f;
float* x2434 = (float*)myMalloc(1 * sizeof(float));;
x2434[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2434, in_desc, x2295, filt_desc, x520,
conv_desc, algo, ws_data, ws_size,
x2432, out_desc, x2431));
};
float* x2437 = (float*)myGpuMalloc(x2430 * sizeof(float));
float* x2438 = (float*)myGpuMalloc(x2428 * sizeof(float));
float* x2439 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2440 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2441 = (float*)myMalloc(1 * sizeof(float));;
x2441[0] = 0.0f;
float* x2443 = (float*)myMalloc(1 * sizeof(float));;
x2443[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2443, x2441, in_desc, x2431, out_desc, x2438, sbmv_desc, x382,
x955, 0.1, x553, x928, 1.0E-5,
x2439, x2440));
};
float* x2446 = (float*)myGpuMalloc(x2430 * sizeof(float));
if (x2450) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2425) x Sym(2425), res: x Const(64) x Const(1024) x Sym(2399) x Sym(2399)");
}
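// Add the projected shortcut x2438 into the main path x2412, then apply ReLU
// in place.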
float* x2455 = (float*)myMalloc(1 * sizeof(float));;
x2455[0] = 1.0f;
float* x2457 = (float*)myMalloc(1 * sizeof(float));;
x2457[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2455, bias_desc, x2438, x2457, out_desc, x2412));
};
float* x2460 = (float*)myMalloc(1 * sizeof(float));;
x2460[0] = 0.0f;
float* x2462 = (float*)myMalloc(1 * sizeof(float));;
x2462[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2462, x_desc, x2412, x2460, x_desc, x2412));
};
if (x2466) {
} else {
assert(false && "ERROR not specified");
}
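// Identity bottleneck unit: 1x1 conv 1024 -> 256 (x2412 -> x2478) + BN + ReLU,
// 3x3 conv 256 -> 256 pad 1 stride 1 (-> x2514) + BN + ReLU, 1x1 conv
// 256 -> 1024 (-> x2548) + BN, residual add of x2412, then ReLU.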
float* x2478 = (float*)myGpuMalloc(x2477 * sizeof(float));
float* x2479 = (float*)myMalloc(1 * sizeof(float));;
x2479[0] = 0.0f;
float* x2481 = (float*)myMalloc(1 * sizeof(float));;
x2481[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2481, in_desc, x2412, filt_desc, x334,
conv_desc, algo, ws_data, ws_size,
x2479, out_desc, x2478));
};
float* x2484 = (float*)myGpuMalloc(x2477 * sizeof(float));
float* x2485 = (float*)myGpuMalloc(x2475 * sizeof(float));
float* x2486 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2487 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2488 = (float*)myMalloc(1 * sizeof(float));;
x2488[0] = 0.0f;
float* x2490 = (float*)myMalloc(1 * sizeof(float));;
x2490[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2490, x2488, in_desc, x2478, out_desc, x2485, sbmv_desc, x385,
x952, 0.1, x1072, x766, 1.0E-5,
x2486, x2487));
};
float* x2493 = (float*)myGpuMalloc(x2477 * sizeof(float));
float* x2494 = (float*)myMalloc(1 * sizeof(float));;
x2494[0] = 0.0f;
float* x2496 = (float*)myMalloc(1 * sizeof(float));;
x2496[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2496, x_desc, x2485, x2494, x_desc, x2485));
};
if (x2501) {
} else {
assert(false && "ERROR not specified");
}
float* x2514 = (float*)myGpuMalloc(x2513 * sizeof(float));
float* x2515 = (float*)myMalloc(1 * sizeof(float));;
x2515[0] = 0.0f;
float* x2517 = (float*)myMalloc(1 * sizeof(float));;
x2517[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2517, in_desc, x2485, filt_desc, x388,
conv_desc, algo, ws_data, ws_size,
x2515, out_desc, x2514));
};
float* x2520 = (float*)myGpuMalloc(x2513 * sizeof(float));
float* x2521 = (float*)myGpuMalloc(x2511 * sizeof(float));
float* x2522 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2523 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2524 = (float*)myMalloc(1 * sizeof(float));;
x2524[0] = 0.0f;
float* x2526 = (float*)myMalloc(1 * sizeof(float));;
x2526[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2526, x2524, in_desc, x2514, out_desc, x2521, sbmv_desc, x1108,
x583, 0.1, x895, x1006, 1.0E-5,
x2522, x2523));
};
float* x2529 = (float*)myGpuMalloc(x2513 * sizeof(float));
float* x2530 = (float*)myMalloc(1 * sizeof(float));;
x2530[0] = 0.0f;
float* x2532 = (float*)myMalloc(1 * sizeof(float));;
x2532[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2532, x_desc, x2521, x2530, x_desc, x2521));
};
if (x2536) {
} else {
assert(false && "ERROR not specified");
}
float* x2548 = (float*)myGpuMalloc(x2547 * sizeof(float));
float* x2549 = (float*)myMalloc(1 * sizeof(float));;
x2549[0] = 0.0f;
float* x2551 = (float*)myMalloc(1 * sizeof(float));;
x2551[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2551, in_desc, x2521, filt_desc, x463,
conv_desc, algo, ws_data, ws_size,
x2549, out_desc, x2548));
};
float* x2554 = (float*)myGpuMalloc(x2547 * sizeof(float));
float* x2555 = (float*)myGpuMalloc(x2545 * sizeof(float));
float* x2556 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2557 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2558 = (float*)myMalloc(1 * sizeof(float));;
x2558[0] = 0.0f;
float* x2560 = (float*)myMalloc(1 * sizeof(float));;
x2560[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2560, x2558, in_desc, x2548, out_desc, x2555, sbmv_desc, x355,
x991, 0.1, x841, x724, 1.0E-5,
x2556, x2557));
};
float* x2563 = (float*)myGpuMalloc(x2547 * sizeof(float));
if (x2567) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2399) x Sym(2399), res: x Const(64) x Const(1024) x Sym(2542) x Sym(2542)");
}
float* x2572 = (float*)myMalloc(1 * sizeof(float));;
x2572[0] = 1.0f;
float* x2574 = (float*)myMalloc(1 * sizeof(float));;
x2574[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2572, bias_desc, x2412, x2574, out_desc, x2555));
};
float* x2577 = (float*)myMalloc(1 * sizeof(float));;
x2577[0] = 0.0f;
float* x2579 = (float*)myMalloc(1 * sizeof(float));;
x2579[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2579, x_desc, x2555, x2577, x_desc, x2555));
};
if (x2583) {
} else {
assert(false && "ERROR not specified");
}
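// Identity bottleneck unit (same pattern): 1x1 conv 1024 -> 256, 3x3 conv
// 256 -> 256, 1x1 conv 256 -> 1024, each followed by batch norm; residual add
// of x2555; ReLU.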
float* x2595 = (float*)myGpuMalloc(x2594 * sizeof(float));
float* x2596 = (float*)myMalloc(1 * sizeof(float));;
x2596[0] = 0.0f;
float* x2598 = (float*)myMalloc(1 * sizeof(float));;
x2598[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2598, in_desc, x2555, filt_desc, x949,
conv_desc, algo, ws_data, ws_size,
x2596, out_desc, x2595));
};
float* x2601 = (float*)myGpuMalloc(x2594 * sizeof(float));
float* x2602 = (float*)myGpuMalloc(x2592 * sizeof(float));
float* x2603 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2604 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2605 = (float*)myMalloc(1 * sizeof(float));;
x2605[0] = 0.0f;
float* x2607 = (float*)myMalloc(1 * sizeof(float));;
x2607[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2607, x2605, in_desc, x2595, out_desc, x2602, sbmv_desc, x682,
x886, 0.1, x829, x817, 1.0E-5,
x2603, x2604));
};
float* x2610 = (float*)myGpuMalloc(x2594 * sizeof(float));
float* x2611 = (float*)myMalloc(1 * sizeof(float));;
x2611[0] = 0.0f;
float* x2613 = (float*)myMalloc(1 * sizeof(float));;
x2613[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2613, x_desc, x2602, x2611, x_desc, x2602));
};
if (x2618) {
} else {
assert(false && "ERROR not specified");
}
float* x2631 = (float*)myGpuMalloc(x2630 * sizeof(float));
float* x2632 = (float*)myMalloc(1 * sizeof(float));;
x2632[0] = 0.0f;
float* x2634 = (float*)myMalloc(1 * sizeof(float));;
x2634[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2634, in_desc, x2602, filt_desc, x337,
conv_desc, algo, ws_data, ws_size,
x2632, out_desc, x2631));
};
float* x2637 = (float*)myGpuMalloc(x2630 * sizeof(float));
float* x2638 = (float*)myGpuMalloc(x2628 * sizeof(float));
float* x2639 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2640 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2641 = (float*)myMalloc(1 * sizeof(float));;
x2641[0] = 0.0f;
float* x2643 = (float*)myMalloc(1 * sizeof(float));;
x2643[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2643, x2641, in_desc, x2631, out_desc, x2638, sbmv_desc, x979,
x871, 0.1, x667, x484, 1.0E-5,
x2639, x2640));
};
float* x2646 = (float*)myGpuMalloc(x2630 * sizeof(float));
float* x2647 = (float*)myMalloc(1 * sizeof(float));;
x2647[0] = 0.0f;
float* x2649 = (float*)myMalloc(1 * sizeof(float));;
x2649[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2649, x_desc, x2638, x2647, x_desc, x2638));
};
if (x2653) {
} else {
assert(false && "ERROR not specified");
}
float* x2665 = (float*)myGpuMalloc(x2664 * sizeof(float));
float* x2666 = (float*)myMalloc(1 * sizeof(float));;
x2666[0] = 0.0f;
float* x2668 = (float*)myMalloc(1 * sizeof(float));;
x2668[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2668, in_desc, x2638, filt_desc, x643,
conv_desc, algo, ws_data, ws_size,
x2666, out_desc, x2665));
};
float* x2671 = (float*)myGpuMalloc(x2664 * sizeof(float));
float* x2672 = (float*)myGpuMalloc(x2662 * sizeof(float));
float* x2673 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2674 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2675 = (float*)myMalloc(1 * sizeof(float));;
x2675[0] = 0.0f;
float* x2677 = (float*)myMalloc(1 * sizeof(float));;
x2677[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2677, x2675, in_desc, x2665, out_desc, x2672, sbmv_desc, x1084,
x466, 0.1, x715, x859, 1.0E-5,
x2673, x2674));
};
float* x2680 = (float*)myGpuMalloc(x2664 * sizeof(float));
if (x2684) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2542) x Sym(2542), res: x Const(64) x Const(1024) x Sym(2659) x Sym(2659)");
}
float* x2689 = (float*)myMalloc(1 * sizeof(float));;
x2689[0] = 1.0f;
float* x2691 = (float*)myMalloc(1 * sizeof(float));;
x2691[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2689, bias_desc, x2555, x2691, out_desc, x2672));
};
float* x2694 = (float*)myMalloc(1 * sizeof(float));;
x2694[0] = 0.0f;
float* x2696 = (float*)myMalloc(1 * sizeof(float));;
x2696[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2696, x_desc, x2672, x2694, x_desc, x2672));
};
if (x2700) {
} else {
assert(false && "ERROR not specified");
}
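// Identity bottleneck unit (same pattern), with shortcut x2672 added into
// x2789 before the final ReLU.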
float* x2712 = (float*)myGpuMalloc(x2711 * sizeof(float));
float* x2713 = (float*)myMalloc(1 * sizeof(float));;
x2713[0] = 0.0f;
float* x2715 = (float*)myMalloc(1 * sizeof(float));;
x2715[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2715, in_desc, x2672, filt_desc, x313,
conv_desc, algo, ws_data, ws_size,
x2713, out_desc, x2712));
};
float* x2718 = (float*)myGpuMalloc(x2711 * sizeof(float));
float* x2719 = (float*)myGpuMalloc(x2709 * sizeof(float));
float* x2720 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2721 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2722 = (float*)myMalloc(1 * sizeof(float));;
x2722[0] = 0.0f;
float* x2724 = (float*)myMalloc(1 * sizeof(float));;
x2724[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2724, x2722, in_desc, x2712, out_desc, x2719, sbmv_desc, x571,
x1018, 0.1, x784, x589, 1.0E-5,
x2720, x2721));
};
float* x2727 = (float*)myGpuMalloc(x2711 * sizeof(float));
float* x2728 = (float*)myMalloc(1 * sizeof(float));;
x2728[0] = 0.0f;
float* x2730 = (float*)myMalloc(1 * sizeof(float));;
x2730[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2730, x_desc, x2719, x2728, x_desc, x2719));
};
if (x2735) {
} else {
assert(false && "ERROR not specified");
}
float* x2748 = (float*)myGpuMalloc(x2747 * sizeof(float));
float* x2749 = (float*)myMalloc(1 * sizeof(float));;
x2749[0] = 0.0f;
float* x2751 = (float*)myMalloc(1 * sizeof(float));;
x2751[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2751, in_desc, x2719, filt_desc, x1042,
conv_desc, algo, ws_data, ws_size,
x2749, out_desc, x2748));
};
float* x2754 = (float*)myGpuMalloc(x2747 * sizeof(float));
float* x2755 = (float*)myGpuMalloc(x2745 * sizeof(float));
float* x2756 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2757 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2758 = (float*)myMalloc(1 * sizeof(float));;
x2758[0] = 0.0f;
float* x2760 = (float*)myMalloc(1 * sizeof(float));;
x2760[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2760, x2758, in_desc, x2748, out_desc, x2755, sbmv_desc, x517,
x703, 0.1, x853, x985, 1.0E-5,
x2756, x2757));
};
float* x2763 = (float*)myGpuMalloc(x2747 * sizeof(float));
float* x2764 = (float*)myMalloc(1 * sizeof(float));;
x2764[0] = 0.0f;
float* x2766 = (float*)myMalloc(1 * sizeof(float));;
x2766[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2766, x_desc, x2755, x2764, x_desc, x2755));
};
if (x2770) {
} else {
assert(false && "ERROR not specified");
}
float* x2782 = (float*)myGpuMalloc(x2781 * sizeof(float));
float* x2783 = (float*)myMalloc(1 * sizeof(float));;
x2783[0] = 0.0f;
float* x2785 = (float*)myMalloc(1 * sizeof(float));;
x2785[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2785, in_desc, x2755, filt_desc, x562,
conv_desc, algo, ws_data, ws_size,
x2783, out_desc, x2782));
};
float* x2788 = (float*)myGpuMalloc(x2781 * sizeof(float));
float* x2789 = (float*)myGpuMalloc(x2779 * sizeof(float));
float* x2790 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2791 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2792 = (float*)myMalloc(1 * sizeof(float));;
x2792[0] = 0.0f;
float* x2794 = (float*)myMalloc(1 * sizeof(float));;
x2794[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2794, x2792, in_desc, x2782, out_desc, x2789, sbmv_desc, x1009,
x733, 0.1, x988, x778, 1.0E-5,
x2790, x2791));
};
float* x2797 = (float*)myGpuMalloc(x2781 * sizeof(float));
if (x2801) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2659) x Sym(2659), res: x Const(64) x Const(1024) x Sym(2776) x Sym(2776)");
}
float* x2806 = (float*)myMalloc(1 * sizeof(float));;
x2806[0] = 1.0f;
float* x2808 = (float*)myMalloc(1 * sizeof(float));;
x2808[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2806, bias_desc, x2672, x2808, out_desc, x2789));
};
float* x2811 = (float*)myMalloc(1 * sizeof(float));;
x2811[0] = 0.0f;
float* x2813 = (float*)myMalloc(1 * sizeof(float));;
x2813[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2813, x_desc, x2789, x2811, x_desc, x2789));
};
if (x2817) {
} else {
assert(false && "ERROR not specified");
}
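// Next bottleneck unit: 1x1 conv 1024 -> 256 (x2789 -> x2829) + BN + ReLU,
// then 3x3 conv 256 -> 256 pad 1 stride 1 (-> x2865) + BN + ReLU.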
float* x2829 = (float*)myGpuMalloc(x2828 * sizeof(float));
float* x2830 = (float*)myMalloc(1 * sizeof(float));;
x2830[0] = 0.0f;
float* x2832 = (float*)myMalloc(1 * sizeof(float));;
x2832[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2832, in_desc, x2789, filt_desc, x361,
conv_desc, algo, ws_data, ws_size,
x2830, out_desc, x2829));
};
float* x2835 = (float*)myGpuMalloc(x2828 * sizeof(float));
float* x2836 = (float*)myGpuMalloc(x2826 * sizeof(float));
float* x2837 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2838 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2839 = (float*)myMalloc(1 * sizeof(float));;
x2839[0] = 0.0f;
float* x2841 = (float*)myMalloc(1 * sizeof(float));;
x2841[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2841, x2839, in_desc, x2829, out_desc, x2836, sbmv_desc, x526,
x850, 0.1, x1057, x502, 1.0E-5,
x2837, x2838));
};
float* x2844 = (float*)myGpuMalloc(x2828 * sizeof(float));
float* x2845 = (float*)myMalloc(1 * sizeof(float));;
x2845[0] = 0.0f;
float* x2847 = (float*)myMalloc(1 * sizeof(float));;
x2847[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2847, x_desc, x2836, x2845, x_desc, x2836));
};
if (x2852) {
} else {
assert(false && "ERROR not specified");
}
float* x2865 = (float*)myGpuMalloc(x2864 * sizeof(float));
float* x2866 = (float*)myMalloc(1 * sizeof(float));;
x2866[0] = 0.0f;
float* x2868 = (float*)myMalloc(1 * sizeof(float));;
x2868[0] = 1.0f;
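// 3x3 convolution, 256 -> 256 channels (stride 1, padding 1): input x2836, weights x1081, output x2865.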
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2868, in_desc, x2836, filt_desc, x1081,
conv_desc, algo, ws_data, ws_size,
x2866, out_desc, x2865));
};
float* x2871 = (float*)myGpuMalloc(x2864 * sizeof(float));
float* x2872 = (float*)myGpuMalloc(x2862 * sizeof(float));
float* x2873 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2874 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2875 = (float*)myMalloc(1 * sizeof(float));;
x2875[0] = 0.0f;
float* x2877 = (float*)myMalloc(1 * sizeof(float));;
x2877[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2877, x2875, in_desc, x2865, out_desc, x2872, sbmv_desc, x799,
x622, 0.1, x1045, x607, 1.0E-5,
x2873, x2874));
};
float* x2880 = (float*)myGpuMalloc(x2864 * sizeof(float));
float* x2881 = (float*)myMalloc(1 * sizeof(float));;
x2881[0] = 0.0f;
float* x2883 = (float*)myMalloc(1 * sizeof(float));;
x2883[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2883, x_desc, x2872, x2881, x_desc, x2872));
};
if (x2887) {
} else {
assert(false && "ERROR not specified");
}
float* x2899 = (float*)myGpuMalloc(x2898 * sizeof(float));
float* x2900 = (float*)myMalloc(1 * sizeof(float));;
x2900[0] = 0.0f;
float* x2902 = (float*)myMalloc(1 * sizeof(float));;
x2902[0] = 1.0f;
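// 1x1 convolution, 256 -> 1024 channels (stride 1, no padding): expansion back to the block's input width; weights x958, output x2899.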
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2902, in_desc, x2872, filt_desc, x958,
conv_desc, algo, ws_data, ws_size,
x2900, out_desc, x2899));
};
float* x2905 = (float*)myGpuMalloc(x2898 * sizeof(float));
float* x2906 = (float*)myGpuMalloc(x2896 * sizeof(float));
float* x2907 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2908 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x2909 = (float*)myMalloc(1 * sizeof(float));;
x2909[0] = 0.0f;
float* x2911 = (float*)myMalloc(1 * sizeof(float));;
x2911[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2911, x2909, in_desc, x2899, out_desc, x2906, sbmv_desc, x472,
x655, 0.1, x922, x1111, 1.0E-5,
x2907, x2908));
};
float* x2914 = (float*)myGpuMalloc(x2898 * sizeof(float));
if (x2918) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2776) x Sym(2776), res: x Const(64) x Const(1024) x Sym(2893) x Sym(2893)");
}
float* x2923 = (float*)myMalloc(1 * sizeof(float));;
x2923[0] = 1.0f;
float* x2925 = (float*)myMalloc(1 * sizeof(float));;
x2925[0] = 1.0f;
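// Residual connection: cudnnAddTensor adds the block input x2789 onto the batch-norm output x2906 (alpha and beta are both 1).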
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x2923, bias_desc, x2789, x2925, out_desc, x2906));
};
float* x2928 = (float*)myMalloc(1 * sizeof(float));;
x2928[0] = 0.0f;
float* x2930 = (float*)myMalloc(1 * sizeof(float));;
x2930[0] = 1.0f;
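// In-place ReLU on the summed result x2906, completing this block.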
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2930, x_desc, x2906, x2928, x_desc, x2906));
};
if (x2934) {
} else {
assert(false && "ERROR not specified");
}
float* x2946 = (float*)myGpuMalloc(x2945 * sizeof(float));
float* x2947 = (float*)myMalloc(1 * sizeof(float));;
x2947[0] = 0.0f;
float* x2949 = (float*)myMalloc(1 * sizeof(float));;
x2949[0] = 1.0f;
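// Next bottleneck block: 1x1 convolution, 1024 -> 256 channels (weights x748),
// followed by the same BN / ReLU / 3x3 / 1x1 / residual-add pattern as above.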
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2949, in_desc, x2906, filt_desc, x748,
conv_desc, algo, ws_data, ws_size,
x2947, out_desc, x2946));
};
float* x2952 = (float*)myGpuMalloc(x2945 * sizeof(float));
float* x2953 = (float*)myGpuMalloc(x2943 * sizeof(float));
float* x2954 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2955 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2956 = (float*)myMalloc(1 * sizeof(float));;
x2956[0] = 0.0f;
float* x2958 = (float*)myMalloc(1 * sizeof(float));;
x2958[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2958, x2956, in_desc, x2946, out_desc, x2953, sbmv_desc, x550,
x1054, 0.1, x535, x823, 1.0E-5,
x2954, x2955));
};
float* x2961 = (float*)myGpuMalloc(x2945 * sizeof(float));
float* x2962 = (float*)myMalloc(1 * sizeof(float));;
x2962[0] = 0.0f;
float* x2964 = (float*)myMalloc(1 * sizeof(float));;
x2964[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x2964, x_desc, x2953, x2962, x_desc, x2953));
};
if (x2969) {
} else {
assert(false && "ERROR not specified");
}
float* x2982 = (float*)myGpuMalloc(x2981 * sizeof(float));
float* x2983 = (float*)myMalloc(1 * sizeof(float));;
x2983[0] = 0.0f;
float* x2985 = (float*)myMalloc(1 * sizeof(float));;
x2985[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x2985, in_desc, x2953, filt_desc, x973,
conv_desc, algo, ws_data, ws_size,
x2983, out_desc, x2982));
};
float* x2988 = (float*)myGpuMalloc(x2981 * sizeof(float));
float* x2989 = (float*)myGpuMalloc(x2979 * sizeof(float));
float* x2990 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2991 = (float*)myGpuMalloc(256 * sizeof(float));
float* x2992 = (float*)myMalloc(1 * sizeof(float));;
x2992[0] = 0.0f;
float* x2994 = (float*)myMalloc(1 * sizeof(float));;
x2994[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x2994, x2992, in_desc, x2982, out_desc, x2989, sbmv_desc, x718,
x862, 0.1, x505, x1015, 1.0E-5,
x2990, x2991));
};
float* x2997 = (float*)myGpuMalloc(x2981 * sizeof(float));
float* x2998 = (float*)myMalloc(1 * sizeof(float));;
x2998[0] = 0.0f;
float* x3000 = (float*)myMalloc(1 * sizeof(float));;
x3000[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3000, x_desc, x2989, x2998, x_desc, x2989));
};
if (x3004) {
} else {
assert(false && "ERROR not specified");
}
float* x3016 = (float*)myGpuMalloc(x3015 * sizeof(float));
float* x3017 = (float*)myMalloc(1 * sizeof(float));;
x3017[0] = 0.0f;
float* x3019 = (float*)myMalloc(1 * sizeof(float));;
x3019[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3019, in_desc, x2989, filt_desc, x586,
conv_desc, algo, ws_data, ws_size,
x3017, out_desc, x3016));
};
float* x3022 = (float*)myGpuMalloc(x3015 * sizeof(float));
float* x3023 = (float*)myGpuMalloc(x3013 * sizeof(float));
float* x3024 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x3025 = (float*)myGpuMalloc(1024 * sizeof(float));
float* x3026 = (float*)myMalloc(1 * sizeof(float));;
x3026[0] = 0.0f;
float* x3028 = (float*)myMalloc(1 * sizeof(float));;
x3028[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3028, x3026, in_desc, x3016, out_desc, x3023, sbmv_desc, x1039,
x574, 0.1, x661, x844, 1.0E-5,
x3024, x3025));
};
float* x3031 = (float*)myGpuMalloc(x3015 * sizeof(float));
if (x3035) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2893) x Sym(2893), res: x Const(64) x Const(1024) x Sym(3010) x Sym(3010)");
}
float* x3040 = (float*)myMalloc(1 * sizeof(float));;
x3040[0] = 1.0f;
float* x3042 = (float*)myMalloc(1 * sizeof(float));;
x3042[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3040, bias_desc, x2906, x3042, out_desc, x3023));
};
float* x3045 = (float*)myMalloc(1 * sizeof(float));;
x3045[0] = 0.0f;
float* x3047 = (float*)myMalloc(1 * sizeof(float));;
x3047[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3047, x_desc, x3023, x3045, x_desc, x3023));
};
if (x3051) {
} else {
assert(false && "ERROR not specified");
}
float* x3063 = (float*)myGpuMalloc(x3062 * sizeof(float));
float* x3064 = (float*)myMalloc(1 * sizeof(float));;
x3064[0] = 0.0f;
float* x3066 = (float*)myMalloc(1 * sizeof(float));;
x3066[0] = 1.0f;
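// First block of the next, wider stage: 1x1 convolution, 1024 -> 512 channels (weights x712), output x3063.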
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3066, in_desc, x3023, filt_desc, x712,
conv_desc, algo, ws_data, ws_size,
x3064, out_desc, x3063));
};
float* x3069 = (float*)myGpuMalloc(x3062 * sizeof(float));
float* x3070 = (float*)myGpuMalloc(x3060 * sizeof(float));
float* x3071 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3072 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3073 = (float*)myMalloc(1 * sizeof(float));;
x3073[0] = 0.0f;
float* x3075 = (float*)myMalloc(1 * sizeof(float));;
x3075[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3075, x3073, in_desc, x3063, out_desc, x3070, sbmv_desc, x898,
x967, 0.1, x496, x658, 1.0E-5,
x3071, x3072));
};
float* x3078 = (float*)myGpuMalloc(x3062 * sizeof(float));
float* x3079 = (float*)myMalloc(1 * sizeof(float));;
x3079[0] = 0.0f;
float* x3081 = (float*)myMalloc(1 * sizeof(float));;
x3081[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3081, x_desc, x3070, x3079, x_desc, x3070));
};
if (x3086) {
} else {
assert(false && "ERROR not specified");
}
float* x3099 = (float*)myGpuMalloc(x3098 * sizeof(float));
float* x3100 = (float*)myMalloc(1 * sizeof(float));;
x3100[0] = 0.0f;
float* x3102 = (float*)myMalloc(1 * sizeof(float));;
x3102[0] = 1.0f;
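// 3x3 convolution, 512 -> 512 channels with stride 2 and padding 1 (weights x397): halves the spatial resolution.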
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3102, in_desc, x3070, filt_desc, x397,
conv_desc, algo, ws_data, ws_size,
x3100, out_desc, x3099));
};
float* x3105 = (float*)myGpuMalloc(x3098 * sizeof(float));
float* x3106 = (float*)myGpuMalloc(x3096 * sizeof(float));
float* x3107 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3108 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3109 = (float*)myMalloc(1 * sizeof(float));;
x3109[0] = 0.0f;
float* x3111 = (float*)myMalloc(1 * sizeof(float));;
x3111[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3111, x3109, in_desc, x3099, out_desc, x3106, sbmv_desc, x910,
x772, 0.1, x634, x445, 1.0E-5,
x3107, x3108));
};
float* x3114 = (float*)myGpuMalloc(x3098 * sizeof(float));
float* x3115 = (float*)myMalloc(1 * sizeof(float));;
x3115[0] = 0.0f;
float* x3117 = (float*)myMalloc(1 * sizeof(float));;
x3117[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3117, x_desc, x3106, x3115, x_desc, x3106));
};
if (x3121) {
} else {
assert(false && "ERROR not specified");
}
float* x3133 = (float*)myGpuMalloc(x3132 * sizeof(float));
float* x3134 = (float*)myMalloc(1 * sizeof(float));;
x3134[0] = 0.0f;
float* x3136 = (float*)myMalloc(1 * sizeof(float));;
x3136[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3136, in_desc, x3106, filt_desc, x931,
conv_desc, algo, ws_data, ws_size,
x3134, out_desc, x3133));
};
float* x3139 = (float*)myGpuMalloc(x3132 * sizeof(float));
float* x3140 = (float*)myGpuMalloc(x3130 * sizeof(float));
float* x3141 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3142 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3143 = (float*)myMalloc(1 * sizeof(float));;
x3143[0] = 0.0f;
float* x3145 = (float*)myMalloc(1 * sizeof(float));;
x3145[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3145, x3143, in_desc, x3133, out_desc, x3140, sbmv_desc, x1012,
x481, 0.1, x640, x874, 1.0E-5,
x3141, x3142));
};
float* x3148 = (float*)myGpuMalloc(x3132 * sizeof(float));
if (x3051) {
} else {
assert(false && "ERROR not specified");
}
float* x3159 = (float*)myGpuMalloc(x3158 * sizeof(float));
float* x3160 = (float*)myMalloc(1 * sizeof(float));;
x3160[0] = 0.0f;
float* x3162 = (float*)myMalloc(1 * sizeof(float));;
x3162[0] = 1.0f;
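// Projection shortcut: 1x1 convolution, 1024 -> 2048 channels with stride 2 (weights x937),
// applied to the block input x3023 so the skip connection matches the main path's shape.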
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 1024, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3162, in_desc, x3023, filt_desc, x937,
conv_desc, algo, ws_data, ws_size,
x3160, out_desc, x3159));
};
float* x3165 = (float*)myGpuMalloc(x3158 * sizeof(float));
float* x3166 = (float*)myGpuMalloc(x3156 * sizeof(float));
float* x3167 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3168 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3169 = (float*)myMalloc(1 * sizeof(float));;
x3169[0] = 0.0f;
float* x3171 = (float*)myMalloc(1 * sizeof(float));;
x3171[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3171, x3169, in_desc, x3159, out_desc, x3166, sbmv_desc, x814,
x616, 0.1, x487, x670, 1.0E-5,
x3167, x3168));
};
float* x3174 = (float*)myGpuMalloc(x3158 * sizeof(float));
if (x3178) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3153) x Sym(3153), res: x Const(64) x Const(2048) x Sym(3127) x Sym(3127)");
}
float* x3183 = (float*)myMalloc(1 * sizeof(float));;
x3183[0] = 1.0f;
float* x3185 = (float*)myMalloc(1 * sizeof(float));;
x3185[0] = 1.0f;
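// Residual connection: add the projected shortcut x3166 (after its batch norm) onto the main-path output x3140.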
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3183, bias_desc, x3166, x3185, out_desc, x3140));
};
float* x3188 = (float*)myMalloc(1 * sizeof(float));;
x3188[0] = 0.0f;
float* x3190 = (float*)myMalloc(1 * sizeof(float));;
x3190[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3190, x_desc, x3140, x3188, x_desc, x3140));
};
if (x3194) {
} else {
assert(false && "ERROR not specified");
}
float* x3206 = (float*)myGpuMalloc(x3205 * sizeof(float));
float* x3207 = (float*)myMalloc(1 * sizeof(float));;
x3207[0] = 0.0f;
float* x3209 = (float*)myMalloc(1 * sizeof(float));;
x3209[0] = 1.0f;
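// Next 2048-channel bottleneck block: 1x1 convolution, 2048 -> 512 channels (weights x940), input x3140.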
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3209, in_desc, x3140, filt_desc, x940,
conv_desc, algo, ws_data, ws_size,
x3207, out_desc, x3206));
};
float* x3212 = (float*)myGpuMalloc(x3205 * sizeof(float));
float* x3213 = (float*)myGpuMalloc(x3203 * sizeof(float));
float* x3214 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3215 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3216 = (float*)myMalloc(1 * sizeof(float));;
x3216[0] = 0.0f;
float* x3218 = (float*)myMalloc(1 * sizeof(float));;
x3218[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3218, x3216, in_desc, x3206, out_desc, x3213, sbmv_desc, x433,
x706, 0.1, x757, x490, 1.0E-5,
x3214, x3215));
};
float* x3221 = (float*)myGpuMalloc(x3205 * sizeof(float));
float* x3222 = (float*)myMalloc(1 * sizeof(float));;
x3222[0] = 0.0f;
float* x3224 = (float*)myMalloc(1 * sizeof(float));;
x3224[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3224, x_desc, x3213, x3222, x_desc, x3213));
};
if (x3229) {
} else {
assert(false && "ERROR not specified");
}
float* x3242 = (float*)myGpuMalloc(x3241 * sizeof(float));
float* x3243 = (float*)myMalloc(1 * sizeof(float));;
x3243[0] = 0.0f;
float* x3245 = (float*)myMalloc(1 * sizeof(float));;
x3245[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3245, in_desc, x3213, filt_desc, x760,
conv_desc, algo, ws_data, ws_size,
x3243, out_desc, x3242));
};
float* x3248 = (float*)myGpuMalloc(x3241 * sizeof(float));
float* x3249 = (float*)myGpuMalloc(x3239 * sizeof(float));
float* x3250 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3251 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3252 = (float*)myMalloc(1 * sizeof(float));;
x3252[0] = 0.0f;
float* x3254 = (float*)myMalloc(1 * sizeof(float));;
x3254[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3254, x3252, in_desc, x3242, out_desc, x3249, sbmv_desc, x775,
x493, 0.1, x709, x880, 1.0E-5,
x3250, x3251));
};
float* x3257 = (float*)myGpuMalloc(x3241 * sizeof(float));
float* x3258 = (float*)myMalloc(1 * sizeof(float));;
x3258[0] = 0.0f;
float* x3260 = (float*)myMalloc(1 * sizeof(float));;
x3260[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3260, x_desc, x3249, x3258, x_desc, x3249));
};
if (x3264) {
} else {
assert(false && "ERROR not specified");
}
float* x3276 = (float*)myGpuMalloc(x3275 * sizeof(float));
float* x3277 = (float*)myMalloc(1 * sizeof(float));;
x3277[0] = 0.0f;
float* x3279 = (float*)myMalloc(1 * sizeof(float));;
x3279[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3279, in_desc, x3249, filt_desc, x436,
conv_desc, algo, ws_data, ws_size,
x3277, out_desc, x3276));
};
float* x3282 = (float*)myGpuMalloc(x3275 * sizeof(float));
float* x3283 = (float*)myGpuMalloc(x3273 * sizeof(float));
float* x3284 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3285 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3286 = (float*)myMalloc(1 * sizeof(float));;
x3286[0] = 0.0f;
float* x3288 = (float*)myMalloc(1 * sizeof(float));;
x3288[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3288, x3286, in_desc, x3276, out_desc, x3283, sbmv_desc, x577,
x727, 0.1, x499, x1030, 1.0E-5,
x3284, x3285));
};
float* x3291 = (float*)myGpuMalloc(x3275 * sizeof(float));
if (x3295) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3127) x Sym(3127), res: x Const(64) x Const(2048) x Sym(3270) x Sym(3270)");
}
float* x3300 = (float*)myMalloc(1 * sizeof(float));;
x3300[0] = 1.0f;
float* x3302 = (float*)myMalloc(1 * sizeof(float));;
x3302[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3300, bias_desc, x3140, x3302, out_desc, x3283));
};
float* x3305 = (float*)myMalloc(1 * sizeof(float));;
x3305[0] = 0.0f;
float* x3307 = (float*)myMalloc(1 * sizeof(float));;
x3307[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3307, x_desc, x3283, x3305, x_desc, x3283));
};
if (x3311) {
} else {
assert(false && "ERROR not specified");
}
float* x3323 = (float*)myGpuMalloc(x3322 * sizeof(float));
float* x3324 = (float*)myMalloc(1 * sizeof(float));;
x3324[0] = 0.0f;
float* x3326 = (float*)myMalloc(1 * sizeof(float));;
x3326[0] = 1.0f;
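// Last bottleneck block before the average pooling below: 1x1 convolution, 2048 -> 512 channels (weights x1090), input x3283.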
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3326, in_desc, x3283, filt_desc, x1090,
conv_desc, algo, ws_data, ws_size,
x3324, out_desc, x3323));
};
float* x3329 = (float*)myGpuMalloc(x3322 * sizeof(float));
float* x3330 = (float*)myGpuMalloc(x3320 * sizeof(float));
float* x3331 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3332 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3333 = (float*)myMalloc(1 * sizeof(float));;
x3333[0] = 0.0f;
float* x3335 = (float*)myMalloc(1 * sizeof(float));;
x3335[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3335, x3333, in_desc, x3323, out_desc, x3330, sbmv_desc, x340,
x529, 0.1, x934, x1060, 1.0E-5,
x3331, x3332));
};
float* x3338 = (float*)myGpuMalloc(x3322 * sizeof(float));
float* x3339 = (float*)myMalloc(1 * sizeof(float));;
x3339[0] = 0.0f;
float* x3341 = (float*)myMalloc(1 * sizeof(float));;
x3341[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3341, x_desc, x3330, x3339, x_desc, x3330));
};
if (x3346) {
} else {
assert(false && "ERROR not specified");
}
float* x3359 = (float*)myGpuMalloc(x3358 * sizeof(float));
float* x3360 = (float*)myMalloc(1 * sizeof(float));;
x3360[0] = 0.0f;
float* x3362 = (float*)myMalloc(1 * sizeof(float));;
x3362[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3362, in_desc, x3330, filt_desc, x379,
conv_desc, algo, ws_data, ws_size,
x3360, out_desc, x3359));
};
float* x3365 = (float*)myGpuMalloc(x3358 * sizeof(float));
float* x3366 = (float*)myGpuMalloc(x3356 * sizeof(float));
float* x3367 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3368 = (float*)myGpuMalloc(512 * sizeof(float));
float* x3369 = (float*)myMalloc(1 * sizeof(float));;
x3369[0] = 0.0f;
float* x3371 = (float*)myMalloc(1 * sizeof(float));;
x3371[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3371, x3369, in_desc, x3359, out_desc, x3366, sbmv_desc, x877,
x802, 0.1, x331, x901, 1.0E-5,
x3367, x3368));
};
float* x3374 = (float*)myGpuMalloc(x3358 * sizeof(float));
float* x3375 = (float*)myMalloc(1 * sizeof(float));;
x3375[0] = 0.0f;
float* x3377 = (float*)myMalloc(1 * sizeof(float));;
x3377[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3377, x_desc, x3366, x3375, x_desc, x3366));
};
if (x3381) {
} else {
assert(false && "ERROR not specified");
}
float* x3393 = (float*)myGpuMalloc(x3392 * sizeof(float));
float* x3394 = (float*)myMalloc(1 * sizeof(float));;
x3394[0] = 0.0f;
float* x3396 = (float*)myMalloc(1 * sizeof(float));;
x3396[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
cudnnHandle,
in_desc, filt_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
// Execute convolution.
CUDNN_CALL(cudnnConvolutionForward(
cudnnHandle,
x3396, in_desc, x3366, filt_desc, x394,
conv_desc, algo, ws_data, ws_size,
x3394, out_desc, x3393));
};
float* x3399 = (float*)myGpuMalloc(x3392 * sizeof(float));
float* x3400 = (float*)myGpuMalloc(x3390 * sizeof(float));
float* x3401 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3402 = (float*)myGpuMalloc(2048 * sizeof(float));
float* x3403 = (float*)myMalloc(1 * sizeof(float));;
x3403[0] = 0.0f;
float* x3405 = (float*)myMalloc(1 * sizeof(float));;
x3405[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationForwardTraining(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3405, x3403, in_desc, x3393, out_desc, x3400, sbmv_desc, x604,
x838, 0.1, x1075, x664, 1.0E-5,
x3401, x3402));
};
float* x3408 = (float*)myGpuMalloc(x3392 * sizeof(float));
if (x3412) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3270) x Sym(3270), res: x Const(64) x Const(2048) x Sym(3387) x Sym(3387)");
}
float* x3417 = (float*)myMalloc(1 * sizeof(float));;
x3417[0] = 1.0f;
float* x3419 = (float*)myMalloc(1 * sizeof(float));;
x3419[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3417, bias_desc, x3283, x3419, out_desc, x3400));
};
float* x3422 = (float*)myMalloc(1 * sizeof(float));;
x3422[0] = 0.0f;
float* x3424 = (float*)myMalloc(1 * sizeof(float));;
x3424[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
cudnnHandle, act_desc,
x3424, x_desc, x3400, x3422, x_desc, x3400));
};
if (x3428) {
} else {
assert(false && "Image too small for averagePool_batch: x Const(64) x Const(2048) x Sym(3387) x Sym(3387)|(2,2)");
}
float* x3433 = (float*)myMalloc(1 * sizeof(float));;
x3433[0] = 0.0f;
float* x3435 = (float*)myMalloc(1 * sizeof(float));;
x3435[0] = 1.0f;
float* x3445 = (float*)myGpuMalloc(x3444 * sizeof(float));
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3439, x3439));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN,
2, 2, 0,
0, 1, 1
));
CUDNN_CALL(cudnnPoolingForward(
cudnnHandle,
poolingDesc,
x3435, in_desc, x3400, x3433, out_desc, x3445));
};
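// Average pooling with a 2x2 window, zero padding, and unit stride
// (CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING); the pooled result x3445 feeds the
// fully connected classifier below.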
float* x3447 = (float*)myGpuMalloc(x3444 * sizeof(float));
int32_t x3448 = 0;
int32_t x3449 = 1;
x3449 *= 64;
x3448 += 1;
int32_t x3452 = x3448;
bool x3453 = x3452 >= 2;
if (x3453) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3459 = x3452 == 0;
if (x3459) {
int32_t x3460 = x3449;
bool x3461 = x3460 == x3442;
if (x3461) {
} else {
assert(false && "must same size!!");
}
} else {
}
int32_t x3468 = x3449;
// forward of gemm
// gemm: List(Const(64), Sym(3469)), Vector(Const(10), Const(2048))
float* x3473 = (float*)myGpuMalloc(640 * sizeof(float));
float* x3474 = (float*)myMalloc(1 * sizeof(float));;
x3474[0] = 0.0f;
float* x3476 = (float*)myMalloc(1 * sizeof(float));;
x3476[0] = 1.0f;
CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, 10, 64, 2048, x3476, x976, 2048, x3445, 2048, x3474, x3473, 10));
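// Fully connected layer via cuBLAS (column-major): with op(A) = A^T this produces a
// 10 x 64 result, i.e. in row-major terms logits[64][10] = features[64][2048] * W^T,
// where x976 appears to hold the 10 x 2048 weight matrix and x3445 the pooled features.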
float* x3479 = (float*)myGpuMalloc(640 * sizeof(float));
float* x3480 = (float*)myMalloc(1 * sizeof(float));;
x3480[0] = 1.0f;
float* x3482 = (float*)myMalloc(1 * sizeof(float));;
x3482[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 10, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 10, 1, 1));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3480, bias_desc, x439, x3482, out_desc, x3473));
};
int32_t x3485 = 0;
int32_t x3486 = 1;
x3486 *= 64;
x3486 *= 10;
x3486 *= 1;
x3486 *= 1;
int32_t x3491 = x3485;
bool x3492 = x3491 >= 2;
if (x3492) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3497 = x3491 == 0;
if (x3497) {
int32_t x3498 = x3486;
bool x3499 = x3498 == 640;
if (x3499) {
} else {
assert(false && "must same size!!");
}
} else {
}
float* x3506 = (float*)myMalloc(1 * sizeof(float));;
x3506[0] = 0.0f;
float* x3508 = (float*)myMalloc(1 * sizeof(float));;
x3508[0] = 1.0f;
float* x3510 = (float*)myGpuMalloc(640 * sizeof(float));
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 10, 1, 1));
CUDNN_CALL(cudnnSoftmaxForward(
cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL,
x3508, x_desc, x3473, x3506, x_desc, x3510));
};
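// cudnnSoftmaxForward with CUDNN_SOFTMAX_LOG: x3510 receives per-sample log-probabilities
// (log-softmax over the 10 classes), which the NLL loss kernel below consumes.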
int32_t x3512 = 0;
int32_t x3513 = 1;
x3513 *= 64;
x3513 *= 10;
int32_t x3516 = x3512;
bool x3517 = x3516 >= 2;
if (x3517) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3522 = x3516 == 0;
if (x3522) {
int32_t x3523 = x3513;
bool x3524 = x3523 == 640;
if (x3524) {
} else {
assert(false && "must same size!!");
}
} else {
}
float* x3531 = (float*)myGpuMalloc(640 * sizeof(float));
float* x3532 = (float*)myGpuMalloc(64 * sizeof(float));
nllLoss<<<64, 1>>>(x3510, 10, x3532, x1405);
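// nllLoss is a custom kernel launched with one single-thread block per sample; presumably it
// gathers -logprob[target] for each of the 64 samples into x3532, with x1405 holding the
// integer target labels.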
float* x3534 = (float*)myGpuMalloc(64 * sizeof(float));
int32_t x3535 = 0;
int32_t x3536 = 1;
x3536 *= 64;
x3536 *= 1;
x3536 *= 1;
x3536 *= 1;
int32_t x3541 = x3535;
bool x3542 = x3541 >= 2;
if (x3542) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3547 = x3541 == 0;
if (x3547) {
int32_t x3548 = x3536;
bool x3549 = x3548 == 64;
if (x3549) {
} else {
assert(false && "must same size!!");
}
} else {
}
float* x3556 = (float*)myGpuMalloc(1 * sizeof(float));
float* x3557 = (float*)myMalloc(1 * sizeof(float));;
x3557[0] = 0.0f;
float* x3559 = (float*)myMalloc(1 * sizeof(float));;
x3559[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1, 1, 1));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1, 1, 1));
cudnnReduceTensorDescriptor_t reduce_desc;
CUDNN_CALL(cudnnCreateReduceTensorDescriptor(&reduce_desc));
CUDNN_CALL(cudnnSetReduceTensorDescriptor(
reduce_desc, CUDNN_REDUCE_TENSOR_AVG, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN,
CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_32BIT_INDICES));
void *indices = nullptr; // Don't store indices.
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetReductionWorkspaceSize(
cudnnHandle, reduce_desc, x_desc, out_desc, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnReduceTensor(
cudnnHandle, reduce_desc, indices, 0, ws_data, ws_size,
x3559, x_desc, x3532, x3557, out_desc, x3556));
};
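// cudnnReduceTensor with CUDNN_REDUCE_TENSOR_AVG averages the 64 per-sample losses in x3532
// down to the scalar mean loss in x3556 (no indices are stored).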
int32_t x3562 = 0;
int32_t x3563 = 1;
x3563 *= 1;
int32_t x3565 = x3562;
bool x3566 = x3565 >= 2;
if (x3566) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3571 = x3565 == 0;
if (x3571) {
int32_t x3572 = x3563;
bool x3573 = x3572 == 1;
if (x3573) {
} else {
assert(false && "must same size!!");
}
} else {
}
float* x3580 = (float*)myGpuMalloc(1 * sizeof(float));
// make sure the size of loss is 1
arrayFill<<<28, 512>>>(x3580, 1.0f, 1);
// backend is lantern.TensorDslCudnn$BackendCudnn@22cd45ab
CUDA_CALL(cudaMemcpy(x1410, x3556, 1 * sizeof(float), cudaMemcpyDeviceToDevice));
// 'mean' gradient
// backprop for mean op
float x3587 = x3580[0];
float x3588 = x3587 / 64.0f;
addScalar<<<28, 512>>>(x3534, x3534, x3588, 64);
// 'nllLossB' gradient.
nllLoss_grad<<<64, 1>>>(10, x3534, x1405, x3531);
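// Backward through the mean and the NLL loss: the seed gradient x3580 (filled with 1.0 above)
// is divided by the batch size, addScalar adds that 1/64 to the per-sample loss gradients
// x3534, and nllLoss_grad presumably scatters each sample's gradient into the target-class
// slot of x3531, the gradient w.r.t. the log-softmax output.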
int32_t x3592 = 0;
int32_t x3593 = 1;
x3593 *= 64;
x3593 *= 10;
x3593 *= 1;
x3593 *= 1;
int32_t x3598 = x3592;
bool x3599 = x3598 >= 2;
if (x3599) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3604 = x3598 == 0;
if (x3604) {
int32_t x3605 = x3593;
bool x3606 = x3605 == 640;
if (x3606) {
} else {
assert(false && "must same size!!");
}
} else {
}
int32_t x3613 = 0;
int32_t x3614 = 1;
x3614 *= 64;
x3614 *= 10;
x3614 *= 1;
x3614 *= 1;
int32_t x3619 = x3613;
bool x3620 = x3619 >= 2;
if (x3620) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3625 = x3619 == 0;
if (x3625) {
int32_t x3626 = x3614;
bool x3627 = x3626 == 640;
if (x3627) {
} else {
assert(false && "must same size!!");
}
} else {
}
int32_t x3634 = 0;
int32_t x3635 = 1;
x3635 *= 64;
x3635 *= 10;
x3635 *= 1;
x3635 *= 1;
int32_t x3640 = x3634;
bool x3641 = x3640 >= 2;
if (x3641) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3646 = x3640 == 0;
if (x3646) {
int32_t x3647 = x3635;
bool x3648 = x3647 == 640;
if (x3648) {
} else {
assert(false && "must same size!!");
}
} else {
}
int32_t x3655 = 0;
int32_t x3656 = 1;
x3656 *= 64;
x3656 *= 10;
x3656 *= 1;
x3656 *= 1;
int32_t x3661 = x3655;
bool x3662 = x3661 >= 2;
if (x3662) {
printf("cannot have 2 or more -1s in resize!!\n");
assert(false && "");
} else {
}
bool x3667 = x3661 == 0;
if (x3667) {
int32_t x3668 = x3656;
bool x3669 = x3668 == 640;
if (x3669) {
} else {
assert(false && "must same size!!");
}
} else {
}
float* x3676 = (float*)myMalloc(1 * sizeof(float));;
x3676[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 10, 1, 1));
CUDNN_CALL(cudnnSoftmaxBackward(
cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL,
x3676, x_desc, x3510, x_desc, x3531,
x3676, x_desc, x3479));
};
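// cudnnSoftmaxBackward (log mode) turns the gradient w.r.t. the log-probabilities (x3531)
// into the gradient w.r.t. the logits, accumulated into x3479 (alpha = beta = 1).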
float* x3679 = (float*)myMalloc(1 * sizeof(float));;
x3679[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 10, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 10, 1, 1));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x3679, grad_out_desc, x3479,
x3679, grad_bias_desc, x1155));
};
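// The bias gradient of the final linear layer is obtained with cudnnConvolutionBackwardBias,
// which sums the logit gradients x3479 over the batch dimension, accumulating into x1155
// (shape 1x10x1x1).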
// backprop for gemm List(Const(64), Sym(3469)), Vector(Const(10), Const(2048))
float* x3683 = (float*)myMalloc(1 * sizeof(float));;
x3683[0] = 1.0f;
float* x3685 = (float*)myMalloc(1 * sizeof(float));;
x3685[0] = 1.0f;
// backprop of gemm
int32_t x3469 = x3442 / x3468;
CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, x3469, 64, 10, x3683, x976, x3469, x3479, 10, x3685, x3447, x3469));
CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, x3469, 10, 64, x3683, x3445, x3469, x3479, 10, x3685, x1334, x3469));
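// Backward through the linear layer: the first Sgemm accumulates the gradient w.r.t. the
// pooled features (row-major: dFeatures[64][2048] += dLogits[64][10] * W[10][2048]) into
// x3447, and the second accumulates the weight gradient (dW[10][2048] += dLogits^T * features)
// into x1334; both use beta = 1 so existing gradients are preserved.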
float* x3690 = (float*)myMalloc(1 * sizeof(float));;
x3690[0] = 0.0f;
float* x3692 = (float*)myMalloc(1 * sizeof(float));;
x3692[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3439, x3439));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN,
2, 2, 0,
0, 1, 1
));
CUDNN_CALL(cudnnPoolingBackward(
cudnnHandle,
poolingDesc,
x3692, out_desc, x3445, out_desc, x3447, in_desc, x3400, x3690, in_desc, x3408));
};
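// Average-pooling backward: each element of the pooled-output gradient x3447 is distributed
// evenly over its 2x2 input window, writing the result into x3408 (beta = 0 overwrites).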
float* x3695 = (float*)myMalloc(1 * sizeof(float));;
x3695[0] = 1.0f;
float* x3697 = (float*)myMalloc(1 * sizeof(float));;
x3697[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3695, x_desc, x3400, x_desc, x3408, x_desc, x3400,
x3697, x_desc, x3408));
};
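// ReLU backward, done in place: x3400 serves as both the saved output and input of the
// forward ReLU, and x3408 is overwritten with the masked gradient (beta = 0).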
if (x3701) {
if (x3704) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3387) x Sym(3387), res: x Const(64) x Const(2048) x Sym(3270) x Sym(3270)");
}
float* x3709 = (float*)myMalloc(1 * sizeof(float));;
x3709[0] = 1.0f;
float* x3711 = (float*)myMalloc(1 * sizeof(float));;
x3711[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3709, bias_desc, x3408, x3711, out_desc, x3291));
};
} else {
float* x3715 = (float*)myMalloc(1 * sizeof(float));;
x3715[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x3715, grad_out_desc, x3408,
x3715, grad_bias_desc, x3291));
};
}
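// Routing of the upstream gradient into the shortcut branch: if the shapes line up
// (the x3701/x3704 guards), x3408 is added into the shortcut gradient buffer x3291 with
// cudnnAddTensor; otherwise the generated fallback reduces it into x3291 with
// cudnnConvolutionBackwardBias.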
float* x3720 = (float*)myMalloc(1 * sizeof(float));;
x3720[0] = 0.0f;
float* x3722 = (float*)myMalloc(1 * sizeof(float));;
x3722[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3722, x3722, x3722, x3722, in_desc, x3393,
out_desc, x3408, in_desc, x3399, sbmv_desc, x604,
x1210,x1288, 1.0E-5, x3401, x3402));
};
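// Batch-norm backward: consumes the saved mean/inverse variance from the forward pass
// (x3401, x3402) and produces the input gradient in x3399 plus the scale and bias gradients
// in x1210 and x1288; gradients are accumulated since alpha = beta = 1 here.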
// conv2D back-propagate
float* x3726 = (float*)myMalloc(1 * sizeof(float));;
x3726[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3726, filt_desc, x394, grad_out_desc, x3399,
conv_desc, algo, ws_data, ws_size,
x3726, grad_in_desc, x3374));
};
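// Convolution backward-data for the 1x1 expansion conv: propagates x3399 through the filter
// x394 and accumulates the result into the input gradient x3374 (alpha = beta = 1). The
// commented-out line above shows where a fixed algorithm could be forced instead of the
// heuristic choice.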
float* x3729 = (float*)myMalloc(1 * sizeof(float));;
x3729[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3387, x3387));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3729, in_desc, x3366, grad_out_desc, x3399,
conv_desc, algo, ws_data, ws_size,
x3729, grad_filt_desc, x1140));
};
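// Convolution backward-filter accumulates the weight gradient into x1140; note that the
// heuristic algorithm choice is overridden with CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 above.
// From here the same backward pattern (ReLU backward, batch-norm backward, backward-data,
// backward-filter) repeats for each earlier layer of the network.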
float* x3732 = (float*)myMalloc(1 * sizeof(float));;
x3732[0] = 1.0f;
float* x3734 = (float*)myMalloc(1 * sizeof(float));;
x3734[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3732, x_desc, x3366, x_desc, x3374, x_desc, x3366,
x3734, x_desc, x3374));
};
float* x3737 = (float*)myMalloc(1 * sizeof(float));;
x3737[0] = 0.0f;
float* x3739 = (float*)myMalloc(1 * sizeof(float));;
x3739[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3739, x3739, x3739, x3739, in_desc, x3359,
out_desc, x3374, in_desc, x3365, sbmv_desc, x877,
x1301,x1276, 1.0E-5, x3367, x3368));
};
// conv2D back-propagate
float* x3743 = (float*)myMalloc(1 * sizeof(float));;
x3743[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3743, filt_desc, x379, grad_out_desc, x3365,
conv_desc, algo, ws_data, ws_size,
x3743, grad_in_desc, x3338));
};
float* x3746 = (float*)myMalloc(1 * sizeof(float));;
x3746[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3353, x3353));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3746, in_desc, x3330, grad_out_desc, x3365,
conv_desc, algo, ws_data, ws_size,
x3746, grad_filt_desc, x1135));
};
float* x3749 = (float*)myMalloc(1 * sizeof(float));;
x3749[0] = 1.0f;
float* x3751 = (float*)myMalloc(1 * sizeof(float));;
x3751[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3749, x_desc, x3330, x_desc, x3338, x_desc, x3330,
x3751, x_desc, x3338));
};
float* x3754 = (float*)myMalloc(1 * sizeof(float));;
x3754[0] = 0.0f;
float* x3756 = (float*)myMalloc(1 * sizeof(float));;
x3756[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3756, x3756, x3756, x3756, in_desc, x3323,
out_desc, x3338, in_desc, x3329, sbmv_desc, x340,
x1122,x1185, 1.0E-5, x3331, x3332));
};
// conv2D back-propagate
float* x3760 = (float*)myMalloc(1 * sizeof(float));;
x3760[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3760, filt_desc, x1090, grad_out_desc, x3329,
conv_desc, algo, ws_data, ws_size,
x3760, grad_in_desc, x3291));
};
float* x3763 = (float*)myMalloc(1 * sizeof(float));;
x3763[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3317, x3317));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3763, in_desc, x3283, grad_out_desc, x3329,
conv_desc, algo, ws_data, ws_size,
x3763, grad_filt_desc, x1372));
};
float* x3766 = (float*)myMalloc(1 * sizeof(float));;
x3766[0] = 1.0f;
float* x3768 = (float*)myMalloc(1 * sizeof(float));;
x3768[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3766, x_desc, x3283, x_desc, x3291, x_desc, x3283,
x3768, x_desc, x3291));
};
if (x3772) {
if (x3774) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3270) x Sym(3270), res: x Const(64) x Const(2048) x Sym(3127) x Sym(3127)");
}
float* x3779 = (float*)myMalloc(1 * sizeof(float));;
x3779[0] = 1.0f;
float* x3781 = (float*)myMalloc(1 * sizeof(float));;
x3781[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3779, bias_desc, x3291, x3781, out_desc, x3148));
};
} else {
float* x3785 = (float*)myMalloc(1 * sizeof(float));;
x3785[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x3785, grad_out_desc, x3291,
x3785, grad_bias_desc, x3148));
};
}
float* x3790 = (float*)myMalloc(1 * sizeof(float));;
x3790[0] = 0.0f;
float* x3792 = (float*)myMalloc(1 * sizeof(float));;
x3792[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3792, x3792, x3792, x3792, in_desc, x3276,
out_desc, x3291, in_desc, x3282, sbmv_desc, x577,
x1201,x1251, 1.0E-5, x3284, x3285));
};
// conv2D back-propagate
float* x3796 = (float*)myMalloc(1 * sizeof(float));;
x3796[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3796, filt_desc, x436, grad_out_desc, x3282,
conv_desc, algo, ws_data, ws_size,
x3796, grad_in_desc, x3257));
};
float* x3799 = (float*)myMalloc(1 * sizeof(float));;
x3799[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3270, x3270));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3799, in_desc, x3249, grad_out_desc, x3282,
conv_desc, algo, ws_data, ws_size,
x3799, grad_filt_desc, x1154));
};
float* x3802 = (float*)myMalloc(1 * sizeof(float));;
x3802[0] = 1.0f;
float* x3804 = (float*)myMalloc(1 * sizeof(float));;
x3804[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3802, x_desc, x3249, x_desc, x3257, x_desc, x3249,
x3804, x_desc, x3257));
};
float* x3807 = (float*)myMalloc(1 * sizeof(float));;
x3807[0] = 0.0f;
float* x3809 = (float*)myMalloc(1 * sizeof(float));;
x3809[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3809, x3809, x3809, x3809, in_desc, x3242,
out_desc, x3257, in_desc, x3248, sbmv_desc, x775,
x1267,x1173, 1.0E-5, x3250, x3251));
};
// conv2D back-propagate
float* x3813 = (float*)myMalloc(1 * sizeof(float));;
x3813[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3813, filt_desc, x760, grad_out_desc, x3248,
conv_desc, algo, ws_data, ws_size,
x3813, grad_in_desc, x3221));
};
float* x3816 = (float*)myMalloc(1 * sizeof(float));;
x3816[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3236, x3236));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3816, in_desc, x3213, grad_out_desc, x3248,
conv_desc, algo, ws_data, ws_size,
x3816, grad_filt_desc, x1262));
};
float* x3819 = (float*)myMalloc(1 * sizeof(float));;
x3819[0] = 1.0f;
float* x3821 = (float*)myMalloc(1 * sizeof(float));;
x3821[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3819, x_desc, x3213, x_desc, x3221, x_desc, x3213,
x3821, x_desc, x3221));
};
float* x3824 = (float*)myMalloc(1 * sizeof(float));;
x3824[0] = 0.0f;
float* x3826 = (float*)myMalloc(1 * sizeof(float));;
x3826[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3826, x3826, x3826, x3826, in_desc, x3206,
out_desc, x3221, in_desc, x3212, sbmv_desc, x433,
x1153,x1244, 1.0E-5, x3214, x3215));
};
// conv2D back-propagate
float* x3830 = (float*)myMalloc(1 * sizeof(float));;
x3830[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3830, filt_desc, x940, grad_out_desc, x3212,
conv_desc, algo, ws_data, ws_size,
x3830, grad_in_desc, x3148));
};
float* x3833 = (float*)myMalloc(1 * sizeof(float));;
x3833[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 2048, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3200, x3200));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3833, in_desc, x3140, grad_out_desc, x3212,
conv_desc, algo, ws_data, ws_size,
x3833, grad_filt_desc, x1322));
};
float* x3836 = (float*)myMalloc(1 * sizeof(float));;
x3836[0] = 1.0f;
float* x3838 = (float*)myMalloc(1 * sizeof(float));;
x3838[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3836, x_desc, x3140, x_desc, x3148, x_desc, x3140,
x3838, x_desc, x3148));
};
if (x3842) {
if (x3844) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3127) x Sym(3127), res: x Const(64) x Const(2048) x Sym(3153) x Sym(3153)");
}
float* x3849 = (float*)myMalloc(1 * sizeof(float));;
x3849[0] = 1.0f;
float* x3851 = (float*)myMalloc(1 * sizeof(float));;
x3851[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3849, bias_desc, x3148, x3851, out_desc, x3174));
};
} else {
float* x3855 = (float*)myMalloc(1 * sizeof(float));;
x3855[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x3855, grad_out_desc, x3148,
x3855, grad_bias_desc, x3174));
};
}
float* x3860 = (float*)myMalloc(1 * sizeof(float));;
x3860[0] = 0.0f;
float* x3862 = (float*)myMalloc(1 * sizeof(float));;
x3862[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3862, x3862, x3862, x3862, in_desc, x3159,
out_desc, x3174, in_desc, x3165, sbmv_desc, x814,
x1280,x1214, 1.0E-5, x3167, x3168));
};
// conv2D back-propagate
float* x3866 = (float*)myMalloc(1 * sizeof(float));;
x3866[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 1024, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3866, filt_desc, x937, grad_out_desc, x3165,
conv_desc, algo, ws_data, ws_size,
x3866, grad_in_desc, x3031));
};
float* x3869 = (float*)myMalloc(1 * sizeof(float));;
x3869[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 1024, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3153, x3153));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3869, in_desc, x3023, grad_out_desc, x3165,
conv_desc, algo, ws_data, ws_size,
x3869, grad_filt_desc, x1321));
};
float* x3872 = (float*)myMalloc(1 * sizeof(float));;
x3872[0] = 0.0f;
float* x3874 = (float*)myMalloc(1 * sizeof(float));;
x3874[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 2048, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3874, x3874, x3874, x3874, in_desc, x3133,
out_desc, x3148, in_desc, x3139, sbmv_desc, x1012,
x1346,x1169, 1.0E-5, x3141, x3142));
};
// conv2D back-propagate
float* x3878 = (float*)myMalloc(1 * sizeof(float));;
x3878[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3878, filt_desc, x931, grad_out_desc, x3139,
conv_desc, algo, ws_data, ws_size,
x3878, grad_in_desc, x3114));
};
float* x3881 = (float*)myMalloc(1 * sizeof(float));;
x3881[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
2048, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 2048, x3127, x3127));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3881, in_desc, x3106, grad_out_desc, x3139,
conv_desc, algo, ws_data, ws_size,
x3881, grad_filt_desc, x1319));
};
float* x3884 = (float*)myMalloc(1 * sizeof(float));;
x3884[0] = 1.0f;
float* x3886 = (float*)myMalloc(1 * sizeof(float));;
x3886[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3884, x_desc, x3106, x_desc, x3114, x_desc, x3106,
x3886, x_desc, x3114));
};
float* x3889 = (float*)myMalloc(1 * sizeof(float));;
x3889[0] = 0.0f;
float* x3891 = (float*)myMalloc(1 * sizeof(float));;
x3891[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3891, x3891, x3891, x3891, in_desc, x3099,
out_desc, x3114, in_desc, x3105, sbmv_desc, x910,
x1312,x1266, 1.0E-5, x3107, x3108));
};
// conv2D back-propagate
float* x3895 = (float*)myMalloc(1 * sizeof(float));;
x3895[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3895, filt_desc, x397, grad_out_desc, x3105,
conv_desc, algo, ws_data, ws_size,
x3895, grad_in_desc, x3078));
};
float* x3898 = (float*)myMalloc(1 * sizeof(float));;
x3898[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 512, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3093, x3093));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3898, in_desc, x3070, grad_out_desc, x3105,
conv_desc, algo, ws_data, ws_size,
x3898, grad_filt_desc, x1141));
};
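// The pair above handles the 3x3, stride-2 convolution of this block (pad 1,1, stride 2,2,
// 512 -> 512 channels), i.e. the spatially downsampling convolution: backward-data scatters
// the gradient back to the larger input resolution, backward-filter reduces over it.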
float* x3901 = (float*)myMalloc(1 * sizeof(float));;
x3901[0] = 1.0f;
float* x3903 = (float*)myMalloc(1 * sizeof(float));;
x3903[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3901, x_desc, x3070, x_desc, x3078, x_desc, x3070,
x3903, x_desc, x3078));
};
float* x3906 = (float*)myMalloc(1 * sizeof(float));;
x3906[0] = 0.0f;
float* x3908 = (float*)myMalloc(1 * sizeof(float));;
x3908[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3908, x3908, x3908, x3908, in_desc, x3063,
out_desc, x3078, in_desc, x3069, sbmv_desc, x898,
x1308,x1331, 1.0E-5, x3071, x3072));
};
// conv2D back-propagate
float* x3912 = (float*)myMalloc(1 * sizeof(float));;
x3912[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 1024, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3912, filt_desc, x712, grad_out_desc, x3069,
conv_desc, algo, ws_data, ws_size,
x3912, grad_in_desc, x3031));
};
float* x3915 = (float*)myMalloc(1 * sizeof(float));;
x3915[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 1024, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x3057, x3057));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3915, in_desc, x3023, grad_out_desc, x3069,
conv_desc, algo, ws_data, ws_size,
x3915, grad_filt_desc, x1246));
};
float* x3918 = (float*)myMalloc(1 * sizeof(float));;
x3918[0] = 1.0f;
float* x3920 = (float*)myMalloc(1 * sizeof(float));;
x3920[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3918, x_desc, x3023, x_desc, x3031, x_desc, x3023,
x3920, x_desc, x3031));
};
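// Gradient hand-off at a broadcasted add (apparently the residual/skip connection of this
// block). When the guard flags (x3924, x3927 here; defined earlier in the generated program)
// indicate matching shapes, cudnnAddTensor with alpha = beta = 1.0f adds the gradient
// computed above elementwise into the gradient buffer of the other addend of the forward
// add. Otherwise the gradient is routed through cudnnConvolutionBackwardBias, i.e. reduced
// and accumulated into the destination buffer (again with alpha = beta = 1.0f).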
if (x3924) {
if (x3927) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(3010) x Sym(3010), res: x Const(64) x Const(1024) x Sym(2893) x Sym(2893)");
}
float* x3932 = (float*)myMalloc(1 * sizeof(float));;
x3932[0] = 1.0f;
float* x3934 = (float*)myMalloc(1 * sizeof(float));;
x3934[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x3932, bias_desc, x3031, x3934, out_desc, x2914));
};
} else {
float* x3938 = (float*)myMalloc(1 * sizeof(float));;
x3938[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x3938, grad_out_desc, x3031,
x3938, grad_bias_desc, x2914));
};
}
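// From here on the same sequence repeats for the remaining layers, walking the network in
// reverse order: ReLU backward, batch-norm backward, then a conv2D back-propagate pair,
// with the shape-guarded accumulation into the skip connection at each block boundary.
// Only the tensor sizes and the parameter/gradient buffers change.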
float* x3943 = (float*)myMalloc(1 * sizeof(float));;
x3943[0] = 0.0f;
float* x3945 = (float*)myMalloc(1 * sizeof(float));;
x3945[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3945, x3945, x3945, x3945, in_desc, x3016,
out_desc, x3031, in_desc, x3022, sbmv_desc, x1039,
x1355,x1200, 1.0E-5, x3024, x3025));
};
// conv2D back-propagate
float* x3949 = (float*)myMalloc(1 * sizeof(float));;
x3949[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3949, filt_desc, x586, grad_out_desc, x3022,
conv_desc, algo, ws_data, ws_size,
x3949, grad_in_desc, x2997));
};
float* x3952 = (float*)myMalloc(1 * sizeof(float));;
x3952[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x3010, x3010));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3952, in_desc, x2989, grad_out_desc, x3022,
conv_desc, algo, ws_data, ws_size,
x3952, grad_filt_desc, x1204));
};
float* x3955 = (float*)myMalloc(1 * sizeof(float));;
x3955[0] = 1.0f;
float* x3957 = (float*)myMalloc(1 * sizeof(float));;
x3957[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3955, x_desc, x2989, x_desc, x2997, x_desc, x2989,
x3957, x_desc, x2997));
};
float* x3960 = (float*)myMalloc(1 * sizeof(float));;
x3960[0] = 0.0f;
float* x3962 = (float*)myMalloc(1 * sizeof(float));;
x3962[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3962, x3962, x3962, x3962, in_desc, x2982,
out_desc, x2997, in_desc, x2988, sbmv_desc, x718,
x1248,x1296, 1.0E-5, x2990, x2991));
};
// conv2D back-propagate
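// This instance is a stride-1 3x3 convolution (pad 1,1, stride 1,1, 256 -> 256 channels),
// in contrast to the stride-2 case above; with these settings the input and output
// gradients should share the same spatial extent at runtime.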
float* x3966 = (float*)myMalloc(1 * sizeof(float));;
x3966[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3966, filt_desc, x973, grad_out_desc, x2988,
conv_desc, algo, ws_data, ws_size,
x3966, grad_in_desc, x2961));
};
float* x3969 = (float*)myMalloc(1 * sizeof(float));;
x3969[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2976, x2976));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3969, in_desc, x2953, grad_out_desc, x2988,
conv_desc, algo, ws_data, ws_size,
x3969, grad_filt_desc, x1333));
};
float* x3972 = (float*)myMalloc(1 * sizeof(float));;
x3972[0] = 1.0f;
float* x3974 = (float*)myMalloc(1 * sizeof(float));;
x3974[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3972, x_desc, x2953, x_desc, x2961, x_desc, x2953,
x3974, x_desc, x2961));
};
float* x3977 = (float*)myMalloc(1 * sizeof(float));;
x3977[0] = 0.0f;
float* x3979 = (float*)myMalloc(1 * sizeof(float));;
x3979[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x3979, x3979, x3979, x3979, in_desc, x2946,
out_desc, x2961, in_desc, x2952, sbmv_desc, x550,
x1192,x1360, 1.0E-5, x2954, x2955));
};
// conv2D back-propagate
float* x3983 = (float*)myMalloc(1 * sizeof(float));;
x3983[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x3983, filt_desc, x748, grad_out_desc, x2952,
conv_desc, algo, ws_data, ws_size,
x3983, grad_in_desc, x2914));
};
float* x3986 = (float*)myMalloc(1 * sizeof(float));;
x3986[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2940, x2940));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x3986, in_desc, x2906, grad_out_desc, x2952,
conv_desc, algo, ws_data, ws_size,
x3986, grad_filt_desc, x1258));
};
float* x3989 = (float*)myMalloc(1 * sizeof(float));;
x3989[0] = 1.0f;
float* x3991 = (float*)myMalloc(1 * sizeof(float));;
x3991[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x3989, x_desc, x2906, x_desc, x2914, x_desc, x2906,
x3991, x_desc, x2914));
};
if (x3995) {
if (x3997) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2893) x Sym(2893), res: x Const(64) x Const(1024) x Sym(2776) x Sym(2776)");
}
float* x4002 = (float*)myMalloc(1 * sizeof(float));;
x4002[0] = 1.0f;
float* x4004 = (float*)myMalloc(1 * sizeof(float));;
x4004[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4002, bias_desc, x2914, x4004, out_desc, x2797));
};
} else {
float* x4008 = (float*)myMalloc(1 * sizeof(float));;
x4008[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4008, grad_out_desc, x2914,
x4008, grad_bias_desc, x2797));
};
}
float* x4013 = (float*)myMalloc(1 * sizeof(float));;
x4013[0] = 0.0f;
float* x4015 = (float*)myMalloc(1 * sizeof(float));;
x4015[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4015, x4015, x4015, x4015, in_desc, x2899,
out_desc, x2914, in_desc, x2905, sbmv_desc, x472,
x1166,x1227, 1.0E-5, x2907, x2908));
};
// conv2D back-propagate
float* x4019 = (float*)myMalloc(1 * sizeof(float));;
x4019[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4019, filt_desc, x958, grad_out_desc, x2905,
conv_desc, algo, ws_data, ws_size,
x4019, grad_in_desc, x2880));
};
float* x4022 = (float*)myMalloc(1 * sizeof(float));;
x4022[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2893, x2893));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4022, in_desc, x2872, grad_out_desc, x2905,
conv_desc, algo, ws_data, ws_size,
x4022, grad_filt_desc, x1328));
};
float* x4025 = (float*)myMalloc(1 * sizeof(float));;
x4025[0] = 1.0f;
float* x4027 = (float*)myMalloc(1 * sizeof(float));;
x4027[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4025, x_desc, x2872, x_desc, x2880, x_desc, x2872,
x4027, x_desc, x2880));
};
float* x4030 = (float*)myMalloc(1 * sizeof(float));;
x4030[0] = 0.0f;
float* x4032 = (float*)myMalloc(1 * sizeof(float));;
x4032[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4032, x4032, x4032, x4032, in_desc, x2865,
out_desc, x2880, in_desc, x2871, sbmv_desc, x799,
x1275,x1216, 1.0E-5, x2873, x2874));
};
// conv2D back-propagate
float* x4036 = (float*)myMalloc(1 * sizeof(float));;
x4036[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4036, filt_desc, x1081, grad_out_desc, x2871,
conv_desc, algo, ws_data, ws_size,
x4036, grad_in_desc, x2844));
};
float* x4039 = (float*)myMalloc(1 * sizeof(float));;
x4039[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2859, x2859));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4039, in_desc, x2836, grad_out_desc, x2871,
conv_desc, algo, ws_data, ws_size,
x4039, grad_filt_desc, x1369));
};
float* x4042 = (float*)myMalloc(1 * sizeof(float));;
x4042[0] = 1.0f;
float* x4044 = (float*)myMalloc(1 * sizeof(float));;
x4044[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4042, x_desc, x2836, x_desc, x2844, x_desc, x2836,
x4044, x_desc, x2844));
};
float* x4047 = (float*)myMalloc(1 * sizeof(float));;
x4047[0] = 0.0f;
float* x4049 = (float*)myMalloc(1 * sizeof(float));;
x4049[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4049, x4049, x4049, x4049, in_desc, x2829,
out_desc, x2844, in_desc, x2835, sbmv_desc, x526,
x1184,x1292, 1.0E-5, x2837, x2838));
};
// conv2D back-propagate
float* x4053 = (float*)myMalloc(1 * sizeof(float));;
x4053[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4053, filt_desc, x361, grad_out_desc, x2835,
conv_desc, algo, ws_data, ws_size,
x4053, grad_in_desc, x2797));
};
float* x4056 = (float*)myMalloc(1 * sizeof(float));;
x4056[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2823, x2823));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4056, in_desc, x2789, grad_out_desc, x2835,
conv_desc, algo, ws_data, ws_size,
x4056, grad_filt_desc, x1129));
};
float* x4059 = (float*)myMalloc(1 * sizeof(float));;
x4059[0] = 1.0f;
float* x4061 = (float*)myMalloc(1 * sizeof(float));;
x4061[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4059, x_desc, x2789, x_desc, x2797, x_desc, x2789,
x4061, x_desc, x2797));
};
if (x4065) {
if (x4067) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2776) x Sym(2776), res: x Const(64) x Const(1024) x Sym(2659) x Sym(2659)");
}
float* x4072 = (float*)myMalloc(1 * sizeof(float));;
x4072[0] = 1.0f;
float* x4074 = (float*)myMalloc(1 * sizeof(float));;
x4074[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4072, bias_desc, x2797, x4074, out_desc, x2680));
};
} else {
float* x4078 = (float*)myMalloc(1 * sizeof(float));;
x4078[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4078, grad_out_desc, x2797,
x4078, grad_bias_desc, x2680));
};
}
float* x4083 = (float*)myMalloc(1 * sizeof(float));;
x4083[0] = 0.0f;
float* x4085 = (float*)myMalloc(1 * sizeof(float));;
x4085[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4085, x4085, x4085, x4085, in_desc, x2782,
out_desc, x2797, in_desc, x2788, sbmv_desc, x1009,
x1345,x1253, 1.0E-5, x2790, x2791));
};
// conv2D back-propagate
float* x4089 = (float*)myMalloc(1 * sizeof(float));;
x4089[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4089, filt_desc, x562, grad_out_desc, x2788,
conv_desc, algo, ws_data, ws_size,
x4089, grad_in_desc, x2763));
};
float* x4092 = (float*)myMalloc(1 * sizeof(float));;
x4092[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2776, x2776));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4092, in_desc, x2755, grad_out_desc, x2788,
conv_desc, algo, ws_data, ws_size,
x4092, grad_filt_desc, x1196));
};
float* x4095 = (float*)myMalloc(1 * sizeof(float));;
x4095[0] = 1.0f;
float* x4097 = (float*)myMalloc(1 * sizeof(float));;
x4097[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4095, x_desc, x2755, x_desc, x2763, x_desc, x2755,
x4097, x_desc, x2763));
};
float* x4100 = (float*)myMalloc(1 * sizeof(float));;
x4100[0] = 0.0f;
float* x4102 = (float*)myMalloc(1 * sizeof(float));;
x4102[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4102, x4102, x4102, x4102, in_desc, x2748,
out_desc, x2763, in_desc, x2754, sbmv_desc, x517,
x1181,x1243, 1.0E-5, x2756, x2757));
};
// conv2D back-propagate
float* x4106 = (float*)myMalloc(1 * sizeof(float));;
x4106[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4106, filt_desc, x1042, grad_out_desc, x2754,
conv_desc, algo, ws_data, ws_size,
x4106, grad_in_desc, x2727));
};
float* x4109 = (float*)myMalloc(1 * sizeof(float));;
x4109[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2742, x2742));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4109, in_desc, x2719, grad_out_desc, x2754,
conv_desc, algo, ws_data, ws_size,
x4109, grad_filt_desc, x1356));
};
float* x4112 = (float*)myMalloc(1 * sizeof(float));;
x4112[0] = 1.0f;
float* x4114 = (float*)myMalloc(1 * sizeof(float));;
x4114[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4112, x_desc, x2719, x_desc, x2727, x_desc, x2719,
x4114, x_desc, x2727));
};
float* x4117 = (float*)myMalloc(1 * sizeof(float));;
x4117[0] = 0.0f;
float* x4119 = (float*)myMalloc(1 * sizeof(float));;
x4119[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4119, x4119, x4119, x4119, in_desc, x2712,
out_desc, x2727, in_desc, x2718, sbmv_desc, x571,
x1199,x1348, 1.0E-5, x2720, x2721));
};
// conv2D back-propagate
float* x4123 = (float*)myMalloc(1 * sizeof(float));;
x4123[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4123, filt_desc, x313, grad_out_desc, x2718,
conv_desc, algo, ws_data, ws_size,
x4123, grad_in_desc, x2680));
};
float* x4126 = (float*)myMalloc(1 * sizeof(float));;
x4126[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2706, x2706));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4126, in_desc, x2672, grad_out_desc, x2718,
conv_desc, algo, ws_data, ws_size,
x4126, grad_filt_desc, x1113));
};
float* x4129 = (float*)myMalloc(1 * sizeof(float));;
x4129[0] = 1.0f;
float* x4131 = (float*)myMalloc(1 * sizeof(float));;
x4131[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4129, x_desc, x2672, x_desc, x2680, x_desc, x2672,
x4131, x_desc, x2680));
};
if (x4135) {
if (x4137) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2659) x Sym(2659), res: x Const(64) x Const(1024) x Sym(2542) x Sym(2542)");
}
float* x4142 = (float*)myMalloc(1 * sizeof(float));;
x4142[0] = 1.0f;
float* x4144 = (float*)myMalloc(1 * sizeof(float));;
x4144[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4142, bias_desc, x2680, x4144, out_desc, x2563));
};
} else {
float* x4148 = (float*)myMalloc(1 * sizeof(float));;
x4148[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4148, grad_out_desc, x2680,
x4148, grad_bias_desc, x2563));
};
}
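// Batch-norm back-propagation (CUDNN_BATCHNORM_SPATIAL): all four scaling factors are 1.0f,
// so the data gradient and the scale/bias gradients are accumulated rather than overwritten;
// the saved mean and inverse variance from the forward pass (x2673, x2674) are reused.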
float* x4153 = (float*)myMalloc(1 * sizeof(float));;
x4153[0] = 0.0f;
float* x4155 = (float*)myMalloc(1 * sizeof(float));;
x4155[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4155, x4155, x4155, x4155, in_desc, x2665,
out_desc, x2680, in_desc, x2671, sbmv_desc, x1084,
x1370,x1164, 1.0E-5, x2673, x2674));
};
// conv2D back-propagation: 1x1 convolution, 256 -> 1024 channels, stride 1
float* x4159 = (float*)myMalloc(1 * sizeof(float));;
x4159[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4159, filt_desc, x643, grad_out_desc, x2671,
conv_desc, algo, ws_data, ws_size,
x4159, grad_in_desc, x2646));
};
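// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated program): every data-gradient block in this
// file, like the one directly above, repeats the same descriptor / algorithm / workspace
// boilerplate and only varies in filter shape, padding, stride, tensor sizes and buffers.
// Assuming the file is compiled as C++11 or later, a local helper lambda such as the
// hypothetical convBwdDataSketch below could express one such block. It is defined but
// never invoked, so the behaviour of the generated code is unchanged; the name and the
// parameter list are illustrative assumptions.
auto convBwdDataSketch = [&](int filtK, int filtC, int filtR, int filtS,
int pad, int stride,
int inC, int inHW, int outC, int outHW,
float* filt, float* gradOut, float* gradIn) {
float one = 1.0f; // accumulate into gradIn (alpha = beta = 1), as in the blocks above
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filtK, filtC, filtR, filtS));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, inC, inHW, inHW));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, outC, outHW, outHW));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc, pad, pad, stride, stride, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void* ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle, &one, filt_desc, filt, grad_out_desc, gradOut,
conv_desc, algo, ws_data, ws_size, &one, grad_in_desc, gradIn));
};
// For example, the block directly above would correspond to the (never executed) call:
// convBwdDataSketch(1024, 256, 1, 1, /*pad=*/0, /*stride=*/1,
//                   256, x2625, 1024, x2659, x643, x2671, x2646);
(void)convBwdDataSketch; // referenced only to avoid an unused-variable warning
// ---------------------------------------------------------------------------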
float* x4162 = (float*)myMalloc(1 * sizeof(float));;
x4162[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2659, x2659));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4162, in_desc, x2638, grad_out_desc, x2671,
conv_desc, algo, ws_data, ws_size,
x4162, grad_filt_desc, x1223));
};
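// The backward-filter block above mirrors the backward-data block before it: the same
// descriptors, a queried-then-overridden algorithm (CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1),
// and accumulation into the weight-gradient buffer x1223.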
float* x4165 = (float*)myMalloc(1 * sizeof(float));;
x4165[0] = 1.0f;
float* x4167 = (float*)myMalloc(1 * sizeof(float));;
x4167[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4165, x_desc, x2638, x_desc, x2646, x_desc, x2638,
x4167, x_desc, x2646));
};
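// Editorial sketch (not generated output): the ReLU back-propagation blocks all alias y and
// x to the saved activation and overwrite the gradient buffer in place (beta = 0). Assuming
// C++11 or later, a hypothetical helper, defined only for illustration and never called:
auto reluBwdSketch = [&](int c, int hw, float* act, float* grad) {
float one = 1.0f; float zero = 0.0f;
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, c, hw, hw));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
&one, x_desc, act, x_desc, grad, x_desc, act,
&zero, x_desc, grad));
};
// e.g. the block above corresponds to reluBwdSketch(256, x2625, x2638, x2646)
(void)reluBwdSketch; // illustration only; never invoked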
float* x4170 = (float*)myMalloc(1 * sizeof(float));;
x4170[0] = 0.0f;
float* x4172 = (float*)myMalloc(1 * sizeof(float));;
x4172[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4172, x4172, x4172, x4172, in_desc, x2631,
out_desc, x2646, in_desc, x2637, sbmv_desc, x979,
x1335,x1299, 1.0E-5, x2639, x2640));
};
// conv2D back-propagation: 3x3 convolution, 256 -> 256 channels, pad 1, stride 1
float* x4176 = (float*)myMalloc(1 * sizeof(float));;
x4176[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4176, filt_desc, x337, grad_out_desc, x2637,
conv_desc, algo, ws_data, ws_size,
x4176, grad_in_desc, x2610));
};
float* x4179 = (float*)myMalloc(1 * sizeof(float));;
x4179[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2625, x2625));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4179, in_desc, x2602, grad_out_desc, x2637,
conv_desc, algo, ws_data, ws_size,
x4179, grad_filt_desc, x1121));
};
float* x4182 = (float*)myMalloc(1 * sizeof(float));;
x4182[0] = 1.0f;
float* x4184 = (float*)myMalloc(1 * sizeof(float));;
x4184[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4182, x_desc, x2602, x_desc, x2610, x_desc, x2602,
x4184, x_desc, x2610));
};
float* x4187 = (float*)myMalloc(1 * sizeof(float));;
x4187[0] = 0.0f;
float* x4189 = (float*)myMalloc(1 * sizeof(float));;
x4189[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4189, x4189, x4189, x4189, in_desc, x2595,
out_desc, x2610, in_desc, x2601, sbmv_desc, x682,
x1236,x1304, 1.0E-5, x2603, x2604));
};
// conv2D back-propagation: 1x1 convolution, 1024 -> 256 channels, stride 1
float* x4193 = (float*)myMalloc(1 * sizeof(float));;
x4193[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4193, filt_desc, x949, grad_out_desc, x2601,
conv_desc, algo, ws_data, ws_size,
x4193, grad_in_desc, x2563));
};
float* x4196 = (float*)myMalloc(1 * sizeof(float));;
x4196[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2589, x2589));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4196, in_desc, x2555, grad_out_desc, x2601,
conv_desc, algo, ws_data, ws_size,
x4196, grad_filt_desc, x1325));
};
float* x4199 = (float*)myMalloc(1 * sizeof(float));;
x4199[0] = 1.0f;
float* x4201 = (float*)myMalloc(1 * sizeof(float));;
x4201[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4199, x_desc, x2555, x_desc, x2563, x_desc, x2555,
x4201, x_desc, x2563));
};
if (x4205) {
if (!x4207) {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2542) x Sym(2542), res: x Const(64) x Const(1024) x Sym(2399) x Sym(2399)");
}
float* x4212 = (float*)myMalloc(1 * sizeof(float));;
x4212[0] = 1.0f;
float* x4214 = (float*)myMalloc(1 * sizeof(float));;
x4214[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4212, bias_desc, x2563, x4214, out_desc, x2420));
};
} else {
float* x4218 = (float*)myMalloc(1 * sizeof(float));;
x4218[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4218, grad_out_desc, x2563,
x4218, grad_bias_desc, x2420));
};
}
float* x4223 = (float*)myMalloc(1 * sizeof(float));;
x4223[0] = 0.0f;
float* x4225 = (float*)myMalloc(1 * sizeof(float));;
x4225[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4225, x4225, x4225, x4225, in_desc, x2548,
out_desc, x2563, in_desc, x2554, sbmv_desc, x355,
x1127,x1339, 1.0E-5, x2556, x2557));
};
// conv2D back-propagation: 1x1 convolution, 256 -> 1024 channels, stride 1
float* x4229 = (float*)myMalloc(1 * sizeof(float));;
x4229[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4229, filt_desc, x463, grad_out_desc, x2554,
conv_desc, algo, ws_data, ws_size,
x4229, grad_in_desc, x2529));
};
float* x4232 = (float*)myMalloc(1 * sizeof(float));;
x4232[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2542, x2542));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4232, in_desc, x2521, grad_out_desc, x2554,
conv_desc, algo, ws_data, ws_size,
x4232, grad_filt_desc, x1163));
};
float* x4235 = (float*)myMalloc(1 * sizeof(float));;
x4235[0] = 1.0f;
float* x4237 = (float*)myMalloc(1 * sizeof(float));;
x4237[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4235, x_desc, x2521, x_desc, x2529, x_desc, x2521,
x4237, x_desc, x2529));
};
float* x4240 = (float*)myMalloc(1 * sizeof(float));;
x4240[0] = 0.0f;
float* x4242 = (float*)myMalloc(1 * sizeof(float));;
x4242[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4242, x4242, x4242, x4242, in_desc, x2514,
out_desc, x2529, in_desc, x2520, sbmv_desc, x1108,
x1378,x1203, 1.0E-5, x2522, x2523));
};
// conv2D back-propagation: 3x3 convolution, 256 -> 256 channels, pad 1, stride 1
float* x4246 = (float*)myMalloc(1 * sizeof(float));;
x4246[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4246, filt_desc, x388, grad_out_desc, x2520,
conv_desc, algo, ws_data, ws_size,
x4246, grad_in_desc, x2493));
};
float* x4249 = (float*)myMalloc(1 * sizeof(float));;
x4249[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2508, x2508));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4249, in_desc, x2485, grad_out_desc, x2520,
conv_desc, algo, ws_data, ws_size,
x4249, grad_filt_desc, x1138));
};
float* x4252 = (float*)myMalloc(1 * sizeof(float));;
x4252[0] = 1.0f;
float* x4254 = (float*)myMalloc(1 * sizeof(float));;
x4254[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4252, x_desc, x2485, x_desc, x2493, x_desc, x2485,
x4254, x_desc, x2493));
};
float* x4257 = (float*)myMalloc(1 * sizeof(float));;
x4257[0] = 0.0f;
float* x4259 = (float*)myMalloc(1 * sizeof(float));;
x4259[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4259, x4259, x4259, x4259, in_desc, x2478,
out_desc, x2493, in_desc, x2484, sbmv_desc, x385,
x1137,x1326, 1.0E-5, x2486, x2487));
};
// conv2D back-propagation: 1x1 convolution, 1024 -> 256 channels, stride 1
float* x4263 = (float*)myMalloc(1 * sizeof(float));;
x4263[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4263, filt_desc, x334, grad_out_desc, x2484,
conv_desc, algo, ws_data, ws_size,
x4263, grad_in_desc, x2420));
};
float* x4266 = (float*)myMalloc(1 * sizeof(float));;
x4266[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 1024, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2472, x2472));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4266, in_desc, x2412, grad_out_desc, x2484,
conv_desc, algo, ws_data, ws_size,
x4266, grad_filt_desc, x1120));
};
float* x4269 = (float*)myMalloc(1 * sizeof(float));;
x4269[0] = 1.0f;
float* x4271 = (float*)myMalloc(1 * sizeof(float));;
x4271[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4269, x_desc, x2412, x_desc, x2420, x_desc, x2412,
x4271, x_desc, x2420));
};
if (x4275) {
if (!x4277) {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2399) x Sym(2399), res: x Const(64) x Const(1024) x Sym(2425) x Sym(2425)");
}
float* x4282 = (float*)myMalloc(1 * sizeof(float));;
x4282[0] = 1.0f;
float* x4284 = (float*)myMalloc(1 * sizeof(float));;
x4284[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4282, bias_desc, x2420, x4284, out_desc, x2446));
};
} else {
float* x4288 = (float*)myMalloc(1 * sizeof(float));;
x4288[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4288, grad_out_desc, x2420,
x4288, grad_bias_desc, x2446));
};
}
float* x4293 = (float*)myMalloc(1 * sizeof(float));;
x4293[0] = 0.0f;
float* x4295 = (float*)myMalloc(1 * sizeof(float));;
x4295[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4295, x4295, x4295, x4295, in_desc, x2431,
out_desc, x2446, in_desc, x2437, sbmv_desc, x382,
x1136,x1327, 1.0E-5, x2439, x2440));
};
// conv2D back-propagation: 1x1 projection shortcut, 512 -> 1024 channels, stride 2
float* x4299 = (float*)myMalloc(1 * sizeof(float));;
x4299[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4299, filt_desc, x520, grad_out_desc, x2437,
conv_desc, algo, ws_data, ws_size,
x4299, grad_in_desc, x2303));
};
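// The data gradient of the projection shortcut is accumulated (beta = 1.0f) into x2303; the
// gradient coming back through the main branch's first 1x1 convolution is added into the
// same buffer further below, so the two paths of the residual block sum as expected.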
float* x4302 = (float*)myMalloc(1 * sizeof(float));;
x4302[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2425, x2425));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4302, in_desc, x2295, grad_out_desc, x2437,
conv_desc, algo, ws_data, ws_size,
x4302, grad_filt_desc, x1182));
};
float* x4305 = (float*)myMalloc(1 * sizeof(float));;
x4305[0] = 0.0f;
float* x4307 = (float*)myMalloc(1 * sizeof(float));;
x4307[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 1024, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4307, x4307, x4307, x4307, in_desc, x2405,
out_desc, x2420, in_desc, x2411, sbmv_desc, x349,
x1125,x1224, 1.0E-5, x2413, x2414));
};
// conv2D back-propagation: 1x1 convolution, 256 -> 1024 channels, stride 1
float* x4311 = (float*)myMalloc(1 * sizeof(float));;
x4311[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4311, filt_desc, x1102, grad_out_desc, x2411,
conv_desc, algo, ws_data, ws_size,
x4311, grad_in_desc, x2386));
};
float* x4314 = (float*)myMalloc(1 * sizeof(float));;
x4314[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
1024, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 1024, x2399, x2399));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4314, in_desc, x2378, grad_out_desc, x2411,
conv_desc, algo, ws_data, ws_size,
x4314, grad_filt_desc, x1376));
};
float* x4317 = (float*)myMalloc(1 * sizeof(float));;
x4317[0] = 1.0f;
float* x4319 = (float*)myMalloc(1 * sizeof(float));;
x4319[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4317, x_desc, x2378, x_desc, x2386, x_desc, x2378,
x4319, x_desc, x2386));
};
float* x4322 = (float*)myMalloc(1 * sizeof(float));;
x4322[0] = 0.0f;
float* x4324 = (float*)myMalloc(1 * sizeof(float));;
x4324[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4324, x4324, x4324, x4324, in_desc, x2371,
out_desc, x2386, in_desc, x2377, sbmv_desc, x619,
x1215,x1123, 1.0E-5, x2379, x2380));
};
// conv2D back-propagation: 3x3 convolution, 256 -> 256 channels, pad 1, stride 2
float* x4328 = (float*)myMalloc(1 * sizeof(float));;
x4328[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4328, filt_desc, x820, grad_out_desc, x2377,
conv_desc, algo, ws_data, ws_size,
x4328, grad_in_desc, x2350));
};
float* x4331 = (float*)myMalloc(1 * sizeof(float));;
x4331[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 256, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2365, x2365));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4331, in_desc, x2342, grad_out_desc, x2377,
conv_desc, algo, ws_data, ws_size,
x4331, grad_filt_desc, x1282));
};
float* x4334 = (float*)myMalloc(1 * sizeof(float));;
x4334[0] = 1.0f;
float* x4336 = (float*)myMalloc(1 * sizeof(float));;
x4336[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4334, x_desc, x2342, x_desc, x2350, x_desc, x2342,
x4336, x_desc, x2350));
};
float* x4339 = (float*)myMalloc(1 * sizeof(float));;
x4339[0] = 0.0f;
float* x4341 = (float*)myMalloc(1 * sizeof(float));;
x4341[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4341, x4341, x4341, x4341, in_desc, x2335,
out_desc, x2350, in_desc, x2341, sbmv_desc, x1105,
x1377,x1128, 1.0E-5, x2343, x2344));
};
// conv2D back-propagation: 1x1 convolution, 512 -> 256 channels, stride 1
float* x4345 = (float*)myMalloc(1 * sizeof(float));;
x4345[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4345, filt_desc, x835, grad_out_desc, x2341,
conv_desc, algo, ws_data, ws_size,
x4345, grad_in_desc, x2303));
};
float* x4348 = (float*)myMalloc(1 * sizeof(float));;
x4348[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x2329, x2329));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4348, in_desc, x2295, grad_out_desc, x2341,
conv_desc, algo, ws_data, ws_size,
x4348, grad_filt_desc, x1287));
};
float* x4351 = (float*)myMalloc(1 * sizeof(float));;
x4351[0] = 1.0f;
float* x4353 = (float*)myMalloc(1 * sizeof(float));;
x4353[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4351, x_desc, x2295, x_desc, x2303, x_desc, x2295,
x4353, x_desc, x2303));
};
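// Gradient of the broadcast add at the junction with the previous stage: the same
// add-or-reduce pattern as above, now on 512-channel tensors, accumulating into x2186.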
if (x4357) {
if (!x4360) {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2282) x Sym(2282), res: x Const(64) x Const(512) x Sym(2165) x Sym(2165)");
}
float* x4365 = (float*)myMalloc(1 * sizeof(float));;
x4365[0] = 1.0f;
float* x4367 = (float*)myMalloc(1 * sizeof(float));;
x4367[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4365, bias_desc, x2303, x4367, out_desc, x2186));
};
} else {
float* x4371 = (float*)myMalloc(1 * sizeof(float));;
x4371[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4371, grad_out_desc, x2303,
x4371, grad_bias_desc, x2186));
};
}
float* x4376 = (float*)myMalloc(1 * sizeof(float));;
x4376[0] = 0.0f;
float* x4378 = (float*)myMalloc(1 * sizeof(float));;
x4378[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4378, x4378, x4378, x4378, in_desc, x2288,
out_desc, x2303, in_desc, x2294, sbmv_desc, x763,
x1263,x1161, 1.0E-5, x2296, x2297));
};
// conv2D back-propagation: 1x1 convolution, 128 -> 512 channels, stride 1
float* x4382 = (float*)myMalloc(1 * sizeof(float));;
x4382[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4382, filt_desc, x460, grad_out_desc, x2294,
conv_desc, algo, ws_data, ws_size,
x4382, grad_in_desc, x2269));
};
float* x4385 = (float*)myMalloc(1 * sizeof(float));;
x4385[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2282, x2282));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4385, in_desc, x2261, grad_out_desc, x2294,
conv_desc, algo, ws_data, ws_size,
x4385, grad_filt_desc, x1162));
};
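// ReLU back-propagate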
float* x4388 = (float*)myMalloc(1 * sizeof(float));;
x4388[0] = 1.0f;
float* x4390 = (float*)myMalloc(1 * sizeof(float));;
x4390[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4388, x_desc, x2261, x_desc, x2269, x_desc, x2261,
x4390, x_desc, x2269));
};
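// batchNorm back-propagate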
float* x4393 = (float*)myMalloc(1 * sizeof(float));;
x4393[0] = 0.0f;
float* x4395 = (float*)myMalloc(1 * sizeof(float));;
x4395[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4395, x4395, x4395, x4395, in_desc, x2254,
out_desc, x2269, in_desc, x2260, sbmv_desc, x532,
x1186,x1145, 1.0E-5, x2262, x2263));
};
// conv2D back-propagate
float* x4399 = (float*)myMalloc(1 * sizeof(float));;
x4399[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4399, filt_desc, x790, grad_out_desc, x2260,
conv_desc, algo, ws_data, ws_size,
x4399, grad_in_desc, x2233));
};
float* x4402 = (float*)myMalloc(1 * sizeof(float));;
x4402[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2248, x2248));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4402, in_desc, x2225, grad_out_desc, x2260,
conv_desc, algo, ws_data, ws_size,
x4402, grad_filt_desc, x1272));
};
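// ReLU back-propagate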
float* x4405 = (float*)myMalloc(1 * sizeof(float));;
x4405[0] = 1.0f;
float* x4407 = (float*)myMalloc(1 * sizeof(float));;
x4407[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4405, x_desc, x2225, x_desc, x2233, x_desc, x2225,
x4407, x_desc, x2233));
};
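// batchNorm back-propagate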
float* x4410 = (float*)myMalloc(1 * sizeof(float));;
x4410[0] = 0.0f;
float* x4412 = (float*)myMalloc(1 * sizeof(float));;
x4412[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4412, x4412, x4412, x4412, in_desc, x2218,
out_desc, x2233, in_desc, x2224, sbmv_desc, x412,
x1146,x1349, 1.0E-5, x2226, x2227));
};
// conv2D back-propagate
float* x4416 = (float*)myMalloc(1 * sizeof(float));;
x4416[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4416, filt_desc, x691, grad_out_desc, x2224,
conv_desc, algo, ws_data, ws_size,
x4416, grad_in_desc, x2186));
};
float* x4419 = (float*)myMalloc(1 * sizeof(float));;
x4419[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2212, x2212));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4419, in_desc, x2178, grad_out_desc, x2224,
conv_desc, algo, ws_data, ws_size,
x4419, grad_filt_desc, x1239));
};
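// ReLU back-propagate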
float* x4422 = (float*)myMalloc(1 * sizeof(float));;
x4422[0] = 1.0f;
float* x4424 = (float*)myMalloc(1 * sizeof(float));;
x4424[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4422, x_desc, x2178, x_desc, x2186, x_desc, x2178,
x4424, x_desc, x2186));
};
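// gradient accumulation: direct cudnnAddTensor when shapes line up, cudnnConvolutionBackwardBias reduction otherwise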
if (x4428) {
if (x4430) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2165) x Sym(2165), res: x Const(64) x Const(512) x Sym(2048) x Sym(2048)");
}
float* x4435 = (float*)myMalloc(1 * sizeof(float));;
x4435[0] = 1.0f;
float* x4437 = (float*)myMalloc(1 * sizeof(float));;
x4437[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4435, bias_desc, x2186, x4437, out_desc, x2069));
};
} else {
float* x4441 = (float*)myMalloc(1 * sizeof(float));;
x4441[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4441, grad_out_desc, x2186,
x4441, grad_bias_desc, x2069));
};
}
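// batchNorm back-propagate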
float* x4446 = (float*)myMalloc(1 * sizeof(float));;
x4446[0] = 0.0f;
float* x4448 = (float*)myMalloc(1 * sizeof(float));;
x4448[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4448, x4448, x4448, x4448, in_desc, x2171,
out_desc, x2186, in_desc, x2177, sbmv_desc, x796,
x1274,x1189, 1.0E-5, x2179, x2180));
};
// conv2D back-propagate
float* x4452 = (float*)myMalloc(1 * sizeof(float));;
x4452[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4452, filt_desc, x418, grad_out_desc, x2177,
conv_desc, algo, ws_data, ws_size,
x4452, grad_in_desc, x2152));
};
float* x4455 = (float*)myMalloc(1 * sizeof(float));;
x4455[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2165, x2165));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4455, in_desc, x2144, grad_out_desc, x2177,
conv_desc, algo, ws_data, ws_size,
x4455, grad_filt_desc, x1148));
};
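// ReLU back-propagate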
float* x4458 = (float*)myMalloc(1 * sizeof(float));;
x4458[0] = 1.0f;
float* x4460 = (float*)myMalloc(1 * sizeof(float));;
x4460[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4458, x_desc, x2144, x_desc, x2152, x_desc, x2144,
x4460, x_desc, x2152));
};
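// batchNorm back-propagate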
float* x4463 = (float*)myMalloc(1 * sizeof(float));;
x4463[0] = 0.0f;
float* x4465 = (float*)myMalloc(1 * sizeof(float));;
x4465[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4465, x4465, x4465, x4465, in_desc, x2137,
out_desc, x2152, in_desc, x2143, sbmv_desc, x676,
x1234,x1168, 1.0E-5, x2145, x2146));
};
// conv2D back-propagate
float* x4469 = (float*)myMalloc(1 * sizeof(float));;
x4469[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4469, filt_desc, x868, grad_out_desc, x2143,
conv_desc, algo, ws_data, ws_size,
x4469, grad_in_desc, x2116));
};
float* x4472 = (float*)myMalloc(1 * sizeof(float));;
x4472[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2131, x2131));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4472, in_desc, x2108, grad_out_desc, x2143,
conv_desc, algo, ws_data, ws_size,
x4472, grad_filt_desc, x1298));
};
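// ReLU back-propagate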
float* x4475 = (float*)myMalloc(1 * sizeof(float));;
x4475[0] = 1.0f;
float* x4477 = (float*)myMalloc(1 * sizeof(float));;
x4477[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4475, x_desc, x2108, x_desc, x2116, x_desc, x2108,
x4477, x_desc, x2116));
};
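// batchNorm back-propagate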
float* x4480 = (float*)myMalloc(1 * sizeof(float));;
x4480[0] = 0.0f;
float* x4482 = (float*)myMalloc(1 * sizeof(float));;
x4482[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4482, x4482, x4482, x4482, in_desc, x2101,
out_desc, x2116, in_desc, x2107, sbmv_desc, x430,
x1152,x1277, 1.0E-5, x2109, x2110));
};
// conv2D back-propagate
float* x4486 = (float*)myMalloc(1 * sizeof(float));;
x4486[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4486, filt_desc, x883, grad_out_desc, x2107,
conv_desc, algo, ws_data, ws_size,
x4486, grad_in_desc, x2069));
};
float* x4489 = (float*)myMalloc(1 * sizeof(float));;
x4489[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2095, x2095));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4489, in_desc, x2061, grad_out_desc, x2107,
conv_desc, algo, ws_data, ws_size,
x4489, grad_filt_desc, x1303));
};
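// ReLU back-propagate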
float* x4492 = (float*)myMalloc(1 * sizeof(float));;
x4492[0] = 1.0f;
float* x4494 = (float*)myMalloc(1 * sizeof(float));;
x4494[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4492, x_desc, x2061, x_desc, x2069, x_desc, x2061,
x4494, x_desc, x2069));
};
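// gradient accumulation: direct cudnnAddTensor when shapes line up, cudnnConvolutionBackwardBias reduction otherwise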
if (x4498) {
if (x4500) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2048) x Sym(2048), res: x Const(64) x Const(512) x Sym(1905) x Sym(1905)");
}
float* x4505 = (float*)myMalloc(1 * sizeof(float));;
x4505[0] = 1.0f;
float* x4507 = (float*)myMalloc(1 * sizeof(float));;
x4507[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4505, bias_desc, x2069, x4507, out_desc, x1926));
};
} else {
float* x4511 = (float*)myMalloc(1 * sizeof(float));;
x4511[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4511, grad_out_desc, x2069,
x4511, grad_bias_desc, x1926));
};
}
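// batchNorm back-propagate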
float* x4516 = (float*)myMalloc(1 * sizeof(float));;
x4516[0] = 0.0f;
float* x4518 = (float*)myMalloc(1 * sizeof(float));;
x4518[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4518, x4518, x4518, x4518, in_desc, x2054,
out_desc, x2069, in_desc, x2060, sbmv_desc, x451,
x1159,x1353, 1.0E-5, x2062, x2063));
};
// conv2D back-propagate
float* x4522 = (float*)myMalloc(1 * sizeof(float));;
x4522[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4522, filt_desc, x628, grad_out_desc, x2060,
conv_desc, algo, ws_data, ws_size,
x4522, grad_in_desc, x2035));
};
float* x4525 = (float*)myMalloc(1 * sizeof(float));;
x4525[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x2048, x2048));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4525, in_desc, x2027, grad_out_desc, x2060,
conv_desc, algo, ws_data, ws_size,
x4525, grad_filt_desc, x1218));
};
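// ReLU back-propagate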
float* x4528 = (float*)myMalloc(1 * sizeof(float));;
x4528[0] = 1.0f;
float* x4530 = (float*)myMalloc(1 * sizeof(float));;
x4530[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4528, x_desc, x2027, x_desc, x2035, x_desc, x2027,
x4530, x_desc, x2035));
};
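// batchNorm back-propagate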
float* x4533 = (float*)myMalloc(1 * sizeof(float));;
x4533[0] = 0.0f;
float* x4535 = (float*)myMalloc(1 * sizeof(float));;
x4535[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4535, x4535, x4535, x4535, in_desc, x2020,
out_desc, x2035, in_desc, x2026, sbmv_desc, x319,
x1115,x1202, 1.0E-5, x2028, x2029));
};
// conv2D back-propagate
float* x4539 = (float*)myMalloc(1 * sizeof(float));;
x4539[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4539, filt_desc, x1000, grad_out_desc, x2026,
conv_desc, algo, ws_data, ws_size,
x4539, grad_in_desc, x1999));
};
float* x4542 = (float*)myMalloc(1 * sizeof(float));;
x4542[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x2014, x2014));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4542, in_desc, x1991, grad_out_desc, x2026,
conv_desc, algo, ws_data, ws_size,
x4542, grad_filt_desc, x1342));
};
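// ReLU back-propagate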
float* x4545 = (float*)myMalloc(1 * sizeof(float));;
x4545[0] = 1.0f;
float* x4547 = (float*)myMalloc(1 * sizeof(float));;
x4547[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4545, x_desc, x1991, x_desc, x1999, x_desc, x1991,
x4547, x_desc, x1999));
};
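// batchNorm back-propagate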
float* x4550 = (float*)myMalloc(1 * sizeof(float));;
x4550[0] = 0.0f;
float* x4552 = (float*)myMalloc(1 * sizeof(float));;
x4552[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4552, x4552, x4552, x4552, in_desc, x1984,
out_desc, x1999, in_desc, x1990, sbmv_desc, x961,
x1329,x1124, 1.0E-5, x1992, x1993));
};
// conv2D back-propagate
float* x4556 = (float*)myMalloc(1 * sizeof(float));;
x4556[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4556, filt_desc, x1063, grad_out_desc, x1990,
conv_desc, algo, ws_data, ws_size,
x4556, grad_in_desc, x1926));
};
float* x4559 = (float*)myMalloc(1 * sizeof(float));;
x4559[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 512, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1978, x1978));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4559, in_desc, x1918, grad_out_desc, x1990,
conv_desc, algo, ws_data, ws_size,
x4559, grad_filt_desc, x1363));
};
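// ReLU back-propagate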
float* x4562 = (float*)myMalloc(1 * sizeof(float));;
x4562[0] = 1.0f;
float* x4564 = (float*)myMalloc(1 * sizeof(float));;
x4564[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4562, x_desc, x1918, x_desc, x1926, x_desc, x1918,
x4564, x_desc, x1926));
};
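// gradient accumulation: direct cudnnAddTensor when shapes line up, cudnnConvolutionBackwardBias reduction otherwise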
if (x4568) {
if (x4570) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1905) x Sym(1905), res: x Const(64) x Const(512) x Sym(1931) x Sym(1931)");
}
float* x4575 = (float*)myMalloc(1 * sizeof(float));;
x4575[0] = 1.0f;
float* x4577 = (float*)myMalloc(1 * sizeof(float));;
x4577[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4575, bias_desc, x1926, x4577, out_desc, x1952));
};
} else {
float* x4581 = (float*)myMalloc(1 * sizeof(float));;
x4581[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4581, grad_out_desc, x1926,
x4581, grad_bias_desc, x1952));
};
}
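// batchNorm back-propagate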
float* x4586 = (float*)myMalloc(1 * sizeof(float));;
x4586[0] = 0.0f;
float* x4588 = (float*)myMalloc(1 * sizeof(float));;
x4588[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4588, x4588, x4588, x4588, in_desc, x1937,
out_desc, x1952, in_desc, x1943, sbmv_desc, x916,
x1314,x1226, 1.0E-5, x1945, x1946));
};
// conv2D back-propagate
float* x4592 = (float*)myMalloc(1 * sizeof(float));;
x4592[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4592, filt_desc, x1069, grad_out_desc, x1943,
conv_desc, algo, ws_data, ws_size,
x4592, grad_in_desc, x1809));
};
float* x4595 = (float*)myMalloc(1 * sizeof(float));;
x4595[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1931, x1931));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4595, in_desc, x1801, grad_out_desc, x1943,
conv_desc, algo, ws_data, ws_size,
x4595, grad_filt_desc, x1365));
};
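// batchNorm back-propagate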
float* x4598 = (float*)myMalloc(1 * sizeof(float));;
x4598[0] = 0.0f;
float* x4600 = (float*)myMalloc(1 * sizeof(float));;
x4600[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 512, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4600, x4600, x4600, x4600, in_desc, x1911,
out_desc, x1926, in_desc, x1917, sbmv_desc, x730,
x1252,x1317, 1.0E-5, x1919, x1920));
};
// conv2D back-propagate
float* x4604 = (float*)myMalloc(1 * sizeof(float));;
x4604[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4604, filt_desc, x613, grad_out_desc, x1917,
conv_desc, algo, ws_data, ws_size,
x4604, grad_in_desc, x1892));
};
float* x4607 = (float*)myMalloc(1 * sizeof(float));;
x4607[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
512, 128, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 512, x1905, x1905));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4607, in_desc, x1884, grad_out_desc, x1917,
conv_desc, algo, ws_data, ws_size,
x4607, grad_filt_desc, x1213));
};
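// ReLU back-propagate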
float* x4610 = (float*)myMalloc(1 * sizeof(float));;
x4610[0] = 1.0f;
float* x4612 = (float*)myMalloc(1 * sizeof(float));;
x4612[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4610, x_desc, x1884, x_desc, x1892, x_desc, x1884,
x4612, x_desc, x1892));
};
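// batchNorm back-propagate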
float* x4615 = (float*)myMalloc(1 * sizeof(float));;
x4615[0] = 0.0f;
float* x4617 = (float*)myMalloc(1 * sizeof(float));;
x4617[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4617, x4617, x4617, x4617, in_desc, x1877,
out_desc, x1892, in_desc, x1883, sbmv_desc, x1051,
x1359,x1297, 1.0E-5, x1885, x1886));
};
// conv2D back-propagate
float* x4621 = (float*)myMalloc(1 * sizeof(float));;
x4621[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4621, filt_desc, x376, grad_out_desc, x1883,
conv_desc, algo, ws_data, ws_size,
x4621, grad_in_desc, x1856));
};
float* x4624 = (float*)myMalloc(1 * sizeof(float));;
x4624[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 128, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1871, x1871));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 2, 2, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4624, in_desc, x1848, grad_out_desc, x1883,
conv_desc, algo, ws_data, ws_size,
x4624, grad_filt_desc, x1134));
};
float* x4627 = (float*)myMalloc(1 * sizeof(float));;
x4627[0] = 1.0f;
float* x4629 = (float*)myMalloc(1 * sizeof(float));;
x4629[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4627, x_desc, x1848, x_desc, x1856, x_desc, x1848,
x4629, x_desc, x1856));
};
float* x4632 = (float*)myMalloc(1 * sizeof(float));;
x4632[0] = 0.0f;
float* x4634 = (float*)myMalloc(1 * sizeof(float));;
x4634[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 128, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4634, x4634, x4634, x4634, in_desc, x1841,
out_desc, x1856, in_desc, x1847, sbmv_desc, x547,
x1191,x1279, 1.0E-5, x1849, x1850));
};
// conv2D back-propagate: 1x1, 256 -> 128, stride 1 (data gradient, then filter gradient)
float* x4638 = (float*)myMalloc(1 * sizeof(float));;
x4638[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4638, filt_desc, x328, grad_out_desc, x1847,
conv_desc, algo, ws_data, ws_size,
x4638, grad_in_desc, x1809));
};
float* x4641 = (float*)myMalloc(1 * sizeof(float));;
x4641[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
128, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 128, x1835, x1835));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4641, in_desc, x1801, grad_out_desc, x1847,
conv_desc, algo, ws_data, ws_size,
x4641, grad_filt_desc, x1118));
};
float* x4644 = (float*)myMalloc(1 * sizeof(float));;
x4644[0] = 1.0f;
float* x4646 = (float*)myMalloc(1 * sizeof(float));;
x4646[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4644, x_desc, x1801, x_desc, x1809, x_desc, x1801,
x4646, x_desc, x1809));
};
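// Backward of the broadcast add used for the residual shortcut: x4650/x4653 appear to be
// shape-compatibility flags computed earlier. When the shapes match, the incoming gradient
// is accumulated into the shortcut's gradient buffer with cudnnAddTensor (alpha = beta = 1);
// otherwise it is reduced over the broadcast dimensions with cudnnConvolutionBackwardBias.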
if (x4650) {
if (x4653) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1788) x Sym(1788), res: x Const(64) x Const(256) x Sym(1671) x Sym(1671)");
}
float* x4658 = (float*)myMalloc(1 * sizeof(float));;
x4658[0] = 1.0f;
float* x4660 = (float*)myMalloc(1 * sizeof(float));;
x4660[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4658, bias_desc, x1809, x4660, out_desc, x1692));
};
} else {
float* x4664 = (float*)myMalloc(1 * sizeof(float));;
x4664[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4664, grad_out_desc, x1809,
x4664, grad_bias_desc, x1692));
};
}
float* x4669 = (float*)myMalloc(1 * sizeof(float));;
x4669[0] = 0.0f;
float* x4671 = (float*)myMalloc(1 * sizeof(float));;
x4671[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4671, x4671, x4671, x4671, in_desc, x1794,
out_desc, x1809, in_desc, x1800, sbmv_desc, x406,
x1144,x1354, 1.0E-5, x1802, x1803));
};
// conv2D back-propagate: 1x1, 64 -> 256, stride 1 (data gradient, then filter gradient)
float* x4675 = (float*)myMalloc(1 * sizeof(float));;
x4675[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4675, filt_desc, x556, grad_out_desc, x1800,
conv_desc, algo, ws_data, ws_size,
x4675, grad_in_desc, x1775));
};
float* x4678 = (float*)myMalloc(1 * sizeof(float));;
x4678[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1788, x1788));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4678, in_desc, x1767, grad_out_desc, x1800,
conv_desc, algo, ws_data, ws_size,
x4678, grad_filt_desc, x1194));
};
float* x4681 = (float*)myMalloc(1 * sizeof(float));;
x4681[0] = 1.0f;
float* x4683 = (float*)myMalloc(1 * sizeof(float));;
x4683[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4681, x_desc, x1767, x_desc, x1775, x_desc, x1767,
x4683, x_desc, x1775));
};
float* x4686 = (float*)myMalloc(1 * sizeof(float));;
x4686[0] = 0.0f;
float* x4688 = (float*)myMalloc(1 * sizeof(float));;
x4688[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4688, x4688, x4688, x4688, in_desc, x1760,
out_desc, x1775, in_desc, x1766, sbmv_desc, x511,
x1179,x1242, 1.0E-5, x1768, x1769));
};
// conv2D back-propagate: 3x3, 64 -> 64, stride 1, pad 1 (data gradient, then filter gradient)
float* x4692 = (float*)myMalloc(1 * sizeof(float));;
x4692[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4692, filt_desc, x514, grad_out_desc, x1766,
conv_desc, algo, ws_data, ws_size,
x4692, grad_in_desc, x1739));
};
float* x4695 = (float*)myMalloc(1 * sizeof(float));;
x4695[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1754, x1754));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4695, in_desc, x1731, grad_out_desc, x1766,
conv_desc, algo, ws_data, ws_size,
x4695, grad_filt_desc, x1180));
};
float* x4698 = (float*)myMalloc(1 * sizeof(float));;
x4698[0] = 1.0f;
float* x4700 = (float*)myMalloc(1 * sizeof(float));;
x4700[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4698, x_desc, x1731, x_desc, x1739, x_desc, x1731,
x4700, x_desc, x1739));
};
float* x4703 = (float*)myMalloc(1 * sizeof(float));;
x4703[0] = 0.0f;
float* x4705 = (float*)myMalloc(1 * sizeof(float));;
x4705[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4705, x4705, x4705, x4705, in_desc, x1724,
out_desc, x1739, in_desc, x1730, sbmv_desc, x538,
x1188,x1131, 1.0E-5, x1732, x1733));
};
// conv2D back-propagate: 1x1, 256 -> 64, stride 1 (data gradient, then filter gradient)
float* x4709 = (float*)myMalloc(1 * sizeof(float));;
x4709[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4709, filt_desc, x745, grad_out_desc, x1730,
conv_desc, algo, ws_data, ws_size,
x4709, grad_in_desc, x1692));
};
float* x4712 = (float*)myMalloc(1 * sizeof(float));;
x4712[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1718, x1718));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4712, in_desc, x1684, grad_out_desc, x1730,
conv_desc, algo, ws_data, ws_size,
x4712, grad_filt_desc, x1257));
};
float* x4715 = (float*)myMalloc(1 * sizeof(float));;
x4715[0] = 1.0f;
float* x4717 = (float*)myMalloc(1 * sizeof(float));;
x4717[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4715, x_desc, x1684, x_desc, x1692, x_desc, x1684,
x4717, x_desc, x1692));
};
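// Shortcut-gradient branch for the next residual block down, same pattern as above.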
if (x4721) {
if (x4723) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1671) x Sym(1671), res: x Const(64) x Const(256) x Sym(1531) x Sym(1531)");
}
float* x4728 = (float*)myMalloc(1 * sizeof(float));;
x4728[0] = 1.0f;
float* x4730 = (float*)myMalloc(1 * sizeof(float));;
x4730[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4728, bias_desc, x1692, x4730, out_desc, x1552));
};
} else {
float* x4734 = (float*)myMalloc(1 * sizeof(float));;
x4734[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4734, grad_out_desc, x1692,
x4734, grad_bias_desc, x1552));
};
}
float* x4739 = (float*)myMalloc(1 * sizeof(float));;
x4739[0] = 0.0f;
float* x4741 = (float*)myMalloc(1 * sizeof(float));;
x4741[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4741, x4741, x4741, x4741, in_desc, x1677,
out_desc, x1692, in_desc, x1683, sbmv_desc, x469,
x1165,x1114, 1.0E-5, x1685, x1686));
};
// conv2D back-propagate: 1x1, 64 -> 256, stride 1 (data gradient, then filter gradient)
float* x4745 = (float*)myMalloc(1 * sizeof(float));;
x4745[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4745, filt_desc, x685, grad_out_desc, x1683,
conv_desc, algo, ws_data, ws_size,
x4745, grad_in_desc, x1658));
};
float* x4748 = (float*)myMalloc(1 * sizeof(float));;
x4748[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1671, x1671));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4748, in_desc, x1650, grad_out_desc, x1683,
conv_desc, algo, ws_data, ws_size,
x4748, grad_filt_desc, x1237));
};
float* x4751 = (float*)myMalloc(1 * sizeof(float));;
x4751[0] = 1.0f;
float* x4753 = (float*)myMalloc(1 * sizeof(float));;
x4753[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4751, x_desc, x1650, x_desc, x1658, x_desc, x1650,
x4753, x_desc, x1658));
};
float* x4756 = (float*)myMalloc(1 * sizeof(float));;
x4756[0] = 0.0f;
float* x4758 = (float*)myMalloc(1 * sizeof(float));;
x4758[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4758, x4758, x4758, x4758, in_desc, x1643,
out_desc, x1658, in_desc, x1649, sbmv_desc, x919,
x1315,x1260, 1.0E-5, x1651, x1652));
};
// conv2D back-propagate: 3x3, 64 -> 64, stride 1, pad 1 (data gradient, then filter gradient)
float* x4762 = (float*)myMalloc(1 * sizeof(float));;
x4762[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4762, filt_desc, x544, grad_out_desc, x1649,
conv_desc, algo, ws_data, ws_size,
x4762, grad_in_desc, x1622));
};
float* x4765 = (float*)myMalloc(1 * sizeof(float));;
x4765[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1637, x1637));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4765, in_desc, x1614, grad_out_desc, x1649,
conv_desc, algo, ws_data, ws_size,
x4765, grad_filt_desc, x1190));
};
float* x4768 = (float*)myMalloc(1 * sizeof(float));;
x4768[0] = 1.0f;
float* x4770 = (float*)myMalloc(1 * sizeof(float));;
x4770[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4768, x_desc, x1614, x_desc, x1622, x_desc, x1614,
x4770, x_desc, x1622));
};
float* x4773 = (float*)myMalloc(1 * sizeof(float));;
x4773[0] = 0.0f;
float* x4775 = (float*)myMalloc(1 * sizeof(float));;
x4775[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4775, x4775, x4775, x4775, in_desc, x1607,
out_desc, x1622, in_desc, x1613, sbmv_desc, x721,
x1249,x1167, 1.0E-5, x1615, x1616));
};
// conv2D back-propagate: 1x1, 256 -> 64, stride 1 (data gradient, then filter gradient)
float* x4779 = (float*)myMalloc(1 * sizeof(float));;
x4779[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4779, filt_desc, x808, grad_out_desc, x1613,
conv_desc, algo, ws_data, ws_size,
x4779, grad_in_desc, x1552));
};
float* x4782 = (float*)myMalloc(1 * sizeof(float));;
x4782[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 256, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1601, x1601));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4782, in_desc, x1544, grad_out_desc, x1613,
conv_desc, algo, ws_data, ws_size,
x4782, grad_filt_desc, x1278));
};
float* x4785 = (float*)myMalloc(1 * sizeof(float));;
x4785[0] = 1.0f;
float* x4787 = (float*)myMalloc(1 * sizeof(float));;
x4787[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4785, x_desc, x1544, x_desc, x1552, x_desc, x1544,
x4787, x_desc, x1552));
};
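// Shortcut-gradient branch again: add when shapes match, otherwise reduce via backward-bias.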
if (x4791) {
if (x4793) {
} else {
assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1531) x Sym(1531), res: x Const(64) x Const(256) x Sym(1461) x Sym(1461)");
}
float* x4798 = (float*)myMalloc(1 * sizeof(float));;
x4798[0] = 1.0f;
float* x4800 = (float*)myMalloc(1 * sizeof(float));;
x4800[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
CUDNN_CALL(cudnnAddTensor(
cudnnHandle, x4798, bias_desc, x1552, x4800, out_desc, x1575));
};
} else {
float* x4804 = (float*)myMalloc(1 * sizeof(float));;
x4804[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
CUDNN_CALL(cudnnConvolutionBackwardBias(
cudnnHandle, x4804, grad_out_desc, x1552,
x4804, grad_bias_desc, x1575));
};
}
float* x4809 = (float*)myMalloc(1 * sizeof(float));;
x4809[0] = 0.0f;
float* x4811 = (float*)myMalloc(1 * sizeof(float));;
x4811[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4811, x4811, x4811, x4811, in_desc, x1560,
out_desc, x1575, in_desc, x1566, sbmv_desc, x523,
x1183,x1310, 1.0E-5, x1568, x1569));
};
// conv2D back-propagate: 1x1, 64 -> 256, stride 1 (data gradient, then filter gradient)
float* x4815 = (float*)myMalloc(1 * sizeof(float));;
x4815[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4815, filt_desc, x781, grad_out_desc, x1566,
conv_desc, algo, ws_data, ws_size,
x4815, grad_in_desc, x1453));
};
float* x4818 = (float*)myMalloc(1 * sizeof(float));;
x4818[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1461, x1461));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4818, in_desc, x1451, grad_out_desc, x1566,
conv_desc, algo, ws_data, ws_size,
x4818, grad_filt_desc, x1269));
};
float* x4821 = (float*)myMalloc(1 * sizeof(float));;
x4821[0] = 0.0f;
float* x4823 = (float*)myMalloc(1 * sizeof(float));;
x4823[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 256, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4823, x4823, x4823, x4823, in_desc, x1537,
out_desc, x1552, in_desc, x1543, sbmv_desc, x892,
x1306,x1233, 1.0E-5, x1545, x1546));
};
// conv2D back-propagate: 1x1, 64 -> 256, stride 1 (data gradient, then filter gradient)
float* x4827 = (float*)myMalloc(1 * sizeof(float));;
x4827[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4827, filt_desc, x391, grad_out_desc, x1543,
conv_desc, algo, ws_data, ws_size,
x4827, grad_in_desc, x1518));
};
float* x4830 = (float*)myMalloc(1 * sizeof(float));;
x4830[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
256, 64, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 256, x1531, x1531));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4830, in_desc, x1510, grad_out_desc, x1543,
conv_desc, algo, ws_data, ws_size,
x4830, grad_filt_desc, x1139));
};
float* x4833 = (float*)myMalloc(1 * sizeof(float));;
x4833[0] = 1.0f;
float* x4835 = (float*)myMalloc(1 * sizeof(float));;
x4835[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4833, x_desc, x1510, x_desc, x1518, x_desc, x1510,
x4835, x_desc, x1518));
};
float* x4838 = (float*)myMalloc(1 * sizeof(float));;
x4838[0] = 0.0f;
float* x4840 = (float*)myMalloc(1 * sizeof(float));;
x4840[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4840, x4840, x4840, x4840, in_desc, x1503,
out_desc, x1518, in_desc, x1509, sbmv_desc, x787,
x1271,x1156, 1.0E-5, x1511, x1512));
};
// conv2D back-propagate: 3x3, 64 -> 64, stride 1, pad 1 (data gradient, then filter gradient)
float* x4844 = (float*)myMalloc(1 * sizeof(float));;
x4844[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4844, filt_desc, x565, grad_out_desc, x1509,
conv_desc, algo, ws_data, ws_size,
x4844, grad_in_desc, x1482));
};
float* x4847 = (float*)myMalloc(1 * sizeof(float));;
x4847[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1497, x1497));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4847, in_desc, x1474, grad_out_desc, x1509,
conv_desc, algo, ws_data, ws_size,
x4847, grad_filt_desc, x1197));
};
float* x4850 = (float*)myMalloc(1 * sizeof(float));;
x4850[0] = 1.0f;
float* x4852 = (float*)myMalloc(1 * sizeof(float));;
x4852[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4850, x_desc, x1474, x_desc, x1482, x_desc, x1474,
x4852, x_desc, x1482));
};
float* x4855 = (float*)myMalloc(1 * sizeof(float));;
x4855[0] = 0.0f;
float* x4857 = (float*)myMalloc(1 * sizeof(float));;
x4857[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4857, x4857, x4857, x4857, in_desc, x1467,
out_desc, x1482, in_desc, x1473, sbmv_desc, x373,
x1133,x1160, 1.0E-5, x1475, x1476));
};
// conv2D back-propagate: 1x1, 64 -> 64, stride 1 (data gradient, then filter gradient)
float* x4861 = (float*)myMalloc(1 * sizeof(float));;
x4861[0] = 1.0f;
{
cudnnFilterDescriptor_t filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 1, 1));
cudnnTensorDescriptor_t grad_in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
filt_desc, grad_out_desc, conv_desc, grad_in_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
// algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
cudnnHandle,
x4861, filt_desc, x994, grad_out_desc, x1473,
conv_desc, algo, ws_data, ws_size,
x4861, grad_in_desc, x1453));
};
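// Backward filter half of the same 1x1 convolution; the filter gradient is
// accumulated into x1340.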
float* x4864 = (float*)myMalloc(1 * sizeof(float));;
x4864[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 64, 1, 1));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1461, x1461));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
0, 0, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4864, in_desc, x1451, grad_out_desc, x1473,
conv_desc, algo, ws_data, ws_size,
x4864, grad_filt_desc, x1340));
};
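// Max-pooling backward (2x2 window, stride 2): each output-gradient element in
// x1453 is routed back to the input position that produced the maximum in the
// forward pass; the result is written to x1433 (alpha = 1, beta = 0).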
float* x4867 = (float*)myMalloc(1 * sizeof(float));;
x4867[0] = 0.0f;
float* x4869 = (float*)myMalloc(1 * sizeof(float));;
x4869[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1445, x1445));
cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN,
    2, 2, 0, 0, 2, 2
));
CUDNN_CALL(cudnnPoolingBackward(
cudnnHandle,
poolingDesc,
    x4869, out_desc, x1451, out_desc, x1453, in_desc, x1425, x4867, in_desc, x1433));
};
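// Same ReLU backward pattern as above, applied in place to x1433 using the
// saved activation x1425.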
float* x4872 = (float*)myMalloc(1 * sizeof(float));;
x4872[0] = 1.0f;
float* x4874 = (float*)myMalloc(1 * sizeof(float));;
x4874[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
/*mode=*/ CUDNN_ACTIVATION_RELU,
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN,
/*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
cudnnHandle, act_desc,
x4872, x_desc, x1425, x_desc, x1433, x_desc, x1425,
x4874, x_desc, x1433));
};
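// Batch-norm backward for the stem convolution's output: accumulates the data
// gradient into x1424 and the scale/bias gradients into x1313 and x1358.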
float* x4877 = (float*)myMalloc(1 * sizeof(float));;
x4877[0] = 0.0f;
float* x4879 = (float*)myMalloc(1 * sizeof(float));;
x4879[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, 64, 1, 1));
CUDNN_CALL(cudnnBatchNormalizationBackward(
cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
x4879, x4879, x4879, x4879, in_desc, x1418,
out_desc, x1433, in_desc, x1424, sbmv_desc, x913,
    x1313, x1358, 1.0E-5, x1426, x1427));
};
// conv2D back-propagate
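// Backward filter pass for the stem 3x3 convolution (3-channel 32x32 input,
// batch 64). Only the filter gradient is needed here, since this convolution's
// input is the data batch itself; the gradient is accumulated into x1259.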
float* x4883 = (float*)myMalloc(1 * sizeof(float));;
x4883[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
64, 3, 3, 3));
cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 64, x1412, x1412));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
64, 3, 32, 32));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1, 1, 1, 1, 1,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
in_desc, grad_out_desc, conv_desc, grad_filt_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
cudnnHandle,
x4883, in_desc, x1402, grad_out_desc, x1424,
conv_desc, algo, ws_data, ws_size,
x4883, grad_filt_desc, x1259));
};
// Tensor 'toCPU' invocation.
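// The scalar loss is copied from device buffer x1410 to the host and added to
// x1390, which presumably accumulates the running loss across batches.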
float* x4887 = (float*)myMalloc(1 * sizeof(float));;
CUDA_CALL(cudaMemcpy(x4887, x1410, 1 * sizeof(float), cudaMemcpyDeviceToHost));
float x4889 = x4887[0];
x1390 += x4889;
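// Parameter update sweep. Every update below follows the same plain-SGD
// pattern with learning rate 0.005: cublasSgeam computes
//   C = alpha*op(A) + beta*op(B),
// and is called with alpha = 1.0, A = C = the weight buffer, and
// beta = -0.005, B = the gradient buffer, i.e.  w := w - 0.005 * dw.
// arrayFill then zeroes the gradient buffer for the next iteration.
// A minimal equivalent helper (hypothetical; not part of the generated code,
// which instead heap-allocates the two scalar constants before every call):
//
//   static void sgdStep(cublasHandle_t h, float* w, float* dw, int rows, int cols) {
//     const float one = 1.0f, negLr = -0.005f;   // host-pointer scalars
//     CUBLAS_CALL(cublasSgeam(h, CUBLAS_OP_N, CUBLAS_OP_N, rows, cols,
//                             &one, w, rows, &negLr, dw, rows, w, rows));
//     arrayFill<<<28, 512>>>(dw, 0.0f, rows * cols);  // reset gradient
//   }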
float* x4891 = (float*)myMalloc(1 * sizeof(float));;
x4891[0] = 1.0f;
float* x4893 = (float*)myMalloc(1 * sizeof(float));;
x4893[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x4891,x313,1024,x4893, x1113, 1024, x313,1024));
arrayFill<<<28, 512>>>(x1113, 0.0f, 262144);
float* x4897 = (float*)myMalloc(1 * sizeof(float));;
x4897[0] = 1.0f;
float* x4899 = (float*)myMalloc(1 * sizeof(float));;
x4899[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x4897,x316,1,x4899, x1114, 1, x316,1));
arrayFill<<<28, 512>>>(x1114, 0.0f, 256);
float* x4903 = (float*)myMalloc(1 * sizeof(float));;
x4903[0] = 1.0f;
float* x4905 = (float*)myMalloc(1 * sizeof(float));;
x4905[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x4903,x319,1,x4905, x1115, 1, x319,1));
arrayFill<<<28, 512>>>(x1115, 0.0f, 128);
float* x4909 = (float*)myMalloc(1 * sizeof(float));;
x4909[0] = 1.0f;
float* x4911 = (float*)myMalloc(1 * sizeof(float));;
x4911[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x4909,x322,1,x4911, x1116, 1, x322,1));
arrayFill<<<28, 512>>>(x1116, 0.0f, 128);
float* x4915 = (float*)myMalloc(1 * sizeof(float));;
x4915[0] = 1.0f;
float* x4917 = (float*)myMalloc(1 * sizeof(float));;
x4917[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x4915,x325,1,x4917, x1117, 1, x325,1));
arrayFill<<<28, 512>>>(x1117, 0.0f, 64);
float* x4921 = (float*)myMalloc(1 * sizeof(float));;
x4921[0] = 1.0f;
float* x4923 = (float*)myMalloc(1 * sizeof(float));;
x4923[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,128,x4921,x328,256,x4923, x1118, 256, x328,256));
arrayFill<<<28, 512>>>(x1118, 0.0f, 32768);
float* x4927 = (float*)myMalloc(1 * sizeof(float));;
x4927[0] = 1.0f;
float* x4929 = (float*)myMalloc(1 * sizeof(float));;
x4929[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4927,x331,1,x4929, x1119, 1, x331,1));
arrayFill<<<28, 512>>>(x1119, 0.0f, 512);
float* x4933 = (float*)myMalloc(1 * sizeof(float));;
x4933[0] = 1.0f;
float* x4935 = (float*)myMalloc(1 * sizeof(float));;
x4935[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x4933,x334,1024,x4935, x1120, 1024, x334,1024));
arrayFill<<<28, 512>>>(x1120, 0.0f, 262144);
float* x4939 = (float*)myMalloc(1 * sizeof(float));;
x4939[0] = 1.0f;
float* x4941 = (float*)myMalloc(1 * sizeof(float));;
x4941[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x4939,x337,2304,x4941, x1121, 2304, x337,2304));
arrayFill<<<28, 512>>>(x1121, 0.0f, 589824);
float* x4945 = (float*)myMalloc(1 * sizeof(float));;
x4945[0] = 1.0f;
float* x4947 = (float*)myMalloc(1 * sizeof(float));;
x4947[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4945,x340,1,x4947, x1122, 1, x340,1));
arrayFill<<<28, 512>>>(x1122, 0.0f, 512);
float* x4951 = (float*)myMalloc(1 * sizeof(float));;
x4951[0] = 1.0f;
float* x4953 = (float*)myMalloc(1 * sizeof(float));;
x4953[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x4951,x343,1,x4953, x1123, 1, x343,1));
arrayFill<<<28, 512>>>(x1123, 0.0f, 256);
float* x4957 = (float*)myMalloc(1 * sizeof(float));;
x4957[0] = 1.0f;
float* x4959 = (float*)myMalloc(1 * sizeof(float));;
x4959[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x4957,x346,1,x4959, x1124, 1, x346,1));
arrayFill<<<28, 512>>>(x1124, 0.0f, 128);
float* x4963 = (float*)myMalloc(1 * sizeof(float));;
x4963[0] = 1.0f;
float* x4965 = (float*)myMalloc(1 * sizeof(float));;
x4965[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x4963,x349,1,x4965, x1125, 1, x349,1));
arrayFill<<<28, 512>>>(x1125, 0.0f, 1024);
float* x4969 = (float*)myMalloc(1 * sizeof(float));;
x4969[0] = 1.0f;
float* x4971 = (float*)myMalloc(1 * sizeof(float));;
x4971[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4969,x352,1,x4971, x1126, 1, x352,1));
arrayFill<<<28, 512>>>(x1126, 0.0f, 512);
float* x4975 = (float*)myMalloc(1 * sizeof(float));;
x4975[0] = 1.0f;
float* x4977 = (float*)myMalloc(1 * sizeof(float));;
x4977[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x4975,x355,1,x4977, x1127, 1, x355,1));
arrayFill<<<28, 512>>>(x1127, 0.0f, 1024);
float* x4981 = (float*)myMalloc(1 * sizeof(float));;
x4981[0] = 1.0f;
float* x4983 = (float*)myMalloc(1 * sizeof(float));;
x4983[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x4981,x358,1,x4983, x1128, 1, x358,1));
arrayFill<<<28, 512>>>(x1128, 0.0f, 256);
float* x4987 = (float*)myMalloc(1 * sizeof(float));;
x4987[0] = 1.0f;
float* x4989 = (float*)myMalloc(1 * sizeof(float));;
x4989[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x4987,x361,1024,x4989, x1129, 1024, x361,1024));
arrayFill<<<28, 512>>>(x1129, 0.0f, 262144);
float* x4993 = (float*)myMalloc(1 * sizeof(float));;
x4993[0] = 1.0f;
float* x4995 = (float*)myMalloc(1 * sizeof(float));;
x4995[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4993,x364,1,x4995, x1130, 1, x364,1));
arrayFill<<<28, 512>>>(x1130, 0.0f, 512);
float* x4999 = (float*)myMalloc(1 * sizeof(float));;
x4999[0] = 1.0f;
float* x5001 = (float*)myMalloc(1 * sizeof(float));;
x5001[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x4999,x367,1,x5001, x1131, 1, x367,1));
arrayFill<<<28, 512>>>(x1131, 0.0f, 64);
float* x5005 = (float*)myMalloc(1 * sizeof(float));;
x5005[0] = 1.0f;
float* x5007 = (float*)myMalloc(1 * sizeof(float));;
x5007[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5005,x370,1,x5007, x1132, 1, x370,1));
arrayFill<<<28, 512>>>(x1132, 0.0f, 512);
float* x5011 = (float*)myMalloc(1 * sizeof(float));;
x5011[0] = 1.0f;
float* x5013 = (float*)myMalloc(1 * sizeof(float));;
x5013[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5011,x373,1,x5013, x1133, 1, x373,1));
arrayFill<<<28, 512>>>(x1133, 0.0f, 64);
float* x5017 = (float*)myMalloc(1 * sizeof(float));;
x5017[0] = 1.0f;
float* x5019 = (float*)myMalloc(1 * sizeof(float));;
x5019[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x5017,x376,1152,x5019, x1134, 1152, x376,1152));
arrayFill<<<28, 512>>>(x1134, 0.0f, 147456);
float* x5023 = (float*)myMalloc(1 * sizeof(float));;
x5023[0] = 1.0f;
float* x5025 = (float*)myMalloc(1 * sizeof(float));;
x5025[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 4608,512,x5023,x379,4608,x5025, x1135, 4608, x379,4608));
arrayFill<<<28, 512>>>(x1135, 0.0f, 2359296);
float* x5029 = (float*)myMalloc(1 * sizeof(float));;
x5029[0] = 1.0f;
float* x5031 = (float*)myMalloc(1 * sizeof(float));;
x5031[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5029,x382,1,x5031, x1136, 1, x382,1));
arrayFill<<<28, 512>>>(x1136, 0.0f, 1024);
float* x5035 = (float*)myMalloc(1 * sizeof(float));;
x5035[0] = 1.0f;
float* x5037 = (float*)myMalloc(1 * sizeof(float));;
x5037[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5035,x385,1,x5037, x1137, 1, x385,1));
arrayFill<<<28, 512>>>(x1137, 0.0f, 256);
float* x5041 = (float*)myMalloc(1 * sizeof(float));;
x5041[0] = 1.0f;
float* x5043 = (float*)myMalloc(1 * sizeof(float));;
x5043[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x5041,x388,2304,x5043, x1138, 2304, x388,2304));
arrayFill<<<28, 512>>>(x1138, 0.0f, 589824);
float* x5047 = (float*)myMalloc(1 * sizeof(float));;
x5047[0] = 1.0f;
float* x5049 = (float*)myMalloc(1 * sizeof(float));;
x5049[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5047,x391,64,x5049, x1139, 64, x391,64));
arrayFill<<<28, 512>>>(x1139, 0.0f, 16384);
float* x5053 = (float*)myMalloc(1 * sizeof(float));;
x5053[0] = 1.0f;
float* x5055 = (float*)myMalloc(1 * sizeof(float));;
x5055[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,2048,x5053,x394,512,x5055, x1140, 512, x394,512));
arrayFill<<<28, 512>>>(x1140, 0.0f, 1048576);
float* x5059 = (float*)myMalloc(1 * sizeof(float));;
x5059[0] = 1.0f;
float* x5061 = (float*)myMalloc(1 * sizeof(float));;
x5061[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 4608,512,x5059,x397,4608,x5061, x1141, 4608, x397,4608));
arrayFill<<<28, 512>>>(x1141, 0.0f, 2359296);
float* x5065 = (float*)myMalloc(1 * sizeof(float));;
x5065[0] = 1.0f;
float* x5067 = (float*)myMalloc(1 * sizeof(float));;
x5067[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5065,x400,1,x5067, x1142, 1, x400,1));
arrayFill<<<28, 512>>>(x1142, 0.0f, 128);
float* x5071 = (float*)myMalloc(1 * sizeof(float));;
x5071[0] = 1.0f;
float* x5073 = (float*)myMalloc(1 * sizeof(float));;
x5073[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5071,x403,1,x5073, x1143, 1, x403,1));
arrayFill<<<28, 512>>>(x1143, 0.0f, 256);
float* x5077 = (float*)myMalloc(1 * sizeof(float));;
x5077[0] = 1.0f;
float* x5079 = (float*)myMalloc(1 * sizeof(float));;
x5079[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5077,x406,1,x5079, x1144, 1, x406,1));
arrayFill<<<28, 512>>>(x1144, 0.0f, 256);
float* x5083 = (float*)myMalloc(1 * sizeof(float));;
x5083[0] = 1.0f;
float* x5085 = (float*)myMalloc(1 * sizeof(float));;
x5085[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5083,x409,1,x5085, x1145, 1, x409,1));
arrayFill<<<28, 512>>>(x1145, 0.0f, 128);
float* x5089 = (float*)myMalloc(1 * sizeof(float));;
x5089[0] = 1.0f;
float* x5091 = (float*)myMalloc(1 * sizeof(float));;
x5091[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5089,x412,1,x5091, x1146, 1, x412,1));
arrayFill<<<28, 512>>>(x1146, 0.0f, 128);
float* x5095 = (float*)myMalloc(1 * sizeof(float));;
x5095[0] = 1.0f;
float* x5097 = (float*)myMalloc(1 * sizeof(float));;
x5097[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5095,x415,1,x5097, x1147, 1, x415,1));
arrayFill<<<28, 512>>>(x1147, 0.0f, 64);
float* x5101 = (float*)myMalloc(1 * sizeof(float));;
x5101[0] = 1.0f;
float* x5103 = (float*)myMalloc(1 * sizeof(float));;
x5103[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5101,x418,128,x5103, x1148, 128, x418,128));
arrayFill<<<28, 512>>>(x1148, 0.0f, 65536);
float* x5107 = (float*)myMalloc(1 * sizeof(float));;
x5107[0] = 1.0f;
float* x5109 = (float*)myMalloc(1 * sizeof(float));;
x5109[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5107,x421,1,x5109, x1149, 1, x421,1));
arrayFill<<<28, 512>>>(x1149, 0.0f, 512);
float* x5113 = (float*)myMalloc(1 * sizeof(float));;
x5113[0] = 1.0f;
float* x5115 = (float*)myMalloc(1 * sizeof(float));;
x5115[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5113,x424,1,x5115, x1150, 1, x424,1));
arrayFill<<<28, 512>>>(x1150, 0.0f, 128);
float* x5119 = (float*)myMalloc(1 * sizeof(float));;
x5119[0] = 1.0f;
float* x5121 = (float*)myMalloc(1 * sizeof(float));;
x5121[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5119,x427,1,x5121, x1151, 1, x427,1));
arrayFill<<<28, 512>>>(x1151, 0.0f, 64);
float* x5125 = (float*)myMalloc(1 * sizeof(float));;
x5125[0] = 1.0f;
float* x5127 = (float*)myMalloc(1 * sizeof(float));;
x5127[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5125,x430,1,x5127, x1152, 1, x430,1));
arrayFill<<<28, 512>>>(x1152, 0.0f, 128);
float* x5131 = (float*)myMalloc(1 * sizeof(float));;
x5131[0] = 1.0f;
float* x5133 = (float*)myMalloc(1 * sizeof(float));;
x5133[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5131,x433,1,x5133, x1153, 1, x433,1));
arrayFill<<<28, 512>>>(x1153, 0.0f, 512);
float* x5137 = (float*)myMalloc(1 * sizeof(float));;
x5137[0] = 1.0f;
float* x5139 = (float*)myMalloc(1 * sizeof(float));;
x5139[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,2048,x5137,x436,512,x5139, x1154, 512, x436,512));
arrayFill<<<28, 512>>>(x1154, 0.0f, 1048576);
float* x5143 = (float*)myMalloc(1 * sizeof(float));;
x5143[0] = 1.0f;
float* x5145 = (float*)myMalloc(1 * sizeof(float));;
x5145[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,10,x5143,x439,1,x5145, x1155, 1, x439,1));
arrayFill<<<28, 512>>>(x1155, 0.0f, 10);
float* x5149 = (float*)myMalloc(1 * sizeof(float));;
x5149[0] = 1.0f;
float* x5151 = (float*)myMalloc(1 * sizeof(float));;
x5151[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5149,x442,1,x5151, x1156, 1, x442,1));
arrayFill<<<28, 512>>>(x1156, 0.0f, 64);
float* x5155 = (float*)myMalloc(1 * sizeof(float));;
x5155[0] = 1.0f;
float* x5157 = (float*)myMalloc(1 * sizeof(float));;
x5157[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5155,x445,1,x5157, x1157, 1, x445,1));
arrayFill<<<28, 512>>>(x1157, 0.0f, 512);
float* x5161 = (float*)myMalloc(1 * sizeof(float));;
x5161[0] = 1.0f;
float* x5163 = (float*)myMalloc(1 * sizeof(float));;
x5163[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5161,x448,1,x5163, x1158, 1, x448,1));
arrayFill<<<28, 512>>>(x1158, 0.0f, 64);
float* x5167 = (float*)myMalloc(1 * sizeof(float));;
x5167[0] = 1.0f;
float* x5169 = (float*)myMalloc(1 * sizeof(float));;
x5169[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5167,x451,1,x5169, x1159, 1, x451,1));
arrayFill<<<28, 512>>>(x1159, 0.0f, 512);
float* x5173 = (float*)myMalloc(1 * sizeof(float));;
x5173[0] = 1.0f;
float* x5175 = (float*)myMalloc(1 * sizeof(float));;
x5175[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5173,x454,1,x5175, x1160, 1, x454,1));
arrayFill<<<28, 512>>>(x1160, 0.0f, 64);
float* x5179 = (float*)myMalloc(1 * sizeof(float));;
x5179[0] = 1.0f;
float* x5181 = (float*)myMalloc(1 * sizeof(float));;
x5181[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5179,x457,1,x5181, x1161, 1, x457,1));
arrayFill<<<28, 512>>>(x1161, 0.0f, 512);
float* x5185 = (float*)myMalloc(1 * sizeof(float));;
x5185[0] = 1.0f;
float* x5187 = (float*)myMalloc(1 * sizeof(float));;
x5187[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5185,x460,128,x5187, x1162, 128, x460,128));
arrayFill<<<28, 512>>>(x1162, 0.0f, 65536);
float* x5191 = (float*)myMalloc(1 * sizeof(float));;
x5191[0] = 1.0f;
float* x5193 = (float*)myMalloc(1 * sizeof(float));;
x5193[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5191,x463,256,x5193, x1163, 256, x463,256));
arrayFill<<<28, 512>>>(x1163, 0.0f, 262144);
float* x5197 = (float*)myMalloc(1 * sizeof(float));;
x5197[0] = 1.0f;
float* x5199 = (float*)myMalloc(1 * sizeof(float));;
x5199[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5197,x466,1,x5199, x1164, 1, x466,1));
arrayFill<<<28, 512>>>(x1164, 0.0f, 1024);
float* x5203 = (float*)myMalloc(1 * sizeof(float));;
x5203[0] = 1.0f;
float* x5205 = (float*)myMalloc(1 * sizeof(float));;
x5205[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5203,x469,1,x5205, x1165, 1, x469,1));
arrayFill<<<28, 512>>>(x1165, 0.0f, 256);
float* x5209 = (float*)myMalloc(1 * sizeof(float));;
x5209[0] = 1.0f;
float* x5211 = (float*)myMalloc(1 * sizeof(float));;
x5211[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5209,x472,1,x5211, x1166, 1, x472,1));
arrayFill<<<28, 512>>>(x1166, 0.0f, 1024);
float* x5215 = (float*)myMalloc(1 * sizeof(float));;
x5215[0] = 1.0f;
float* x5217 = (float*)myMalloc(1 * sizeof(float));;
x5217[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5215,x475,1,x5217, x1167, 1, x475,1));
arrayFill<<<28, 512>>>(x1167, 0.0f, 64);
float* x5221 = (float*)myMalloc(1 * sizeof(float));;
x5221[0] = 1.0f;
float* x5223 = (float*)myMalloc(1 * sizeof(float));;
x5223[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5221,x478,1,x5223, x1168, 1, x478,1));
arrayFill<<<28, 512>>>(x1168, 0.0f, 128);
float* x5227 = (float*)myMalloc(1 * sizeof(float));;
x5227[0] = 1.0f;
float* x5229 = (float*)myMalloc(1 * sizeof(float));;
x5229[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5227,x481,1,x5229, x1169, 1, x481,1));
arrayFill<<<28, 512>>>(x1169, 0.0f, 2048);
float* x5233 = (float*)myMalloc(1 * sizeof(float));;
x5233[0] = 1.0f;
float* x5235 = (float*)myMalloc(1 * sizeof(float));;
x5235[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5233,x484,1,x5235, x1170, 1, x484,1));
arrayFill<<<28, 512>>>(x1170, 0.0f, 256);
float* x5239 = (float*)myMalloc(1 * sizeof(float));;
x5239[0] = 1.0f;
float* x5241 = (float*)myMalloc(1 * sizeof(float));;
x5241[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5239,x487,1,x5241, x1171, 1, x487,1));
arrayFill<<<28, 512>>>(x1171, 0.0f, 2048);
float* x5245 = (float*)myMalloc(1 * sizeof(float));;
x5245[0] = 1.0f;
float* x5247 = (float*)myMalloc(1 * sizeof(float));;
x5247[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5245,x490,1,x5247, x1172, 1, x490,1));
arrayFill<<<28, 512>>>(x1172, 0.0f, 512);
float* x5251 = (float*)myMalloc(1 * sizeof(float));;
x5251[0] = 1.0f;
float* x5253 = (float*)myMalloc(1 * sizeof(float));;
x5253[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5251,x493,1,x5253, x1173, 1, x493,1));
arrayFill<<<28, 512>>>(x1173, 0.0f, 512);
float* x5257 = (float*)myMalloc(1 * sizeof(float));;
x5257[0] = 1.0f;
float* x5259 = (float*)myMalloc(1 * sizeof(float));;
x5259[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5257,x496,1,x5259, x1174, 1, x496,1));
arrayFill<<<28, 512>>>(x1174, 0.0f, 512);
float* x5263 = (float*)myMalloc(1 * sizeof(float));;
x5263[0] = 1.0f;
float* x5265 = (float*)myMalloc(1 * sizeof(float));;
x5265[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5263,x499,1,x5265, x1175, 1, x499,1));
arrayFill<<<28, 512>>>(x1175, 0.0f, 2048);
float* x5269 = (float*)myMalloc(1 * sizeof(float));;
x5269[0] = 1.0f;
float* x5271 = (float*)myMalloc(1 * sizeof(float));;
x5271[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5269,x502,1,x5271, x1176, 1, x502,1));
arrayFill<<<28, 512>>>(x1176, 0.0f, 256);
float* x5275 = (float*)myMalloc(1 * sizeof(float));;
x5275[0] = 1.0f;
float* x5277 = (float*)myMalloc(1 * sizeof(float));;
x5277[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5275,x505,1,x5277, x1177, 1, x505,1));
arrayFill<<<28, 512>>>(x1177, 0.0f, 256);
float* x5281 = (float*)myMalloc(1 * sizeof(float));;
x5281[0] = 1.0f;
float* x5283 = (float*)myMalloc(1 * sizeof(float));;
x5283[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5281,x508,1,x5283, x1178, 1, x508,1));
arrayFill<<<28, 512>>>(x1178, 0.0f, 256);
float* x5287 = (float*)myMalloc(1 * sizeof(float));;
x5287[0] = 1.0f;
float* x5289 = (float*)myMalloc(1 * sizeof(float));;
x5289[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5287,x511,1,x5289, x1179, 1, x511,1));
arrayFill<<<28, 512>>>(x1179, 0.0f, 64);
float* x5293 = (float*)myMalloc(1 * sizeof(float));;
x5293[0] = 1.0f;
float* x5295 = (float*)myMalloc(1 * sizeof(float));;
x5295[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,64,x5293,x514,576,x5295, x1180, 576, x514,576));
arrayFill<<<28, 512>>>(x1180, 0.0f, 36864);
float* x5299 = (float*)myMalloc(1 * sizeof(float));;
x5299[0] = 1.0f;
float* x5301 = (float*)myMalloc(1 * sizeof(float));;
x5301[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5299,x517,1,x5301, x1181, 1, x517,1));
arrayFill<<<28, 512>>>(x1181, 0.0f, 256);
float* x5305 = (float*)myMalloc(1 * sizeof(float));;
x5305[0] = 1.0f;
float* x5307 = (float*)myMalloc(1 * sizeof(float));;
x5307[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,1024,x5305,x520,512,x5307, x1182, 512, x520,512));
arrayFill<<<28, 512>>>(x1182, 0.0f, 524288);
float* x5311 = (float*)myMalloc(1 * sizeof(float));;
x5311[0] = 1.0f;
float* x5313 = (float*)myMalloc(1 * sizeof(float));;
x5313[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5311,x523,1,x5313, x1183, 1, x523,1));
arrayFill<<<28, 512>>>(x1183, 0.0f, 256);
float* x5317 = (float*)myMalloc(1 * sizeof(float));;
x5317[0] = 1.0f;
float* x5319 = (float*)myMalloc(1 * sizeof(float));;
x5319[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5317,x526,1,x5319, x1184, 1, x526,1));
arrayFill<<<28, 512>>>(x1184, 0.0f, 256);
float* x5323 = (float*)myMalloc(1 * sizeof(float));;
x5323[0] = 1.0f;
float* x5325 = (float*)myMalloc(1 * sizeof(float));;
x5325[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5323,x529,1,x5325, x1185, 1, x529,1));
arrayFill<<<28, 512>>>(x1185, 0.0f, 512);
float* x5329 = (float*)myMalloc(1 * sizeof(float));;
x5329[0] = 1.0f;
float* x5331 = (float*)myMalloc(1 * sizeof(float));;
x5331[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5329,x532,1,x5331, x1186, 1, x532,1));
arrayFill<<<28, 512>>>(x1186, 0.0f, 128);
float* x5335 = (float*)myMalloc(1 * sizeof(float));;
x5335[0] = 1.0f;
float* x5337 = (float*)myMalloc(1 * sizeof(float));;
x5337[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5335,x535,1,x5337, x1187, 1, x535,1));
arrayFill<<<28, 512>>>(x1187, 0.0f, 256);
float* x5341 = (float*)myMalloc(1 * sizeof(float));;
x5341[0] = 1.0f;
float* x5343 = (float*)myMalloc(1 * sizeof(float));;
x5343[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5341,x538,1,x5343, x1188, 1, x538,1));
arrayFill<<<28, 512>>>(x1188, 0.0f, 64);
float* x5347 = (float*)myMalloc(1 * sizeof(float));;
x5347[0] = 1.0f;
float* x5349 = (float*)myMalloc(1 * sizeof(float));;
x5349[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5347,x541,1,x5349, x1189, 1, x541,1));
arrayFill<<<28, 512>>>(x1189, 0.0f, 512);
float* x5353 = (float*)myMalloc(1 * sizeof(float));;
x5353[0] = 1.0f;
float* x5355 = (float*)myMalloc(1 * sizeof(float));;
x5355[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,64,x5353,x544,576,x5355, x1190, 576, x544,576));
arrayFill<<<28, 512>>>(x1190, 0.0f, 36864);
float* x5359 = (float*)myMalloc(1 * sizeof(float));;
x5359[0] = 1.0f;
float* x5361 = (float*)myMalloc(1 * sizeof(float));;
x5361[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5359,x547,1,x5361, x1191, 1, x547,1));
arrayFill<<<28, 512>>>(x1191, 0.0f, 128);
float* x5365 = (float*)myMalloc(1 * sizeof(float));;
x5365[0] = 1.0f;
float* x5367 = (float*)myMalloc(1 * sizeof(float));;
x5367[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5365,x550,1,x5367, x1192, 1, x550,1));
arrayFill<<<28, 512>>>(x1192, 0.0f, 256);
float* x5371 = (float*)myMalloc(1 * sizeof(float));;
x5371[0] = 1.0f;
float* x5373 = (float*)myMalloc(1 * sizeof(float));;
x5373[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5371,x553,1,x5373, x1193, 1, x553,1));
arrayFill<<<28, 512>>>(x1193, 0.0f, 1024);
float* x5377 = (float*)myMalloc(1 * sizeof(float));;
x5377[0] = 1.0f;
float* x5379 = (float*)myMalloc(1 * sizeof(float));;
x5379[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5377,x556,64,x5379, x1194, 64, x556,64));
arrayFill<<<28, 512>>>(x1194, 0.0f, 16384);
float* x5383 = (float*)myMalloc(1 * sizeof(float));;
x5383[0] = 1.0f;
float* x5385 = (float*)myMalloc(1 * sizeof(float));;
x5385[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5383,x559,1,x5385, x1195, 1, x559,1));
arrayFill<<<28, 512>>>(x1195, 0.0f, 512);
float* x5389 = (float*)myMalloc(1 * sizeof(float));;
x5389[0] = 1.0f;
float* x5391 = (float*)myMalloc(1 * sizeof(float));;
x5391[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5389,x562,256,x5391, x1196, 256, x562,256));
arrayFill<<<28, 512>>>(x1196, 0.0f, 262144);
float* x5395 = (float*)myMalloc(1 * sizeof(float));;
x5395[0] = 1.0f;
float* x5397 = (float*)myMalloc(1 * sizeof(float));;
x5397[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,64,x5395,x565,576,x5397, x1197, 576, x565,576));
arrayFill<<<28, 512>>>(x1197, 0.0f, 36864);
float* x5401 = (float*)myMalloc(1 * sizeof(float));;
x5401[0] = 1.0f;
float* x5403 = (float*)myMalloc(1 * sizeof(float));;
x5403[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5401,x568,1,x5403, x1198, 1, x568,1));
arrayFill<<<28, 512>>>(x1198, 0.0f, 256);
float* x5407 = (float*)myMalloc(1 * sizeof(float));;
x5407[0] = 1.0f;
float* x5409 = (float*)myMalloc(1 * sizeof(float));;
x5409[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5407,x571,1,x5409, x1199, 1, x571,1));
arrayFill<<<28, 512>>>(x1199, 0.0f, 256);
float* x5413 = (float*)myMalloc(1 * sizeof(float));;
x5413[0] = 1.0f;
float* x5415 = (float*)myMalloc(1 * sizeof(float));;
x5415[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5413,x574,1,x5415, x1200, 1, x574,1));
arrayFill<<<28, 512>>>(x1200, 0.0f, 1024);
float* x5419 = (float*)myMalloc(1 * sizeof(float));;
x5419[0] = 1.0f;
float* x5421 = (float*)myMalloc(1 * sizeof(float));;
x5421[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5419,x577,1,x5421, x1201, 1, x577,1));
arrayFill<<<28, 512>>>(x1201, 0.0f, 2048);
float* x5425 = (float*)myMalloc(1 * sizeof(float));;
x5425[0] = 1.0f;
float* x5427 = (float*)myMalloc(1 * sizeof(float));;
x5427[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5425,x580,1,x5427, x1202, 1, x580,1));
arrayFill<<<28, 512>>>(x1202, 0.0f, 128);
float* x5431 = (float*)myMalloc(1 * sizeof(float));;
x5431[0] = 1.0f;
float* x5433 = (float*)myMalloc(1 * sizeof(float));;
x5433[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5431,x583,1,x5433, x1203, 1, x583,1));
arrayFill<<<28, 512>>>(x1203, 0.0f, 256);
float* x5437 = (float*)myMalloc(1 * sizeof(float));;
x5437[0] = 1.0f;
float* x5439 = (float*)myMalloc(1 * sizeof(float));;
x5439[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5437,x586,256,x5439, x1204, 256, x586,256));
arrayFill<<<28, 512>>>(x1204, 0.0f, 262144);
float* x5443 = (float*)myMalloc(1 * sizeof(float));;
x5443[0] = 1.0f;
float* x5445 = (float*)myMalloc(1 * sizeof(float));;
x5445[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5443,x589,1,x5445, x1205, 1, x589,1));
arrayFill<<<28, 512>>>(x1205, 0.0f, 256);
float* x5449 = (float*)myMalloc(1 * sizeof(float));;
x5449[0] = 1.0f;
float* x5451 = (float*)myMalloc(1 * sizeof(float));;
x5451[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5449,x592,1,x5451, x1206, 1, x592,1));
arrayFill<<<28, 512>>>(x1206, 0.0f, 256);
float* x5455 = (float*)myMalloc(1 * sizeof(float));;
x5455[0] = 1.0f;
float* x5457 = (float*)myMalloc(1 * sizeof(float));;
x5457[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5455,x595,1,x5457, x1207, 1, x595,1));
arrayFill<<<28, 512>>>(x1207, 0.0f, 128);
float* x5461 = (float*)myMalloc(1 * sizeof(float));;
x5461[0] = 1.0f;
float* x5463 = (float*)myMalloc(1 * sizeof(float));;
x5463[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5461,x598,1,x5463, x1208, 1, x598,1));
arrayFill<<<28, 512>>>(x1208, 0.0f, 512);
float* x5467 = (float*)myMalloc(1 * sizeof(float));;
x5467[0] = 1.0f;
float* x5469 = (float*)myMalloc(1 * sizeof(float));;
x5469[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5467,x601,1,x5469, x1209, 1, x601,1));
arrayFill<<<28, 512>>>(x1209, 0.0f, 64);
float* x5473 = (float*)myMalloc(1 * sizeof(float));;
x5473[0] = 1.0f;
float* x5475 = (float*)myMalloc(1 * sizeof(float));;
x5475[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5473,x604,1,x5475, x1210, 1, x604,1));
arrayFill<<<28, 512>>>(x1210, 0.0f, 2048);
float* x5479 = (float*)myMalloc(1 * sizeof(float));;
x5479[0] = 1.0f;
float* x5481 = (float*)myMalloc(1 * sizeof(float));;
x5481[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5479,x607,1,x5481, x1211, 1, x607,1));
arrayFill<<<28, 512>>>(x1211, 0.0f, 256);
float* x5485 = (float*)myMalloc(1 * sizeof(float));;
x5485[0] = 1.0f;
float* x5487 = (float*)myMalloc(1 * sizeof(float));;
x5487[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5485,x610,1,x5487, x1212, 1, x610,1));
arrayFill<<<28, 512>>>(x1212, 0.0f, 64);
float* x5491 = (float*)myMalloc(1 * sizeof(float));;
x5491[0] = 1.0f;
float* x5493 = (float*)myMalloc(1 * sizeof(float));;
x5493[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5491,x613,128,x5493, x1213, 128, x613,128));
arrayFill<<<28, 512>>>(x1213, 0.0f, 65536);
float* x5497 = (float*)myMalloc(1 * sizeof(float));;
x5497[0] = 1.0f;
float* x5499 = (float*)myMalloc(1 * sizeof(float));;
x5499[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5497,x616,1,x5499, x1214, 1, x616,1));
arrayFill<<<28, 512>>>(x1214, 0.0f, 2048);
float* x5503 = (float*)myMalloc(1 * sizeof(float));;
x5503[0] = 1.0f;
float* x5505 = (float*)myMalloc(1 * sizeof(float));;
x5505[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5503,x619,1,x5505, x1215, 1, x619,1));
arrayFill<<<28, 512>>>(x1215, 0.0f, 256);
float* x5509 = (float*)myMalloc(1 * sizeof(float));;
x5509[0] = 1.0f;
float* x5511 = (float*)myMalloc(1 * sizeof(float));;
x5511[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5509,x622,1,x5511, x1216, 1, x622,1));
arrayFill<<<28, 512>>>(x1216, 0.0f, 256);
float* x5515 = (float*)myMalloc(1 * sizeof(float));;
x5515[0] = 1.0f;
float* x5517 = (float*)myMalloc(1 * sizeof(float));;
x5517[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5515,x625,1,x5517, x1217, 1, x625,1));
arrayFill<<<28, 512>>>(x1217, 0.0f, 64);
float* x5521 = (float*)myMalloc(1 * sizeof(float));;
x5521[0] = 1.0f;
float* x5523 = (float*)myMalloc(1 * sizeof(float));;
x5523[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5521,x628,128,x5523, x1218, 128, x628,128));
arrayFill<<<28, 512>>>(x1218, 0.0f, 65536);
float* x5527 = (float*)myMalloc(1 * sizeof(float));;
x5527[0] = 1.0f;
float* x5529 = (float*)myMalloc(1 * sizeof(float));;
x5529[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5527,x631,1,x5529, x1219, 1, x631,1));
arrayFill<<<28, 512>>>(x1219, 0.0f, 128);
float* x5533 = (float*)myMalloc(1 * sizeof(float));;
x5533[0] = 1.0f;
float* x5535 = (float*)myMalloc(1 * sizeof(float));;
x5535[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5533,x634,1,x5535, x1220, 1, x634,1));
arrayFill<<<28, 512>>>(x1220, 0.0f, 512);
float* x5539 = (float*)myMalloc(1 * sizeof(float));;
x5539[0] = 1.0f;
float* x5541 = (float*)myMalloc(1 * sizeof(float));;
x5541[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5539,x637,1,x5541, x1221, 1, x637,1));
arrayFill<<<28, 512>>>(x1221, 0.0f, 64);
float* x5545 = (float*)myMalloc(1 * sizeof(float));;
x5545[0] = 1.0f;
float* x5547 = (float*)myMalloc(1 * sizeof(float));;
x5547[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5545,x640,1,x5547, x1222, 1, x640,1));
arrayFill<<<28, 512>>>(x1222, 0.0f, 2048);
float* x5551 = (float*)myMalloc(1 * sizeof(float));;
x5551[0] = 1.0f;
float* x5553 = (float*)myMalloc(1 * sizeof(float));;
x5553[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5551,x643,256,x5553, x1223, 256, x643,256));
arrayFill<<<28, 512>>>(x1223, 0.0f, 262144);
float* x5557 = (float*)myMalloc(1 * sizeof(float));;
x5557[0] = 1.0f;
float* x5559 = (float*)myMalloc(1 * sizeof(float));;
x5559[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5557,x646,1,x5559, x1224, 1, x646,1));
arrayFill<<<28, 512>>>(x1224, 0.0f, 1024);
float* x5563 = (float*)myMalloc(1 * sizeof(float));;
x5563[0] = 1.0f;
float* x5565 = (float*)myMalloc(1 * sizeof(float));;
x5565[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5563,x649,1,x5565, x1225, 1, x649,1));
arrayFill<<<28, 512>>>(x1225, 0.0f, 64);
float* x5569 = (float*)myMalloc(1 * sizeof(float));;
x5569[0] = 1.0f;
float* x5571 = (float*)myMalloc(1 * sizeof(float));;
x5571[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5569,x652,1,x5571, x1226, 1, x652,1));
arrayFill<<<28, 512>>>(x1226, 0.0f, 512);
float* x5575 = (float*)myMalloc(1 * sizeof(float));;
x5575[0] = 1.0f;
float* x5577 = (float*)myMalloc(1 * sizeof(float));;
x5577[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5575,x655,1,x5577, x1227, 1, x655,1));
arrayFill<<<28, 512>>>(x1227, 0.0f, 1024);
float* x5581 = (float*)myMalloc(1 * sizeof(float));;
x5581[0] = 1.0f;
float* x5583 = (float*)myMalloc(1 * sizeof(float));;
x5583[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5581,x658,1,x5583, x1228, 1, x658,1));
arrayFill<<<28, 512>>>(x1228, 0.0f, 512);
float* x5587 = (float*)myMalloc(1 * sizeof(float));;
x5587[0] = 1.0f;
float* x5589 = (float*)myMalloc(1 * sizeof(float));;
x5589[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5587,x661,1,x5589, x1229, 1, x661,1));
arrayFill<<<28, 512>>>(x1229, 0.0f, 1024);
float* x5593 = (float*)myMalloc(1 * sizeof(float));;
x5593[0] = 1.0f;
float* x5595 = (float*)myMalloc(1 * sizeof(float));;
x5595[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5593,x664,1,x5595, x1230, 1, x664,1));
arrayFill<<<28, 512>>>(x1230, 0.0f, 2048);
float* x5599 = (float*)myMalloc(1 * sizeof(float));;
x5599[0] = 1.0f;
float* x5601 = (float*)myMalloc(1 * sizeof(float));;
x5601[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5599,x667,1,x5601, x1231, 1, x667,1));
arrayFill<<<28, 512>>>(x1231, 0.0f, 256);
float* x5605 = (float*)myMalloc(1 * sizeof(float));;
x5605[0] = 1.0f;
float* x5607 = (float*)myMalloc(1 * sizeof(float));;
x5607[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5605,x670,1,x5607, x1232, 1, x670,1));
arrayFill<<<28, 512>>>(x1232, 0.0f, 2048);
float* x5611 = (float*)myMalloc(1 * sizeof(float));;
x5611[0] = 1.0f;
float* x5613 = (float*)myMalloc(1 * sizeof(float));;
x5613[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5611,x673,1,x5613, x1233, 1, x673,1));
arrayFill<<<28, 512>>>(x1233, 0.0f, 256);
float* x5617 = (float*)myMalloc(1 * sizeof(float));;
x5617[0] = 1.0f;
float* x5619 = (float*)myMalloc(1 * sizeof(float));;
x5619[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5617,x676,1,x5619, x1234, 1, x676,1));
arrayFill<<<28, 512>>>(x1234, 0.0f, 128);
float* x5623 = (float*)myMalloc(1 * sizeof(float));;
x5623[0] = 1.0f;
float* x5625 = (float*)myMalloc(1 * sizeof(float));;
x5625[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5623,x679,1,x5625, x1235, 1, x679,1));
arrayFill<<<28, 512>>>(x1235, 0.0f, 128);
float* x5629 = (float*)myMalloc(1 * sizeof(float));;
x5629[0] = 1.0f;
float* x5631 = (float*)myMalloc(1 * sizeof(float));;
x5631[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5629,x682,1,x5631, x1236, 1, x682,1));
arrayFill<<<28, 512>>>(x1236, 0.0f, 256);
float* x5635 = (float*)myMalloc(1 * sizeof(float));;
x5635[0] = 1.0f;
float* x5637 = (float*)myMalloc(1 * sizeof(float));;
x5637[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5635,x685,64,x5637, x1237, 64, x685,64));
arrayFill<<<28, 512>>>(x1237, 0.0f, 16384);
float* x5641 = (float*)myMalloc(1 * sizeof(float));;
x5641[0] = 1.0f;
float* x5643 = (float*)myMalloc(1 * sizeof(float));;
x5643[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5641,x688,1,x5643, x1238, 1, x688,1));
arrayFill<<<28, 512>>>(x1238, 0.0f, 256);
float* x5647 = (float*)myMalloc(1 * sizeof(float));;
x5647[0] = 1.0f;
float* x5649 = (float*)myMalloc(1 * sizeof(float));;
x5649[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,128,x5647,x691,512,x5649, x1239, 512, x691,512));
arrayFill<<<28, 512>>>(x1239, 0.0f, 65536);
float* x5653 = (float*)myMalloc(1 * sizeof(float));;
x5653[0] = 1.0f;
float* x5655 = (float*)myMalloc(1 * sizeof(float));;
x5655[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5653,x694,1,x5655, x1240, 1, x694,1));
arrayFill<<<28, 512>>>(x1240, 0.0f, 256);
float* x5659 = (float*)myMalloc(1 * sizeof(float));;
x5659[0] = 1.0f;
float* x5661 = (float*)myMalloc(1 * sizeof(float));;
x5661[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5659,x697,1,x5661, x1241, 1, x697,1));
arrayFill<<<28, 512>>>(x1241, 0.0f, 128);
float* x5665 = (float*)myMalloc(1 * sizeof(float));;
x5665[0] = 1.0f;
float* x5667 = (float*)myMalloc(1 * sizeof(float));;
x5667[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5665,x700,1,x5667, x1242, 1, x700,1));
arrayFill<<<28, 512>>>(x1242, 0.0f, 64);
float* x5671 = (float*)myMalloc(1 * sizeof(float));;
x5671[0] = 1.0f;
float* x5673 = (float*)myMalloc(1 * sizeof(float));;
x5673[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5671,x703,1,x5673, x1243, 1, x703,1));
arrayFill<<<28, 512>>>(x1243, 0.0f, 256);
float* x5677 = (float*)myMalloc(1 * sizeof(float));;
x5677[0] = 1.0f;
float* x5679 = (float*)myMalloc(1 * sizeof(float));;
x5679[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5677,x706,1,x5679, x1244, 1, x706,1));
arrayFill<<<28, 512>>>(x1244, 0.0f, 512);
float* x5683 = (float*)myMalloc(1 * sizeof(float));;
x5683[0] = 1.0f;
float* x5685 = (float*)myMalloc(1 * sizeof(float));;
x5685[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5683,x709,1,x5685, x1245, 1, x709,1));
arrayFill<<<28, 512>>>(x1245, 0.0f, 512);
float* x5689 = (float*)myMalloc(1 * sizeof(float));;
x5689[0] = 1.0f;
float* x5691 = (float*)myMalloc(1 * sizeof(float));;
x5691[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,512,x5689,x712,1024,x5691, x1246, 1024, x712,1024));
arrayFill<<<28, 512>>>(x1246, 0.0f, 524288);
float* x5695 = (float*)myMalloc(1 * sizeof(float));;
x5695[0] = 1.0f;
float* x5697 = (float*)myMalloc(1 * sizeof(float));;
x5697[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5695,x715,1,x5697, x1247, 1, x715,1));
arrayFill<<<28, 512>>>(x1247, 0.0f, 1024);
float* x5701 = (float*)myMalloc(1 * sizeof(float));;
x5701[0] = 1.0f;
float* x5703 = (float*)myMalloc(1 * sizeof(float));;
x5703[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5701,x718,1,x5703, x1248, 1, x718,1));
arrayFill<<<28, 512>>>(x1248, 0.0f, 256);
float* x5707 = (float*)myMalloc(1 * sizeof(float));;
x5707[0] = 1.0f;
float* x5709 = (float*)myMalloc(1 * sizeof(float));;
x5709[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5707,x721,1,x5709, x1249, 1, x721,1));
arrayFill<<<28, 512>>>(x1249, 0.0f, 64);
float* x5713 = (float*)myMalloc(1 * sizeof(float));;
x5713[0] = 1.0f;
float* x5715 = (float*)myMalloc(1 * sizeof(float));;
x5715[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5713,x724,1,x5715, x1250, 1, x724,1));
arrayFill<<<28, 512>>>(x1250, 0.0f, 1024);
float* x5719 = (float*)myMalloc(1 * sizeof(float));;
x5719[0] = 1.0f;
float* x5721 = (float*)myMalloc(1 * sizeof(float));;
x5721[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5719,x727,1,x5721, x1251, 1, x727,1));
arrayFill<<<28, 512>>>(x1251, 0.0f, 2048);
float* x5725 = (float*)myMalloc(1 * sizeof(float));;
x5725[0] = 1.0f;
float* x5727 = (float*)myMalloc(1 * sizeof(float));;
x5727[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5725,x730,1,x5727, x1252, 1, x730,1));
arrayFill<<<28, 512>>>(x1252, 0.0f, 512);
float* x5731 = (float*)myMalloc(1 * sizeof(float));;
x5731[0] = 1.0f;
float* x5733 = (float*)myMalloc(1 * sizeof(float));;
x5733[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5731,x733,1,x5733, x1253, 1, x733,1));
arrayFill<<<28, 512>>>(x1253, 0.0f, 1024);
float* x5737 = (float*)myMalloc(1 * sizeof(float));;
x5737[0] = 1.0f;
float* x5739 = (float*)myMalloc(1 * sizeof(float));;
x5739[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5737,x736,1,x5739, x1254, 1, x736,1));
arrayFill<<<28, 512>>>(x1254, 0.0f, 512);
float* x5743 = (float*)myMalloc(1 * sizeof(float));;
x5743[0] = 1.0f;
float* x5745 = (float*)myMalloc(1 * sizeof(float));;
x5745[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5743,x739,1,x5745, x1255, 1, x739,1));
arrayFill<<<28, 512>>>(x1255, 0.0f, 128);
float* x5749 = (float*)myMalloc(1 * sizeof(float));;
x5749[0] = 1.0f;
float* x5751 = (float*)myMalloc(1 * sizeof(float));;
x5751[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5749,x742,1,x5751, x1256, 1, x742,1));
arrayFill<<<28, 512>>>(x1256, 0.0f, 512);
float* x5755 = (float*)myMalloc(1 * sizeof(float));;
x5755[0] = 1.0f;
float* x5757 = (float*)myMalloc(1 * sizeof(float));;
x5757[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,64,x5755,x745,256,x5757, x1257, 256, x745,256));
arrayFill<<<28, 512>>>(x1257, 0.0f, 16384);
float* x5761 = (float*)myMalloc(1 * sizeof(float));;
x5761[0] = 1.0f;
float* x5763 = (float*)myMalloc(1 * sizeof(float));;
x5763[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x5761,x748,1024,x5763, x1258, 1024, x748,1024));
arrayFill<<<28, 512>>>(x1258, 0.0f, 262144);
float* x5767 = (float*)myMalloc(1 * sizeof(float));;
x5767[0] = 1.0f;
float* x5769 = (float*)myMalloc(1 * sizeof(float));;
x5769[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 27,64,x5767,x751,27,x5769, x1259, 27, x751,27));
arrayFill<<<28, 512>>>(x1259, 0.0f, 1728);
float* x5773 = (float*)myMalloc(1 * sizeof(float));;
x5773[0] = 1.0f;
float* x5775 = (float*)myMalloc(1 * sizeof(float));;
x5775[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5773,x754,1,x5775, x1260, 1, x754,1));
arrayFill<<<28, 512>>>(x1260, 0.0f, 64);
float* x5779 = (float*)myMalloc(1 * sizeof(float));;
x5779[0] = 1.0f;
float* x5781 = (float*)myMalloc(1 * sizeof(float));;
x5781[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5779,x757,1,x5781, x1261, 1, x757,1));
arrayFill<<<28, 512>>>(x1261, 0.0f, 512);
float* x5785 = (float*)myMalloc(1 * sizeof(float));;
x5785[0] = 1.0f;
float* x5787 = (float*)myMalloc(1 * sizeof(float));;
x5787[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 4608,512,x5785,x760,4608,x5787, x1262, 4608, x760,4608));
arrayFill<<<28, 512>>>(x1262, 0.0f, 2359296);
float* x5791 = (float*)myMalloc(1 * sizeof(float));;
x5791[0] = 1.0f;
float* x5793 = (float*)myMalloc(1 * sizeof(float));;
x5793[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5791,x763,1,x5793, x1263, 1, x763,1));
arrayFill<<<28, 512>>>(x1263, 0.0f, 512);
float* x5797 = (float*)myMalloc(1 * sizeof(float));;
x5797[0] = 1.0f;
float* x5799 = (float*)myMalloc(1 * sizeof(float));;
x5799[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5797,x766,1,x5799, x1264, 1, x766,1));
arrayFill<<<28, 512>>>(x1264, 0.0f, 256);
float* x5803 = (float*)myMalloc(1 * sizeof(float));;
x5803[0] = 1.0f;
float* x5805 = (float*)myMalloc(1 * sizeof(float));;
x5805[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5803,x769,1,x5805, x1265, 1, x769,1));
arrayFill<<<28, 512>>>(x1265, 0.0f, 64);
float* x5809 = (float*)myMalloc(1 * sizeof(float));;
x5809[0] = 1.0f;
float* x5811 = (float*)myMalloc(1 * sizeof(float));;
x5811[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5809,x772,1,x5811, x1266, 1, x772,1));
arrayFill<<<28, 512>>>(x1266, 0.0f, 512);
float* x5815 = (float*)myMalloc(1 * sizeof(float));;
x5815[0] = 1.0f;
float* x5817 = (float*)myMalloc(1 * sizeof(float));;
x5817[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5815,x775,1,x5817, x1267, 1, x775,1));
arrayFill<<<28, 512>>>(x1267, 0.0f, 512);
float* x5821 = (float*)myMalloc(1 * sizeof(float));;
x5821[0] = 1.0f;
float* x5823 = (float*)myMalloc(1 * sizeof(float));;
x5823[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5821,x778,1,x5823, x1268, 1, x778,1));
arrayFill<<<28, 512>>>(x1268, 0.0f, 1024);
float* x5827 = (float*)myMalloc(1 * sizeof(float));;
x5827[0] = 1.0f;
float* x5829 = (float*)myMalloc(1 * sizeof(float));;
x5829[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5827,x781,64,x5829, x1269, 64, x781,64));
arrayFill<<<28, 512>>>(x1269, 0.0f, 16384);
float* x5833 = (float*)myMalloc(1 * sizeof(float));;
x5833[0] = 1.0f;
float* x5835 = (float*)myMalloc(1 * sizeof(float));;
x5835[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5833,x784,1,x5835, x1270, 1, x784,1));
arrayFill<<<28, 512>>>(x1270, 0.0f, 256);
float* x5839 = (float*)myMalloc(1 * sizeof(float));;
x5839[0] = 1.0f;
float* x5841 = (float*)myMalloc(1 * sizeof(float));;
x5841[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5839,x787,1,x5841, x1271, 1, x787,1));
arrayFill<<<28, 512>>>(x1271, 0.0f, 64);
float* x5845 = (float*)myMalloc(1 * sizeof(float));;
x5845[0] = 1.0f;
float* x5847 = (float*)myMalloc(1 * sizeof(float));;
x5847[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x5845,x790,1152,x5847, x1272, 1152, x790,1152));
arrayFill<<<28, 512>>>(x1272, 0.0f, 147456);
float* x5851 = (float*)myMalloc(1 * sizeof(float));;
x5851[0] = 1.0f;
float* x5853 = (float*)myMalloc(1 * sizeof(float));;
x5853[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5851,x793,1,x5853, x1273, 1, x793,1));
arrayFill<<<28, 512>>>(x1273, 0.0f, 256);
float* x5857 = (float*)myMalloc(1 * sizeof(float));;
x5857[0] = 1.0f;
float* x5859 = (float*)myMalloc(1 * sizeof(float));;
x5859[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5857,x796,1,x5859, x1274, 1, x796,1));
arrayFill<<<28, 512>>>(x1274, 0.0f, 512);
float* x5863 = (float*)myMalloc(1 * sizeof(float));;
x5863[0] = 1.0f;
float* x5865 = (float*)myMalloc(1 * sizeof(float));;
x5865[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5863,x799,1,x5865, x1275, 1, x799,1));
arrayFill<<<28, 512>>>(x1275, 0.0f, 256);
float* x5869 = (float*)myMalloc(1 * sizeof(float));;
x5869[0] = 1.0f;
float* x5871 = (float*)myMalloc(1 * sizeof(float));;
x5871[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5869,x802,1,x5871, x1276, 1, x802,1));
arrayFill<<<28, 512>>>(x1276, 0.0f, 512);
float* x5875 = (float*)myMalloc(1 * sizeof(float));;
x5875[0] = 1.0f;
float* x5877 = (float*)myMalloc(1 * sizeof(float));;
x5877[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5875,x805,1,x5877, x1277, 1, x805,1));
arrayFill<<<28, 512>>>(x1277, 0.0f, 128);
float* x5881 = (float*)myMalloc(1 * sizeof(float));;
x5881[0] = 1.0f;
float* x5883 = (float*)myMalloc(1 * sizeof(float));;
x5883[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,64,x5881,x808,256,x5883, x1278, 256, x808,256));
arrayFill<<<28, 512>>>(x1278, 0.0f, 16384);
float* x5887 = (float*)myMalloc(1 * sizeof(float));;
x5887[0] = 1.0f;
float* x5889 = (float*)myMalloc(1 * sizeof(float));;
x5889[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5887,x811,1,x5889, x1279, 1, x811,1));
arrayFill<<<28, 512>>>(x1279, 0.0f, 128);
float* x5893 = (float*)myMalloc(1 * sizeof(float));;
x5893[0] = 1.0f;
float* x5895 = (float*)myMalloc(1 * sizeof(float));;
x5895[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5893,x814,1,x5895, x1280, 1, x814,1));
arrayFill<<<28, 512>>>(x1280, 0.0f, 2048);
float* x5899 = (float*)myMalloc(1 * sizeof(float));;
x5899[0] = 1.0f;
float* x5901 = (float*)myMalloc(1 * sizeof(float));;
x5901[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5899,x817,1,x5901, x1281, 1, x817,1));
arrayFill<<<28, 512>>>(x1281, 0.0f, 256);
float* x5905 = (float*)myMalloc(1 * sizeof(float));;
x5905[0] = 1.0f;
float* x5907 = (float*)myMalloc(1 * sizeof(float));;
x5907[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x5905,x820,2304,x5907, x1282, 2304, x820,2304));
arrayFill<<<28, 512>>>(x1282, 0.0f, 589824);
float* x5911 = (float*)myMalloc(1 * sizeof(float));;
x5911[0] = 1.0f;
float* x5913 = (float*)myMalloc(1 * sizeof(float));;
x5913[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5911,x823,1,x5913, x1283, 1, x823,1));
arrayFill<<<28, 512>>>(x1283, 0.0f, 256);
float* x5917 = (float*)myMalloc(1 * sizeof(float));;
x5917[0] = 1.0f;
float* x5919 = (float*)myMalloc(1 * sizeof(float));;
x5919[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5917,x826,1,x5919, x1284, 1, x826,1));
arrayFill<<<28, 512>>>(x1284, 0.0f, 128);
float* x5923 = (float*)myMalloc(1 * sizeof(float));;
x5923[0] = 1.0f;
float* x5925 = (float*)myMalloc(1 * sizeof(float));;
x5925[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5923,x829,1,x5925, x1285, 1, x829,1));
arrayFill<<<28, 512>>>(x1285, 0.0f, 256);
float* x5929 = (float*)myMalloc(1 * sizeof(float));;
x5929[0] = 1.0f;
float* x5931 = (float*)myMalloc(1 * sizeof(float));;
x5931[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5929,x832,1,x5931, x1286, 1, x832,1));
arrayFill<<<28, 512>>>(x1286, 0.0f, 64);
float* x5935 = (float*)myMalloc(1 * sizeof(float));;
x5935[0] = 1.0f;
float* x5937 = (float*)myMalloc(1 * sizeof(float));;
x5937[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,256,x5935,x835,512,x5937, x1287, 512, x835,512));
arrayFill<<<28, 512>>>(x1287, 0.0f, 131072);
float* x5941 = (float*)myMalloc(1 * sizeof(float));;
x5941[0] = 1.0f;
float* x5943 = (float*)myMalloc(1 * sizeof(float));;
x5943[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5941,x838,1,x5943, x1288, 1, x838,1));
arrayFill<<<28, 512>>>(x1288, 0.0f, 2048);
float* x5947 = (float*)myMalloc(1 * sizeof(float));;
x5947[0] = 1.0f;
float* x5949 = (float*)myMalloc(1 * sizeof(float));;
x5949[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5947,x841,1,x5949, x1289, 1, x841,1));
arrayFill<<<28, 512>>>(x1289, 0.0f, 1024);
float* x5953 = (float*)myMalloc(1 * sizeof(float));;
x5953[0] = 1.0f;
float* x5955 = (float*)myMalloc(1 * sizeof(float));;
x5955[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5953,x844,1,x5955, x1290, 1, x844,1));
arrayFill<<<28, 512>>>(x1290, 0.0f, 1024);
float* x5959 = (float*)myMalloc(1 * sizeof(float));;
x5959[0] = 1.0f;
float* x5961 = (float*)myMalloc(1 * sizeof(float));;
x5961[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5959,x847,1,x5961, x1291, 1, x847,1));
arrayFill<<<28, 512>>>(x1291, 0.0f, 256);
float* x5965 = (float*)myMalloc(1 * sizeof(float));;
x5965[0] = 1.0f;
float* x5967 = (float*)myMalloc(1 * sizeof(float));;
x5967[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5965,x850,1,x5967, x1292, 1, x850,1));
arrayFill<<<28, 512>>>(x1292, 0.0f, 256);
float* x5971 = (float*)myMalloc(1 * sizeof(float));;
x5971[0] = 1.0f;
float* x5973 = (float*)myMalloc(1 * sizeof(float));;
x5973[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5971,x853,1,x5973, x1293, 1, x853,1));
arrayFill<<<28, 512>>>(x1293, 0.0f, 256);
float* x5977 = (float*)myMalloc(1 * sizeof(float));;
x5977[0] = 1.0f;
float* x5979 = (float*)myMalloc(1 * sizeof(float));;
x5979[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5977,x856,1,x5979, x1294, 1, x856,1));
arrayFill<<<28, 512>>>(x1294, 0.0f, 64);
float* x5983 = (float*)myMalloc(1 * sizeof(float));;
x5983[0] = 1.0f;
float* x5985 = (float*)myMalloc(1 * sizeof(float));;
x5985[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5983,x859,1,x5985, x1295, 1, x859,1));
arrayFill<<<28, 512>>>(x1295, 0.0f, 1024);
float* x5989 = (float*)myMalloc(1 * sizeof(float));;
x5989[0] = 1.0f;
float* x5991 = (float*)myMalloc(1 * sizeof(float));;
x5991[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5989,x862,1,x5991, x1296, 1, x862,1));
arrayFill<<<28, 512>>>(x1296, 0.0f, 256);
float* x5995 = (float*)myMalloc(1 * sizeof(float));;
x5995[0] = 1.0f;
float* x5997 = (float*)myMalloc(1 * sizeof(float));;
x5997[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5995,x865,1,x5997, x1297, 1, x865,1));
arrayFill<<<28, 512>>>(x1297, 0.0f, 128);
float* x6001 = (float*)myMalloc(1 * sizeof(float));;
x6001[0] = 1.0f;
float* x6003 = (float*)myMalloc(1 * sizeof(float));;
x6003[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x6001,x868,1152,x6003, x1298, 1152, x868,1152));
arrayFill<<<28, 512>>>(x1298, 0.0f, 147456);
float* x6007 = (float*)myMalloc(1 * sizeof(float));;
x6007[0] = 1.0f;
float* x6009 = (float*)myMalloc(1 * sizeof(float));;
x6009[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6007,x871,1,x6009, x1299, 1, x871,1));
arrayFill<<<28, 512>>>(x1299, 0.0f, 256);
float* x6013 = (float*)myMalloc(1 * sizeof(float));;
x6013[0] = 1.0f;
float* x6015 = (float*)myMalloc(1 * sizeof(float));;
x6015[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6013,x874,1,x6015, x1300, 1, x874,1));
arrayFill<<<28, 512>>>(x1300, 0.0f, 2048);
float* x6019 = (float*)myMalloc(1 * sizeof(float));;
x6019[0] = 1.0f;
float* x6021 = (float*)myMalloc(1 * sizeof(float));;
x6021[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6019,x877,1,x6021, x1301, 1, x877,1));
arrayFill<<<28, 512>>>(x1301, 0.0f, 512);
float* x6025 = (float*)myMalloc(1 * sizeof(float));;
x6025[0] = 1.0f;
float* x6027 = (float*)myMalloc(1 * sizeof(float));;
x6027[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6025,x880,1,x6027, x1302, 1, x880,1));
arrayFill<<<28, 512>>>(x1302, 0.0f, 512);
float* x6031 = (float*)myMalloc(1 * sizeof(float));;
x6031[0] = 1.0f;
float* x6033 = (float*)myMalloc(1 * sizeof(float));;
x6033[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,128,x6031,x883,512,x6033, x1303, 512, x883,512));
arrayFill<<<28, 512>>>(x1303, 0.0f, 65536);
float* x6037 = (float*)myMalloc(1 * sizeof(float));;
x6037[0] = 1.0f;
float* x6039 = (float*)myMalloc(1 * sizeof(float));;
x6039[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6037,x886,1,x6039, x1304, 1, x886,1));
arrayFill<<<28, 512>>>(x1304, 0.0f, 256);
float* x6043 = (float*)myMalloc(1 * sizeof(float));;
x6043[0] = 1.0f;
float* x6045 = (float*)myMalloc(1 * sizeof(float));;
x6045[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6043,x889,1,x6045, x1305, 1, x889,1));
arrayFill<<<28, 512>>>(x1305, 0.0f, 256);
float* x6049 = (float*)myMalloc(1 * sizeof(float));;
x6049[0] = 1.0f;
float* x6051 = (float*)myMalloc(1 * sizeof(float));;
x6051[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6049,x892,1,x6051, x1306, 1, x892,1));
arrayFill<<<28, 512>>>(x1306, 0.0f, 256);
float* x6055 = (float*)myMalloc(1 * sizeof(float));;
x6055[0] = 1.0f;
float* x6057 = (float*)myMalloc(1 * sizeof(float));;
x6057[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6055,x895,1,x6057, x1307, 1, x895,1));
arrayFill<<<28, 512>>>(x1307, 0.0f, 256);
float* x6061 = (float*)myMalloc(1 * sizeof(float));;
x6061[0] = 1.0f;
float* x6063 = (float*)myMalloc(1 * sizeof(float));;
x6063[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6061,x898,1,x6063, x1308, 1, x898,1));
arrayFill<<<28, 512>>>(x1308, 0.0f, 512);
float* x6067 = (float*)myMalloc(1 * sizeof(float));;
x6067[0] = 1.0f;
float* x6069 = (float*)myMalloc(1 * sizeof(float));;
x6069[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6067,x901,1,x6069, x1309, 1, x901,1));
arrayFill<<<28, 512>>>(x1309, 0.0f, 512);
float* x6073 = (float*)myMalloc(1 * sizeof(float));;
x6073[0] = 1.0f;
float* x6075 = (float*)myMalloc(1 * sizeof(float));;
x6075[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6073,x904,1,x6075, x1310, 1, x904,1));
arrayFill<<<28, 512>>>(x1310, 0.0f, 256);
float* x6079 = (float*)myMalloc(1 * sizeof(float));;
x6079[0] = 1.0f;
float* x6081 = (float*)myMalloc(1 * sizeof(float));;
x6081[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6079,x907,1,x6081, x1311, 1, x907,1));
arrayFill<<<28, 512>>>(x1311, 0.0f, 128);
float* x6085 = (float*)myMalloc(1 * sizeof(float));;
x6085[0] = 1.0f;
float* x6087 = (float*)myMalloc(1 * sizeof(float));;
x6087[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6085,x910,1,x6087, x1312, 1, x910,1));
arrayFill<<<28, 512>>>(x1312, 0.0f, 512);
float* x6091 = (float*)myMalloc(1 * sizeof(float));;
x6091[0] = 1.0f;
float* x6093 = (float*)myMalloc(1 * sizeof(float));;
x6093[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6091,x913,1,x6093, x1313, 1, x913,1));
arrayFill<<<28, 512>>>(x1313, 0.0f, 64);
float* x6097 = (float*)myMalloc(1 * sizeof(float));;
x6097[0] = 1.0f;
float* x6099 = (float*)myMalloc(1 * sizeof(float));;
x6099[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6097,x916,1,x6099, x1314, 1, x916,1));
arrayFill<<<28, 512>>>(x1314, 0.0f, 512);
float* x6103 = (float*)myMalloc(1 * sizeof(float));;
x6103[0] = 1.0f;
float* x6105 = (float*)myMalloc(1 * sizeof(float));;
x6105[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6103,x919,1,x6105, x1315, 1, x919,1));
arrayFill<<<28, 512>>>(x1315, 0.0f, 64);
float* x6109 = (float*)myMalloc(1 * sizeof(float));;
x6109[0] = 1.0f;
float* x6111 = (float*)myMalloc(1 * sizeof(float));;
x6111[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6109,x922,1,x6111, x1316, 1, x922,1));
arrayFill<<<28, 512>>>(x1316, 0.0f, 1024);
float* x6115 = (float*)myMalloc(1 * sizeof(float));;
x6115[0] = 1.0f;
float* x6117 = (float*)myMalloc(1 * sizeof(float));;
x6117[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6115,x925,1,x6117, x1317, 1, x925,1));
arrayFill<<<28, 512>>>(x1317, 0.0f, 512);
float* x6121 = (float*)myMalloc(1 * sizeof(float));;
x6121[0] = 1.0f;
float* x6123 = (float*)myMalloc(1 * sizeof(float));;
x6123[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6121,x928,1,x6123, x1318, 1, x928,1));
arrayFill<<<28, 512>>>(x1318, 0.0f, 1024);
float* x6127 = (float*)myMalloc(1 * sizeof(float));;
x6127[0] = 1.0f;
float* x6129 = (float*)myMalloc(1 * sizeof(float));;
x6129[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,2048,x6127,x931,512,x6129, x1319, 512, x931,512));
arrayFill<<<28, 512>>>(x1319, 0.0f, 1048576);
float* x6133 = (float*)myMalloc(1 * sizeof(float));;
x6133[0] = 1.0f;
float* x6135 = (float*)myMalloc(1 * sizeof(float));;
x6135[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6133,x934,1,x6135, x1320, 1, x934,1));
arrayFill<<<28, 512>>>(x1320, 0.0f, 512);
float* x6139 = (float*)myMalloc(1 * sizeof(float));;
x6139[0] = 1.0f;
float* x6141 = (float*)myMalloc(1 * sizeof(float));;
x6141[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,2048,x6139,x937,1024,x6141, x1321, 1024, x937,1024));
arrayFill<<<28, 512>>>(x1321, 0.0f, 2097152);
float* x6145 = (float*)myMalloc(1 * sizeof(float));;
x6145[0] = 1.0f;
float* x6147 = (float*)myMalloc(1 * sizeof(float));;
x6147[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2048,512,x6145,x940,2048,x6147, x1322, 2048, x940,2048));
arrayFill<<<28, 512>>>(x1322, 0.0f, 1048576);
float* x6151 = (float*)myMalloc(1 * sizeof(float));;
x6151[0] = 1.0f;
float* x6153 = (float*)myMalloc(1 * sizeof(float));;
x6153[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6151,x943,1,x6153, x1323, 1, x943,1));
arrayFill<<<28, 512>>>(x1323, 0.0f, 1024);
float* x6157 = (float*)myMalloc(1 * sizeof(float));;
x6157[0] = 1.0f;
float* x6159 = (float*)myMalloc(1 * sizeof(float));;
x6159[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6157,x946,1,x6159, x1324, 1, x946,1));
arrayFill<<<28, 512>>>(x1324, 0.0f, 128);
float* x6163 = (float*)myMalloc(1 * sizeof(float));;
x6163[0] = 1.0f;
float* x6165 = (float*)myMalloc(1 * sizeof(float));;
x6165[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x6163,x949,1024,x6165, x1325, 1024, x949,1024));
arrayFill<<<28, 512>>>(x1325, 0.0f, 262144);
float* x6169 = (float*)myMalloc(1 * sizeof(float));;
x6169[0] = 1.0f;
float* x6171 = (float*)myMalloc(1 * sizeof(float));;
x6171[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6169,x952,1,x6171, x1326, 1, x952,1));
arrayFill<<<28, 512>>>(x1326, 0.0f, 256);
float* x6175 = (float*)myMalloc(1 * sizeof(float));;
x6175[0] = 1.0f;
float* x6177 = (float*)myMalloc(1 * sizeof(float));;
x6177[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6175,x955,1,x6177, x1327, 1, x955,1));
arrayFill<<<28, 512>>>(x1327, 0.0f, 1024);
float* x6181 = (float*)myMalloc(1 * sizeof(float));;
x6181[0] = 1.0f;
float* x6183 = (float*)myMalloc(1 * sizeof(float));;
x6183[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x6181,x958,256,x6183, x1328, 256, x958,256));
arrayFill<<<28, 512>>>(x1328, 0.0f, 262144);
float* x6187 = (float*)myMalloc(1 * sizeof(float));;
x6187[0] = 1.0f;
float* x6189 = (float*)myMalloc(1 * sizeof(float));;
x6189[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6187,x961,1,x6189, x1329, 1, x961,1));
arrayFill<<<28, 512>>>(x1329, 0.0f, 128);
float* x6193 = (float*)myMalloc(1 * sizeof(float));;
x6193[0] = 1.0f;
float* x6195 = (float*)myMalloc(1 * sizeof(float));;
x6195[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6193,x964,1,x6195, x1330, 1, x964,1));
arrayFill<<<28, 512>>>(x1330, 0.0f, 512);
float* x6199 = (float*)myMalloc(1 * sizeof(float));;
x6199[0] = 1.0f;
float* x6201 = (float*)myMalloc(1 * sizeof(float));;
x6201[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6199,x967,1,x6201, x1331, 1, x967,1));
arrayFill<<<28, 512>>>(x1331, 0.0f, 512);
float* x6205 = (float*)myMalloc(1 * sizeof(float));;
x6205[0] = 1.0f;
float* x6207 = (float*)myMalloc(1 * sizeof(float));;
x6207[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6205,x970,1,x6207, x1332, 1, x970,1));
arrayFill<<<28, 512>>>(x1332, 0.0f, 128);
float* x6211 = (float*)myMalloc(1 * sizeof(float));;
x6211[0] = 1.0f;
float* x6213 = (float*)myMalloc(1 * sizeof(float));;
x6213[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x6211,x973,2304,x6213, x1333, 2304, x973,2304));
arrayFill<<<28, 512>>>(x1333, 0.0f, 589824);
float* x6217 = (float*)myMalloc(1 * sizeof(float));;
x6217[0] = 1.0f;
float* x6219 = (float*)myMalloc(1 * sizeof(float));;
x6219[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2048,10,x6217,x976,2048,x6219, x1334, 2048, x976,2048));
arrayFill<<<28, 512>>>(x1334, 0.0f, 20480);
float* x6223 = (float*)myMalloc(1 * sizeof(float));;
x6223[0] = 1.0f;
float* x6225 = (float*)myMalloc(1 * sizeof(float));;
x6225[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6223,x979,1,x6225, x1335, 1, x979,1));
arrayFill<<<28, 512>>>(x1335, 0.0f, 256);
float* x6229 = (float*)myMalloc(1 * sizeof(float));;
x6229[0] = 1.0f;
float* x6231 = (float*)myMalloc(1 * sizeof(float));;
x6231[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6229,x982,1,x6231, x1336, 1, x982,1));
arrayFill<<<28, 512>>>(x1336, 0.0f, 256);
float* x6235 = (float*)myMalloc(1 * sizeof(float));;
x6235[0] = 1.0f;
float* x6237 = (float*)myMalloc(1 * sizeof(float));;
x6237[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6235,x985,1,x6237, x1337, 1, x985,1));
arrayFill<<<28, 512>>>(x1337, 0.0f, 256);
float* x6241 = (float*)myMalloc(1 * sizeof(float));;
x6241[0] = 1.0f;
float* x6243 = (float*)myMalloc(1 * sizeof(float));;
x6243[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6241,x988,1,x6243, x1338, 1, x988,1));
arrayFill<<<28, 512>>>(x1338, 0.0f, 1024);
float* x6247 = (float*)myMalloc(1 * sizeof(float));;
x6247[0] = 1.0f;
float* x6249 = (float*)myMalloc(1 * sizeof(float));;
x6249[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6247,x991,1,x6249, x1339, 1, x991,1));
arrayFill<<<28, 512>>>(x1339, 0.0f, 1024);
float* x6253 = (float*)myMalloc(1 * sizeof(float));;
x6253[0] = 1.0f;
float* x6255 = (float*)myMalloc(1 * sizeof(float));;
x6255[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,64,x6253,x994,64,x6255, x1340, 64, x994,64));
arrayFill<<<28, 512>>>(x1340, 0.0f, 4096);
float* x6259 = (float*)myMalloc(1 * sizeof(float));;
x6259[0] = 1.0f;
float* x6261 = (float*)myMalloc(1 * sizeof(float));;
x6261[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6259,x997,1,x6261, x1341, 1, x997,1));
arrayFill<<<28, 512>>>(x1341, 0.0f, 512);
float* x6265 = (float*)myMalloc(1 * sizeof(float));;
x6265[0] = 1.0f;
float* x6267 = (float*)myMalloc(1 * sizeof(float));;
x6267[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x6265,x1000,1152,x6267, x1342, 1152, x1000,1152));
arrayFill<<<28, 512>>>(x1342, 0.0f, 147456);
float* x6271 = (float*)myMalloc(1 * sizeof(float));;
x6271[0] = 1.0f;
float* x6273 = (float*)myMalloc(1 * sizeof(float));;
x6273[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6271,x1003,1,x6273, x1343, 1, x1003,1));
arrayFill<<<28, 512>>>(x1343, 0.0f, 128);
float* x6277 = (float*)myMalloc(1 * sizeof(float));;
x6277[0] = 1.0f;
float* x6279 = (float*)myMalloc(1 * sizeof(float));;
x6279[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6277,x1006,1,x6279, x1344, 1, x1006,1));
arrayFill<<<28, 512>>>(x1344, 0.0f, 256);
float* x6283 = (float*)myMalloc(1 * sizeof(float));;
x6283[0] = 1.0f;
float* x6285 = (float*)myMalloc(1 * sizeof(float));;
x6285[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6283,x1009,1,x6285, x1345, 1, x1009,1));
arrayFill<<<28, 512>>>(x1345, 0.0f, 1024);
float* x6289 = (float*)myMalloc(1 * sizeof(float));;
x6289[0] = 1.0f;
float* x6291 = (float*)myMalloc(1 * sizeof(float));;
x6291[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6289,x1012,1,x6291, x1346, 1, x1012,1));
arrayFill<<<28, 512>>>(x1346, 0.0f, 2048);
float* x6295 = (float*)myMalloc(1 * sizeof(float));;
x6295[0] = 1.0f;
float* x6297 = (float*)myMalloc(1 * sizeof(float));;
x6297[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6295,x1015,1,x6297, x1347, 1, x1015,1));
arrayFill<<<28, 512>>>(x1347, 0.0f, 256);
float* x6301 = (float*)myMalloc(1 * sizeof(float));;
x6301[0] = 1.0f;
float* x6303 = (float*)myMalloc(1 * sizeof(float));;
x6303[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6301,x1018,1,x6303, x1348, 1, x1018,1));
arrayFill<<<28, 512>>>(x1348, 0.0f, 256);
float* x6307 = (float*)myMalloc(1 * sizeof(float));;
x6307[0] = 1.0f;
float* x6309 = (float*)myMalloc(1 * sizeof(float));;
x6309[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6307,x1021,1,x6309, x1349, 1, x1021,1));
arrayFill<<<28, 512>>>(x1349, 0.0f, 128);
float* x6313 = (float*)myMalloc(1 * sizeof(float));;
x6313[0] = 1.0f;
float* x6315 = (float*)myMalloc(1 * sizeof(float));;
x6315[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6313,x1024,1,x6315, x1350, 1, x1024,1));
arrayFill<<<28, 512>>>(x1350, 0.0f, 256);
float* x6319 = (float*)myMalloc(1 * sizeof(float));;
x6319[0] = 1.0f;
float* x6321 = (float*)myMalloc(1 * sizeof(float));;
x6321[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6319,x1027,1,x6321, x1351, 1, x1027,1));
arrayFill<<<28, 512>>>(x1351, 0.0f, 64);
float* x6325 = (float*)myMalloc(1 * sizeof(float));;
x6325[0] = 1.0f;
float* x6327 = (float*)myMalloc(1 * sizeof(float));;
x6327[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6325,x1030,1,x6327, x1352, 1, x1030,1));
arrayFill<<<28, 512>>>(x1352, 0.0f, 2048);
float* x6331 = (float*)myMalloc(1 * sizeof(float));;
x6331[0] = 1.0f;
float* x6333 = (float*)myMalloc(1 * sizeof(float));;
x6333[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6331,x1033,1,x6333, x1353, 1, x1033,1));
arrayFill<<<28, 512>>>(x1353, 0.0f, 512);
float* x6337 = (float*)myMalloc(1 * sizeof(float));;
x6337[0] = 1.0f;
float* x6339 = (float*)myMalloc(1 * sizeof(float));;
x6339[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6337,x1036,1,x6339, x1354, 1, x1036,1));
arrayFill<<<28, 512>>>(x1354, 0.0f, 256);
float* x6343 = (float*)myMalloc(1 * sizeof(float));;
x6343[0] = 1.0f;
float* x6345 = (float*)myMalloc(1 * sizeof(float));;
x6345[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6343,x1039,1,x6345, x1355, 1, x1039,1));
arrayFill<<<28, 512>>>(x1355, 0.0f, 1024);
float* x6349 = (float*)myMalloc(1 * sizeof(float));;
x6349[0] = 1.0f;
float* x6351 = (float*)myMalloc(1 * sizeof(float));;
x6351[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x6349,x1042,2304,x6351, x1356, 2304, x1042,2304));
arrayFill<<<28, 512>>>(x1356, 0.0f, 589824);
float* x6355 = (float*)myMalloc(1 * sizeof(float));;
x6355[0] = 1.0f;
float* x6357 = (float*)myMalloc(1 * sizeof(float));;
x6357[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6355,x1045,1,x6357, x1357, 1, x1045,1));
arrayFill<<<28, 512>>>(x1357, 0.0f, 256);
float* x6361 = (float*)myMalloc(1 * sizeof(float));;
x6361[0] = 1.0f;
float* x6363 = (float*)myMalloc(1 * sizeof(float));;
x6363[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6361,x1048,1,x6363, x1358, 1, x1048,1));
arrayFill<<<28, 512>>>(x1358, 0.0f, 64);
float* x6367 = (float*)myMalloc(1 * sizeof(float));;
x6367[0] = 1.0f;
float* x6369 = (float*)myMalloc(1 * sizeof(float));;
x6369[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6367,x1051,1,x6369, x1359, 1, x1051,1));
arrayFill<<<28, 512>>>(x1359, 0.0f, 128);
float* x6373 = (float*)myMalloc(1 * sizeof(float));;
x6373[0] = 1.0f;
float* x6375 = (float*)myMalloc(1 * sizeof(float));;
x6375[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6373,x1054,1,x6375, x1360, 1, x1054,1));
arrayFill<<<28, 512>>>(x1360, 0.0f, 256);
float* x6379 = (float*)myMalloc(1 * sizeof(float));;
x6379[0] = 1.0f;
float* x6381 = (float*)myMalloc(1 * sizeof(float));;
x6381[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6379,x1057,1,x6381, x1361, 1, x1057,1));
arrayFill<<<28, 512>>>(x1361, 0.0f, 256);
float* x6385 = (float*)myMalloc(1 * sizeof(float));;
x6385[0] = 1.0f;
float* x6387 = (float*)myMalloc(1 * sizeof(float));;
x6387[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6385,x1060,1,x6387, x1362, 1, x1060,1));
arrayFill<<<28, 512>>>(x1362, 0.0f, 512);
float* x6391 = (float*)myMalloc(1 * sizeof(float));;
x6391[0] = 1.0f;
float* x6393 = (float*)myMalloc(1 * sizeof(float));;
x6393[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,128,x6391,x1063,512,x6393, x1363, 512, x1063,512));
arrayFill<<<28, 512>>>(x1363, 0.0f, 65536);
float* x6397 = (float*)myMalloc(1 * sizeof(float));;
x6397[0] = 1.0f;
float* x6399 = (float*)myMalloc(1 * sizeof(float));;
x6399[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6397,x1066,1,x6399, x1364, 1, x1066,1));
arrayFill<<<28, 512>>>(x1364, 0.0f, 64);
float* x6403 = (float*)myMalloc(1 * sizeof(float));;
x6403[0] = 1.0f;
float* x6405 = (float*)myMalloc(1 * sizeof(float));;
x6405[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,512,x6403,x1069,256,x6405, x1365, 256, x1069,256));
arrayFill<<<28, 512>>>(x1365, 0.0f, 131072);
float* x6409 = (float*)myMalloc(1 * sizeof(float));;
x6409[0] = 1.0f;
float* x6411 = (float*)myMalloc(1 * sizeof(float));;
x6411[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6409,x1072,1,x6411, x1366, 1, x1072,1));
arrayFill<<<28, 512>>>(x1366, 0.0f, 256);
float* x6415 = (float*)myMalloc(1 * sizeof(float));;
x6415[0] = 1.0f;
float* x6417 = (float*)myMalloc(1 * sizeof(float));;
x6417[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6415,x1075,1,x6417, x1367, 1, x1075,1));
arrayFill<<<28, 512>>>(x1367, 0.0f, 2048);
float* x6421 = (float*)myMalloc(1 * sizeof(float));;
x6421[0] = 1.0f;
float* x6423 = (float*)myMalloc(1 * sizeof(float));;
x6423[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6421,x1078,1,x6423, x1368, 1, x1078,1));
arrayFill<<<28, 512>>>(x1368, 0.0f, 128);
float* x6427 = (float*)myMalloc(1 * sizeof(float));;
x6427[0] = 1.0f;
float* x6429 = (float*)myMalloc(1 * sizeof(float));;
x6429[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x6427,x1081,2304,x6429, x1369, 2304, x1081,2304));
arrayFill<<<28, 512>>>(x1369, 0.0f, 589824);
float* x6433 = (float*)myMalloc(1 * sizeof(float));;
x6433[0] = 1.0f;
float* x6435 = (float*)myMalloc(1 * sizeof(float));;
x6435[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6433,x1084,1,x6435, x1370, 1, x1084,1));
arrayFill<<<28, 512>>>(x1370, 0.0f, 1024);
float* x6439 = (float*)myMalloc(1 * sizeof(float));;
x6439[0] = 1.0f;
float* x6441 = (float*)myMalloc(1 * sizeof(float));;
x6441[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6439,x1087,1,x6441, x1371, 1, x1087,1));
arrayFill<<<28, 512>>>(x1371, 0.0f, 256);
float* x6445 = (float*)myMalloc(1 * sizeof(float));;
x6445[0] = 1.0f;
float* x6447 = (float*)myMalloc(1 * sizeof(float));;
x6447[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2048,512,x6445,x1090,2048,x6447, x1372, 2048, x1090,2048));
arrayFill<<<28, 512>>>(x1372, 0.0f, 1048576);
float* x6451 = (float*)myMalloc(1 * sizeof(float));;
x6451[0] = 1.0f;
float* x6453 = (float*)myMalloc(1 * sizeof(float));;
x6453[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6451,x1093,1,x6453, x1373, 1, x1093,1));
arrayFill<<<28, 512>>>(x1373, 0.0f, 128);
float* x6457 = (float*)myMalloc(1 * sizeof(float));;
x6457[0] = 1.0f;
float* x6459 = (float*)myMalloc(1 * sizeof(float));;
x6459[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6457,x1096,1,x6459, x1374, 1, x1096,1));
arrayFill<<<28, 512>>>(x1374, 0.0f, 1024);
float* x6463 = (float*)myMalloc(1 * sizeof(float));;
x6463[0] = 1.0f;
float* x6465 = (float*)myMalloc(1 * sizeof(float));;
x6465[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6463,x1099,1,x6465, x1375, 1, x1099,1));
arrayFill<<<28, 512>>>(x1375, 0.0f, 128);
float* x6469 = (float*)myMalloc(1 * sizeof(float));;
x6469[0] = 1.0f;
float* x6471 = (float*)myMalloc(1 * sizeof(float));;
x6471[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x6469,x1102,256,x6471, x1376, 256, x1102,256));
arrayFill<<<28, 512>>>(x1376, 0.0f, 262144);
float* x6475 = (float*)myMalloc(1 * sizeof(float));;
x6475[0] = 1.0f;
float* x6477 = (float*)myMalloc(1 * sizeof(float));;
x6477[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6475,x1105,1,x6477, x1377, 1, x1105,1));
arrayFill<<<28, 512>>>(x1377, 0.0f, 256);
float* x6481 = (float*)myMalloc(1 * sizeof(float));;
x6481[0] = 1.0f;
float* x6483 = (float*)myMalloc(1 * sizeof(float));;
x6483[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6481,x1108,1,x6483, x1378, 1, x1108,1));
arrayFill<<<28, 512>>>(x1378, 0.0f, 256);
float* x6487 = (float*)myMalloc(1 * sizeof(float));;
x6487[0] = 1.0f;
float* x6489 = (float*)myMalloc(1 * sizeof(float));;
x6489[0] = -0.005f;
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6487,x1111,1,x6489, x1379, 1, x1111,1));
arrayFill<<<28, 512>>>(x1379, 0.0f, 1024);
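// Progress report: every x6494-th iteration, print the current epoch, how far
// through the training data we are, and the running average loss
// (x1390 / x1396).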
int32_t x6493 = x1396 + 1;
int32_t x6495 = x6493 % x6494;
bool x6496 = x6495 == 0;
if (x6496) {
float x6501 = x1390;
double x6497 = (double)x1397;
double x6498 = 100.0 * x6497;
double x6500 = x6498 / x6499;
float x6502 = (float)x1396;
float x6503 = x6501 / x6502;
printf("Train epoch %d: [%d/%d (%.0f%%)] Average Loss: %.6f\n",x1386,x1397,x11,x6500,x6503);
fflush(stdout);
} else {
}
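// Roll back the bump allocators: reset the CPU and GPU arena pointers to the
// saved marks (x1382 / x1383) and zero the reclaimed regions so the next
// iteration reuses the same memory.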
int64_t x6508 = (long)mallocAddr;
int64_t x6509 = x6508 - x1382;
memset((void*)x1382, 0, x6509);
mallocAddr = (void*)x1382;
int64_t x6512 = (long)gpuMallocAddr;
int64_t x6513 = x6512 - x1383;
cudaMemset((void*)x1383, 0, x6513);
gpuMallocAddr = (void*)x1383;
}
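// End of epoch: record the elapsed wall-clock time for this epoch and an
// averaged loss value (x1390 / x6529).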
gettimeofday(&end_1, NULL);
timeval_subtract(&diff_1, &end_1, &begin_1);;
int64_t x6520 = ((diff_1.tv_sec * 1000000L) + (diff_1.tv_usec));
double x6521 = (double)x6520;
double x6522 = x6521 / 1000000.0;
x1381[x1386] = x6522;
int64_t x6524 = x6520 / 1000LL;
int64_t x6526 = x6520 / x6525;
printf("Training completed in %ldms (%ld us/images)\n",x6524,x6526);
float x6528 = x1390;
float x6530 = x6528 / x6529;
double x6531 = (double)x6530;
x1380[x1386] = x6531;
}
gettimeofday(&end_0, NULL);
timeval_subtract(&diff_0, &end_0, &begin_0);;
int64_t x6537 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec));
sort(x1381, x1381 + 4);
double x6543 = x1381[2];
int64_t x6544 = (long)fopen(x0, "w");
fprintf((FILE *)x6544, "unit: %s\n", "1 epoch");
for(int x6546=0; x6546 < 4; x6546++) {
double x6547 = x1380[x6546];
fprintf((FILE *)x6544, "%lf\n", x6547);
}
fprintf((FILE *)x6544, "run time: %lf %lf\n", x39, x6543);
fclose((FILE*)x6544);
// Backend cleanup.
CUBLAS_CALL(cublasDestroy(cublasHandle));
CUDA_CALL(cudaFree(gpuMallocBase));
CUDNN_CALL(cudnnDestroy(cudnnHandle));
}
/*****************************************
End of C Generated Code
*******************************************/

#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/kernel/transpose.cuh>
#include <nbla/cuda/utils/block_reduce.cuh>
namespace nbla {
// Kernels for SyncBatchNormalization
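// forward_batch_mean_sqmean_kernel: for each channel i1, accumulate the local
// mean of x and the local mean of x^2 over the size02 batch/spatial elements.
// The conversion to a variance (and the division by the number of processes)
// happens after the cross-process reduction, in
// forward_batch_running_mean_var_kernel below.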
template <typename T>
__global__ void
forward_batch_mean_sqmean_kernel(const int size1, const int size2,
const int size02, const int size12, const T *x,
T *m, T *v) {
NBLA_CUDA_KERNEL_LOOP(i1, size1) {
T tmp_m = 0;
T tmp_v = 0;
for (int i02 = 0; i02 < size02; ++i02) {
const int i0 = i02 / size2;
const int i2 = i02 % size2;
const int i = i0 * size12 + i1 * size2 + i2;
const T value = x[i];
tmp_m += value;
tmp_v += value * value;
}
tmp_m /= size02;
m[i1] = tmp_m;
tmp_v = tmp_v / size02;
v[i1] = tmp_v;
}
}
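// forward_batch_running_mean_var_kernel: once the per-process results have
// been summed across n_procs processes, finish the reduction (divide by
// n_procs), turn E[x^2] into a variance, and update the running mean/variance
// with decay_rate, applying Bessel's correction (n / (n - 1) with
// n = n_procs * size02) to the running variance.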
template <typename T>
__global__ void
forward_batch_running_mean_var_kernel(const int size1, const int size02,
const int n_procs, const float decay_rate,
T *m, T *v, T *rm, T *rv) {
NBLA_CUDA_KERNEL_LOOP(i1, size1) {
m[i1] /= n_procs;
v[i1] = v[i1] / n_procs - m[i1] * m[i1];
if (rm) {
rm[i1] = decay_rate * rm[i1] + (1. - decay_rate) * m[i1];
}
if (rv) {
rv[i1] = decay_rate * rv[i1] +
(1. - decay_rate) * v[i1] * (n_procs * size02) /
((n_procs * size02) - 1);
}
}
}
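// backward_batch_data_pre_sync_kernel: per-channel partial sums of dy and
// x * dy. Judging by the pre/post-sync naming, these are meant to be reduced
// across processes before the mean/variance gradients are assembled in the
// *_post_sync kernels below.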
template <typename T>
__global__ void backward_batch_data_pre_sync_kernel(
const int size1, const int size2, const int size02, const int size12,
const float decay_rate, const float eps, const T *dy, const T *m,
const T *v, const T *x, const T *g, const T *dm, const T *dv, T *sum_dy,
T *sum_xdy) {
NBLA_CUDA_KERNEL_LOOP(i1, size1) {
sum_dy[i1] = 0;
sum_xdy[i1] = 0;
for (int i02 = 0; i02 < size02; ++i02) {
const int i0 = i02 / size2;
const int i2 = i02 % size2;
const int i = i0 * size12 + i1 * size2 + i2;
sum_dy[i1] += dy[i];
sum_xdy[i1] += x[i] * dy[i];
}
}
}
template <typename T>
__global__ void backward_batch_data_mean_variance_post_sync_kernel(
const int size1, const int size02, const float eps, const T *m, const T *v,
const T *g, const T *dm, const T *dv, const T *sum_dy, const T *sum_xdy,
T *dmean, T *dvar) {
NBLA_CUDA_KERNEL_LOOP(i1, size1) {
dvar[i1] = g[i1] * sum_xdy[i1] - g[i1] * m[i1] * sum_dy[i1];
dvar[i1] = dvar[i1] * -0.5 *
pow(static_cast<T>(v[i1] + eps), static_cast<T>(-1.5)) +
(dv ? dv[i1] : (T)0);
dmean[i1] = g[i1] * sum_dy[i1];
dmean[i1] =
dmean[i1] * (-1 / std::sqrt(v[i1] + eps)) + (dm ? dm[i1] : (T)0);
}
}
template <typename T>
__global__ void backward_batch_data_dx_post_sync_kernel(
const int size102, const int size0, const int size1, const int size2,
const int size02, const int size12, const int n, const float decay_rate,
const float eps, const T *dy, const T *m, const T *v, const T *x,
const T *g, const T *dm, const T *dv, const T *dmean, const T *dvar,
T *dx) {
NBLA_CUDA_KERNEL_LOOP(idx, size102) {
const int i1 = idx / size02;
const int i0 = (idx / size2) % size0;
const int i2 = idx % size2;
const int i = i0 * size12 + i1 * size2 + i2;
dx[i] += dy[i] * g[i1] / sqrt(v[i1] + eps) +
dvar[i1] * 2 * (x[i] - m[i1]) / n + dmean[i1] / n;
}
}
template <typename T>
__global__ void backward_batch_gamma_beta_post_sync_kernel(
const int size1_, const int size2_, const int size02_, const int size12_,
const float eps_, const T *dy, const T *m, const T *v, const T *x,
const T *sum_dy, const T *sum_xdy, T *db, T *dg) {
NBLA_CUDA_KERNEL_LOOP(i1, size1_) {
const T mean = m[i1];
const T inv_sqrt_variance = (T)1 / sqrt(v[i1] + eps_);
T dbeta = sum_dy[i1];
T dgamma = sum_xdy[i1] - mean * sum_dy[i1];
db[i1] += dbeta;
dg[i1] += dgamma * inv_sqrt_variance;
}
}
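// Grid-stride loop over [0, num): each thread starts at its global index and
// advances by the total number of threads in the grid.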
#define NBLA_CUDA_1D_GRID_STRIDE_LOOP(idx, num) \
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num; \
idx += blockDim.x * gridDim.x)
// prototype
//#define TEST_FEATURE_MEAN_VARIANCE_KERNEL
#ifdef TEST_FEATURE_MEAN_VARIANCE_KERNEL
template <typename T>
__global__ void mean_variance_kernel(const T *in, T *tmp_m, T *tmp_v, T *m,
T *v, int N, int blockNums) {
float2 mean_variance;
mean_variance.x = 0;
mean_variance.y = 0;
NBLA_CUDA_1D_GRID_STRIDE_LOOP(i, N) {
const T value = in[i];
mean_variance.x += value;
mean_variance.y += value * value;
}
int i = blockIdx.x * blockDim.x + threadIdx.x;
mean_variance = blockReduceSumOfFloat2(mean_variance);
if (threadIdx.x == 0) {
tmp_m[blockIdx.x] = mean_variance.x;
tmp_v[blockIdx.x] = mean_variance.y;
}
__syncthreads();
mean_variance.x = 0;
mean_variance.y = 0;
if (i < blockNums) {
mean_variance.x = tmp_m[i];
mean_variance.y = tmp_v[i];
}
mean_variance = blockReduceSumOfFloat2(mean_variance);
if (threadIdx.x == 0) {
m[blockIdx.x] = mean_variance.x;
v[blockIdx.x] = mean_variance.y;
}
}
#endif
// prototype
//#define TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL
#ifdef TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL
template <typename T>
__global__ void mean_variance_with_axis_kernel(const T *in_trans, T *tmp_m,
T *tmp_v, T *m, T *v, int N,
int blockNums, int axis_size) {
float2 mean_variance;
for (int idx = 0; idx < axis_size; ++idx) {
mean_variance.x = 0;
mean_variance.y = 0;
NBLA_CUDA_1D_GRID_STRIDE_LOOP(i, N) {
const T value = in_trans[i + idx * N];
mean_variance.x += value;
mean_variance.y += value * value;
}
int i = blockIdx.x * blockDim.x + threadIdx.x;
mean_variance = blockReduceSumOfFloat2(mean_variance);
if (threadIdx.x == 0) {
tmp_m[blockIdx.x] = mean_variance.x;
tmp_v[blockIdx.x] = mean_variance.y;
}
__syncthreads();
mean_variance.x = 0;
mean_variance.y = 0;
if (i < blockNums) {
mean_variance.x = tmp_m[i];
mean_variance.y = tmp_v[i];
}
mean_variance = blockReduceSumOfFloat2(mean_variance);
if (threadIdx.x == 0) {
m[idx] = mean_variance.x;
v[idx] = mean_variance.y;
}
__syncthreads();
}
}
#endif
/******************************************************************************/
/*** Forward Batch Kernel Implementation */
/******************************************************************************/
template <typename T>
__global__ void forward_batch_mean_variance_kernel(
const int size1, const int size2, const int size02, const int size12,
const float decay_rate, const float eps, const T *x, const T *gamma,
const T *beta, T *m, T *v, T *rm, T *rv) {
NBLA_CUDA_KERNEL_LOOP(i1, size1) {
T tmp_m = 0;
T tmp_v = 0;
for (int i02 = 0; i02 < size02; ++i02) {
const int i0 = i02 / size2;
const int i2 = i02 % size2;
const int i = i0 * size12 + i1 * size2 + i2;
const T value = x[i];
tmp_m += value;
tmp_v += value * value;
}
tmp_m /= size02;
m[i1] = tmp_m;
tmp_v = tmp_v / size02 - tmp_m * tmp_m;
v[i1] = tmp_v;
if (rm) {
rm[i1] = decay_rate * rm[i1] + (1. - decay_rate) * tmp_m;
}
if (rv) {
rv[i1] = decay_rate * rv[i1] +
(1. - decay_rate) * tmp_v * size02 / (size02 - 1);
}
}
}
template <typename T>
__global__ void forward_batch_gamma_beta_kernel(
const int size102, const int size0, const int size2, const int size02,
int const size12, const float decay_rate, const float eps, const T *x,
const T *m, const T *v, const T *rm, const T *rv, const T *gamma,
const T *beta, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, size102) {
const int i1 = idx / size02;
const int i0 = (idx / size2) % size0;
const int i2 = idx % size2;
const int i = i0 * size12 + i1 * size2 + i2;
const T stdvar = sqrt(v[i1] + eps);
const T scale = gamma ? gamma[i1] : (T)1;
const T bias = beta ? beta[i1] : (T)0;
y[i] = (x[i] - m[i1]) * scale / stdvar + bias;
}
}
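// The *_mean_variance_preprocess / *_mean_variance_postprocess pair below
// implements a two-pass reduction: the preprocess kernel walks the input with
// a grid-stride loop, accumulates partial sums of x and x^2 with
// blockReduceSumOfFloat2, and writes one partial result per block; the
// postprocess kernel then sums those per-block partials, scales by inv_N to
// obtain mean and variance, and updates the running statistics (svar
// presumably carries the N / (N - 1) correction for the running variance).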
template <typename T>
__global__ void forward_batch_kernel_mean_variance_preprocess(const T *x,
const int N, T *m,
T *v) {
float2 mean_variance;
mean_variance.x = 0;
mean_variance.y = 0;
NBLA_CUDA_1D_GRID_STRIDE_LOOP(i, N) {
const T value = x[i];
mean_variance.x += (float)value;
mean_variance.y += (float)(value * value);
}
mean_variance = blockReduceSumOfFloat2(mean_variance);
if (threadIdx.x == 0) {
m[blockIdx.x] = mean_variance.x;
v[blockIdx.x] = mean_variance.y;
}
}
template <typename T>
__global__ void forward_batch_kernel_mean_variance_postprocess(
const T *block_m, const T *block_v, const int block_nums,
const float decay_rate, const float inv_N, const float svar, T *m, T *v,
T *rm, T *rv) {
float2 mean_variance;
mean_variance.x = 0;
mean_variance.y = 0;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < block_nums) {
mean_variance.x = block_m[i];
mean_variance.y = block_v[i];
}
mean_variance = blockReduceSumOfFloat2(mean_variance);
if (threadIdx.x == 0) {
const float mean = mean_variance.x * inv_N;
const float variance = mean_variance.y * inv_N - mean * mean;
m[blockIdx.x] = mean;
v[blockIdx.x] = variance;
if (rm) {
rm[blockIdx.x] = decay_rate * rm[blockIdx.x] + (1. - decay_rate) * mean;
}
if (rv) {
rv[blockIdx.x] =
decay_rate * rv[blockIdx.x] + (1. - decay_rate) * variance * svar;
}
}
}
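// forward_batch_kernel_gamma_beta_trans: applies the normalization and affine
// transform while mapping each output index o back to the (possibly
// transposed) input layout through y_strides / x_strides and axes; it also
// caches 1 / sqrt(v + eps) per axis element for reuse in the backward pass.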
template <typename T>
__global__ void forward_batch_kernel_gamma_beta_trans(
const int shape_size, const int N, const T *x, const T *gamma,
const T *beta, T *m, T *v, float decay_rate, float eps, const int ndim,
const int *axes, const int *x_strides, const int *y_strides,
const int *y_shape, T *y, T *inv_sqrt_variance) {
NBLA_CUDA_KERNEL_LOOP(o, shape_size) {
int i = 0;
for (int d = 0; d < ndim; ++d) {
const int k = int(o / y_strides[d]) % y_shape[d];
i += k * x_strides[axes[d]];
}
const int axis_idx = int(i / N);
const T inv_stdvar = 1. / sqrt(v[axis_idx] + eps);
inv_sqrt_variance[axis_idx] = inv_stdvar;
const T scale = gamma ? gamma[axis_idx] : (T)1;
const T bias = beta ? beta[axis_idx] : (T)0;
y[o] = (x[i] - m[axis_idx]) * scale * inv_stdvar + bias;
}
}
/******************************************************************************/
/*** Forward Global Kernel Implementation */
/******************************************************************************/
template <typename T>
__global__ void forward_global_kernel(const int size102_, const int size0_,
const int size1_, const int size2_,
const int size02_, const int size12_,
const float decay_rate_, const float eps_,
const T *x, const T *rm, const T *rv,
const T *gamma, const T *beta, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, size102_) {
const int i1 = idx / size02_;
const int i0 = (idx / size2_) % size0_;
const int i2 = idx % size2_;
const int i = i0 * size12_ + i1 * size2_ + i2;
const T mean = rm[i1];
const T stdvar = sqrt(rv[i1] + eps_);
const T scale = gamma ? gamma[i1] : (T)1;
const T bias = beta ? beta[i1] : (T)0;
y[i] = (x[i] - mean) * scale / stdvar + bias;
}
}
/******************************************************************************/
/*** Backward Batch Data Kernel Implementation */
/******************************************************************************/
template <typename T>
__global__ void backward_batch_data_mean_variance_kernel(
const int size1, const int size2, const int size02, const int size12,
const float decay_rate, const float eps, const T *dy, const T *m,
const T *v, const T *x, const T *g, const T *dm, const T *dv, T *dmean,
T *dvar) {
NBLA_CUDA_KERNEL_LOOP(i1, size1) {
T tmp_dvar = 0;
T tmp_dmean = 0;
T tmp = 0;
for (int i02 = 0; i02 < size02; ++i02) {
const int i0 = i02 / size2;
const int i2 = i02 % size2;
const int i = i0 * size12 + i1 * size2 + i2;
const T dxh = dy[i] * (g ? g[i1] : (T)1); // Grad of x hat.
const T cx = x[i] - m[i1]; // x - mean
tmp_dvar += dxh * cx;
tmp_dmean += dxh;
tmp += cx;
}
T tmp_v = v[i1];
dvar[i1] = tmp_dvar * -0.5 * pow(static_cast<float>(tmp_v + eps),
static_cast<float>(-1.5)) +
(dv ? dv[i1] : (T)0);
dmean[i1] = tmp_dmean * (-1. / sqrt(tmp_v + eps)) +
dvar[i1] * (-2) * tmp / (size02) + (dm ? dm[i1] : (T)0);
}
}
template <typename T>
__global__ void backward_batch_data_dx_kernel(
const int size102, const int size0, const int size1, const int size2,
const int size02, const int size12, const float decay_rate, const float eps,
const T *dy, const T *m, const T *v, const T *x, const T *g, const T *dm,
const T *dv, const T *dmean, const T *dvar, T *dx) {
NBLA_CUDA_KERNEL_LOOP(idx, size102) {
const int i1 = idx / size02;
const int i0 = (idx / size2) % size0;
const int i2 = idx % size2;
const int i = i0 * size12 + i1 * size2 + i2;
dx[i] += dy[i] * (g ? g[i1] : (T)1) / sqrt(v[i1] + eps) +
dvar[i1] * 2 * (x[i] - m[i1]) / (size02) + dmean[i1] / (size02);
}
}
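// The backward preprocess / postprocess pair uses the same two-pass block
// reduction as the forward path, this time accumulating per-block sums of
// dxh = dy * gamma, dxh * (x - mean), and (x - mean); the postprocess kernel
// combines those totals into dmean and dvar.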
template <typename T>
__global__ void backward_batch_data_kernel_mean_variance_preprocess(
const int N, const T *dy, const T *x, const T *g, const T *m, T *block_m,
T *block_v, T *block_t) {
float3 mean_variance;
mean_variance.x = 0; // dmean
mean_variance.y = 0; // dvar
mean_variance.z = 0; // tmp
NBLA_CUDA_1D_GRID_STRIDE_LOOP(i, N) {
const float dxh = dy[i] * (g ? g[0] : (T)1);
const float cx = x[i] - m[0];
mean_variance.y += (float)(dxh * cx);
mean_variance.x += (float)dxh;
mean_variance.z += (float)cx;
}
mean_variance = blockReduceSumOfFloat3(mean_variance);
if (threadIdx.x == 0) {
block_m[blockIdx.x] = mean_variance.x;
block_v[blockIdx.x] = mean_variance.y;
block_t[blockIdx.x] = mean_variance.z;
}
}
template <typename T>
__global__ void backward_batch_data_kernel_mean_variance_postprocess(
const T *block_m, const T *block_v, const T *block_t, const int block_nums,
const float inv_N, const T *v, const T *dm, const T *dv, const float eps,
const int N, const T *inv_sqrt_variance, const int axis_idx, T *dmean,
T *dvar, T *t) {
float3 mean_variance;
mean_variance.x = 0; // dmean
mean_variance.y = 0; // dvar
mean_variance.z = 0; // tmp
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < block_nums) {
mean_variance.x += (float)block_m[i];
mean_variance.y += (float)block_v[i];
mean_variance.z += (float)block_t[i];
}
mean_variance = blockReduceSumOfFloat3(mean_variance);
if (threadIdx.x == 0) {
const float tmp_dvar =
mean_variance.y * (float)-0.5 *
pow(static_cast<float>(v[0] + eps), static_cast<float>(-1.5)) +
(dv ? dv[axis_idx] : (T)0);
dvar[0] = tmp_dvar;
dmean[0] = mean_variance.x * (-inv_sqrt_variance[0]) +
tmp_dvar * (-2) * mean_variance.z * inv_N +
(dm ? dm[axis_idx] : (T)0);
}
}
template <typename T>
__global__ void backward_batch_data_kernel_gamma_beta_trans(
const int shape_size, const float inv_N, const T *dy, const T *x,
const T *g, const T *v, const T *m, const T *dmean, const T *dvar,
const int ndim, const int *axes, const int *y_strides, const int *x_strides,
const int *x_shape, const T *inv_sqrt_variance, T *dx) {
NBLA_CUDA_KERNEL_LOOP(o, shape_size) {
int i = 0;
for (int d = 0; d < ndim; ++d) {
const int k = int(o / x_strides[d]) % x_shape[d];
i += k * y_strides[axes[d]];
}
int axis_idx = (int)(i * inv_N);
dx[o] += dy[i] * (g ? g[axis_idx] : (T)1) * inv_sqrt_variance[axis_idx] +
dvar[axis_idx] * 2 * (x[i] - m[axis_idx]) * inv_N +
dmean[axis_idx] * inv_N;
}
}
/******************************************************************************/
/*** Backward Batch Gamma Beta Kernel Implementation */
/******************************************************************************/
template <typename T>
__global__ void
backward_batch_gamma_beta_kernel(const int size1_, const int size2_,
const int size02_, const int size12_,
const float eps_, const T *dy, const T *m,
const T *v, const T *x, T *db, T *dg) {
NBLA_CUDA_KERNEL_LOOP(i1, size1_) {
const T mean = m[i1];
const T inv_sqrt_variance = (T)1 / sqrt(v[i1] + eps_);
T dbeta = (T)0;
T dgamma = (T)0;
for (int i02 = 0; i02 < size02_; ++i02) {
const int i0 = i02 / size2_;
const int i2 = i02 % size2_;
const int i = i0 * size12_ + i1 * size2_ + i2;
const T value = dy[i];
dbeta += value;
dgamma += value * (x[i] - mean);
}
if (db)
db[i1] += dbeta;
if (dg)
dg[i1] += dgamma * inv_sqrt_variance;
}
}
template <typename T>
__global__ void backward_batch_kernel_gamma_beta_preprocess(
const int N, const T *dy, const T *x, const T *m,
T *tmp_gamma_buffer_per_block, T *tmp_beta_buffer_per_block,
T *inv_sqrt_variance) {
float2 gamma_beta;
gamma_beta.x = 0; // gamma
gamma_beta.y = 0; // beta
NBLA_CUDA_1D_GRID_STRIDE_LOOP(i, N) {
const T value = dy[i];
gamma_beta.x += (float)(value * (x[i] - m[0]) * inv_sqrt_variance[0]);
gamma_beta.y += (float)value;
}
gamma_beta = blockReduceSumOfFloat2(gamma_beta);
if (threadIdx.x == 0) {
tmp_gamma_buffer_per_block[blockIdx.x] = gamma_beta.x;
tmp_beta_buffer_per_block[blockIdx.x] = gamma_beta.y;
}
}
template <typename T>
__global__ void backward_batch_kernel_gamma_beta_postprocess(
const T *tmp_gamma_buffer_per_block, const T *tmp_beta_buffer_per_block,
const int N, T *dg, T *db) {
float2 gamma_beta;
gamma_beta.x = 0; // gamma
gamma_beta.y = 0; // beta
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
gamma_beta.x += (float)tmp_gamma_buffer_per_block[i];
gamma_beta.y += (float)tmp_beta_buffer_per_block[i];
}
gamma_beta = blockReduceSumOfFloat2(gamma_beta);
if (threadIdx.x == 0) {
if (dg)
dg[blockIdx.x] += gamma_beta.x;
if (db)
db[blockIdx.x] += gamma_beta.y;
}
}
}
// *****************************************************************************
// advectScalar
// *****************************************************************************
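// SemiLagrangeRK2Ours: backtrace half a time step using the cell-centered
// velocity, sample the velocity at that half-step position, and then use it
// for the full backward trace (midpoint / RK2 scheme). If the half-step trace
// already hits a boundary, the kernel falls back to sampling at the clamped
// half-step position.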
__global__ void SemiLagrangeRK2Ours(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid src,
CudaRealGrid dst, const float dt, const int32_t order_space,
const int32_t bnd, const bool line_trace, const bool sample_outside_fluid) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
dst(i, j, k, b) = 0;
return;
}
if (!flags.isFluid(i, j, k, b)) {
// Don't advect solid geometry!
dst(i, j, k, b) = src(i, j, k, b);
return;
}
const CudaVec3 pos =
CudaVec3((float)i + 0.5f, (float)j + 0.5f, (float)k + 0.5f);
CudaVec3 displacement = vel.getCentered(i, j, k, b) * (-dt * 0.5f);
// Calculate a line trace from pos along displacement.
// NOTE: this is expensive (MUCH more expensive than Manta's routines), but
// can avoid some artifacts which would occur sampling into Geometry.
CudaVec3 half_pos;
const bool hit_bnd_half =
calcLineTrace(pos, displacement, flags, b, &half_pos, line_trace);
if (hit_bnd_half) {
// We hit the boundary, so as per Bridson we clamp the backwards trace.
// Note: if we treated this as a full Euler step we would have hit the same
// blocker, because the line trace is linear.
if (!sample_outside_fluid) {
dst(i, j, k, b) =
src.getInterpolatedWithFluidHi(flags, half_pos, order_space, b);
} else {
dst(i, j, k, b) =
src.getInterpolatedHi(half_pos, order_space, b);
}
return;
}
// Otherwise, sample the velocity at this half-step location and do another
// backwards trace.
displacement.x = vel.getInterpolatedComponentHi<0>(half_pos, order_space, b);
displacement.y = vel.getInterpolatedComponentHi<1>(half_pos, order_space, b);
if (flags.is_3d()) {
displacement.z =
vel.getInterpolatedComponentHi<2>(half_pos, order_space, b);
}
displacement = displacement * (-dt);
CudaVec3 back_pos;
calcLineTrace(pos, displacement, flags, b, &back_pos, line_trace);
// Note: It actually doesn't matter if we hit the boundary on the second
// trace. We clamp the trace anyway.
// Finally, sample the field at this back position.
if (!sample_outside_fluid) {
dst(i, j, k, b) =
src.getInterpolatedWithFluidHi(flags, back_pos, order_space, b);
} else {
dst(i, j, k, b) =
src.getInterpolatedHi(back_pos, order_space, b);
}
}
__global__ void SemiLagrangeRK3Ours(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid src,
CudaRealGrid dst, const float dt, const int32_t order_space,
const int32_t bnd, const bool line_trace, const bool sample_outside_fluid) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
dst(i, j, k, b) = 0;
return;
}
if (!flags.isFluid(i, j, k, b)) {
// Don't advect solid geometry!
dst(i, j, k, b) = src(i, j, k, b);
return;
}
// We're implementing the RK3 from Bridson page 242.
// k1 = f(q^n)
const CudaVec3 k1_pos =
CudaVec3((float)i + 0.5f, (float)j + 0.5f, (float)k + 0.5f);
CudaVec3 k1 = vel.getCentered(i, j, k, b);
// Calculate a line trace from pos along displacement.
// NOTE: this is expensive (MUCH more expensive than Manta's routines), but
// can avoid some artifacts which would occur sampling into Geometry.
// k2 = f(q^n - 1/2 * dt * k1)
CudaVec3 k2_pos;
if (calcLineTrace(k1_pos, k1 * (-dt * 0.5f), flags, b, &k2_pos, line_trace)) {
// If we hit the boundary we'll truncate to an Euler step.
if (!sample_outside_fluid) {
dst(i, j, k, b) =
src.getInterpolatedWithFluidHi(flags, k2_pos, order_space, b);
} else {
dst(i, j, k, b) =
src.getInterpolatedHi(k2_pos, order_space, b);
}
return;
}
CudaVec3 k2;
k2.x = vel.getInterpolatedComponentHi<0>(k2_pos, order_space, b);
k2.y = vel.getInterpolatedComponentHi<1>(k2_pos, order_space, b);
if (flags.is_3d()) {
k2.z = vel.getInterpolatedComponentHi<2>(k2_pos, order_space, b);
}
// k3 = f(q^n - 3/4 * dt * k2)
CudaVec3 k3_pos;
if (calcLineTrace(k1_pos, k2 * (-dt * 0.75f), flags, b, &k3_pos,
line_trace)) {
// If we hit the boundary we'll truncate to the k2 position (euler step).
if (!sample_outside_fluid) {
dst(i, j, k, b) =
src.getInterpolatedWithFluidHi(flags, k2_pos, order_space, b);
} else {
dst(i, j, k, b) =
src.getInterpolatedHi(k2_pos, order_space, b);
}
return;
}
CudaVec3 k3;
k3.x = vel.getInterpolatedComponentHi<0>(k3_pos, order_space, b);
k3.y = vel.getInterpolatedComponentHi<1>(k3_pos, order_space, b);
if (flags.is_3d()) {
k3.z = vel.getInterpolatedComponentHi<2>(k3_pos, order_space, b);
}
// Finally calculate the effective velocity and perform a line trace.
CudaVec3 back_pos;
CudaVec3 displacement = (k1 * (-dt * (2.0f / 9.0f)) +
k2 * (-dt * (3.0f / 9.0f)) +
k3 * (-dt * (4.0f / 9.0f)));
calcLineTrace(k1_pos, displacement, flags, b, &back_pos, line_trace);
// Finally, sample the field at this back position.
if (!sample_outside_fluid) {
dst(i, j, k, b) =
src.getInterpolatedWithFluidHi(flags, back_pos, order_space, b);
} else {
dst(i, j, k, b) =
src.getInterpolatedHi(back_pos, order_space, b);
}
}
// This is the same kernel as our other Euler kernel, except that it saves the
// particle trace position. This is used for our MacCormack routine (we do a
// local search around these positions in our clamp routine).
__global__ void SemiLagrangeEulerOursSavePos(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid src,
CudaRealGrid dst, const float dt, const int32_t order_space,
const int32_t bnd, const bool line_trace, const bool sample_outside_fluid,
CudaVecGrid pos) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
dst(i, j, k, b) = 0;
pos.setSafe(i, j, k, b, CudaVec3(i, j, k) + 0.5f);
return;
}
if (!flags.isFluid(i, j, k, b)) {
// Don't advect solid geometry!
dst(i, j, k, b) = src(i, j, k, b);
pos.setSafe(i, j, k, b, CudaVec3(i, j, k) + 0.5f);
return;
}
const CudaVec3 start_pos =
CudaVec3((float)i + 0.5f, (float)j + 0.5f, (float)k + 0.5f);
CudaVec3 displacement = vel.getCentered(i, j, k, b) * (-dt);
// Calculate a line trace from pos along displacement.
// NOTE: this is expensive (MUCH more expensive than Manta's routines), but
// can avoid some artifacts which would occur sampling into Geometry.
CudaVec3 back_pos;
calcLineTrace(start_pos, displacement, flags, b, &back_pos, line_trace);
pos.setSafe(i, j, k, b, back_pos);
// Sample at this back position.
if (!sample_outside_fluid) {
dst(i, j, k, b) =
src.getInterpolatedWithFluidHi(flags, back_pos, order_space, b);
} else {
dst(i, j, k, b) =
src.getInterpolatedHi(back_pos, order_space, b);
}
}
__global__ void SemiLagrangeEulerOurs(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid src,
CudaRealGrid dst, const float dt, const int32_t order_space,
const int32_t bnd, const bool line_trace, const bool sample_outside_fluid) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
dst(i, j, k, b) = 0;
return;
}
if (!flags.isFluid(i, j, k, b)) {
// Don't advect solid geometry!
dst(i, j, k, b) = src(i, j, k, b);
return;
}
const CudaVec3 pos =
CudaVec3((float)i + 0.5f, (float)j + 0.5f, (float)k + 0.5f);
CudaVec3 displacement = vel.getCentered(i, j, k, b) * (-dt);
// Calculate a line trace from pos along displacement.
// NOTE: this is expensive (MUCH more expensive than Manta's routines), but
// can avoid some artifacts which would occur sampling into Geometry.
CudaVec3 back_pos;
calcLineTrace(pos, displacement, flags, b, &back_pos, line_trace);
// Sample at this back position.
if (!sample_outside_fluid) {
dst(i, j, k, b) =
src.getInterpolatedWithFluidHi(flags, back_pos, order_space, b);
} else {
dst(i, j, k, b) =
src.getInterpolatedHi(back_pos, order_space, b);
}
}
__global__ void SemiLagrange(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid src,
CudaRealGrid dst, const float dt, const bool is_levelset,
const int32_t order_space, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
dst(i, j, k, b) = 0;
return;
}
CudaVec3 pos = (CudaVec3((float)i + 0.5f, (float)j + 0.5f, (float)k + 0.5f) -
vel.getCentered(i, j, k, b) * dt);
dst(i, j, k, b) = src.getInterpolatedHi(pos, order_space, b);
}
__global__ void MacCormackCorrect(
CudaFlagGrid flags, CudaRealGrid old, CudaRealGrid fwd,
CudaRealGrid bwd, CudaRealGrid dst, const float strength,
const bool is_levelset) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
float val = fwd(i, j, k, b);
if (flags.isFluid(i, j, k, b)) {
// Only correct inside fluid region.
val += strength * 0.5f * (old(i, j, k, b) - bwd(i, j, k, b));
}
dst(i, j, k, b) = val;
}
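// Illustrative sketch (not part of the solver and not called by any kernel
// here): the per-cell MacCormack error compensation applied above, written as
// a standalone device function. The term 0.5f * (old_val - bwd_val) estimates
// the error of one semi-Lagrangian step (advecting forward and then backward
// should return the original value); adding it back to the forward result
// cancels the leading-order error. E.g. old = 1.0, fwd = 0.8, bwd = 0.9 and
// strength = 1 gives 0.8 + 0.5 * (1.0 - 0.9) = 0.85.
__device__ __forceinline__ float MacCormackCorrectCell(
    const float old_val, const float fwd_val, const float bwd_val,
    const float strength) {
  return fwd_val + strength * 0.5f * (old_val - bwd_val);
}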
__device__ __forceinline__ void getMinMax(float& minv, float& maxv,
const float& val) {
if (val < minv) {
minv = val;
}
if (val > maxv) {
maxv = val;
}
}
template <typename T>
__device__ __forceinline__ T clamp(const T val, const T vmin, const T vmax) {
if (val < vmin) {
return vmin;
}
if (val > vmax) {
return vmax;
}
return val;
}
__device__ __forceinline__ float doClampComponent(
const Int3& gridSize, float dst, CudaRealGrid orig, const float fwd,
CudaVec3 pos, CudaVec3 vel, const int32_t b) {
float minv = CUDART_INF_F;
float maxv = -CUDART_INF_F;
// forward (and optionally) backward
Int3 positions[2];
positions[0] = toInt3(pos - vel);
positions[1] = toInt3(pos + vel);
for (int32_t l = 0; l < 2; ++l) {
Int3& curr_pos = positions[l];
// clamp forward lookup to grid
const int32_t i0 = clamp<int32_t>(curr_pos.x, 0, gridSize.x - 1);
const int32_t j0 = clamp<int32_t>(curr_pos.y, 0, gridSize.y - 1);
    // Note: this fixes a Manta bug: the Manta code clamps between 0 and 1
    // when the grid is not 3D, which is wrong (it should always be 0).
const int32_t k0 =
orig.is_3d() ? clamp<int32_t>(curr_pos.z, 0, (gridSize.z - 1)) : 0;
const int32_t i1 = i0 + 1;
const int32_t j1 = j0 + 1;
const int32_t k1 = (orig.is_3d() ? (k0 + 1) : k0);
if (!orig.isInBounds(Int3(i0, j0, k0), 0) ||
!orig.isInBounds(Int3(i1, j1, k1), 0)) {
return fwd;
}
// find min/max around source pos
getMinMax(minv, maxv, orig(i0, j0, k0, b));
getMinMax(minv, maxv, orig(i1, j0, k0, b));
getMinMax(minv, maxv, orig(i0, j1, k0, b));
getMinMax(minv, maxv, orig(i1, j1, k0, b));
if (orig.is_3d()) {
getMinMax(minv, maxv, orig(i0, j0, k1, b));
getMinMax(minv, maxv, orig(i1, j0, k1, b));
getMinMax(minv, maxv, orig(i0, j1, k1, b));
getMinMax(minv, maxv, orig(i1, j1, k1, b));
}
}
return clamp<float>(dst, minv, maxv);
}
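// Illustrative sketch (hypothetical helper, not called by the kernels here):
// the clamp rule used by doClampComponent, stripped of the grid lookups. The
// MacCormack-corrected value is clamped to the min/max of the source values
// surrounding the traced positions, which prevents the correction from
// introducing new extrema. For example, if the surrounding source values are
// {0.2, 0.4, 0.5, 0.3} and the corrected value is 0.7, the clamped result
// is 0.5.
__device__ __forceinline__ float ClampToNeighborhood(
    const float corrected, const float* neighbor_vals, const int32_t n) {
  float minv = CUDART_INF_F;
  float maxv = -CUDART_INF_F;
  for (int32_t l = 0; l < n; ++l) {
    getMinMax(minv, maxv, neighbor_vals[l]);
  }
  return clamp<float>(corrected, minv, maxv);
}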
__global__ void MacCormackClamp(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid dst,
CudaRealGrid orig, CudaRealGrid fwd, const float dt, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
return;
}
Int3 gridUpper = flags.getSize() - 1;
float dval = dst(i, j, k, b);
dval = doClampComponent(gridUpper, dval, orig, fwd(i, j, k, b),
CudaVec3(i, j, k),
vel.getCentered(i, j, k, b) * dt, b);
// Lookup forward/backward, round to closest NB.
Int3 pos_fwd = toInt3(CudaVec3(i, j, k) +
CudaVec3(0.5f, 0.5f, 0.5f) -
vel.getCentered(i, j, k, b) * dt);
Int3 pos_bwd = toInt3(CudaVec3(i, j, k) +
CudaVec3(0.5f, 0.5f, 0.5f) +
vel.getCentered(i, j, k, b) * dt);
// Test if lookups point out of grid or into obstacle (note doClampComponent
// already checks sides, below is needed for valid flags access).
if (pos_fwd.x < 0 || pos_fwd.y < 0 || pos_fwd.z < 0 ||
pos_bwd.x < 0 || pos_bwd.y < 0 || pos_bwd.z < 0 ||
pos_fwd.x > gridUpper.x || pos_fwd.y > gridUpper.y ||
((pos_fwd.z > gridUpper.z) && flags.is_3d()) ||
pos_bwd.x > gridUpper.x || pos_bwd.y > gridUpper.y ||
((pos_bwd.z > gridUpper.z) && flags.is_3d()) ||
flags.isObstacle(pos_fwd, b) || flags.isObstacle(pos_bwd, b) ) {
dval = fwd(i, j, k, b);
}
dst(i, j, k, b) = dval;
}
// Our version is a little different. It is a search around a single input
// position for min and max values. If no valid values are found, then
// false is returned (indicating that a clamp shouldn't be performed) otherwise
// true is returned (and the clamp min and max bounds are set).
__device__ __forceinline__ bool getClampBounds(
CudaRealGrid src, CudaVec3 pos, const int32_t b,
CudaFlagGrid flags, const bool sample_outside_fluid, float* clamp_min,
float* clamp_max) {
float minv = CUDART_INF_F;
float maxv = -CUDART_INF_F;
// clamp forward lookup to grid
const int32_t i0 = clamp<int32_t>(pos.x, 0, flags.xsize() - 1);
const int32_t j0 = clamp<int32_t>(pos.y, 0, flags.ysize() - 1);
const int32_t k0 =
src.is_3d() ? clamp<int32_t>(pos.z, 0, flags.zsize() - 1) : 0;
// Some modification here. Instead of looking just to the RHS, we will search
// all neighbors within a region. This is more expensive but better handles
// border cases.
int32_t ncells = 0;
for (int32_t k = k0 - 1; k <= k0 + 1; k++) {
for (int32_t j = j0 - 1; j <= j0 + 1; j++) {
for (int32_t i = i0 - 1; i <= i0 + 1; i++) {
if (k < 0 || k >= flags.zsize() ||
j < 0 || j >= flags.ysize() ||
i < 0 || i >= flags.xsize()) {
// Outside bounds.
continue;
} else if (sample_outside_fluid || flags.isFluid(i, j, k, b)) {
// Either we don't care about clamping to values inside the fluid, or
// this is a fluid cell...
getMinMax(minv, maxv, src(i, j, k, b));
ncells++;
}
}
}
}
if (ncells < 1) {
    // No valid cells were found. Return false to indicate that a clamp
    // shouldn't be performed.
return false;
} else {
*clamp_min = minv;
*clamp_max = maxv;
return true;
}
}
__global__ void MacCormackClampOurs(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid dst,
CudaRealGrid src, CudaRealGrid fwd, const float dt, const int32_t bnd,
CudaVecGrid fwd_pos, CudaVecGrid bwd_pos, const bool sample_outside_fluid) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
return;
}
// Calculate the clamp bounds.
float clamp_min = CUDART_INF_F;
float clamp_max = -CUDART_INF_F;
// Calculate the clamp bounds around the forward position.
CudaVec3 pos = fwd_pos(i, j, k, b);
const bool do_clamp_fwd = getClampBounds(
src, pos, b, flags, sample_outside_fluid, &clamp_min, &clamp_max);
// Calculate the clamp bounds around the backward position. Recall that
// the bwd value was sampled on the fwd output (so src is replaced with fwd).
// EDIT(tompson): According to "An unconditionally stable maccormack method"
// only a forward search is required.
// pos = bwd_pos(i, j, k, b);
// const bool do_clamp_bwd = getClampBounds(
// fwd, pos, b, flags, sample_outside_fluid, &clamp_min, &clamp_max);
float dval;
if (!do_clamp_fwd) {
    // If no valid cells were found around the forward (or backward) position,
    // we can't clamp, so revert to the plain Euler (forward) step.
dval = fwd(i, j, k, b);
} else {
// We found valid values with which to clamp the maccormack corrected
// quantity. Apply this clamp.
dval = clamp<float>(dst(i, j, k, b), clamp_min, clamp_max);
}
dst(i, j, k, b) = dval;
}
static int tfluids_CudaMain_advectScalar(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack. We also treat 2D advection as 3D (with depth = 1)
  // and no 'w' component for velocity.
float dt = static_cast<float>(lua_tonumber(L, 1));
THCudaTensor* tensor_s = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 4, "torch.CudaTensor"));
THCudaTensor* tensor_fwd = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 5, "torch.CudaTensor"));
THCudaTensor* tensor_bwd = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 6, "torch.CudaTensor"));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 7));
const std::string method_str = static_cast<std::string>(lua_tostring(L, 8));
THCudaTensor* tensor_fwd_pos = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 9, "torch.CudaTensor"));
THCudaTensor* tensor_bwd_pos = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 10, "torch.CudaTensor"));
const int32_t boundary_width = static_cast<int32_t>(lua_tointeger(L, 11));
const bool sample_outside_fluid = static_cast<bool>(lua_toboolean(L, 12));
const float maccormack_strength = static_cast<float>(lua_tonumber(L, 13));
THCudaTensor* tensor_s_dst = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 14, "torch.CudaTensor"));
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
CudaRealGrid src = toCudaRealGrid(state, tensor_s, is_3d);
CudaRealGrid dst = toCudaRealGrid(state, tensor_s_dst, is_3d);
// The maccormack method also needs fwd and bwd temporary arrays.
CudaRealGrid fwd = toCudaRealGrid(state, tensor_fwd, is_3d);
CudaRealGrid bwd = toCudaRealGrid(state, tensor_bwd, is_3d);
CudaVecGrid fwd_pos = toCudaVecGrid(state, tensor_fwd_pos, is_3d);
CudaVecGrid bwd_pos = toCudaVecGrid(state, tensor_bwd_pos, is_3d);
AdvectMethod method = StringToAdvectMethod(L, method_str);
const bool is_levelset = false; // We never advect them.
const int32_t order_space = 1;
// A full line trace along every ray is expensive but correct (applies to our
// methods only).
const bool line_trace = true;
// Do the forward step.
// LaunchKernel args: lua_State, func, domain, args...
const int32_t bnd = 1;
if (method == ADVECT_EULER_MANTA) {
LaunchKernel(L, &SemiLagrange, flags,
flags, vel, src, dst, dt, is_levelset, order_space, bnd);
// We're done. The forward Euler step is already in the output array.
} else if (method == ADVECT_RK2_OURS) {
LaunchKernel(L, &SemiLagrangeRK2Ours, flags,
flags, vel, src, dst, dt, order_space, bnd, line_trace,
sample_outside_fluid);
} else if (method == ADVECT_EULER_OURS) {
LaunchKernel(L, &SemiLagrangeEulerOurs, flags,
flags, vel, src, dst, dt, order_space, bnd, line_trace,
sample_outside_fluid);
} else if (method == ADVECT_RK3_OURS) {
LaunchKernel(L, &SemiLagrangeRK3Ours, flags,
flags, vel, src, dst, dt, order_space, bnd, line_trace,
sample_outside_fluid);
} else if (method == ADVECT_MACCORMACK_MANTA ||
method == ADVECT_MACCORMACK_OURS) {
// Do the forwards step.
if (method == ADVECT_MACCORMACK_MANTA) {
LaunchKernel(L, &SemiLagrange, flags,
flags, vel, src, fwd, dt, is_levelset, order_space, bnd);
} else {
LaunchKernel(L, &SemiLagrangeEulerOursSavePos, flags,
flags, vel, src, fwd, dt, order_space, bnd, line_trace,
sample_outside_fluid, fwd_pos);
}
// Do the backwards step.
if (method == ADVECT_MACCORMACK_MANTA) {
LaunchKernel(L, &SemiLagrange, flags,
flags, vel, fwd, bwd, -dt, is_levelset, order_space, bnd);
} else {
LaunchKernel(L, &SemiLagrangeEulerOursSavePos, flags,
flags, vel, fwd, bwd, -dt, order_space, bnd, line_trace,
sample_outside_fluid, bwd_pos);
}
// Perform the correction.
LaunchKernel(L, &MacCormackCorrect, flags,
flags, src, fwd, bwd, dst, maccormack_strength, is_levelset);
// Perform clamping.
if (method == ADVECT_MACCORMACK_MANTA) {
LaunchKernel(L, &MacCormackClamp, flags,
flags, vel, dst, src, fwd, dt, bnd);
} else {
LaunchKernel(L, &MacCormackClampOurs, flags,
flags, vel, dst, src, fwd, dt, bnd, fwd_pos, bwd_pos,
sample_outside_fluid);
}
} else {
std::stringstream ss;
ss << "advection method (" << method_str << ") is not supported";
luaL_error(L, ss.str().c_str());
}
return 0; // Recall: number of return values on the lua stack.
}
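// For reference, a sketch of the Lua-side call this binding expects. The
// argument order is reconstructed from the stack reads above; the exposed Lua
// function name is a guess (it depends on how the module table is registered)
// and may differ:
//   tfluids.advectScalar(dt, s, U, flags, fwd, bwd, is3D, method,
//                        fwdPos, bwdPos, boundaryWidth, sampleOutsideFluid,
//                        maccormackStrength, sDst)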
// *****************************************************************************
// advectVel
// *****************************************************************************
// Take a step along the vel from pos and sample the velocity there.
__device__ __forceinline__ bool SemiLagrangeStepMAC(
const CudaFlagGrid& flags, const CudaMACGrid& vel, const CudaMACGrid& src,
const float scale, const bool line_trace, const int32_t order_space,
const CudaVec3& pos, const int32_t i, const int32_t j, const int32_t k,
const int32_t b, CudaVec3* val) {
  // TODO(tompson): We really want to clamp to the SMALLEST of the steps in
  // each dimension; however, this is OK for now (because doing so would be
  // expensive)...
CudaVec3 xpos;
bool hitx = calcLineTrace(
pos, vel.getAtMACX(i, j, k, b) * scale, flags, b, &xpos, line_trace);
val->x = src.getInterpolatedComponentHi<0>(xpos, order_space, b);
CudaVec3 ypos;
bool hity = calcLineTrace(
pos, vel.getAtMACY(i, j, k, b) * scale, flags, b, &ypos, line_trace);
val->y = src.getInterpolatedComponentHi<1>(ypos, order_space, b);
bool hitz;
if (vel.is_3d()) {
CudaVec3 zpos;
hitz = calcLineTrace(
pos, vel.getAtMACZ(i, j, k, b) * scale, flags, b, &zpos, line_trace);
val->z = src.getInterpolatedComponentHi<2>(zpos, order_space, b);
} else {
val->z = 0;
hitz = false;
}
return hitx || hity || hitz;
}
__global__ void SemiLagrangeEulerOursMAC(
CudaFlagGrid flags, CudaMACGrid vel, CudaMACGrid src,
CudaMACGrid dst, const float dt, const int32_t order_space,
const int32_t bnd, const bool line_trace) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
dst.setSafe(i, j, k, b, CudaVec3(0, 0, 0));
return;
}
if (!flags.isFluid(i, j, k, b)) {
// Don't advect solid geometry!
dst.setSafe(i, j, k, b, src(i, j, k, b));
return;
}
// Get correct velocity at MAC position.
// No need to shift xpos etc. as lookup field is also shifted.
const CudaVec3 pos(static_cast<float>(i) + 0.5f,
static_cast<float>(j) + 0.5f,
static_cast<float>(k) + 0.5f);
CudaVec3 val;
SemiLagrangeStepMAC(flags, vel, src, -dt, line_trace, order_space, pos,
i, j, k, b, &val);
dst.setSafe(i, j, k, b, val);
}
__global__ void SemiLagrangeMAC(
CudaFlagGrid flags, CudaMACGrid vel, CudaMACGrid src,
CudaMACGrid dst, const float dt, const int32_t order_space,
const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
dst.setSafe(i, j, k, b, CudaVec3(0, 0, 0));
return;
}
// Get correct velocity at MAC position.
// No need to shift xpos etc. as lookup field is also shifted.
const CudaVec3 pos(static_cast<float>(i) + 0.5f,
static_cast<float>(j) + 0.5f,
static_cast<float>(k) + 0.5f);
CudaVec3 xpos = pos - vel.getAtMACX(i, j, k, b) * dt;
const float vx = src.getInterpolatedComponentHi<0>(xpos, order_space, b);
CudaVec3 ypos = pos - vel.getAtMACY(i, j, k, b) * dt;
const float vy = src.getInterpolatedComponentHi<1>(ypos, order_space, b);
float vz;
if (vel.is_3d()) {
CudaVec3 zpos = pos - vel.getAtMACZ(i, j, k, b) * dt;
vz = src.getInterpolatedComponentHi<2>(zpos, order_space, b);
} else {
vz = 0;
}
dst.setSafe(i, j, k, b, CudaVec3(vx, vy, vz));
}
__global__ void MacCormackCorrectMAC(
CudaFlagGrid flags, CudaMACGrid old, CudaMACGrid fwd,
CudaMACGrid bwd, CudaMACGrid dst, const float strength) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
bool skip[3] = {false, false, false};
if (!flags.isFluid(i, j, k, b)) {
skip[0] = true;
skip[1] = true;
skip[2] = true;
}
  // Note: in the Manta code there's an isMAC boolean that is always true.
if ((i > 0) && (!flags.isFluid(i - 1, j, k, b))) {
skip[0] = true;
}
if ((j > 0) && (!flags.isFluid(i, j - 1, k, b))) {
skip[1] = true;
}
if (flags.is_3d()) {
if ((k > 0) && (!flags.isFluid(i, j, k - 1, b))) {
skip[2] = true;
}
}
CudaVec3 val(0, 0, 0);
const int32_t nchan = flags.is_3d() ? 3 : 2;
for (int32_t c = 0; c < nchan; ++c) {
if (skip[c]) {
val(c) = fwd(i, j, k, c, b);
} else {
// perform actual correction with given strength.
val(c) = fwd(i, j, k, c, b) + strength * 0.5f * (old(i, j, k, c, b) -
bwd(i, j, k, c, b));
}
}
dst.setSafe(i, j, k, b, val);
}
template <int32_t c>
__device__ __forceinline__ float doClampComponentMAC(
const Int3& gridSize, float dst, const CudaMACGrid& orig,
float fwd, const CudaVec3& pos, const CudaVec3& vel,
int32_t b) {
float minv = CUDART_INF_F;
float maxv = -CUDART_INF_F;
// forward (and optionally) backward
Int3 positions[2];
positions[0] = toInt3(pos - vel);
positions[1] = toInt3(pos + vel);
for (int32_t l = 0; l < 2; ++l) {
Int3& curr_pos = positions[l];
// clamp forward lookup to grid
const int32_t i0 = clamp<int32_t>(curr_pos.x, 0, gridSize.x - 1);
const int32_t j0 = clamp<int32_t>(curr_pos.y, 0, gridSize.y - 1);
    // Note: unlike doClampComponent above, this keeps Manta's behavior of
    // clamping z between 0 and 1 in the 2D case.
    const int32_t k0 = clamp<int32_t>(curr_pos.z, 0,
                                      (orig.is_3d() ? (gridSize.z - 1) : 1));
const int32_t i1 = i0 + 1;
const int32_t j1 = j0 + 1;
const int32_t k1 = (orig.is_3d() ? (k0 + 1) : k0);
if (!orig.isInBounds(Int3(i0, j0, k0), 0) ||
!orig.isInBounds(Int3(i1, j1, k1), 0)) {
return fwd;
}
// find min/max around source pos
getMinMax(minv, maxv, orig(i0, j0, k0, c, b));
getMinMax(minv, maxv, orig(i1, j0, k0, c, b));
getMinMax(minv, maxv, orig(i0, j1, k0, c, b));
getMinMax(minv, maxv, orig(i1, j1, k0, c, b));
if (orig.is_3d()) {
getMinMax(minv, maxv, orig(i0, j0, k1, c, b));
getMinMax(minv, maxv, orig(i1, j0, k1, c, b));
getMinMax(minv, maxv, orig(i0, j1, k1, c, b));
getMinMax(minv, maxv, orig(i1, j1, k1, c, b));
}
}
return clamp<float>(dst, minv, maxv);
}
__global__ void MacCormackClampMAC(
CudaFlagGrid flags, CudaMACGrid vel, CudaMACGrid dst,
CudaMACGrid orig, CudaMACGrid fwd, const float dt, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
return;
}
CudaVec3 pos(static_cast<float>(i), static_cast<float>(j),
static_cast<float>(k));
CudaVec3 dval = dst(i, j, k, b);
CudaVec3 dfwd = fwd(i, j, k, b);
Int3 gridUpper = flags.getSize() - 1;
dval.x = doClampComponentMAC<0>(gridUpper, dval.x, orig, dfwd.x, pos,
vel.getAtMACX(i, j, k, b) * dt, b);
dval.y = doClampComponentMAC<1>(gridUpper, dval.y, orig, dfwd.y, pos,
vel.getAtMACY(i, j, k, b) * dt, b);
if (flags.is_3d()) {
dval.z = doClampComponentMAC<2>(gridUpper, dval.z, orig, dfwd.z, pos,
vel.getAtMACZ(i, j, k, b) * dt, b);
} else {
dval.z = 0;
}
  // Note (from Manta): The MAC version currently does not check whether source
  // points were inside an obstacle (unlike the centered version). This would
  // need to be done for each face separately to stay symmetric.
dst.setSafe(i, j, k, b, dval);
}
static int tfluids_CudaMain_advectVel(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack. We also treat 2D advection as 3D (with depth = 1)
  // and no 'w' component for velocity.
const float dt = static_cast<float>(lua_tonumber(L, 1));
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
THCudaTensor* tensor_fwd = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 4, "torch.CudaTensor"));
THCudaTensor* tensor_bwd = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 5, "torch.CudaTensor"));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 6));
const std::string method_str = static_cast<std::string>(lua_tostring(L, 7));
const int32_t boundary_width = static_cast<int32_t>(lua_tointeger(L, 8));
const float maccormack_strength = static_cast<float>(lua_tonumber(L, 9));
THCudaTensor* tensor_u_dst = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 10, "torch.CudaTensor"));
AdvectMethod method = StringToAdvectMethod(L, method_str);
// TODO(tompson): Implement RK2 and RK3 methods.
if (method == ADVECT_RK2_OURS || method == ADVECT_RK3_OURS) {
// We do not yet have an RK2 or RK3 implementation. Use Maccormack.
method = ADVECT_MACCORMACK_OURS;
}
const int32_t order_space = 1;
// A full line trace along every ray is expensive but correct (applies to our
// methods only).
const bool line_trace = true;
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
  // We always do self-advection, but we could point src to another tensor.
CudaMACGrid src = toCudaMACGrid(state, tensor_u, is_3d);
CudaMACGrid dst = toCudaMACGrid(state, tensor_u_dst, is_3d);
// The maccormack method also needs fwd and bwd temporary arrays.
CudaMACGrid fwd = toCudaMACGrid(state, tensor_fwd, is_3d);
CudaMACGrid bwd = toCudaMACGrid(state, tensor_bwd, is_3d);
// LaunchKernel args: lua_State, func, domain, args...
const int32_t bnd = 1;
if (method == ADVECT_EULER_MANTA) {
LaunchKernel(L, &SemiLagrangeMAC, flags,
flags, vel, src, dst, dt, order_space, bnd);
} else if (method == ADVECT_EULER_OURS) {
LaunchKernel(L, &SemiLagrangeEulerOursMAC, flags,
flags, vel, src, dst, dt, order_space, bnd, line_trace);
} else if (method == ADVECT_MACCORMACK_MANTA ||
method == ADVECT_MACCORMACK_OURS) {
// Do the forward step.
if (method == ADVECT_MACCORMACK_MANTA) {
LaunchKernel(L, &SemiLagrangeMAC, flags,
flags, vel, src, fwd, dt, order_space, bnd);
} else {
LaunchKernel(L, &SemiLagrangeEulerOursMAC, flags,
flags, vel, src, fwd, dt, order_space, bnd, line_trace);
}
// Do the backwards step.
if (method == ADVECT_MACCORMACK_MANTA) {
LaunchKernel(L, &SemiLagrangeMAC, flags,
flags, vel, fwd, bwd, -dt, order_space, bnd);
} else {
LaunchKernel(L, &SemiLagrangeEulerOursMAC, flags,
flags, vel, fwd, bwd, -dt, order_space, bnd, line_trace);
}
// Perform the correction.
LaunchKernel(L, &MacCormackCorrectMAC, flags,
flags, src, fwd, bwd, dst, maccormack_strength);
// Perform clamping.
// TODO(tompson): Perform our own clamping.
LaunchKernel(L, &MacCormackClampMAC, flags,
flags, vel, dst, src, fwd, dt, bnd);
} else {
THError("Advection method not supported!");
}
return 0; // Recall: number of return values on the lua stack.
}
// *****************************************************************************
// setWallBcsForward
// *****************************************************************************
__global__ void setWallBcsForward(CudaFlagGrid flags, CudaMACGrid vel) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
const bool cur_fluid = flags.isFluid(i, j, k, b);
const bool cur_obs = flags.isObstacle(i, j, k, b);
if (!cur_fluid && !cur_obs) {
return;
}
// we use i > 0 instead of bnd=1 to check outer wall
if (i > 0 && flags.isObstacle(i - 1, j, k, b)) {
// TODO(tompson): Set to (potentially) non-zero obstacle velocity.
vel(i, j, k, 0, b) = 0;
}
if (i > 0 && cur_obs && flags.isFluid(i - 1, j, k, b)) {
vel(i, j, k, 0, b) = 0;
}
if (j > 0 && flags.isObstacle(i, j - 1, k, b)) {
vel(i, j, k, 1, b) = 0;
}
if (j > 0 && cur_obs && flags.isFluid(i, j - 1, k, b)) {
vel(i, j, k, 1, b) = 0;
}
if (k > 0 && flags.isObstacle(i, j, k - 1, b)) {
vel(i, j, k, 2, b) = 0;
}
if (k > 0 && cur_obs && flags.isFluid(i, j, k - 1, b)) {
vel(i, j, k, 2, b) = 0;
}
if (cur_fluid) {
if ((i > 0 && flags.isStick(i - 1, j, k, b)) ||
(i < flags.xsize() - 1 && flags.isStick(i + 1, j, k, b))) {
vel(i, j, k, 1, b) = 0;
if (vel.is_3d()) {
vel(i, j, k, 2, b) = 0;
}
}
if ((j > 0 && flags.isStick(i, j - 1, k, b)) ||
(j < flags.ysize() - 1 && flags.isStick(i, j + 1, k, b))) {
vel(i, j, k, 0, b) = 0;
if (vel.is_3d()) {
vel(i, j, k, 2, b) = 0;
}
}
if (vel.is_3d() &&
((k > 0 && flags.isStick(i, j, k - 1, b)) ||
(k < flags.zsize() - 1 && flags.isStick(i, j, k + 1, b)))) {
vel(i, j, k, 0, b) = 0;
vel(i, j, k, 1, b) = 0;
}
}
}
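// For clarity: the kernel above zeroes the face-normal velocity component on
// every face between the current cell and an obstacle neighbor (in either
// orientation), effectively enforcing free-slip walls. For fluid cells next to
// "stick" boundaries it additionally zeroes the tangential components
// (no-slip behavior).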
static int tfluids_CudaMain_setWallBcsForward(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack.
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 1, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 3));
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
// LaunchKernel args: lua_State, func, domain, args...
LaunchKernel(L, &setWallBcsForward, flags,
flags, vel);
return 0; // Recall: number of return values on the lua stack.
}
// *****************************************************************************
// velocityDivergenceForward
// *****************************************************************************
__global__ void velocityDivergenceForward(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid rhs, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta zeros stuff on the border.
rhs(i, j, k, b) = 0;
return;
}
if (!flags.isFluid(i, j, k, b)) {
rhs(i, j, k, b) = 0;
return;
}
// compute divergence
// no flag checks: assumes vel at obstacle interfaces is set to zero.
float div = vel(i, j, k, 0, b) - vel(i + 1, j, k, 0, b) +
vel(i, j, k, 1, b) - vel(i, j + 1, k, 1, b);
if (flags.is_3d()) {
div += (vel(i, j, k, 2, b) - vel(i, j, k + 1, 2, b));
}
rhs(i, j, k, b) = div;
}
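// For clarity: per fluid cell the kernel above stores
//   rhs(i, j, k) = u(i, j, k) - u(i + 1, j, k)
//                + v(i, j, k) - v(i, j + 1, k)  [+ w terms in 3D],
// i.e. the net inflow across the cell faces of the MAC grid, with no 1 / dx
// factor. This appears to follow Manta's sign convention for the pressure
// Poisson right-hand side.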
static int tfluids_CudaMain_velocityDivergenceForward(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack.
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 1, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
THCudaTensor* tensor_u_div = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 4));
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
CudaRealGrid rhs = toCudaRealGrid(state, tensor_u_div, is_3d);
// LaunchKernel args: lua_State, func, domain, args...
const int32_t bnd = 1;
LaunchKernel(L, &velocityDivergenceForward, flags,
flags, vel, rhs, bnd);
return 0; // Recall: number of return values on the lua stack.
}
// *****************************************************************************
// velocityUpdateForward
// *****************************************************************************
__global__ void velocityUpdateForward(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid pressure,
const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Manta doesn't touch the velocity on the boundaries (i.e.
// it stays constant).
return;
}
if (flags.isFluid(i, j, k, b)) {
if (flags.isFluid(i - 1, j, k, b)) {
vel(i, j, k, 0, b) -= (pressure(i, j, k, b) -
pressure(i - 1, j, k, b));
}
if (flags.isFluid(i, j - 1, k, b)) {
vel(i, j, k, 1, b) -= (pressure(i, j, k, b) -
pressure(i, j - 1, k, b));
}
if (flags.is_3d() && flags.isFluid(i, j, k - 1, b)) {
vel(i, j, k, 2, b) -= (pressure(i, j, k, b) -
pressure(i, j, k - 1, b));
}
if (flags.isEmpty(i - 1, j, k, b)) {
vel(i, j, k, 0, b) -= pressure(i, j, k, b);
}
if (flags.isEmpty(i, j - 1, k, b)) {
vel(i, j, k, 1, b) -= pressure(i, j, k, b);
}
if (flags.is_3d() && flags.isEmpty(i, j, k - 1, b)) {
vel(i, j, k, 2, b) -= pressure(i, j, k, b);
}
}
else if (flags.isEmpty(i, j, k, b) && !flags.isOutflow(i, j, k, b)) {
// don't change velocities in outflow cells
if (flags.isFluid(i - 1, j, k, b)) {
vel(i, j, k, 0, b) += pressure(i - 1, j, k, b);
} else {
vel(i, j, k, 0, b) = 0.f;
}
if (flags.isFluid(i, j - 1, k, b)) {
vel(i, j, k, 1, b) += pressure(i, j - 1, k, b);
} else {
vel(i, j, k, 1, b) = 0.f;
}
if (flags.is_3d()) {
if (flags.isFluid(i, j, k - 1, b)) {
vel(i, j, k, 2, b) += pressure(i, j, k - 1, b);
} else {
vel(i, j, k, 2, b) = 0.f;
}
}
}
}
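// For clarity: the kernel above is the pressure projection step written per
// MAC face, e.g. u.x(i) -= p(i) - p(i - 1). Any dt / (rho * dx) scaling is
// assumed to already be folded into the pressure field (as in Manta's
// correctVelocity), which is why no extra factor appears here. Faces adjacent
// to empty (non-outflow) cells are handled by the one-sided updates above.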
static int tfluids_CudaMain_velocityUpdateForward(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack.
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 1, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
THCudaTensor* tensor_p = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 4));
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
CudaRealGrid pressure = toCudaRealGrid(state, tensor_p, is_3d);
const int32_t bnd = 1;
// LaunchKernel args: lua_State, func, domain, args...
LaunchKernel(L, &velocityUpdateForward, flags,
flags, vel, pressure, bnd);
return 0; // Recall: number of return values on the lua stack.
}
// *****************************************************************************
// addBuoyancy
// *****************************************************************************
__global__ void addBuoyancy(
CudaFlagGrid flags, CudaMACGrid vel, CudaRealGrid factor,
THCDeviceTensor<float, 1> strength, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
return;
}
if (!flags.isFluid(i, j, k, b)) {
return;
}
if (flags.isFluid(i - 1, j, k, b)) {
vel(i, j, k, 0, b) += (0.5f * strength[0] *
(factor(i, j, k, b) + factor(i - 1, j, k, b)));
}
if (flags.isFluid(i, j - 1, k, b)) {
vel(i, j, k, 1, b) += (0.5f * strength[1] *
(factor(i, j, k, b) + factor(i, j - 1, k, b)));
}
if (flags.is_3d() && flags.isFluid(i, j, k - 1, b)) {
vel(i, j, k, 2, b) += (0.5f * strength[2] *
(factor(i, j, k, b) + factor(i, j, k - 1, b)));
}
}
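// For clarity: each face above receives 0.5f * strength[c] * (factor(cell) +
// factor(neighbor)), i.e. the buoyancy strength scaled by the face-averaged
// density. The host wrapper below fills `strength` with -gravity * dt / dx,
// so the applied acceleration points opposite to gravity and grows with the
// local density.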
static int tfluids_CudaMain_addBuoyancy(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack.
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 1, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
THCudaTensor* tensor_density = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
THCudaTensor* tensor_gravity = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 4, "torch.CudaTensor"));
THCudaTensor* tensor_strength = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 5, "torch.CudaTensor"));
const float dt = static_cast<float>(lua_tonumber(L, 6));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 7));
if (tensor_gravity->nDimension != 1 || tensor_gravity->size[0] != 3) {
luaL_error(L, "ERROR: gravity must be a 3D vector (even in 2D)");
}
  if (tensor_strength->nDimension != 1 || tensor_strength->size[0] != 3) {
    luaL_error(L, "ERROR: strength must be a 3D vector (even in 2D)");
}
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
CudaRealGrid factor = toCudaRealGrid(state, tensor_density, is_3d);
THCudaTensor_copy(state, tensor_strength, tensor_gravity);
THCudaTensor_mul(state, tensor_strength, tensor_strength,
-1.0f * dt / flags.getDx());
THCDeviceTensor<float, 1> dev_strength =
toDeviceTensor<float, 1>(state, tensor_strength);
const int32_t bnd = 1;
// LaunchKernel args: lua_State, func, domain, args...
LaunchKernel(L, &addBuoyancy, flags,
flags, vel, factor, dev_strength, bnd);
return 0; // Recall: number of return values on the lua stack.
}
// *****************************************************************************
// addGravity
// *****************************************************************************
__global__ void addGravity(
CudaFlagGrid flags, CudaMACGrid vel, THCDeviceTensor<float, 1> force,
const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
return;
}
const bool curFluid = flags.isFluid(i, j, k, b);
const bool curEmpty = flags.isEmpty(i, j, k, b);
if (!curFluid && !curEmpty) {
return;
}
if (flags.isFluid(i - 1, j, k, b) ||
(curFluid && flags.isEmpty(i - 1, j, k, b))) {
vel(i, j, k, 0, b) += force[0];
}
if (flags.isFluid(i, j - 1, k, b) ||
(curFluid && flags.isEmpty(i, j - 1, k, b))) {
vel(i, j, k, 1, b) += force[1];
}
if (flags.is_3d() && (flags.isFluid(i, j, k - 1, b) ||
(curFluid && flags.isEmpty(i, j, k - 1, b)))) {
vel(i, j, k, 2, b) += force[2];
}
}
static int tfluids_CudaMain_addGravity(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack.
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 1, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
THCudaTensor* tensor_gravity = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 3, "torch.CudaTensor"));
const float dt = static_cast<float>(lua_tonumber(L, 4));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 5));
THCudaTensor* tensor_force = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 6, "torch.CudaTensor"));
if (tensor_gravity->nDimension != 1 || tensor_gravity->size[0] != 3) {
luaL_error(L, "ERROR: gravity must be a 3D vector (even in 2D)");
}
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
const float mult = dt / flags.getDx();
THCudaTensor_mul(state, tensor_force, tensor_gravity, mult);
THCDeviceTensor<float, 1> force =
toDeviceTensor<float, 1>(state, tensor_force);
const int32_t bnd = 1;
// LaunchKernel args: lua_State, func, domain, args...
LaunchKernel(L, &addGravity, flags,
flags, vel, force, bnd);
return 0; // Recall: number of return values on the lua stack.
}
// *****************************************************************************
// vorticityConfinement
// *****************************************************************************
__global__ void AddForceField(
CudaFlagGrid flags, CudaMACGrid vel, CudaVecGrid force, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
const bool curFluid = flags.isFluid(i, j, k, b);
const bool curEmpty = flags.isEmpty(i, j, k, b);
if (!curFluid && !curEmpty) {
return;
}
if (flags.isFluid(i - 1, j, k, b) ||
(curFluid && flags.isEmpty(i - 1, j, k, b))) {
vel(i, j, k, 0, b) += (0.5f *
(force(i - 1, j, k, 0, b) + force(i, j, k, 0, b)));
}
if (flags.isFluid(i, j - 1, k, b) ||
(curFluid && flags.isEmpty(i, j - 1, k, b))) {
vel(i, j, k, 1, b) += (0.5f *
(force(i, j - 1, k, 1, b) + force(i, j, k, 1, b)));
}
if (flags.is_3d() && (flags.isFluid(i, j, k - 1, b) ||
(curFluid && flags.isEmpty(i, j, k - 1, b)))) {
vel(i, j, k, 2, b) += (0.5f *
(force(i, j, k - 1, 2, b) + force(i, j, k, 2, b)));
}
}
__global__ void GetCentered(CudaFlagGrid flags, CudaMACGrid vel,
CudaVecGrid centered, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
centered.setSafe(i, j, k, b, CudaVec3(0, 0, 0));
return;
}
centered.setSafe(i, j, k, b, vel.getCentered(i, j, k, b));
}
__global__ void GetCurlAndCurlNorm(
CudaFlagGrid flags, CudaVecGrid centered, CudaVecGrid curl,
CudaRealGrid curl_norm, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
curl.setSafe(i, j, k, b, CudaVec3(0, 0, 0));
curl_norm(i, j, k, b) = 0;
return;
}
const CudaVec3 cur_curl(centered.curl(i, j, k, b));
curl.setSafe(i, j, k, b, cur_curl);
curl_norm(i, j, k, b) = cur_curl.norm();
}
__global__ void GetVorticityConfinementForce(
CudaFlagGrid flags, CudaVecGrid curl, CudaRealGrid curl_norm,
const float strength, CudaVecGrid force, const int32_t bnd) {
int32_t b, chan, k, j, i;
if (GetKernelIndices(flags, b, chan, k, j, i)) {
return;
}
if (i < bnd || i > flags.xsize() - 1 - bnd ||
j < bnd || j > flags.ysize() - 1 - bnd ||
(flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) {
// Don't add force on the boundaries.
force.setSafe(i, j, k, b, CudaVec3(0, 0, 0));
return;
}
CudaVec3 grad(0, 0, 0);
grad.x = 0.5f * (curl_norm(i + 1, j, k, b) - curl_norm(i - 1, j, k, b));
grad.y = 0.5f * (curl_norm(i, j + 1, k, b) - curl_norm(i, j - 1, k, b));
if (flags.is_3d()) {
grad.z = 0.5f * (curl_norm(i, j, k + 1, b) - curl_norm(i, j, k - 1, b));
}
grad.normalize();
force.setSafe(i, j, k, b, CudaVec3::cross(grad, curl(i, j, k, b)) * strength);
}
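// For clarity: the kernel above implements the standard vorticity confinement
// force, F = strength * (N x omega), where omega is the curl of the centered
// velocity and N = grad(|omega|) / |grad(|omega|)| points toward regions of
// higher vorticity. The resulting cell-centered force is then spread onto the
// MAC faces by AddForceField above.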
static int tfluids_CudaMain_vorticityConfinement(lua_State* L) {
THCState* state = cutorch_getstate(L);
  // Get the args from the lua stack. NOTE: ALL argument (size) checking is
  // done on the lua stack.
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 1, "torch.CudaTensor"));
THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 2, "torch.CudaTensor"));
const float strength = static_cast<float>(lua_tonumber(L, 3));
THCudaTensor* tensor_centered = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 4, "torch.CudaTensor"));
THCudaTensor* tensor_curl = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 5, "torch.CudaTensor"));
THCudaTensor* tensor_curl_norm = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 6, "torch.CudaTensor"));
THCudaTensor* tensor_force = reinterpret_cast<THCudaTensor*>(
luaT_checkudata(L, 7, "torch.CudaTensor"));
const bool is_3d = static_cast<bool>(lua_toboolean(L, 8));
CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d);
CudaMACGrid vel = toCudaMACGrid(state, tensor_u, is_3d);
CudaVecGrid centered = toCudaVecGrid(state, tensor_centered, is_3d);
CudaVecGrid curl = toCudaVecGrid(state, tensor_curl, true); // Always 3D.
CudaRealGrid curl_norm = toCudaRealGrid(state, tensor_curl_norm, is_3d);
CudaVecGrid force = toCudaVecGrid(state, tensor_force, is_3d);
// First calculate the centered velocity.
// LaunchKernel args: lua_State, func, domain, args...
const int32_t bnd = 1;
LaunchKernel(L, &GetCentered, flags,
flags, vel, centered, bnd);
  // Now calculate the curl and its (L2) norm (of the centered velocities).
LaunchKernel(L, &GetCurlAndCurlNorm, flags,
flags, centered, curl, curl_norm, bnd);
// Now calculate the vorticity confinement force.
LaunchKernel(L, &GetVorticityConfinementForce, flags,
flags, curl, curl_norm, strength, force, bnd);
// Now apply the force.
LaunchKernel(L, &AddForceField, flags,
flags, vel, force, bnd);
return 0; // Recall: number of return values on the lua stack.
}
//#include <Graph/GraphBase.hpp>
#include <Graph/GraphStd.hpp>
#include <Graph/GraphWeight.hpp>
#include <Graph/BellmanFord.hpp>
#include <Graph/Dijkstra.hpp>
#include <iostream>
#include "Device/Util/Timer.cuh"
#include "Device/DataMovement/impl/Block.i.cuh"
#include <cooperative_groups.h>
//#define ENABLE_MGPU
#include <random>
#include <chrono>
#include "StandardAPI.hpp"
#if defined(ENABLE_MGPU)
#include <moderngpu/kernel_load_balance.hxx>
#endif
using namespace graph;
using namespace timer;
using namespace hornets_nest;
template<int ITEMS_PER_BLOCK, int BLOCK_SIZE>
__global__
void MergePathTest2(const int* __restrict__ d_partitions,
int num_partitions,
const int* __restrict__ d_prefixsum,
int prefixsum_size,
int* __restrict__ d_pos,
int* __restrict__ d_offset) {
__shared__ int smem[ITEMS_PER_BLOCK];
const auto& lambda = [&](int pos, int, int index) {
d_pos[index] = pos;
//d_offset[index] = offset;
};
//xlib::binarySearchLB2<BLOCK_SIZE, ITEMS_PER_BLOCK / BLOCK_SIZE, true>
// (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda);
xlib::mergePathLB<BLOCK_SIZE, ITEMS_PER_BLOCK>
(d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda);
}
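// What the kernel above exercises: given a prefix sum over segments (here the
// CSR row offsets), merge-path load balancing assigns each of the nE()
// work-items the index of the segment that owns it (`pos`) and, optionally,
// its offset inside that segment. The sequential reference in exec() below
// builds the same mapping:
//   for each segment i:
//     for j in [prefixsum[i], prefixsum[i + 1]):
//       h_pos[k] = i; h_offset[k] = j - prefixsum[i]; ++k;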
#if 0  // Used by (commented-out) unreachable code below; may be better to delete unless this code will be reused.
const bool PRINT = false;
const int BLOCK_SIZE = 128;
#endif
__device__ int d_value;
template<int ITEMS_PER_BLOCK, int BLOCK_SIZE>
__global__
void copyKernel(const int* __restrict__ input, int num_blocks, int smem_size) {
__shared__ int smem[ITEMS_PER_BLOCK];
for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) {
xlib::block::StrideOp<0, ITEMS_PER_BLOCK, BLOCK_SIZE>
::copy(input + i * ITEMS_PER_BLOCK, smem_size, smem);
/*auto smem_tmp = smem + threadIdx.x;
auto d_tmp = input + i * ITEMS_PER_BLOCK + threadIdx.x;
#pragma unroll
for (int i = 0; i < ITEMS_PER_BLOCK; i += BLOCK_SIZE)
smem_tmp[i] = (i + threadIdx.x < smem_size) ? d_tmp[i] : 0;*/
        // This branch can never be taken (threadIdx.x < 1024); it presumably
        // exists only to keep the compiler from optimizing the copy away.
        if (threadIdx.x > 1023)
            d_value = smem[threadIdx.x];
}
}
template<int ITEMS_PER_BLOCK, int BLOCK_SIZE>
__global__
void copyKernel2(const int* __restrict__ input, int num_blocks, int smem_size) {
//__shared__ int smem[ITEMS_PER_BLOCK];
for (int i = blockIdx.x; i < num_blocks; i += gridDim.x) {
auto smem_tmp = xlib::dyn_smem + threadIdx.x;
auto d_tmp = input + i * ITEMS_PER_BLOCK + threadIdx.x;
for (int i = threadIdx.x; i < smem_size; i += BLOCK_SIZE) {
*smem_tmp = *d_tmp;
smem_tmp += BLOCK_SIZE;
d_tmp += BLOCK_SIZE;
}
        // As above: never taken; presumably prevents the compiler from
        // eliminating the copy.
        if (threadIdx.x > 1023)
            d_value = xlib::dyn_smem[threadIdx.x];
}
}
__global__
void noLambdaKernel(const int* __restrict__ ptr2, int* __restrict__ ptr1, int size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride) {
ptr1[i] = ptr2[i];
ptr1[i + 10] = ptr2[i + 10];
ptr1[i + 20] = ptr2[i + 20];
}
}
template<typename Lambda>
__global__
void lambdaKernel(Lambda lambda, int size) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride)
lambda(i);
}
template<typename Lambda, typename... TArgs>
__global__
void lambdaKernel2(Lambda lambda, int size, TArgs* __restrict__ ... args) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < size; i += stride)
lambda(i, args...);
}
struct LL {
int* __restrict__ ptr1;
const int* __restrict__ ptr2;
__device__ __forceinline__
void operator()(int i) {
const int* __restrict__ vv2 = ptr2;
int* __restrict__ vv1 = ptr1;
vv1[i] = vv2[i];
vv1[i + 10] = vv2[i + 10];
vv1[i + 20] = vv2[i + 20];
}
};
int exec(int argc, char* argv[]) {
using namespace graph;
GraphStd<int, int> graph1;
graph1.read(argv[1]);
graph1.print_degree_distrib();
graph1.print_analysis();
auto weights = new int[graph1.nV()];
auto seed = std::chrono::high_resolution_clock::now().time_since_epoch()
.count();
std::mt19937 engine(seed);
std::uniform_int_distribution<int> distrib(0, 100);
std::generate(weights, weights + graph1.nV(),
[&](){ return distrib(engine); } );
GraphWeight<int, int, int> graph_weight(graph1.csr_out_edges(), graph1.nV(),
graph1.csr_out_edges(), graph1.nE(),
weights);
Timer<HOST> TM1;
Dijkstra<int, int, int> dijkstra(graph_weight);
TM1.start();
for (int i = 0; i < graph1.nV(); i++) {
dijkstra.run(i);
dijkstra.reset();
}
TM1.stop();
TM1.print("Dijkstra");
return 1;
  #if 0  // Unreachable code; may be better to delete unless this code will be reused.
const int THREAD_ITEMS = 11;
const int ITEMS_PER_BLOCK = BLOCK_SIZE * THREAD_ITEMS;
int num_blocks_copy = 100000;
int* d_input;
gpu::allocate(d_input, ITEMS_PER_BLOCK * num_blocks_copy);
Timer<DEVICE, micro> TM;
TM.start();
copyKernel<ITEMS_PER_BLOCK, BLOCK_SIZE>
<<< num_blocks_copy, BLOCK_SIZE >>> (d_input, num_blocks_copy, 9 * BLOCK_SIZE);
TM.stop();
TM.print("copy1");
TM.start();
copyKernel2<ITEMS_PER_BLOCK, BLOCK_SIZE>
<<< num_blocks_copy, BLOCK_SIZE >>> (d_input, num_blocks_copy, 9 * BLOCK_SIZE);
TM.stop();
TM.print("copy2");
gpu::free(d_input);
return 1;
GraphStd<> graph;
graph.read(argv[1], parsing_prop::PRINT_INFO | parsing_prop::RM_SINGLETON);
int size = graph.nV();
auto prefixsum = graph.csr_out_offsets();
int ceil_total = xlib::upper_approx(graph.nE(), ITEMS_PER_BLOCK);
//--------------------------------------------------------------------------
// HOST
auto h_pos = new int[ceil_total];
auto h_offset = new int[ceil_total];
for (int i = 0, k = 0; i < size; i++) {
for (int j = prefixsum[i]; j < prefixsum[i + 1]; j++) {
h_pos[k] = i;
h_offset[k++] = j - prefixsum[i];
}
}
for (int i = prefixsum[size]; i < ceil_total; i++)
h_pos[i] = -1;
//--------------------------------------------------------------------------
int num_merge = graph.nE() + graph.nV();
if (PRINT) {
graph.print_raw();
std::cout << "Experted results:\n\n";
host::printArray(prefixsum, size + 1);
host::printArray(h_pos, prefixsum[size]);
host::printArray(h_offset, prefixsum[size]);
}
int* d_prefixsum, *d_pos, *d_offset, *d_partitions;
int merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merge);
int merge_block_partitions = xlib::ceil_div<BLOCK_SIZE>(merge_blocks);
int num_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(graph.nE());
int num_block_partitions = xlib::ceil_div<BLOCK_SIZE>(num_blocks);
std::cout << " THREAD_ITEMS: " << THREAD_ITEMS
<< "\n ITEMS_PER_BLOCK: " << ITEMS_PER_BLOCK
<< "\n Total items: " << graph.nE()
<< "\n Num blocks: " << num_blocks
<< "\n Num Merges Part.: " << merge_blocks
<< "\n" << std::endl;
gpu::allocate(d_prefixsum, size + 1);
gpu::allocate(d_pos, ceil_total);
gpu::allocate(d_offset, ceil_total);
gpu::allocate(d_partitions, merge_blocks + 1);
host::copyToDevice(prefixsum, size + 1, d_prefixsum);
gpu::memsetZero(d_pos, ceil_total);
gpu::memsetZero(d_offset, ceil_total);
gpu::memsetZero(d_partitions, num_blocks + 1);
//--------------------------------------------------------------------------
TM.start();
xlib::mergePathLBPartition <ITEMS_PER_BLOCK>
<<< merge_block_partitions, BLOCK_SIZE >>>
(d_prefixsum, size, graph.nE(), num_merge, d_partitions, merge_blocks);
TM.stop();
TM.print("Partition: ");
TM.start();
MergePathTest2<ITEMS_PER_BLOCK, BLOCK_SIZE> <<< merge_blocks, BLOCK_SIZE >>>
(d_partitions, merge_blocks, d_prefixsum, size + 1, d_pos, d_offset);
TM.stop();
TM.print("BinarySearch: ");
CHECK_CUDA_ERROR
//--------------------------------------------------------------------------
if (PRINT) {
std::cout << "Results:\n\n";
gpu::printArray(d_pos, graph.nE());
gpu::printArray(d_offset, graph.nE());
}
std::cout << "\n Check Positions: "
<< gpu::equal(h_pos, h_pos + graph.nE(), d_pos)
//<< "\n Check Offsets: "
//<< gpu::equal(h_offset, h_offset + graph.nE(), d_offset)
<< "\n" << std::endl;
//L1:
#if defined(ENABLE_MGPU)
using namespace mgpu;
standard_context_t context;
int num_segments = graph.nV();
int count = graph.nE();
const auto& vector = std::vector<int>(prefixsum, prefixsum + num_segments);
mem_t<int> segments = to_mem(vector, context);
mem_t<int> lbs(count, context);
TM.start();
load_balance_search(count, segments.data(), num_segments, lbs.data(),
context);
TM.stop();
TM.print("ModernGPU: ");
auto lbs_host = from_mem(lbs);
std::cout << "\n Check Offsets: "
<< std::equal(h_pos, h_pos + graph.nE(), lbs_host.data())
<< "\n" << std::endl;
#endif
gpu::free(d_partitions, d_offset, d_pos, d_prefixsum);
return 0;
#endif
}
int main(int argc, char* argv[]) {
int ret = 0;
#if defined(RMM_WRAPPER)
  gpu::initializeRMMPoolAllocation();  // Update initPoolSize if you know your memory requirement and availability in your system; if the initial pool size is set to 0 (the default), RMM currently assigns half the device memory.
{//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
#endif
ret = exec(argc, argv);
#if defined(RMM_WRAPPER)
}//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
gpu::finalizeRMMPoolAllocation();
#endif
return ret;
}
#include <algorithm>
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include "cuda_best_split_finder.hpp"
namespace LightGBM {
__device__ void ReduceBestGainWarp(double gain, bool found, uint32_t thread_index, double* out_gain, bool* out_found, uint32_t* out_thread_index) {
const uint32_t mask = 0xffffffff;
const uint32_t warpLane = threadIdx.x % warpSize;
for (uint32_t offset = warpSize / 2; offset > 0; offset >>= 1) {
const bool other_found = __shfl_down_sync(mask, found, offset);
const double other_gain = __shfl_down_sync(mask, gain, offset);
const uint32_t other_thread_index = __shfl_down_sync(mask, thread_index, offset);
if ((other_found && found && other_gain > gain) || (!found && other_found)) {
found = other_found;
gain = other_gain;
thread_index = other_thread_index;
}
}
if (warpLane == 0) {
*out_gain = gain;
*out_found = found;
*out_thread_index = thread_index;
}
}
__device__ uint32_t ReduceBestGainBlock(double gain, bool found, uint32_t thread_index) {
const uint32_t mask = 0xffffffff;
for (uint32_t offset = warpSize / 2; offset > 0; offset >>= 1) {
const bool other_found = __shfl_down_sync(mask, found, offset);
const double other_gain = __shfl_down_sync(mask, gain, offset);
const uint32_t other_thread_index = __shfl_down_sync(mask, thread_index, offset);
if ((other_found && found && other_gain > gain) || (!found && other_found)) {
found = other_found;
gain = other_gain;
thread_index = other_thread_index;
}
}
return thread_index;
}
__device__ uint32_t ReduceBestGain(double gain, bool found, uint32_t thread_index,
double* shared_gain_buffer, bool* shared_found_buffer, uint32_t* shared_thread_index_buffer) {
const uint32_t warpID = threadIdx.x / warpSize;
const uint32_t warpLane = threadIdx.x % warpSize;
const uint32_t num_warp = blockDim.x / warpSize;
ReduceBestGainWarp(gain, found, thread_index, shared_gain_buffer + warpID, shared_found_buffer + warpID, shared_thread_index_buffer + warpID);
__syncthreads();
if (warpID == 0) {
gain = warpLane < num_warp ? shared_gain_buffer[warpLane] : kMinScore;
found = warpLane < num_warp ? shared_found_buffer[warpLane] : false;
thread_index = warpLane < num_warp ? shared_thread_index_buffer[warpLane] : 0;
thread_index = ReduceBestGainBlock(gain, found, thread_index);
}
return thread_index;
}
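// Reduction structure used above (two levels): each warp first reduces its 32
// (gain, found, thread_index) candidates with __shfl_down_sync and writes the
// warp winner into shared memory; warp 0 then reloads those per-warp results
// and runs the same shuffle reduction to pick the block-wide winner. Only
// lane 0 of warp 0 holds a meaningful return value, so callers broadcast it
// through shared memory (see `best_thread_index` in
// FindBestSplitsForLeafKernelInner below).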
__device__ void ReduceBestGainForLeaves(double* gain, int* leaves, int cuda_cur_num_leaves) {
const unsigned int tid = threadIdx.x;
for (unsigned int s = 1; s < cuda_cur_num_leaves; s *= 2) {
if (tid % (2 * s) == 0 && (tid + s) < cuda_cur_num_leaves) {
const uint32_t tid_s = tid + s;
if ((leaves[tid] == -1 && leaves[tid_s] != -1) || (leaves[tid] != -1 && leaves[tid_s] != -1 && gain[tid_s] > gain[tid])) {
gain[tid] = gain[tid_s];
leaves[tid] = leaves[tid_s];
}
}
__syncthreads();
}
}
__device__ void ReduceBestGainForLeavesWarp(double gain, int leaf_index, double* out_gain, int* out_leaf_index) {
const uint32_t mask = 0xffffffff;
const uint32_t warpLane = threadIdx.x % warpSize;
for (uint32_t offset = warpSize / 2; offset > 0; offset >>= 1) {
const int other_leaf_index = __shfl_down_sync(mask, leaf_index, offset);
const double other_gain = __shfl_down_sync(mask, gain, offset);
if ((leaf_index != -1 && other_leaf_index != -1 && other_gain > gain) || (leaf_index == -1 && other_leaf_index != -1)) {
gain = other_gain;
leaf_index = other_leaf_index;
}
}
if (warpLane == 0) {
*out_gain = gain;
*out_leaf_index = leaf_index;
}
}
__device__ int ReduceBestGainForLeavesBlock(double gain, int leaf_index) {
const uint32_t mask = 0xffffffff;
for (uint32_t offset = warpSize / 2; offset > 0; offset >>= 1) {
const int other_leaf_index = __shfl_down_sync(mask, leaf_index, offset);
const double other_gain = __shfl_down_sync(mask, gain, offset);
if ((leaf_index != -1 && other_leaf_index != -1 && other_gain > gain) || (leaf_index == -1 && other_leaf_index != -1)) {
gain = other_gain;
leaf_index = other_leaf_index;
}
}
return leaf_index;
}
__device__ int ReduceBestGainForLeaves(double gain, int leaf_index, double* shared_gain_buffer, int* shared_leaf_index_buffer) {
const uint32_t warpID = threadIdx.x / warpSize;
const uint32_t warpLane = threadIdx.x % warpSize;
const uint32_t num_warp = blockDim.x / warpSize;
ReduceBestGainForLeavesWarp(gain, leaf_index, shared_gain_buffer + warpID, shared_leaf_index_buffer + warpID);
__syncthreads();
if (warpID == 0) {
gain = warpLane < num_warp ? shared_gain_buffer[warpLane] : kMinScore;
leaf_index = warpLane < num_warp ? shared_leaf_index_buffer[warpLane] : -1;
leaf_index = ReduceBestGainForLeavesBlock(gain, leaf_index);
}
return leaf_index;
}
template <bool USE_RAND, bool USE_L1, bool USE_SMOOTHING, bool REVERSE>
__device__ void FindBestSplitsForLeafKernelInner(
// input feature information
const hist_t* feature_hist_ptr,
// input task information
const SplitFindTask* task,
CUDARandom* cuda_random,
// input config parameter values
const double lambda_l1,
const double lambda_l2,
const double path_smooth,
const data_size_t min_data_in_leaf,
const double min_sum_hessian_in_leaf,
const double min_gain_to_split,
// input parent node information
const double parent_gain,
const double sum_gradients,
const double sum_hessians,
const data_size_t num_data,
const double parent_output,
// output parameters
CUDASplitInfo* cuda_best_split_info) {
const double cnt_factor = num_data / sum_hessians;
const double min_gain_shift = parent_gain + min_gain_to_split;
cuda_best_split_info->is_valid = false;
hist_t local_grad_hist = 0.0f;
hist_t local_hess_hist = 0.0f;
double local_gain = 0.0f;
bool threshold_found = false;
uint32_t threshold_value = 0;
__shared__ int rand_threshold;
if (USE_RAND && threadIdx.x == 0) {
if (task->num_bin - 2 > 0) {
rand_threshold = cuda_random->NextInt(0, task->num_bin - 2);
}
}
__shared__ uint32_t best_thread_index;
__shared__ double shared_double_buffer[32];
__shared__ bool shared_bool_buffer[32];
__shared__ uint32_t shared_int_buffer[32];
const unsigned int threadIdx_x = threadIdx.x;
const bool skip_sum = REVERSE ?
(task->skip_default_bin && (task->num_bin - 1 - threadIdx_x) == static_cast<int>(task->default_bin)) :
(task->skip_default_bin && (threadIdx_x + task->mfb_offset) == static_cast<int>(task->default_bin));
const uint32_t feature_num_bin_minus_offset = task->num_bin - task->mfb_offset;
if (!REVERSE) {
if (task->na_as_missing && task->mfb_offset == 1) {
if (threadIdx_x < static_cast<uint32_t>(task->num_bin) && threadIdx_x > 0) {
const unsigned int bin_offset = (threadIdx_x - 1) << 1;
local_grad_hist = feature_hist_ptr[bin_offset];
local_hess_hist = feature_hist_ptr[bin_offset + 1];
}
} else {
if (threadIdx_x < feature_num_bin_minus_offset && !skip_sum) {
const unsigned int bin_offset = threadIdx_x << 1;
local_grad_hist = feature_hist_ptr[bin_offset];
local_hess_hist = feature_hist_ptr[bin_offset + 1];
}
}
} else {
if (threadIdx_x >= static_cast<unsigned int>(task->na_as_missing) &&
threadIdx_x < feature_num_bin_minus_offset && !skip_sum) {
const unsigned int read_index = feature_num_bin_minus_offset - 1 - threadIdx_x;
const unsigned int bin_offset = read_index << 1;
local_grad_hist = feature_hist_ptr[bin_offset];
local_hess_hist = feature_hist_ptr[bin_offset + 1];
}
}
__syncthreads();
if (!REVERSE && task->na_as_missing && task->mfb_offset == 1) {
const hist_t sum_gradients_non_default = ShuffleReduceSum<hist_t>(local_grad_hist, shared_double_buffer, blockDim.x);
__syncthreads();
const hist_t sum_hessians_non_default = ShuffleReduceSum<hist_t>(local_hess_hist, shared_double_buffer, blockDim.x);
if (threadIdx_x == 0) {
local_grad_hist += (sum_gradients - sum_gradients_non_default);
local_hess_hist += (sum_hessians - sum_hessians_non_default);
}
}
if (threadIdx_x == 0) {
local_hess_hist += kEpsilon;
}
local_gain = kMinScore;
local_grad_hist = ShufflePrefixSum(local_grad_hist, shared_double_buffer);
__syncthreads();
local_hess_hist = ShufflePrefixSum(local_hess_hist, shared_double_buffer);
if (REVERSE) {
if (threadIdx_x >= static_cast<unsigned int>(task->na_as_missing) && threadIdx_x <= task->num_bin - 2 && !skip_sum) {
const double sum_right_gradient = local_grad_hist;
const double sum_right_hessian = local_hess_hist;
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double sum_left_gradient = sum_gradients - sum_right_gradient;
const double sum_left_hessian = sum_hessians - sum_right_hessian;
const data_size_t left_count = num_data - right_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf &&
(!USE_RAND || static_cast<int>(task->num_bin - 2 - threadIdx_x) == rand_threshold)) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
lambda_l2, path_smooth, left_count, right_count, parent_output);
// accept the split only if its gain exceeds the no-split baseline (min_gain_shift)
if (current_gain > min_gain_shift) {
local_gain = current_gain - min_gain_shift;
threshold_value = static_cast<uint32_t>(task->num_bin - 2 - threadIdx_x);
threshold_found = true;
}
}
}
} else {
const uint32_t end = (task->na_as_missing && task->mfb_offset == 1) ? static_cast<uint32_t>(task->num_bin - 2) : feature_num_bin_minus_offset - 2;
if (threadIdx_x <= end && !skip_sum) {
const double sum_left_gradient = local_grad_hist;
const double sum_left_hessian = local_hess_hist;
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = num_data - left_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf &&
(!USE_RAND || static_cast<int>(threadIdx_x + task->mfb_offset) == rand_threshold)) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
lambda_l2, path_smooth, left_count, right_count, parent_output);
// accept the split only if its gain exceeds the no-split baseline (min_gain_shift)
if (current_gain > min_gain_shift) {
local_gain = current_gain - min_gain_shift;
threshold_value = (task->na_as_missing && task->mfb_offset == 1) ?
static_cast<uint32_t>(threadIdx_x) :
static_cast<uint32_t>(threadIdx_x + task->mfb_offset);
threshold_found = true;
}
}
}
}
__syncthreads();
const uint32_t result = ReduceBestGain(local_gain, threshold_found, threadIdx_x, shared_double_buffer, shared_bool_buffer, shared_int_buffer);
if (threadIdx_x == 0) {
best_thread_index = result;
}
__syncthreads();
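// ReduceBestGain is a block-wide argmax over (threshold_found, local_gain); the winning
// thread index is broadcast through best_thread_index and only that thread writes the
// split description below, rebuilding left/right sums from its prefix sums.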
if (threshold_found && threadIdx_x == best_thread_index) {
cuda_best_split_info->is_valid = true;
cuda_best_split_info->threshold = threshold_value;
cuda_best_split_info->gain = local_gain;
cuda_best_split_info->default_left = task->assume_out_default_left;
if (REVERSE) {
const double sum_right_gradient = local_grad_hist;
const double sum_right_hessian = local_hess_hist - kEpsilon;
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double sum_left_gradient = sum_gradients - sum_right_gradient;
const double sum_left_hessian = sum_hessians - sum_right_hessian - kEpsilon;
const data_size_t left_count = num_data - right_count;
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, right_output);
} else {
const double sum_left_gradient = local_grad_hist;
const double sum_left_hessian = local_hess_hist - kEpsilon;
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian - kEpsilon;
const data_size_t right_count = num_data - left_count;
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, right_output);
}
}
}
template <bool USE_RAND, bool USE_L1, bool USE_SMOOTHING>
__device__ void FindBestSplitsForLeafKernelCategoricalInner(
// input feature information
const hist_t* feature_hist_ptr,
// input task information
const SplitFindTask* task,
CUDARandom* cuda_random,
// input config parameter values
const double lambda_l1,
const double lambda_l2,
const double path_smooth,
const data_size_t min_data_in_leaf,
const double min_sum_hessian_in_leaf,
const double min_gain_to_split,
const double cat_smooth,
const double cat_l2,
const int max_cat_threshold,
const int min_data_per_group,
// input parent node information
const double parent_gain,
const double sum_gradients,
const double sum_hessians,
const data_size_t num_data,
const double parent_output,
// output parameters
CUDASplitInfo* cuda_best_split_info) {
__shared__ double shared_gain_buffer[32];
__shared__ bool shared_found_buffer[32];
__shared__ uint32_t shared_thread_index_buffer[32];
__shared__ uint32_t best_thread_index;
const double cnt_factor = num_data / sum_hessians;
const double min_gain_shift = parent_gain + min_gain_to_split;
double l2 = lambda_l2;
double local_gain = min_gain_shift;
bool threshold_found = false;
cuda_best_split_info->is_valid = false;
const int bin_start = 1 - task->mfb_offset;
const int bin_end = task->num_bin - task->mfb_offset;
const int threadIdx_x = static_cast<int>(threadIdx.x);
__shared__ int rand_threshold;
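// Two categorical strategies follow: a one-hot split (one category goes left, everything
// else right) when task->is_one_hot, otherwise a many-vs-many split over categories
// ordered by gradient/hessian ratio. bin_start/bin_end are the usable category bins once
// the mfb offset (presumably the most-frequent-bin slot) is accounted for.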
if (task->is_one_hot) {
if (USE_RAND && threadIdx.x == 0) {
rand_threshold = 0;
if (bin_end > bin_start) {
rand_threshold = cuda_random->NextInt(bin_start, bin_end);
}
}
__syncthreads();
if (threadIdx_x >= bin_start && threadIdx_x < bin_end) {
const int bin_offset = (threadIdx_x << 1);
const hist_t grad = feature_hist_ptr[bin_offset];
const hist_t hess = feature_hist_ptr[bin_offset + 1];
data_size_t cnt =
static_cast<data_size_t>(__double2int_rn(hess * cnt_factor));
if (cnt >= min_data_in_leaf && hess >= min_sum_hessian_in_leaf) {
const data_size_t other_count = num_data - cnt;
if (other_count >= min_data_in_leaf) {
const double sum_other_hessian = sum_hessians - hess - kEpsilon;
if (sum_other_hessian >= min_sum_hessian_in_leaf && (!USE_RAND || static_cast<int>(threadIdx_x) == rand_threshold)) {
const double sum_other_gradient = sum_gradients - grad;
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_other_gradient, sum_other_hessian, grad,
hess + kEpsilon, lambda_l1,
l2, path_smooth, other_count, cnt, parent_output);
if (current_gain > min_gain_shift) {
local_gain = current_gain;
threshold_found = true;
}
}
}
}
}
__syncthreads();
const uint32_t result = ReduceBestGain(local_gain, threshold_found, threadIdx_x, shared_gain_buffer, shared_found_buffer, shared_thread_index_buffer);
if (threadIdx_x == 0) {
best_thread_index = result;
}
__syncthreads();
if (threshold_found && threadIdx_x == best_thread_index) {
cuda_best_split_info->is_valid = true;
cuda_best_split_info->num_cat_threshold = 1;
cuda_best_split_info->gain = local_gain - min_gain_shift;
*(cuda_best_split_info->cat_threshold) = static_cast<uint32_t>(threadIdx_x + task->mfb_offset);
cuda_best_split_info->default_left = false;
const int bin_offset = (threadIdx_x << 1);
const hist_t sum_left_gradient = feature_hist_ptr[bin_offset];
const hist_t sum_left_hessian = feature_hist_ptr[bin_offset + 1];
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, right_output);
}
} else {
__shared__ double shared_value_buffer[NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER];
__shared__ int16_t shared_index_buffer[NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER];
__shared__ uint16_t shared_mem_buffer_uint16[32];
__shared__ double shared_mem_buffer_double[32];
__shared__ int used_bin;
l2 += cat_l2;
uint16_t is_valid_bin = 0;
int best_dir = 0;
double best_sum_left_gradient = 0.0f;
double best_sum_left_hessian = 0.0f;
if (threadIdx_x >= bin_start && threadIdx_x < bin_end) {
const int bin_offset = (threadIdx_x << 1);
const double hess = feature_hist_ptr[bin_offset + 1];
if (__double2int_rn(hess * cnt_factor) >= cat_smooth) {
const double grad = feature_hist_ptr[bin_offset];
shared_value_buffer[threadIdx_x] = grad / (hess + cat_smooth);
is_valid_bin = 1;
} else {
shared_value_buffer[threadIdx_x] = kMaxScore;
}
} else {
shared_value_buffer[threadIdx_x] = kMaxScore;
}
shared_index_buffer[threadIdx_x] = threadIdx_x;
__syncthreads();
const int local_used_bin = ShuffleReduceSum<uint16_t>(is_valid_bin, shared_mem_buffer_uint16, blockDim.x);
if (threadIdx_x == 0) {
used_bin = local_used_bin;
}
__syncthreads();
BitonicArgSort_1024<double, int16_t, true>(shared_value_buffer, shared_index_buffer, bin_end);
__syncthreads();
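// Categories now sit in shared memory sorted by grad / (hess + cat_smooth), with bins
// filtered out above pushed to the end via kMaxScore. The two scans below walk this
// ordering from both ends, similar to the CPU many-vs-many categorical search;
// cat_smooth doubles as a minimum per-category count filter and as a smoothing term in
// the sort key, and max_cat_threshold caps how many categories may end up on one side.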
const int max_num_cat = min(max_cat_threshold, (used_bin + 1) / 2);
if (USE_RAND) {
rand_threshold = 0;
const int max_threshold = max(min(max_num_cat, used_bin) - 1, 0);
if (max_threshold > 0) {
rand_threshold = cuda_random->NextInt(0, max_threshold);
}
}
// left to right
double grad = 0.0f;
double hess = 0.0f;
if (threadIdx_x < used_bin && threadIdx_x < max_num_cat) {
const int bin_offset = (shared_index_buffer[threadIdx_x] << 1);
grad = feature_hist_ptr[bin_offset];
hess = feature_hist_ptr[bin_offset + 1];
}
if (threadIdx_x == 0) {
hess += kEpsilon;
}
__syncthreads();
double sum_left_gradient = ShufflePrefixSum<double>(grad, shared_mem_buffer_double);
__syncthreads();
double sum_left_hessian = ShufflePrefixSum<double>(hess, shared_mem_buffer_double);
if (threadIdx_x < used_bin && threadIdx_x < max_num_cat) {
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = num_data - left_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf &&
(!USE_RAND || threadIdx_x == static_cast<int>(rand_threshold))) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
l2, path_smooth, left_count, right_count, parent_output);
// accept only if the gain improves on the best so far (seeded with the no-split baseline)
if (current_gain > local_gain) {
local_gain = current_gain;
threshold_found = true;
best_dir = 1;
best_sum_left_gradient = sum_left_gradient;
best_sum_left_hessian = sum_left_hessian;
}
}
}
__syncthreads();
// right to left
grad = 0.0f;
hess = 0.0f;
if (threadIdx_x < used_bin && threadIdx_x < max_num_cat) {
const int bin_offset = (shared_index_buffer[used_bin - 1 - threadIdx_x] << 1);
grad = feature_hist_ptr[bin_offset];
hess = feature_hist_ptr[bin_offset + 1];
}
if (threadIdx_x == 0) {
hess += kEpsilon;
}
__syncthreads();
sum_left_gradient = ShufflePrefixSum<double>(grad, shared_mem_buffer_double);
__syncthreads();
sum_left_hessian = ShufflePrefixSum<double>(hess, shared_mem_buffer_double);
if (threadIdx_x < used_bin && threadIdx_x < max_num_cat) {
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = num_data - left_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf &&
(!USE_RAND || threadIdx_x == static_cast<int>(rand_threshold))) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
l2, path_smooth, left_count, right_count, parent_output);
// accept only if the gain improves on the best so far (seeded with the no-split baseline)
if (current_gain > local_gain) {
local_gain = current_gain;
threshold_found = true;
best_dir = -1;
best_sum_left_gradient = sum_left_gradient;
best_sum_left_hessian = sum_left_hessian;
}
}
}
__syncthreads();
const uint32_t result = ReduceBestGain(local_gain, threshold_found, threadIdx_x, shared_gain_buffer, shared_found_buffer, shared_thread_index_buffer);
if (threadIdx_x == 0) {
best_thread_index = result;
}
__syncthreads();
if (threshold_found && threadIdx_x == best_thread_index) {
cuda_best_split_info->is_valid = true;
cuda_best_split_info->num_cat_threshold = threadIdx_x + 1;
cuda_best_split_info->gain = local_gain - min_gain_shift;
if (best_dir == 1) {
for (int i = 0; i < threadIdx_x + 1; ++i) {
(cuda_best_split_info->cat_threshold)[i] = shared_index_buffer[i] + task->mfb_offset;
}
} else {
for (int i = 0; i < threadIdx_x + 1; ++i) {
(cuda_best_split_info->cat_threshold)[i] = shared_index_buffer[used_bin - 1 - i] + task->mfb_offset;
}
}
cuda_best_split_info->default_left = false;
const hist_t sum_left_gradient = best_sum_left_gradient;
const hist_t sum_left_hessian = best_sum_left_hessian;
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, right_output);
}
}
}
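// FindBestSplitsForLeafKernel: one thread block per SplitFindTask (a feature/direction
// candidate on one leaf). IS_LARGER selects whether the block works on the smaller or the
// larger child of the previous split; results for the larger leaf are written num_tasks
// entries further into cuda_best_split_info. USE_RAND / USE_L1 / USE_SMOOTHING pick the
// extra-trees and regularization variants at compile time.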
template <bool USE_RAND, bool USE_L1, bool USE_SMOOTHING, bool IS_LARGER>
__global__ void FindBestSplitsForLeafKernel(
// input feature information
const int8_t* is_feature_used_bytree,
// input task information
const int num_tasks,
const SplitFindTask* tasks,
CUDARandom* cuda_randoms,
// input leaf information
const CUDALeafSplitsStruct* smaller_leaf_splits,
const CUDALeafSplitsStruct* larger_leaf_splits,
// input config parameter values
const data_size_t min_data_in_leaf,
const double min_sum_hessian_in_leaf,
const double min_gain_to_split,
const double lambda_l1,
const double lambda_l2,
const double path_smooth,
const double cat_smooth,
const double cat_l2,
const int max_cat_threshold,
const int min_data_per_group,
// output
CUDASplitInfo* cuda_best_split_info) {
const unsigned int task_index = blockIdx.x;
const SplitFindTask* task = tasks + task_index;
const int inner_feature_index = task->inner_feature_index;
const double parent_gain = IS_LARGER ? larger_leaf_splits->gain : smaller_leaf_splits->gain;
const double sum_gradients = IS_LARGER ? larger_leaf_splits->sum_of_gradients : smaller_leaf_splits->sum_of_gradients;
const double sum_hessians = (IS_LARGER ? larger_leaf_splits->sum_of_hessians : smaller_leaf_splits->sum_of_hessians) + 2 * kEpsilon;
const data_size_t num_data = IS_LARGER ? larger_leaf_splits->num_data_in_leaf : smaller_leaf_splits->num_data_in_leaf;
const double parent_output = IS_LARGER ? larger_leaf_splits->leaf_value : smaller_leaf_splits->leaf_value;
const unsigned int output_offset = IS_LARGER ? (task_index + num_tasks) : task_index;
CUDASplitInfo* out = cuda_best_split_info + output_offset;
CUDARandom* cuda_random = USE_RAND ?
(IS_LARGER ? cuda_randoms + task_index * 2 + 1 : cuda_randoms + task_index * 2) : nullptr;
if (is_feature_used_bytree[inner_feature_index]) {
const hist_t* hist_ptr = (IS_LARGER ? larger_leaf_splits->hist_in_leaf : smaller_leaf_splits->hist_in_leaf) + task->hist_offset * 2;
if (task->is_categorical) {
FindBestSplitsForLeafKernelCategoricalInner<USE_RAND, USE_L1, USE_SMOOTHING>(
// input feature information
hist_ptr,
// input task information
task,
cuda_random,
// input config parameter values
lambda_l1,
lambda_l2,
path_smooth,
min_data_in_leaf,
min_sum_hessian_in_leaf,
min_gain_to_split,
cat_smooth,
cat_l2,
max_cat_threshold,
min_data_per_group,
// input parent node information
parent_gain,
sum_gradients,
sum_hessians,
num_data,
parent_output,
// output parameters
out);
} else {
if (!task->reverse) {
FindBestSplitsForLeafKernelInner<USE_RAND, USE_L1, USE_SMOOTHING, false>(
// input feature information
hist_ptr,
// input task information
task,
cuda_random,
// input config parameter values
lambda_l1,
lambda_l2,
path_smooth,
min_data_in_leaf,
min_sum_hessian_in_leaf,
min_gain_to_split,
// input parent node information
parent_gain,
sum_gradients,
sum_hessians,
num_data,
parent_output,
// output parameters
out);
} else {
FindBestSplitsForLeafKernelInner<USE_RAND, USE_L1, USE_SMOOTHING, true>(
// input feature information
hist_ptr,
// input task information
task,
cuda_random,
// input config parameter values
lambda_l1,
lambda_l2,
path_smooth,
min_data_in_leaf,
min_sum_hessian_in_leaf,
min_gain_to_split,
// input parent node information
parent_gain,
sum_gradients,
sum_hessians,
num_data,
parent_output,
// output parameters
out);
}
}
} else {
out->is_valid = false;
}
}
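// The *_GlobalMemory variants below run the same search but stage the histogram in
// caller-provided global-memory buffers and use grid-stride loops over bins instead of
// one bin per thread in shared memory; presumably this path is taken when a feature has
// more bins than NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER threads can cover (the host
// side switches on use_global_memory_).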
template <bool USE_RAND, bool USE_L1, bool USE_SMOOTHING, bool REVERSE>
__device__ void FindBestSplitsForLeafKernelInner_GlobalMemory(
// input feature information
const hist_t* feature_hist_ptr,
// input task information
const SplitFindTask* task,
CUDARandom* cuda_random,
// input config parameter values
const double lambda_l1,
const double lambda_l2,
const double path_smooth,
const data_size_t min_data_in_leaf,
const double min_sum_hessian_in_leaf,
const double min_gain_to_split,
// input parent node information
const double parent_gain,
const double sum_gradients,
const double sum_hessians,
const data_size_t num_data,
const double parent_output,
// output parameters
CUDASplitInfo* cuda_best_split_info,
// buffer
hist_t* hist_grad_buffer_ptr,
hist_t* hist_hess_buffer_ptr) {
const double cnt_factor = num_data / sum_hessians;
const double min_gain_shift = parent_gain + min_gain_to_split;
cuda_best_split_info->is_valid = false;
double local_gain = 0.0f;
bool threshold_found = false;
uint32_t threshold_value = 0;
__shared__ int rand_threshold;
if (USE_RAND && threadIdx.x == 0) {
if (task->num_bin - 2 > 0) {
rand_threshold = cuda_random->NextInt(0, task->num_bin - 2);
}
}
__shared__ uint32_t best_thread_index;
__shared__ double shared_double_buffer[32];
__shared__ bool shared_found_buffer[32];
__shared__ uint32_t shared_thread_index_buffer[32];
const unsigned int threadIdx_x = threadIdx.x;
const uint32_t feature_num_bin_minus_offset = task->num_bin - task->mfb_offset;
if (!REVERSE) {
if (task->na_as_missing && task->mfb_offset == 1) {
uint32_t bin_start = threadIdx_x > 0 ? threadIdx_x : blockDim.x;
hist_t thread_sum_gradients = 0.0f;
hist_t thread_sum_hessians = 0.0f;
for (unsigned int bin = bin_start; bin < static_cast<uint32_t>(task->num_bin); bin += blockDim.x) {
const unsigned int bin_offset = (bin - 1) << 1;
const hist_t grad = feature_hist_ptr[bin_offset];
const hist_t hess = feature_hist_ptr[bin_offset + 1];
hist_grad_buffer_ptr[bin] = grad;
hist_hess_buffer_ptr[bin] = hess;
thread_sum_gradients += grad;
thread_sum_hessians += hess;
}
const hist_t sum_gradients_non_default = ShuffleReduceSum<double>(thread_sum_gradients, shared_double_buffer, blockDim.x);
__syncthreads();
const hist_t sum_hessians_non_default = ShuffleReduceSum<double>(thread_sum_hessians, shared_double_buffer, blockDim.x);
if (threadIdx_x == 0) {
hist_grad_buffer_ptr[0] = sum_gradients - sum_gradients_non_default;
hist_hess_buffer_ptr[0] = sum_hessians - sum_hessians_non_default;
}
} else {
for (unsigned int bin = threadIdx_x; bin < feature_num_bin_minus_offset; bin += blockDim.x) {
const bool skip_sum =
(task->skip_default_bin && (bin + task->mfb_offset) == static_cast<int>(task->default_bin));
if (!skip_sum) {
const unsigned int bin_offset = bin << 1;
hist_grad_buffer_ptr[bin] = feature_hist_ptr[bin_offset];
hist_hess_buffer_ptr[bin] = feature_hist_ptr[bin_offset + 1];
} else {
hist_grad_buffer_ptr[bin] = 0.0f;
hist_hess_buffer_ptr[bin] = 0.0f;
}
}
}
} else {
for (unsigned int bin = threadIdx_x; bin < feature_num_bin_minus_offset; bin += blockDim.x) {
const bool skip_sum = bin >= static_cast<unsigned int>(task->na_as_missing) &&
(task->skip_default_bin && (task->num_bin - 1 - bin) == static_cast<int>(task->default_bin));
if (!skip_sum) {
const unsigned int read_index = feature_num_bin_minus_offset - 1 - bin;
const unsigned int bin_offset = read_index << 1;
hist_grad_buffer_ptr[bin] = feature_hist_ptr[bin_offset];
hist_hess_buffer_ptr[bin] = feature_hist_ptr[bin_offset + 1];
} else {
hist_grad_buffer_ptr[bin] = 0.0f;
hist_hess_buffer_ptr[bin] = 0.0f;
}
}
}
__syncthreads();
if (threadIdx_x == 0) {
hist_hess_buffer_ptr[0] += kEpsilon;
}
local_gain = kMinScore;
GlobalMemoryPrefixSum(hist_grad_buffer_ptr, static_cast<size_t>(feature_num_bin_minus_offset));
__syncthreads();
GlobalMemoryPrefixSum(hist_hess_buffer_ptr, static_cast<size_t>(feature_num_bin_minus_offset));
if (REVERSE) {
for (unsigned int bin = threadIdx_x; bin < feature_num_bin_minus_offset; bin += blockDim.x) {
const bool skip_sum = (bin >= static_cast<unsigned int>(task->na_as_missing) &&
(task->skip_default_bin && (task->num_bin - 1 - bin) == static_cast<int>(task->default_bin)));
if (!skip_sum) {
const double sum_right_gradient = hist_grad_buffer_ptr[bin];
const double sum_right_hessian = hist_hess_buffer_ptr[bin];
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double sum_left_gradient = sum_gradients - sum_right_gradient;
const double sum_left_hessian = sum_hessians - sum_right_hessian;
const data_size_t left_count = num_data - right_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf &&
(!USE_RAND || static_cast<int>(task->num_bin - 2 - bin) == rand_threshold)) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
lambda_l2, path_smooth, left_count, right_count, parent_output);
// accept the split only if its gain exceeds the no-split baseline (min_gain_shift)
if (current_gain > min_gain_shift) {
local_gain = current_gain - min_gain_shift;
threshold_value = static_cast<uint32_t>(task->num_bin - 2 - bin);
threshold_found = true;
}
}
}
}
} else {
const uint32_t end = (task->na_as_missing && task->mfb_offset == 1) ? static_cast<uint32_t>(task->num_bin - 2) : feature_num_bin_minus_offset - 2;
for (unsigned int bin = threadIdx_x; bin <= end; bin += blockDim.x) {
const bool skip_sum =
(task->skip_default_bin && (bin + task->mfb_offset) == static_cast<int>(task->default_bin));
if (!skip_sum) {
const double sum_left_gradient = hist_grad_buffer_ptr[bin];
const double sum_left_hessian = hist_hess_buffer_ptr[bin];
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = num_data - left_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf &&
(!USE_RAND || static_cast<int>(bin + task->mfb_offset) == rand_threshold)) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
lambda_l2, path_smooth, left_count, right_count, parent_output);
// accept the split only if its gain exceeds the no-split baseline (min_gain_shift)
if (current_gain > min_gain_shift) {
local_gain = current_gain - min_gain_shift;
threshold_value = (task->na_as_missing && task->mfb_offset == 1) ?
bin : static_cast<uint32_t>(bin + task->mfb_offset);
threshold_found = true;
}
}
}
}
}
__syncthreads();
const uint32_t result = ReduceBestGain(local_gain, threshold_found, threadIdx_x, shared_double_buffer, shared_found_buffer, shared_thread_index_buffer);
if (threadIdx_x == 0) {
best_thread_index = result;
}
__syncthreads();
if (threshold_found && threadIdx_x == best_thread_index) {
cuda_best_split_info->is_valid = true;
cuda_best_split_info->threshold = threshold_value;
cuda_best_split_info->gain = local_gain;
cuda_best_split_info->default_left = task->assume_out_default_left;
if (REVERSE) {
const unsigned int best_bin = static_cast<uint32_t>(task->num_bin - 2 - threshold_value);
const double sum_right_gradient = hist_grad_buffer_ptr[best_bin];
const double sum_right_hessian = hist_hess_buffer_ptr[best_bin] - kEpsilon;
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double sum_left_gradient = sum_gradients - sum_right_gradient;
const double sum_left_hessian = sum_hessians - sum_right_hessian - kEpsilon;
const data_size_t left_count = num_data - right_count;
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, right_output);
} else {
const unsigned int best_bin = (task->na_as_missing && task->mfb_offset == 1) ?
threshold_value : static_cast<uint32_t>(threshold_value - task->mfb_offset);
const double sum_left_gradient = hist_grad_buffer_ptr[best_bin];
const double sum_left_hessian = hist_hess_buffer_ptr[best_bin] - kEpsilon;
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian - kEpsilon;
const data_size_t right_count = num_data - left_count;
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, lambda_l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, lambda_l2, right_output);
}
}
}
template <bool USE_RAND, bool USE_L1, bool USE_SMOOTHING>
__device__ void FindBestSplitsForLeafKernelCategoricalInner_GlobalMemory(
// input feature information
const hist_t* feature_hist_ptr,
// input task information
const SplitFindTask* task,
CUDARandom* cuda_random,
// input config parameter values
const double lambda_l1,
const double lambda_l2,
const double path_smooth,
const data_size_t min_data_in_leaf,
const double min_sum_hessian_in_leaf,
const double min_gain_to_split,
const double cat_smooth,
const double cat_l2,
const int max_cat_threshold,
const int min_data_per_group,
// input parent node information
const double parent_gain,
const double sum_gradients,
const double sum_hessians,
const data_size_t num_data,
const double parent_output,
// buffer
hist_t* hist_grad_buffer_ptr,
hist_t* hist_hess_buffer_ptr,
hist_t* hist_stat_buffer_ptr,
data_size_t* hist_index_buffer_ptr,
// output parameters
CUDASplitInfo* cuda_best_split_info) {
__shared__ double shared_gain_buffer[32];
__shared__ bool shared_found_buffer[32];
__shared__ uint32_t shared_thread_index_buffer[32];
__shared__ uint32_t best_thread_index;
const double cnt_factor = num_data / sum_hessians;
const double min_gain_shift = parent_gain + min_gain_to_split;
double l2 = lambda_l2;
double local_gain = kMinScore;
bool threshold_found = false;
cuda_best_split_info->is_valid = false;
__shared__ int rand_threshold;
const int bin_start = 1 - task->mfb_offset;
const int bin_end = task->num_bin - task->mfb_offset;
int best_threshold = -1;
const int threadIdx_x = static_cast<int>(threadIdx.x);
if (task->is_one_hot) {
if (USE_RAND && threadIdx.x == 0) {
rand_threshold = 0;
if (bin_end > bin_start) {
rand_threshold = cuda_random->NextInt(bin_start, bin_end);
}
}
__syncthreads();
for (int bin = bin_start + threadIdx_x; bin < bin_end; bin += static_cast<int>(blockDim.x)) {
const int bin_offset = (bin << 1);
const hist_t grad = feature_hist_ptr[bin_offset];
const hist_t hess = feature_hist_ptr[bin_offset + 1];
data_size_t cnt =
static_cast<data_size_t>(__double2int_rn(hess * cnt_factor));
if (cnt >= min_data_in_leaf && hess >= min_sum_hessian_in_leaf) {
const data_size_t other_count = num_data - cnt;
if (other_count >= min_data_in_leaf) {
const double sum_other_hessian = sum_hessians - hess - kEpsilon;
if (sum_other_hessian >= min_sum_hessian_in_leaf && (!USE_RAND || bin == rand_threshold)) {
const double sum_other_gradient = sum_gradients - grad;
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_other_gradient, sum_other_hessian, grad,
hess + kEpsilon, lambda_l1,
l2, path_smooth, other_count, cnt, parent_output);
if (current_gain > min_gain_shift) {
best_threshold = bin;
local_gain = current_gain - min_gain_shift;
threshold_found = true;
}
}
}
}
}
__syncthreads();
const uint32_t result = ReduceBestGain(local_gain, threshold_found, threadIdx_x, shared_gain_buffer, shared_found_buffer, shared_thread_index_buffer);
if (threadIdx_x == 0) {
best_thread_index = result;
}
__syncthreads();
if (threshold_found && threadIdx_x == best_thread_index) {
cuda_best_split_info->is_valid = true;
cuda_best_split_info->num_cat_threshold = 1;
// local_gain already had min_gain_shift subtracted when it was set above
cuda_best_split_info->gain = local_gain;
cuda_best_split_info->cat_threshold = new uint32_t[1];
*(cuda_best_split_info->cat_threshold) = static_cast<uint32_t>(best_threshold);
cuda_best_split_info->default_left = false;
const int bin_offset = (best_threshold << 1);
const hist_t sum_left_gradient = feature_hist_ptr[bin_offset];
const hist_t sum_left_hessian = feature_hist_ptr[bin_offset + 1];
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, right_output);
}
} else {
__shared__ uint16_t shared_mem_buffer_uint16[32];
__shared__ int used_bin;
l2 += cat_l2;
uint16_t is_valid_bin = 0;
int best_dir = 0;
double best_sum_left_gradient = 0.0f;
double best_sum_left_hessian = 0.0f;
for (int bin = 0; bin < bin_end; bin += static_cast<int>(blockDim.x)) {
if (bin >= bin_start) {
const int bin_offset = (bin << 1);
const double hess = feature_hist_ptr[bin_offset + 1];
if (__double2int_rn(hess * cnt_factor) >= cat_smooth) {
const double grad = feature_hist_ptr[bin_offset];
hist_stat_buffer_ptr[bin] = grad / (hess + cat_smooth);
hist_index_buffer_ptr[bin] = threadIdx_x;
is_valid_bin = 1;
} else {
hist_stat_buffer_ptr[bin] = kMaxScore;
hist_index_buffer_ptr[bin] = -1;
}
}
}
__syncthreads();
const int local_used_bin = ShuffleReduceSum<uint16_t>(is_valid_bin, shared_mem_buffer_uint16, blockDim.x);
if (threadIdx_x == 0) {
used_bin = local_used_bin;
}
__syncthreads();
BitonicArgSortDevice<double, data_size_t, true, NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER, 11>(
hist_stat_buffer_ptr, hist_index_buffer_ptr, task->num_bin - task->mfb_offset);
const int max_num_cat = min(max_cat_threshold, (used_bin + 1) / 2);
if (USE_RAND) {
rand_threshold = 0;
const int max_threshold = max(min(max_num_cat, used_bin) - 1, 0);
if (max_threshold > 0) {
rand_threshold = cuda_random->NextInt(0, max_threshold);
}
}
__syncthreads();
// left to right
for (int bin = static_cast<int>(threadIdx_x); bin < used_bin && bin < max_num_cat; bin += static_cast<int>(blockDim.x)) {
const int bin_offset = (hist_index_buffer_ptr[bin] << 1);
hist_grad_buffer_ptr[bin] = feature_hist_ptr[bin_offset];
hist_hess_buffer_ptr[bin] = feature_hist_ptr[bin_offset + 1];
}
if (threadIdx_x == 0) {
hist_hess_buffer_ptr[0] += kEpsilon;
}
__syncthreads();
GlobalMemoryPrefixSum<double>(hist_grad_buffer_ptr, static_cast<size_t>(bin_end));
__syncthreads();
GlobalMemoryPrefixSum<double>(hist_hess_buffer_ptr, static_cast<size_t>(bin_end));
for (int bin = static_cast<int>(threadIdx_x); bin < used_bin && bin < max_num_cat; bin += static_cast<int>(blockDim.x)) {
const double sum_left_gradient = hist_grad_buffer_ptr[bin];
const double sum_left_hessian = hist_hess_buffer_ptr[bin];
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = num_data - left_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
l2, path_smooth, left_count, right_count, parent_output);
// accept the split only if its gain exceeds the no-split baseline (min_gain_shift)
if (current_gain > min_gain_shift) {
local_gain = current_gain - min_gain_shift;
threshold_found = true;
best_dir = 1;
best_sum_left_gradient = sum_left_gradient;
best_sum_left_hessian = sum_left_hessian;
best_threshold = bin;
}
}
}
__syncthreads();
// right to left
for (int bin = static_cast<int>(threadIdx_x); bin < used_bin && bin < max_num_cat; bin += static_cast<int>(blockDim.x)) {
const int bin_offset = (hist_index_buffer_ptr[used_bin - 1 - bin] << 1);
hist_grad_buffer_ptr[bin] = feature_hist_ptr[bin_offset];
hist_hess_buffer_ptr[bin] = feature_hist_ptr[bin_offset + 1];
}
if (threadIdx_x == 0) {
hist_hess_buffer_ptr[0] += kEpsilon;
}
__syncthreads();
GlobalMemoryPrefixSum<double>(hist_grad_buffer_ptr, static_cast<size_t>(bin_end));
__syncthreads();
GlobalMemoryPrefixSum<double>(hist_hess_buffer_ptr, static_cast<size_t>(bin_end));
for (int bin = static_cast<int>(threadIdx_x); bin < used_bin && bin < max_num_cat; bin += static_cast<int>(blockDim.x)) {
const double sum_left_gradient = hist_grad_buffer_ptr[bin];
const double sum_left_hessian = hist_hess_buffer_ptr[bin];
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = num_data - left_count;
if (sum_left_hessian >= min_sum_hessian_in_leaf && left_count >= min_data_in_leaf &&
sum_right_hessian >= min_sum_hessian_in_leaf && right_count >= min_data_in_leaf) {
double current_gain = CUDALeafSplits::GetSplitGains<USE_L1, USE_SMOOTHING>(
sum_left_gradient, sum_left_hessian, sum_right_gradient,
sum_right_hessian, lambda_l1,
l2, path_smooth, left_count, right_count, parent_output);
// accept the split only if its gain exceeds the no-split baseline (min_gain_shift)
if (current_gain > min_gain_shift) {
local_gain = current_gain - min_gain_shift;
threshold_found = true;
best_dir = -1;
best_sum_left_gradient = sum_left_gradient;
best_sum_left_hessian = sum_left_hessian;
best_threshold = bin;
}
}
}
__syncthreads();
const uint32_t result = ReduceBestGain(local_gain, threshold_found, threadIdx_x, shared_gain_buffer, shared_found_buffer, shared_thread_index_buffer);
if (threadIdx_x == 0) {
best_thread_index = result;
}
__syncthreads();
if (threshold_found && threadIdx_x == best_thread_index) {
cuda_best_split_info->is_valid = true;
cuda_best_split_info->num_cat_threshold = best_threshold + 1;
cuda_best_split_info->cat_threshold = new uint32_t[best_threshold + 1];
cuda_best_split_info->gain = local_gain;
if (best_dir == 1) {
for (int i = 0; i < best_threshold + 1; ++i) {
(cuda_best_split_info->cat_threshold)[i] = hist_index_buffer_ptr[i] + task->mfb_offset;
}
} else {
for (int i = 0; i < best_threshold + 1; ++i) {
(cuda_best_split_info->cat_threshold)[i] = hist_index_buffer_ptr[used_bin - 1 - i] + task->mfb_offset;
}
}
cuda_best_split_info->default_left = false;
const hist_t sum_left_gradient = best_sum_left_gradient;
const hist_t sum_left_hessian = best_sum_left_hessian;
const data_size_t left_count = static_cast<data_size_t>(__double2int_rn(sum_left_hessian * cnt_factor));
const double sum_right_gradient = sum_gradients - sum_left_gradient;
const double sum_right_hessian = sum_hessians - sum_left_hessian;
const data_size_t right_count = static_cast<data_size_t>(__double2int_rn(sum_right_hessian * cnt_factor));
const double left_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, path_smooth, left_count, parent_output);
const double right_output = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, USE_SMOOTHING>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, path_smooth, right_count, parent_output);
cuda_best_split_info->left_sum_gradients = sum_left_gradient;
cuda_best_split_info->left_sum_hessians = sum_left_hessian;
cuda_best_split_info->left_count = left_count;
cuda_best_split_info->right_sum_gradients = sum_right_gradient;
cuda_best_split_info->right_sum_hessians = sum_right_hessian;
cuda_best_split_info->right_count = right_count;
cuda_best_split_info->left_value = left_output;
cuda_best_split_info->left_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_left_gradient,
sum_left_hessian, lambda_l1, l2, left_output);
cuda_best_split_info->right_value = right_output;
cuda_best_split_info->right_gain = CUDALeafSplits::GetLeafGainGivenOutput<USE_L1>(sum_right_gradient,
sum_right_hessian, lambda_l1, l2, right_output);
}
}
}
template <bool USE_RAND, bool USE_L1, bool USE_SMOOTHING, bool IS_LARGER>
__global__ void FindBestSplitsForLeafKernel_GlobalMemory(
// input feature information
const int8_t* is_feature_used_bytree,
// input task information
const int num_tasks,
const SplitFindTask* tasks,
CUDARandom* cuda_randoms,
// input leaf information
const CUDALeafSplitsStruct* smaller_leaf_splits,
const CUDALeafSplitsStruct* larger_leaf_splits,
// input config parameter values
const data_size_t min_data_in_leaf,
const double min_sum_hessian_in_leaf,
const double min_gain_to_split,
const double lambda_l1,
const double lambda_l2,
const double path_smooth,
const double cat_smooth,
const double cat_l2,
const int max_cat_threshold,
const int min_data_per_group,
// output
CUDASplitInfo* cuda_best_split_info,
// buffer
hist_t* feature_hist_grad_buffer,
hist_t* feature_hist_hess_buffer,
hist_t* feature_hist_stat_buffer,
data_size_t* feature_hist_index_buffer) {
const unsigned int task_index = blockIdx.x;
const SplitFindTask* task = tasks + task_index;
const double parent_gain = IS_LARGER ? larger_leaf_splits->gain : smaller_leaf_splits->gain;
const double sum_gradients = IS_LARGER ? larger_leaf_splits->sum_of_gradients : smaller_leaf_splits->sum_of_gradients;
const double sum_hessians = (IS_LARGER ? larger_leaf_splits->sum_of_hessians : smaller_leaf_splits->sum_of_hessians) + 2 * kEpsilon;
const data_size_t num_data = IS_LARGER ? larger_leaf_splits->num_data_in_leaf : smaller_leaf_splits->num_data_in_leaf;
const double parent_output = IS_LARGER ? larger_leaf_splits->leaf_value : smaller_leaf_splits->leaf_value;
const unsigned int output_offset = IS_LARGER ? (task_index + num_tasks) : task_index;
CUDASplitInfo* out = cuda_best_split_info + output_offset;
CUDARandom* cuda_random = USE_RAND ?
(IS_LARGER ? cuda_randoms + task_index * 2 + 1 : cuda_randoms + task_index * 2) : nullptr;
if (is_feature_used_bytree[task->inner_feature_index]) {
const uint32_t hist_offset = task->hist_offset;
const hist_t* hist_ptr = (IS_LARGER ? larger_leaf_splits->hist_in_leaf : smaller_leaf_splits->hist_in_leaf) + hist_offset * 2;
hist_t* hist_grad_buffer_ptr = feature_hist_grad_buffer + hist_offset * 2;
hist_t* hist_hess_buffer_ptr = feature_hist_hess_buffer + hist_offset * 2;
hist_t* hist_stat_buffer_ptr = feature_hist_stat_buffer + hist_offset * 2;
data_size_t* hist_index_buffer_ptr = feature_hist_index_buffer + hist_offset * 2;
if (task->is_categorical) {
FindBestSplitsForLeafKernelCategoricalInner_GlobalMemory<USE_RAND, USE_L1, USE_SMOOTHING>(
// input feature information
hist_ptr,
// input task information
task,
cuda_random,
// input config parameter values
lambda_l1,
lambda_l2,
path_smooth,
min_data_in_leaf,
min_sum_hessian_in_leaf,
min_gain_to_split,
cat_smooth,
cat_l2,
max_cat_threshold,
min_data_per_group,
// input parent node information
parent_gain,
sum_gradients,
sum_hessians,
num_data,
parent_output,
// buffer
hist_grad_buffer_ptr,
hist_hess_buffer_ptr,
hist_stat_buffer_ptr,
hist_index_buffer_ptr,
// output parameters
out);
} else {
if (!task->reverse) {
FindBestSplitsForLeafKernelInner_GlobalMemory<USE_RAND, USE_L1, USE_SMOOTHING, false>(
// input feature information
hist_ptr,
// input task information
task,
cuda_random,
// input config parameter values
lambda_l1,
lambda_l2,
path_smooth,
min_data_in_leaf,
min_sum_hessian_in_leaf,
min_gain_to_split,
// input parent node information
parent_gain,
sum_gradients,
sum_hessians,
num_data,
parent_output,
// output parameters
out,
// buffer
hist_grad_buffer_ptr,
hist_hess_buffer_ptr);
} else {
FindBestSplitsForLeafKernelInner_GlobalMemory<USE_RAND, USE_L1, USE_SMOOTHING, true>(
// input feature information
hist_ptr,
// input task information
task,
cuda_random,
// input config parameter values
lambda_l1,
lambda_l2,
path_smooth,
min_data_in_leaf,
min_sum_hessian_in_leaf,
min_gain_to_split,
// input parent node information
parent_gain,
sum_gradients,
sum_hessians,
num_data,
parent_output,
// output parameters
out,
// buffer
hist_grad_buffer_ptr,
hist_hess_buffer_ptr);
}
}
} else {
out->is_valid = false;
}
}
#define LaunchFindBestSplitsForLeafKernel_PARAMS \
const CUDALeafSplitsStruct* smaller_leaf_splits, \
const CUDALeafSplitsStruct* larger_leaf_splits, \
const int smaller_leaf_index, \
const int larger_leaf_index, \
const bool is_smaller_leaf_valid, \
const bool is_larger_leaf_valid
#define LaunchFindBestSplitsForLeafKernel_ARGS \
smaller_leaf_splits, \
larger_leaf_splits, \
smaller_leaf_index, \
larger_leaf_index, \
is_smaller_leaf_valid, \
is_larger_leaf_valid
#define FindBestSplitsForLeafKernel_ARGS \
cuda_is_feature_used_bytree_, \
num_tasks_, \
cuda_split_find_tasks_.RawData(), \
cuda_randoms_.RawData(), \
smaller_leaf_splits, \
larger_leaf_splits, \
min_data_in_leaf_, \
min_sum_hessian_in_leaf_, \
min_gain_to_split_, \
lambda_l1_, \
lambda_l2_, \
path_smooth_, \
cat_smooth_, \
cat_l2_, \
max_cat_threshold_, \
min_data_per_group_, \
cuda_best_split_info_
#define GlobalMemory_Buffer_ARGS \
cuda_feature_hist_grad_buffer_, \
cuda_feature_hist_hess_buffer_, \
cuda_feature_hist_stat_buffer_, \
cuda_feature_hist_index_buffer_
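// Host-side dispatch: LaunchFindBestSplitsForLeafKernel maps the runtime configuration
// (extra_trees_, lambda_l1_ > 0, use_smoothing_, use_global_memory_) onto the matching
// template instantiation via the Inner0/1/2 helpers, then launches one block per task,
// using separate streams for the smaller and the larger leaf.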
void CUDABestSplitFinder::LaunchFindBestSplitsForLeafKernel(LaunchFindBestSplitsForLeafKernel_PARAMS) {
if (!is_smaller_leaf_valid && !is_larger_leaf_valid) {
return;
}
if (!extra_trees_) {
LaunchFindBestSplitsForLeafKernelInner0<false>(LaunchFindBestSplitsForLeafKernel_ARGS);
} else {
LaunchFindBestSplitsForLeafKernelInner0<true>(LaunchFindBestSplitsForLeafKernel_ARGS);
}
}
template <bool USE_RAND>
void CUDABestSplitFinder::LaunchFindBestSplitsForLeafKernelInner0(LaunchFindBestSplitsForLeafKernel_PARAMS) {
if (lambda_l1_ <= 0.0f) {
LaunchFindBestSplitsForLeafKernelInner1<USE_RAND, false>(LaunchFindBestSplitsForLeafKernel_ARGS);
} else {
LaunchFindBestSplitsForLeafKernelInner1<USE_RAND, true>(LaunchFindBestSplitsForLeafKernel_ARGS);
}
}
template <bool USE_RAND, bool USE_L1>
void CUDABestSplitFinder::LaunchFindBestSplitsForLeafKernelInner1(LaunchFindBestSplitsForLeafKernel_PARAMS) {
if (!use_smoothing_) {
LaunchFindBestSplitsForLeafKernelInner2<USE_RAND, USE_L1, false>(LaunchFindBestSplitsForLeafKernel_ARGS);
} else {
LaunchFindBestSplitsForLeafKernelInner2<USE_RAND, USE_L1, true>(LaunchFindBestSplitsForLeafKernel_ARGS);
}
}
template <bool USE_RAND, bool USE_L1, bool USE_SMOOTHING>
void CUDABestSplitFinder::LaunchFindBestSplitsForLeafKernelInner2(LaunchFindBestSplitsForLeafKernel_PARAMS) {
if (!use_global_memory_) {
if (is_smaller_leaf_valid) {
FindBestSplitsForLeafKernel<USE_RAND, USE_L1, USE_SMOOTHING, false>
<<<num_tasks_, NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER, 0, cuda_streams_[0]>>>
(FindBestSplitsForLeafKernel_ARGS);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
if (is_larger_leaf_valid) {
FindBestSplitsForLeafKernel<USE_RAND, USE_L1, USE_SMOOTHING, true>
<<<num_tasks_, NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER, 0, cuda_streams_[1]>>>
(FindBestSplitsForLeafKernel_ARGS);
}
} else {
if (is_smaller_leaf_valid) {
FindBestSplitsForLeafKernel_GlobalMemory<USE_RAND, USE_L1, USE_SMOOTHING, false>
<<<num_tasks_, NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER, 0, cuda_streams_[0]>>>
(FindBestSplitsForLeafKernel_ARGS, GlobalMemory_Buffer_ARGS);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
if (is_larger_leaf_valid) {
FindBestSplitsForLeafKernel_GlobalMemory<USE_RAND, USE_L1, USE_SMOOTHING, true>
<<<num_tasks_, NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER, 0, cuda_streams_[1]>>>
(FindBestSplitsForLeafKernel_ARGS, GlobalMemory_Buffer_ARGS);
}
}
}
#undef LaunchFindBestSplitsForLeafKernel_PARAMS
#undef FindBestSplitsForLeafKernel_ARGS
#undef GlobalMemory_Buffer_ARGS
__device__ void ReduceBestSplit(bool* found, double* gain, uint32_t* shared_read_index,
uint32_t num_features_aligned) {
const uint32_t threadIdx_x = threadIdx.x;
for (unsigned int s = 1; s < num_features_aligned; s <<= 1) {
if (threadIdx_x % (2 * s) == 0 && (threadIdx_x + s) < num_features_aligned) {
const uint32_t pos_to_compare = threadIdx_x + s;
if ((!found[threadIdx_x] && found[pos_to_compare]) ||
(found[threadIdx_x] && found[pos_to_compare] && gain[threadIdx_x] < gain[pos_to_compare])) {
found[threadIdx_x] = found[pos_to_compare];
gain[threadIdx_x] = gain[pos_to_compare];
shared_read_index[threadIdx_x] = shared_read_index[pos_to_compare];
}
}
__syncthreads();
}
}
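// SyncBestSplitForLeafKernel reduces the per-task results into a per-leaf best split.
// Tasks are processed NUM_TASKS_PER_SYNC_BLOCK per block; blocks for the smaller leaf
// come first unless larger_only is set. Each block writes its local winner to
// cuda_leaf_best_split_info at leaf_index + leaf_block_index * num_leaves, and
// SyncBestSplitForLeafKernelAllBlocks merges those per-block winners whenever more than
// one block per leaf was needed.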
__global__ void SyncBestSplitForLeafKernel(const int smaller_leaf_index, const int larger_leaf_index,
CUDASplitInfo* cuda_leaf_best_split_info,
// input parameters
const SplitFindTask* tasks,
const CUDASplitInfo* cuda_best_split_info,
const int num_tasks,
const int num_tasks_aligned,
const int num_blocks_per_leaf,
const bool larger_only,
const int num_leaves) {
__shared__ double shared_gain_buffer[32];
__shared__ bool shared_found_buffer[32];
__shared__ uint32_t shared_thread_index_buffer[32];
const uint32_t threadIdx_x = threadIdx.x;
const uint32_t blockIdx_x = blockIdx.x;
bool best_found = false;
double best_gain = kMinScore;
uint32_t shared_read_index = 0;
const bool is_smaller = (blockIdx_x < static_cast<unsigned int>(num_blocks_per_leaf) && !larger_only);
const uint32_t leaf_block_index = (is_smaller || larger_only) ? blockIdx_x : (blockIdx_x - static_cast<unsigned int>(num_blocks_per_leaf));
const int task_index = static_cast<int>(leaf_block_index * blockDim.x + threadIdx_x);
const uint32_t read_index = is_smaller ? static_cast<uint32_t>(task_index) : static_cast<uint32_t>(task_index + num_tasks);
if (task_index < num_tasks) {
best_found = cuda_best_split_info[read_index].is_valid;
best_gain = cuda_best_split_info[read_index].gain;
shared_read_index = read_index;
} else {
best_found = false;
}
__syncthreads();
const uint32_t best_read_index = ReduceBestGain(best_gain, best_found, shared_read_index,
shared_gain_buffer, shared_found_buffer, shared_thread_index_buffer);
if (threadIdx.x == 0) {
const int leaf_index_ref = is_smaller ? smaller_leaf_index : larger_leaf_index;
const unsigned buffer_write_pos = static_cast<unsigned int>(leaf_index_ref) + leaf_block_index * num_leaves;
CUDASplitInfo* cuda_split_info = cuda_leaf_best_split_info + buffer_write_pos;
const CUDASplitInfo* best_split_info = cuda_best_split_info + best_read_index;
if (best_split_info->is_valid) {
*cuda_split_info = *best_split_info;
cuda_split_info->inner_feature_index = is_smaller ? tasks[best_read_index].inner_feature_index :
tasks[static_cast<int>(best_read_index) - num_tasks].inner_feature_index;
cuda_split_info->is_valid = true;
} else {
cuda_split_info->gain = kMinScore;
cuda_split_info->is_valid = false;
}
}
}
__global__ void SyncBestSplitForLeafKernelAllBlocks(
const int smaller_leaf_index,
const int larger_leaf_index,
const unsigned int num_blocks_per_leaf,
const int num_leaves,
CUDASplitInfo* cuda_leaf_best_split_info,
const bool larger_only) {
if (!larger_only) {
if (blockIdx.x == 0) {
CUDASplitInfo* smaller_leaf_split_info = cuda_leaf_best_split_info + smaller_leaf_index;
for (unsigned int block_index = 1; block_index < num_blocks_per_leaf; ++block_index) {
const unsigned int leaf_read_pos = static_cast<unsigned int>(smaller_leaf_index) + block_index * static_cast<unsigned int>(num_leaves);
const CUDASplitInfo* other_split_info = cuda_leaf_best_split_info + leaf_read_pos;
if ((other_split_info->is_valid && smaller_leaf_split_info->is_valid &&
other_split_info->gain > smaller_leaf_split_info->gain) ||
(!smaller_leaf_split_info->is_valid && other_split_info->is_valid)) {
*smaller_leaf_split_info = *other_split_info;
}
}
}
}
if (larger_leaf_index >= 0) {
if (blockIdx.x == 1 || larger_only) {
CUDASplitInfo* larger_leaf_split_info = cuda_leaf_best_split_info + larger_leaf_index;
for (unsigned int block_index = 1; block_index < num_blocks_per_leaf; ++block_index) {
const unsigned int leaf_read_pos = static_cast<unsigned int>(larger_leaf_index) + block_index * static_cast<unsigned int>(num_leaves);
const CUDASplitInfo* other_split_info = cuda_leaf_best_split_info + leaf_read_pos;
if ((other_split_info->is_valid && larger_leaf_split_info->is_valid &&
other_split_info->gain > larger_leaf_split_info->gain) ||
(!larger_leaf_split_info->is_valid && other_split_info->is_valid)) {
*larger_leaf_split_info = *other_split_info;
}
}
}
}
}
__global__ void SetInvalidLeafSplitInfoKernel(
CUDASplitInfo* cuda_leaf_best_split_info,
const bool is_smaller_leaf_valid,
const bool is_larger_leaf_valid,
const int smaller_leaf_index,
const int larger_leaf_index) {
if (!is_smaller_leaf_valid) {
cuda_leaf_best_split_info[smaller_leaf_index].is_valid = false;
}
if (!is_larger_leaf_valid && larger_leaf_index >= 0) {
cuda_leaf_best_split_info[larger_leaf_index].is_valid = false;
}
}
void CUDABestSplitFinder::LaunchSyncBestSplitForLeafKernel(
const int host_smaller_leaf_index,
const int host_larger_leaf_index,
const bool is_smaller_leaf_valid,
const bool is_larger_leaf_valid) {
if (!is_smaller_leaf_valid || !is_larger_leaf_valid) {
SetInvalidLeafSplitInfoKernel<<<1, 1>>>(
cuda_leaf_best_split_info_,
is_smaller_leaf_valid, is_larger_leaf_valid,
host_smaller_leaf_index, host_larger_leaf_index);
}
if (!is_smaller_leaf_valid && !is_larger_leaf_valid) {
return;
}
int num_tasks = num_tasks_;
int num_tasks_aligned = 1;
num_tasks -= 1;
while (num_tasks > 0) {
num_tasks_aligned <<= 1;
num_tasks >>= 1;
}
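// num_tasks_aligned is num_tasks_ rounded up to the next power of two (e.g. 10 -> 16),
// presumably so the in-kernel gain reduction can assume a power-of-two problem size.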
const int num_blocks_per_leaf = (num_tasks_ + NUM_TASKS_PER_SYNC_BLOCK - 1) / NUM_TASKS_PER_SYNC_BLOCK;
if (host_larger_leaf_index >= 0 && is_smaller_leaf_valid && is_larger_leaf_valid) {
SyncBestSplitForLeafKernel<<<num_blocks_per_leaf, NUM_TASKS_PER_SYNC_BLOCK, 0, cuda_streams_[0]>>>(
host_smaller_leaf_index,
host_larger_leaf_index,
cuda_leaf_best_split_info_,
cuda_split_find_tasks_.RawData(),
cuda_best_split_info_,
num_tasks_,
num_tasks_aligned,
num_blocks_per_leaf,
false,
num_leaves_);
if (num_blocks_per_leaf > 1) {
SyncBestSplitForLeafKernelAllBlocks<<<1, 1, 0, cuda_streams_[0]>>>(
host_smaller_leaf_index,
host_larger_leaf_index,
num_blocks_per_leaf,
num_leaves_,
cuda_leaf_best_split_info_,
false);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
SyncBestSplitForLeafKernel<<<num_blocks_per_leaf, NUM_TASKS_PER_SYNC_BLOCK, 0, cuda_streams_[1]>>>(
host_smaller_leaf_index,
host_larger_leaf_index,
cuda_leaf_best_split_info_,
cuda_split_find_tasks_.RawData(),
cuda_best_split_info_,
num_tasks_,
num_tasks_aligned,
num_blocks_per_leaf,
true,
num_leaves_);
if (num_blocks_per_leaf > 1) {
SyncBestSplitForLeafKernelAllBlocks<<<1, 1, 0, cuda_streams_[1]>>>(
host_smaller_leaf_index,
host_larger_leaf_index,
num_blocks_per_leaf,
num_leaves_,
cuda_leaf_best_split_info_,
true);
}
} else {
const bool larger_only = (!is_smaller_leaf_valid && is_larger_leaf_valid);
SyncBestSplitForLeafKernel<<<num_blocks_per_leaf, NUM_TASKS_PER_SYNC_BLOCK>>>(
host_smaller_leaf_index,
host_larger_leaf_index,
cuda_leaf_best_split_info_,
cuda_split_find_tasks_.RawData(),
cuda_best_split_info_,
num_tasks_,
num_tasks_aligned,
num_blocks_per_leaf,
larger_only,
num_leaves_);
if (num_blocks_per_leaf > 1) {
SynchronizeCUDADevice(__FILE__, __LINE__);
SyncBestSplitForLeafKernelAllBlocks<<<1, 1>>>(
host_smaller_leaf_index,
host_larger_leaf_index,
num_blocks_per_leaf,
num_leaves_,
cuda_leaf_best_split_info_,
larger_only);
}
}
}
__global__ void FindBestFromAllSplitsKernel(const int cur_num_leaves,
CUDASplitInfo* cuda_leaf_best_split_info,
int* cuda_best_split_info_buffer) {
__shared__ double gain_shared_buffer[32];
__shared__ int leaf_index_shared_buffer[32];
double thread_best_gain = kMinScore;
int thread_best_leaf_index = -1;
const int threadIdx_x = static_cast<int>(threadIdx.x);
for (int leaf_index = threadIdx_x; leaf_index < cur_num_leaves; leaf_index += static_cast<int>(blockDim.x)) {
const double leaf_best_gain = cuda_leaf_best_split_info[leaf_index].gain;
if (cuda_leaf_best_split_info[leaf_index].is_valid && leaf_best_gain > thread_best_gain) {
thread_best_gain = leaf_best_gain;
thread_best_leaf_index = leaf_index;
}
}
const int best_leaf_index = ReduceBestGainForLeaves(thread_best_gain, thread_best_leaf_index, gain_shared_buffer, leaf_index_shared_buffer);
if (threadIdx_x == 0) {
cuda_best_split_info_buffer[6] = best_leaf_index;
if (best_leaf_index != -1) {
cuda_leaf_best_split_info[best_leaf_index].is_valid = false;
cuda_leaf_best_split_info[cur_num_leaves].is_valid = false;
cuda_best_split_info_buffer[7] = cuda_leaf_best_split_info[best_leaf_index].num_cat_threshold;
}
}
}
__global__ void PrepareLeafBestSplitInfo(const int smaller_leaf_index, const int larger_leaf_index,
int* cuda_best_split_info_buffer,
const CUDASplitInfo* cuda_leaf_best_split_info) {
const unsigned int threadIdx_x = blockIdx.x;  // launched as <<<6, 1>>>, so each single-thread block copies one field
if (threadIdx_x == 0) {
cuda_best_split_info_buffer[0] = cuda_leaf_best_split_info[smaller_leaf_index].inner_feature_index;
} else if (threadIdx_x == 1) {
cuda_best_split_info_buffer[1] = cuda_leaf_best_split_info[smaller_leaf_index].threshold;
} else if (threadIdx_x == 2) {
cuda_best_split_info_buffer[2] = cuda_leaf_best_split_info[smaller_leaf_index].default_left;
}
if (larger_leaf_index >= 0) {
if (threadIdx_x == 3) {
cuda_best_split_info_buffer[3] = cuda_leaf_best_split_info[larger_leaf_index].inner_feature_index;
} else if (threadIdx_x == 4) {
cuda_best_split_info_buffer[4] = cuda_leaf_best_split_info[larger_leaf_index].threshold;
} else if (threadIdx_x == 5) {
cuda_best_split_info_buffer[5] = cuda_leaf_best_split_info[larger_leaf_index].default_left;
}
}
}
void CUDABestSplitFinder::LaunchFindBestFromAllSplitsKernel(
const int cur_num_leaves,
const int smaller_leaf_index, const int larger_leaf_index,
int* smaller_leaf_best_split_feature,
uint32_t* smaller_leaf_best_split_threshold,
uint8_t* smaller_leaf_best_split_default_left,
int* larger_leaf_best_split_feature,
uint32_t* larger_leaf_best_split_threshold,
uint8_t* larger_leaf_best_split_default_left,
int* best_leaf_index,
int* num_cat_threshold) {
FindBestFromAllSplitsKernel<<<1, NUM_THREADS_FIND_BEST_LEAF, 0, cuda_streams_[1]>>>(cur_num_leaves,
cuda_leaf_best_split_info_,
cuda_best_split_info_buffer_);
PrepareLeafBestSplitInfo<<<6, 1, 0, cuda_streams_[0]>>>(smaller_leaf_index, larger_leaf_index,
cuda_best_split_info_buffer_,
cuda_leaf_best_split_info_);
std::vector<int> host_leaf_best_split_info_buffer(8, 0);
SynchronizeCUDADevice(__FILE__, __LINE__);
CopyFromCUDADeviceToHost<int>(host_leaf_best_split_info_buffer.data(), cuda_best_split_info_buffer_, 8, __FILE__, __LINE__);
*smaller_leaf_best_split_feature = host_leaf_best_split_info_buffer[0];
*smaller_leaf_best_split_threshold = static_cast<uint32_t>(host_leaf_best_split_info_buffer[1]);
*smaller_leaf_best_split_default_left = static_cast<uint8_t>(host_leaf_best_split_info_buffer[2]);
if (larger_leaf_index >= 0) {
*larger_leaf_best_split_feature = host_leaf_best_split_info_buffer[3];
*larger_leaf_best_split_threshold = static_cast<uint32_t>(host_leaf_best_split_info_buffer[4]);
*larger_leaf_best_split_default_left = static_cast<uint8_t>(host_leaf_best_split_info_buffer[5]);
}
*best_leaf_index = host_leaf_best_split_info_buffer[6];
*num_cat_threshold = host_leaf_best_split_info_buffer[7];
}
__global__ void AllocateCatVectorsKernel(
CUDASplitInfo* cuda_split_infos, size_t len,
const int max_num_categories_in_split,
const bool has_categorical_feature,
uint32_t* cat_threshold_vec,
int* cat_threshold_real_vec) {
const size_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < len) {
if (has_categorical_feature) {
cuda_split_infos[i].cat_threshold = cat_threshold_vec + i * max_num_categories_in_split;
cuda_split_infos[i].cat_threshold_real = cat_threshold_real_vec + i * max_num_categories_in_split;
cuda_split_infos[i].num_cat_threshold = 0;
} else {
cuda_split_infos[i].cat_threshold = nullptr;
cuda_split_infos[i].cat_threshold_real = nullptr;
cuda_split_infos[i].num_cat_threshold = 0;
}
}
}
void CUDABestSplitFinder::LaunchAllocateCatVectorsKernel(
CUDASplitInfo* cuda_split_infos, uint32_t* cat_threshold_vec, int* cat_threshold_real_vec, size_t len) {
const int num_blocks = (static_cast<int>(len) + NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER - 1) / NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER;
AllocateCatVectorsKernel<<<num_blocks, NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER>>>(
cuda_split_infos, len, max_num_categories_in_split_, has_categorical_feature_, cat_threshold_vec, cat_threshold_real_vec);
}
__global__ void InitCUDARandomKernel(
const int seed,
const int num_tasks,
CUDARandom* cuda_randoms) {
const int task_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
if (task_index < num_tasks) {
cuda_randoms[task_index].SetSeed(seed + task_index);
}
}
void CUDABestSplitFinder::LaunchInitCUDARandomKernel() {
const int num_blocks = (static_cast<int>(cuda_randoms_.Size()) +
NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER - 1) / NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER;
InitCUDARandomKernel<<<num_blocks, NUM_THREADS_PER_BLOCK_BEST_SPLIT_FINDER>>>(extra_seed_,
static_cast<int>(cuda_randoms_.Size()), cuda_randoms_.RawData());
}
} // namespace LightGBM
#endif // USE_CUDA_EXP
#include "SFSSolverParameters.h"
#include "SFSSolverState.h"
#include "SFSSolverTerms.h"
#include "SFSSolverUtil.h"
#include "SFSSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "ConvergenceAnalysis.h"
#include "CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
#define DEBUG_PRINT_INFO 0
/////////////////////////////////////////////////////////////////////////
// Eval Residual
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int N = input.N;
float residual = 0.0f;
if (x < N)
{
residual = evalFDevice(x, input, state, parameters);
}
// Must do shuffle in entire warp
float r = warpReduce(residual);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_sumResidual, r);
}
}
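// Reduction pattern: each warp folds its 32 partial residuals with the shuffle-based
// warpReduce, then lane 0 of every warp adds the warp total to the global accumulator
// with a single atomicAdd. The same pattern is reused for the scanAlpha / scanBeta
// reductions in the PCG kernels below.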
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
cudaSafeCall(cudaDeviceSynchronize());
const unsigned int N = input.N; // Number of block variables
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(cudaDeviceSynchronize());
timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize());
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
cudaSafeCall(cudaMemcpy(&residual, &state.d_sumResidual[0], sizeof(float), cudaMemcpyDeviceToHost));
return residual;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of the PCG pseudocode given there
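// A rough sketch of the recurrences these kernels implement, with A = J^T J,
// M the (diagonal) preconditioner and delta the Gauss-Newton update being solved for:
//   r_0 = -J^T F,  z_0 = M^-1 r_0,  p_0 = z_0,  rDotzOld = r_0 . z_0          (PCGInit_Kernel1/2)
//   alpha       = rDotzOld / (p_k . A p_k)                                    (PCGStep_Kernel1/2)
//   delta_{k+1} = delta_k + alpha p_k,   r_{k+1} = r_k - alpha A p_k
//   z_{k+1}     = M^-1 r_{k+1},   beta = (r_{k+1} . z_{k+1}) / rDotzOld       (PCGStep_Kernel2/3)
//   p_{k+1}     = z_{k+1} + beta p_k
// The scalars (scanAlpha, scanBeta) are accumulated with warp reduces plus atomicAdd.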
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float pre = 1.0f;
float residuum = evalMinusJTFDevice(x, input, state, parameters, pre); // residuum = -J^T F - A * delta_0 = -J^T F, since delta_0 == 0
residuum = 2.0f * residuum;//TODO: Check if results are still okay once we fix this
state.d_r[x] = residuum; // store for next iteration
state.d_preconditioner[x] = pre;
const float p = pre * residuum; // apply preconditioner M^-1
state.d_p[x] = p;
d = residuum * p; // x-th term of the numerator for computing alpha and of the denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
state.d_delta[x] = 0.0;
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
#if DEBUG_PRINT_INFO
float scanAlpha = 0.0f;
cudaSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
printf("ScanAlpha: %f\n", scanAlpha);
#endif
timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float tmp = 2.0f * applyJTJDevice(x, input, state, parameters); // A * p_k = J^T J * p_k
state.d_Ap_X[x] = tmp; // store for next kernel call
d = state.d_p[x] * tmp; // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // take a descent step
float r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float z = state.d_preconditioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
b = z * r; // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK>> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
#if DEBUG_PRINT_INFO
float scanAlpha = 0.0f;
cudaSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
printf("ScanAlpha: %f\n", scanAlpha);
#endif
cudaSafeCall(cudaMemset(state.d_scanBeta, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK>> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
#if DEBUG_PRINT_INFO
float scanBeta = 0.0f;
cudaSafeCall(cudaMemcpy(&scanBeta, state.d_scanBeta, sizeof(float), cudaMemcpyDeviceToHost));
printf("ScanBeta: %f\n", scanBeta);
#endif
timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGrid, THREADS_PER_BLOCK>> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_x[x] = state.d_x[x] + state.d_delta[x];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
timer.startEvent("ApplyLinearUpdateDevice");
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize()); // make sure the update has finished before the caller reads the result
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void Precompute_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
int W = input.width;
int posy; int posx; get2DIdx(x, input.width, input.height, posy, posx);
if (posx > 0 && posy > 0 && posx < input.width - 2 && posy < input.height - 2) {
float4 temp = calShading2depthGradCompute(state, posx, posy, input);
state.B_I_dx0[x] = temp.x;
state.B_I_dx1[x] = temp.y;
state.B_I_dx2[x] = temp.z;
state.B_I[x] = temp.w;
float d = readX(state, posy, posx, W);
float d0 = readX(state, posy, posx - 1, W);
float d1 = readX(state, posy, posx + 1, W);
float d2 = readX(state, posy - 1, posx, W);
float d3 = readX(state, posy + 1, posx, W);
state.pguard[x] =
IsValidPoint(d) && IsValidPoint(d0) && IsValidPoint(d1) && IsValidPoint(d2) && IsValidPoint(d3)
&& abs(d - d0)<DEPTH_DISCONTINUITY_THRE
&& abs(d - d1)<DEPTH_DISCONTINUITY_THRE
&& abs(d - d2)<DEPTH_DISCONTINUITY_THRE
&& abs(d - d3)<DEPTH_DISCONTINUITY_THRE;
}
}
}
void Precompute(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cudaSafeCall(cudaDeviceSynchronize());
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
timer.startEvent("Precompute_Kernel");
Precompute_Kernel << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize());
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
//cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double solveSFSStub(SolverInput& input, SolverState& state, SolverParameters& parameters, ConvergenceAnalysis<float>* ca)
{
CUDATimer timer;
parameters.weightShading = parameters.weightShadingStart;
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
Precompute(input, state, parameters, timer);
float residual = EvalResidual(input, state, parameters, timer);
printf("%i: cost: %f\n", nIter, residual);
Initialization(input, state, parameters, timer);
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
parameters.weightShading += parameters.weightShadingIncrement;
if (ca != NULL)
ca->addSample(FunctionValue<float>(EvalResidual(input, state, parameters, timer)));
}
ApplyLinearUpdate(input, state, parameters, timer); // this should also be done in the last PCGIteration
timer.nextIteration();
}
Precompute(input, state, parameters, timer);
float residual = EvalResidual(input, state, parameters, timer);
printf("final cost: %f\n", residual);
timer.evaluate();
return (double)residual;
}
__global__ void PCGStep_Kernel_SaveInitialCostJTFAndPre(SolverInput input, SolverState state, SolverParameters parameters,
float* costResult, float* jtfResult, float* preResult) {
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
float pre = 1.0f;
costResult[x] = evalFDevice(x, input, state, parameters);
const float residuum = evalMinusJTFDevice(x, input, state, parameters, pre); // residuum = -J^T F - A * delta_0 = -J^T F, since delta_0 == 0
jtfResult[x] = -2.0f*residuum;//TODO: port
preResult[x] = pre;
}
}
__global__ void PCGStep_Kernel_SaveJTJ(SolverInput input, SolverState state, SolverParameters parameters, float* jtjResult)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
jtjResult[x] = 2.0f * applyJTJDevice(x, input, state, parameters); // A x p_k => J^T x J x p_k
}
}
void NonPatchSaveInitialCostJTFAndPreAndJTJ(SolverInput& input, SolverState& state, SolverParameters& parameters, float* costResult, float* jtfResult, float* preResult, float* jtjResult)
{
const unsigned int N = input.N; // Number of block variables
CUDATimer timer;
Precompute(input, state, parameters, timer);
cudaSafeCall(cudaDeviceSynchronize());
PCGStep_Kernel_SaveInitialCostJTFAndPre<< <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters, costResult, jtfResult, preResult);
cudaSafeCall(cudaDeviceSynchronize());
Initialization(input, state, parameters, timer);
PCGStep_Kernel_SaveJTJ<< <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters, jtjResult);
}
extern "C" void solveSFSEvalCurrentCostJTFPreAndJTJStub(SolverInput& input, SolverState& state, SolverParameters& parameters, float* costResult, float* jtfResult, float* preResult, float* jtjResult)
{
parameters.weightShading = parameters.weightShadingStart;
NonPatchSaveInitialCostJTFAndPreAndJTJ(input, state, parameters, costResult, jtfResult, preResult, jtjResult);
}
using namespace std;
#define NUM_ELEM_PT 16
#define NUM_ELEM_BITSHIFT 4
#define ORDERV(x,a,b) { bool swap = reverse ^ (x[a]<x[b]); \
T auxa = x[a]; \
if (swap) { x[a] = x[b]; x[b] = auxa; } }
#define B2V(x,a) { ORDERV(x,a,a+1) }
#define B4V(x,a) { for (int i4=0;i4<2;i4++) { ORDERV(x,a+i4,a+i4+2) } B2V(x,a) B2V(x,a+2) }
#define B8V(x,a) { for (int i8=0;i8<4;i8++) { ORDERV(x,a+i8,a+i8+4) } B4V(x,a) B4V(x,a+4) }
#define B16V(x,a) { for (int i16=0;i16<8;i16++) { ORDERV(x,a+i16,a+i16+8) } B8V(x,a) B8V(x,a+8) }
#define B32V(x,a) { for (int i32=0;i32<16;i32++) { ORDERV(x,a+i32,a+i32+16) } B16V(x,a) B16V(x,a+16) }
#define B64V(x,a) { for (int i64=0;i64<32;i64++) { ORDERV(x,a+i64,a+i64+32) } B32V(x,a) B32V(x,a+32) }
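// ORDERV(x,a,b) is a single compare-exchange whose direction is selected by `reverse`
// (reverse == false keeps the larger element first). BnV(x,a) merges n consecutive
// register values as a bitonic network: n/2 compare-exchanges at distance n/2 followed
// by two half-sized stages. For example, B4V compares (a,a+2) and (a+1,a+3), then runs
// B2V on (a,a+1) and on (a+2,a+3).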
template<typename T>
__forceinline__
__device__ T get(T* sdata, int i) {
return sdata[i + (i>>5)];
}
template <typename T>
__forceinline__ __device__ T max(T& a, T& b) {
return a > b ? a : b;
}
#define set(a,b,c) { int tempIndex = b; a[tempIndex + (tempIndex >> 5)] = c; }
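// get()/set() pad the shared-memory index as i + (i >> 5): indices 0..31 map to 0..31,
// 32 maps to 33, 64 to 66, and so on. One padding slot is inserted per 32 entries, which
// is why the kernels below allocate 33/32 of the element count and why the padding
// avoids shared-memory bank conflicts.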
#define NUM_GROUPS (NUM_ELEM_PT/2)
#define NUM_GROUPS_BITSHIFT (NUM_ELEM_BITSHIFT-1)
#define RUN_64(X) { \
inc >>= 5; \
low = t & (inc - 1); \
tCur = ((t - low) << 6) + low; \
reverse = ((dir & tCur) == 0); \
for (int j=0; j<NUM_GROUPS/(32 * X); j++) { \
for (int i=0; i<64; i++) x[i] = get(sdata, tCur+i*inc); \
B64V(x,0); \
for (int i=0; i<64; i++) set(sdata, tCur+i*inc, x[i]); \
} \
inc >>= 1; \
}
#define RUN_32(X) { \
inc >>= 4; \
low = t & (inc - 1); \
tCur = ((t - low) << 5) + low; \
reverse = ((dir & tCur) == 0); \
for (int j=0; j<NUM_GROUPS/(16 * X); j++) { \
for (int i=0; i<32; i++) x[i] = get(sdata, tCur+i*inc); \
B32V(x,0); \
for (int i=0; i<32; i++) set(sdata, tCur+i*inc, x[i]); \
} \
inc >>= 1; \
}
#define RUN_16(X) { \
inc >>= 3; \
low = t & (inc - 1); \
tCur = ((t - low) << 4) + low; \
reverse = ((dir & tCur) == 0); \
for (int j=0; j<NUM_GROUPS/(8 * X); j++) { \
for (int i=0; i<16; i++) x[i] = get(sdata, tCur+i*inc); \
B16V(x,0); \
for (int i=0; i<16; i++) set(sdata, tCur+i*inc, x[i]); \
} \
inc >>= 1; \
}
#define RUN_8(X) { \
inc >>= 2; \
low = t & (inc - 1); \
tCur = ((t - low) << 3) + low; \
reverse = ((dir & tCur) == 0); \
for (int j=0; j<NUM_GROUPS/(4 * X); j++) { \
for (int i=0; i<8; i++) x[i] = get(sdata, tCur+i*inc); \
B8V(x,0); \
for (int i=0; i<8; i++) set(sdata, tCur+i*inc, x[i]); \
} \
inc >>= 1; \
}
#define RUN_4(X) { \
inc >>= 1; \
low = t & (inc - 1); \
tCur = ((t - low) << 2) + low; \
reverse = ((dir & tCur) == 0); \
for (int j=0; j<NUM_GROUPS/(2 * X); j++) { \
for (int i=0;i<4;i++) x[i] = get(sdata, 4*wg*j + tCur + i*inc); \
B4V(x,0); \
for (int i=0;i<4;i++) set(sdata, 4*wg*j + tCur + i*inc, x[i]); \
} \
inc >>= 1; \
}
#define RUN_2(X) { \
low = t & (inc - 1); \
tCur = ((t - low) << 1) + low; \
reverse = ((dir & tCur) == 0); \
for (int j=0; j<NUM_GROUPS/(X); j++) { \
for (int i=0;i<2;i++) x[i] = get(sdata, 2*wg*j + tCur + i*inc); \
B2V(x,0); \
for (int i=0;i<2;i++) set(sdata, 2*wg*j + tCur + i*inc, x[i]); \
} \
inc >>= 1; \
}
#define REDUCE(X) { \
tCur = ((t >> klog2) << (klog2 + 1)) + (t & (k-1)); \
for(int j=0; j<NUM_GROUPS/(X); j++) { \
x[j] = max(get(sdata, 2*wg*j + tCur), get(sdata, 2*wg*j + tCur + k)); \
} \
__syncthreads(); \
for(int j=0; j<NUM_GROUPS/(X); j++) { \
set(sdata, wg*j + t, x[j]); \
} \
}
template<typename T>
__global__ void Bitonic_TopKLocalSortInPlace(T* __restrict__ in, T* __restrict__ out,
const int k, const int klog2) {
/* const int k = K;*/
/*const int klog2 = KLog2;*/
// Shared mem size is determined by the host app at run time.
// For n elements, we have n * 33/32 shared memory.
// We use this to break bank conflicts.
SharedMemory<T> smem;
T* sdata = smem.getPointer();
const int t = threadIdx.x; // index in workgroup
const int wg = blockDim.x; // workgroup size = block size, power of 2
const int gid = blockIdx.x;
int length = min(NUM_GROUPS, k >> 1);
int inc = length;
inc >>= NUM_GROUPS_BITSHIFT;
int low = t & (inc - 1);
int dir = length << 1;
bool reverse;
T x[NUM_ELEM_PT];
// Move IN, OUT to block start
in += NUM_ELEM_PT * gid * wg;
int tCur = t << NUM_ELEM_BITSHIFT;
for (int i=0; i<NUM_ELEM_PT; i++) x[i] = in[tCur + i];
for (int i=0; i<NUM_ELEM_PT; i+=2) {
reverse = ((i >> 1) + 1)&1;
B2V(x,i);
}
if (k > 2) {
#if NUM_ELEM_PT > 4
for (int i=0; i<NUM_ELEM_PT; i+=4) {
reverse = ((i >> 2) + 1)&1;
B4V(x,i);
}
if (k > 4) {
#if NUM_ELEM_PT > 8
for (int i=0; i<NUM_ELEM_PT; i+=8) {
reverse = ((i >> 3) + 1)&1;
B8V(x,i);
}
if (k > 8) {
#if NUM_ELEM_PT > 16
for (int i=0; i<NUM_ELEM_PT; i+=16) {
reverse = ((i >> 4) + 1)&1;
B16V(x,i);
}
if (k > 16) {
#if NUM_ELEM_PT > 32
for (int i=0; i<NUM_ELEM_PT; i+=32) {
reverse = ((i >> 5) + 1)&1;
B32V(x,i);
}
if (k > 32) {
reverse = ((dir & tCur) == 0); B64V(x,0);
}
#else
reverse = ((dir & tCur) == 0); B32V(x,0);
#endif
}
#else
reverse = ((dir & tCur) == 0); B16V(x,0);
#endif
}
#else
reverse = ((dir & tCur) == 0); B8V(x,0);
#endif
}
#else
reverse = ((dir & tCur) == 0); B4V(x,0);
#endif
}
for (int i=0; i<NUM_ELEM_PT; i++) set(sdata, tCur+i, x[i]);
__syncthreads();
// Complete the remaining steps to create sorted sequences of length k.
int mod;
unsigned int mask;
for (length=NUM_ELEM_PT; length<k; length<<=1)
{
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(1) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0);
if (mod & 1)
{
RUN_2(1)
__syncthreads();
}
if (mod & 2)
{
RUN_4(1)
__syncthreads();
}
#if NUM_ELEM_PT > 8
if (mod & 4)
{
RUN_8(1)
__syncthreads();
}
#if NUM_ELEM_PT > 16
if (mod & 8)
{
RUN_16(1)
__syncthreads();
}
while (inc > 8)
{
RUN_32(1)
__syncthreads();
}
#else
while (inc > 4)
{
RUN_16(1)
__syncthreads();
}
#endif // NUM_ELEM_PT > 16
#else
while (inc > 2)
{
RUN_8(1)
__syncthreads();
}
#endif // NUM_ELEM_PT > 8
}
// Step 2: Reduce the size by factor 2 by pairwise comparing adjacent sequences.
REDUCE(1)
__syncthreads();
// End of Step 2;
// Step 3: Construct sorted sequence of length k from bitonic sequence of length k.
// We now have n/2 elements.
length = k >> 1;
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(1) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0);
if (mod & 1)
{
RUN_2(2)
__syncthreads();
}
#if NUM_ELEM_PT > 4
if (mod & 2)
{
RUN_4(2)
__syncthreads();
}
#if NUM_ELEM_PT > 8
if (mod & 4)
{
RUN_8(2)
__syncthreads();
}
while (inc > 4)
{
if (t < (wg >> 1)) {
RUN_16(1)
} else {
inc >>= 4;
}
__syncthreads();
}
#else
while (inc > 2)
{
RUN_8(2)
__syncthreads();
}
#endif // NUM_ELEM_PT > 16
#else
while (inc > 1)
{
RUN_4(2)
__syncthreads();
}
#endif // NUM_ELEM_PT > 8
// Step 4: Reduce size again by 2.
REDUCE(2)
__syncthreads();
// End of Step 4;
length = k >> 1;
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(2) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 1);
#if NUM_ELEM_PT > 4
if (mod & 1)
{
RUN_2(4)
__syncthreads();
}
#if NUM_ELEM_PT > 8
if (mod & 2)
{
RUN_4(4)
__syncthreads();
}
while (inc > 2)
{
if (t < (wg >> 1)) {
RUN_8(2)
} else {
inc >>= 3;
}
__syncthreads();
}
#else
while (inc > 1)
{
RUN_4(4)
__syncthreads();
}
#endif // NUM_ELEM_PT > 16
#else
while (inc > 0)
{
RUN_2(4)
__syncthreads();
}
#endif // NUM_ELEM_PT > 8
// Step 4: Reduce size again by 2.
REDUCE(4)
__syncthreads();
// End of Step 4;
length = k >> 1;
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(4) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 2);
if (mod & 1)
{
RUN_2(8)
__syncthreads();
}
while (inc > 0)
{
if (t < (wg >> 1)) {
RUN_4(4)
} else {
inc >>= 2;
}
__syncthreads();
}
out += (NUM_ELEM_PT/16) * gid * wg;
tCur = ((t >> klog2) << (klog2+1)) + (t&(k-1));
for (int j=0; j<NUM_GROUPS/8; j++) {
T x0 = get(sdata, 2*wg*j + tCur);
T x1 = get(sdata, 2*wg*j + tCur + k);
out[wg*j + t] = max(x0, x1);
}
/* out += (NUM_ELEM_PT/8) * gid * wg;*/
//tCur = ((t >> klog2) << (klog2+1)) + (t&(k-1));
//for (int j=0; j<NUM_GROUPS/4; j++) {
//T x0 = get(sdata, 2*wg*j + tCur);
//T x1 = get(sdata, 2*wg*j + tCur + k);
//out[wg*j + t] = max(x0, x1);
/*
}*/
}
template<typename T>
__global__ void Bitonic_TopKReduce(T* __restrict__ in, T* __restrict__ out,
const int k, const int klog2)
{
/* const int k = K;*/
/*const int klog2 = KLog2;*/
// Shared mem size is determined by the host app at run time.
// For n elements, we have n * 33/32 shared memory.
// We use this to break bank conflicts.
SharedMemory<T> smem;
T* sdata = smem.getPointer();
const int t = threadIdx.x; // index in workgroup
const int wg = blockDim.x; // workgroup size = block size, power of 2
const int gid = blockIdx.x;
int length = min(NUM_GROUPS, k >> 1);
int inc = length;
inc >>= NUM_GROUPS_BITSHIFT;
int low = t & (inc - 1);
int dir = length << 1;
bool reverse;
T x[NUM_ELEM_PT];
// Move IN, OUT to block start
in += NUM_ELEM_PT * gid * wg;
int tCur = t << NUM_ELEM_BITSHIFT;
for (int i=0; i<NUM_ELEM_PT; i++) x[i] = in[tCur + i];
for (int i=0; i<NUM_ELEM_PT; i++) set(sdata, tCur+i, x[i]);
__syncthreads();
// Complete the remaining steps to create sorted sequences of length k.
int mod;
unsigned int mask;
length = (k >> 1);
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(1) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0);
if (mod & 1)
{
RUN_2(1)
__syncthreads();
}
if (mod & 2)
{
RUN_4(1)
__syncthreads();
}
#if NUM_ELEM_PT > 8
if (mod & 4)
{
RUN_8(1)
__syncthreads();
}
#if NUM_ELEM_PT > 16
if (mod & 8)
{
RUN_16(1)
__syncthreads();
}
while (inc > 8)
{
RUN_32(1)
__syncthreads();
}
#else
while (inc > 4)
{
RUN_16(1)
__syncthreads();
}
#endif // NUM_ELEM_PT > 16
#else
while (inc > 2)
{
RUN_8(1)
__syncthreads();
}
#endif // NUM_ELEM_PT > 8
// Step 2: Reduce the size by factor 2 by pairwise comparing adjacent sequences.
REDUCE(1)
__syncthreads();
// End of Step 2;
// Step 3: Construct sorted sequence of length k from bitonic sequence of length k.
// We now have n/2 elements.
length = k >> 1;
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(1) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0);
if (mod & 1)
{
RUN_2(2)
__syncthreads();
}
#if NUM_ELEM_PT > 4
if (mod & 2)
{
RUN_4(2)
__syncthreads();
}
#if NUM_ELEM_PT > 8
if (mod & 4)
{
RUN_8(2)
__syncthreads();
}
while (inc > 4)
{
if (t < (wg >> 1)) {
RUN_16(1)
} else {
inc >>= 4;
}
__syncthreads();
}
#else
while (inc > 2)
{
RUN_8(2)
__syncthreads();
}
#endif // NUM_ELEM_PT > 16
#else
while (inc > 1)
{
RUN_4(2)
__syncthreads();
}
#endif // NUM_ELEM_PT > 8
// Step 4: Reduce size again by 2.
REDUCE(2)
__syncthreads();
// End of Step 4;
length = k >> 1;
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(2) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 1);
#if NUM_ELEM_PT > 4
if (mod & 1)
{
RUN_2(4)
__syncthreads();
}
#if NUM_ELEM_PT > 8
if (mod & 2)
{
RUN_4(4)
__syncthreads();
}
while (inc > 2)
{
if (t < (wg >> 1)) {
RUN_8(2)
} else {
inc >>= 3;
}
__syncthreads();
}
#else
while (inc > 1)
{
RUN_4(4)
__syncthreads();
}
#endif // NUM_ELEM_PT > 16
#else
while (inc > 0)
{
RUN_2(4)
__syncthreads();
}
#endif // NUM_ELEM_PT > 8
// Step 4: Reduce size again by 2.
REDUCE(4)
__syncthreads();
// End of Step 4;
length = k >> 1;
dir = length << 1;
// Loop on comparison distance (between keys)
inc = length;
mod = inc;
mask = ~(NUM_ELEM_PT/(4) - 1);
while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 2);
if (mod & 1)
{
RUN_2(8)
__syncthreads();
}
while (inc > 0)
{
if (t < (wg >> 1)) {
RUN_4(4)
} else {
inc >>= 2;
}
__syncthreads();
}
out += (NUM_ELEM_PT/16) * gid * wg;
tCur = ((t >> klog2) << (klog2+1)) + (t&(k-1));
for (int j=0; j<NUM_GROUPS/8; j++) {
T x0 = get(sdata, 2*wg*j + tCur);
T x1 = get(sdata, 2*wg*j + tCur + k);
out[wg*j + t] = max(x0, x1);
}
}
/*
const int tab32[32] = {
0, 9, 1, 10, 13, 21, 2, 29,
11, 14, 16, 18, 22, 25, 3, 30,
8, 12, 20, 28, 15, 17, 24, 7,
19, 27, 23, 6, 26, 5, 4, 31
};
int log2_32 (uint value) {
value |= value >> 1;
value |= value >> 2;
value |= value >> 4;
value |= value >> 8;
value |= value >> 16;
return tab32[(uint)(value*0x07C4ACDD) >> 27];
}
*/
/// d_keys_in & d_keys_buff: sizeof(KeyT) * num_items
/// d_keys_out: sizeof(KeyT) * k
template<typename KeyT>
void bitonicTopK(KeyT *d_keys_in,
KeyT* d_keys_buff,
const size_t num_items,
const size_t k, vector<KeyT>& vec) {
if (k < 16) {
printf("Fatal: k must be greater than 16! %s, %d", __FILE__, __LINE__);
exit(-1);
}
DoubleBuffer<KeyT> d_buffer(d_keys_in, d_keys_buff);
int klog2 = log2_32(k);
int current = 0;
int numThreads = num_items;
int wg_size = 64 > k ? 64 : k;
numThreads >>= 1; // Each thread processes 2 elements.
numThreads >>= NUM_GROUPS_BITSHIFT;
Bitonic_TopKLocalSortInPlace<KeyT><<<numThreads/wg_size, wg_size, ((2*NUM_GROUPS*wg_size*33)/32)*sizeof(KeyT)>>>(d_buffer.Current(), d_buffer.Alternate(), k, klog2);
current = 1-current;
// Toggle the buffer index in the double buffer
d_buffer.selector = d_buffer.selector ^ 1;
numThreads >>= (1 + NUM_GROUPS_BITSHIFT);
while (numThreads >= wg_size) {
Bitonic_TopKReduce<KeyT><<<numThreads/wg_size, wg_size, ((2*NUM_GROUPS*wg_size*33)/32)*sizeof(KeyT)>>>(d_buffer.Current(), d_buffer.Alternate(), k, klog2);
// Toggle the buffer index in the double buffer
d_buffer.selector = d_buffer.selector ^ 1;
numThreads >>= (1 + NUM_GROUPS_BITSHIFT);
}
//vector<KeyT> res_vec(2*numThreads*NUM_GROUPS);
//cudaMemcpy(res_vec.data(), d_buffer.Current(), 2 * numThreads * NUM_GROUPS * sizeof(KeyT), cudaMemcpyDeviceToHost);
//std::sort(res_vec.begin(), res_vec.end(), std::greater<KeyT>());
//cudaMemcpy(d_keys_out, res_vec.data(), k * sizeof(KeyT), cudaMemcpyHostToDevice);
vec.resize(2*numThreads*NUM_GROUPS);
cudaMemcpy(vec.data(), d_buffer.Current(), 2 * numThreads * NUM_GROUPS * sizeof(KeyT), cudaMemcpyDeviceToHost);
std::sort(vec.begin(), vec.end(), std::greater<KeyT>());
}
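// A minimal host-side usage sketch (illustrative only: the buffer names, sizes and the
// missing error checking are assumptions, not part of this file). num_items is assumed
// to be a power of two here and k must be at least 16; fastTopK below handles the
// general case.
//
//   const size_t n = 1 << 20, k = 64;
//   float *d_in, *d_buff;
//   cudaMalloc(&d_in,   n * sizeof(float));
//   cudaMalloc(&d_buff, n * sizeof(float));
//   // ... copy the input values into d_in ...
//   vector<float> candidates;
//   bitonicTopK<float>(d_in, d_buff, n, k, candidates); // candidates end up sorted descending
//   candidates.resize(k);                               // keep the k largest values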
/// merge-sort
template <typename T>
__device__ void merge(T* left, const size_t left_len, T* right, const size_t right_len,
T* dest, bool greater) {
size_t i = 0, j = 0, k = 0;
while(i < left_len && j < right_len) {
if(greater) {
if(left[i] > right[j]) dest[k++] = left[i++];
else dest[k++] = right[j++];
} else {
if(left[i] < right[j]) dest[k++] = left[i++];
else dest[k++] = right[j++];
}
}
while( i < left_len ) dest[k++] = left[i++];
while( j < right_len ) dest[k++] = right[j++];
}
template <typename T>
__global__ void merge_sort_kernel(T* in, const size_t num, T* out, bool greater) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t tid = threadIdx.x;
/// 256 threads per block
__shared__ T smem[256];
__shared__ T sout[256];
smem[tid] = in[gid];
__syncthreads();
if (tid < 128) merge<T>(smem+tid, 1, smem+(tid+128), 1, sout+2*tid, greater);
__syncthreads();
if (tid < 64) merge<T>(sout+tid*2, 2, sout+(tid+64)*2, 2, smem+4*tid, greater);
__syncthreads();
if (tid < 32) merge<T>(smem+tid*4, 4, smem+(tid+32)*4, 4, sout+8*tid, greater);
__syncthreads();
if (tid < 16) merge<T>(sout+tid*8, 8, sout+(tid+16)*8, 8, smem+16*tid, greater);
__syncthreads();
if (tid < 8) merge<T>(smem+tid*16, 16, smem+(tid+8)*16, 16, sout+32*tid, greater);
__syncthreads();
if (tid < 4) merge<T>(sout+tid*32, 32, sout+(tid+4)*32, 32, smem+64*tid, greater);
__syncthreads();
if (tid < 2) merge<T>(smem+tid*64, 64, smem+(tid+2)*64, 64, sout+128*tid, greater);
__syncthreads();
if (tid < 1) merge<T>(sout+tid*128, 128, sout+(tid+1)*128, 128, smem+256*tid, greater);
__syncthreads();
out[gid] = smem[tid];
__syncthreads();
}
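// Each 256-thread block sorts its 256 elements entirely in shared memory: eight rounds
// of pairwise merging ping-pong between smem and sout, with the number of active threads
// halving (128, 64, ..., 1) while the merged segment length doubles (2, 4, ..., 256).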
template <typename T>
__global__ void merge_blocks_result(T* data, const size_t num, const size_t seg_len, const size_t mid,
T* out, const bool greater) {
const size_t tid = threadIdx.x;
merge<T>(data + seg_len*tid, seg_len, data+seg_len*(tid+mid), seg_len, out+seg_len*2*tid, greater);
}
template <typename T1, typename T2>
T1 divUp(T1 a, T2 b) {
return (a + b - 1) / b;
}
template <typename T>
void mergeSort(T* in, const size_t num, T*& out, T*& out2, bool greater) {
const size_t threads_per_block = 256;
const size_t num_blocks = divUp(num, threads_per_block);
//int k = log2_32(num);
//size_t power_k = 2 << k;
///printf("num:%d, log2:%d, diff:%d\n", num - power_k);
merge_sort_kernel<T><<<num_blocks, threads_per_block>>>(
in, num, out, greater);
/// merge blocks' result
DoubleBuffer<T> buffers(out, out2);
int threads = num_blocks >> 1;
size_t seg_len = threads_per_block;
buffers.selector = 0;
while (threads) {
merge_blocks_result<T><<<1, threads>>>(buffers.Current(), num, seg_len, threads, buffers.Alternate(), greater);
buffers.selector = buffers.selector ^ 1;
threads >>= 1;
seg_len <<= 1;
}
if(buffers.Current() != out2) {
out2 = buffers.Current();
out = buffers.Alternate();
}
}
template <typename T>
void fastTopK(T* data, T* buff, const size_t num, const size_t k, T* out) {
int log_k = log2_32(k);
int power_k = 2 << (log_k - 1);
size_t new_k = k;
if (power_k != k) {
new_k = 2 << log_k;
}
assert (new_k < num);
/// stage 1 : 2 ^ log2(num)
int log_num = log2_32(num);
size_t power_num = 2 << (log_num-1);
size_t diff_num = 0;
if (power_num != num) {
diff_num = num - power_num;
}
vector<T> out_buff[4];
bitonicTopK<T>(data, buff, power_num, new_k, out_buff[0]);
if (!diff_num) {
cudaMemcpy(out, out_buff[0].data(), sizeof(T) * k, cudaMemcpyHostToDevice);
return;
}
/// stage 2: diff = num - 2^(log2(num))
if (diff_num < 1024 || diff_num <= new_k) {
out_buff[1].resize(new_k + diff_num);
cudaMemcpy(out_buff[1].data(), data + power_num, diff_num * sizeof(T), cudaMemcpyDeviceToHost);
memcpy(out_buff[1].data() + diff_num, out_buff[0].data(), sizeof(T) * new_k);
std::sort(out_buff[1].begin(), out_buff[1].end(), std::greater<T>());
cudaMemcpy(out, out_buff[1].data(), sizeof(T) * k, cudaMemcpyHostToDevice);
return;
}
/// stage 3: diff2 = diff - 2^(log2(diff))
int log_diff = log2_32(diff_num);
size_t power_diff = 2 << (log_diff-1);
bitonicTopK<T>(data + power_num, buff+power_num, power_diff, new_k, out_buff[1]);
size_t diff2 = diff_num - power_diff;
int log_diff2 = log2_32(diff2);
size_t power_diff2 = 2 << (log_diff2-1);
size_t diff3 = diff2 - power_diff2;
///printf("diff:%d, diff2:%d, diff3:%d\n", diff_num, diff2, diff3);
if (diff2 < 1024 || diff2 <= new_k) {
out_buff[2].resize(new_k * 2 + diff2);
if (diff2) cudaMemcpy(out_buff[2].data(), data+power_num+power_diff, sizeof(T) * diff2, cudaMemcpyDeviceToHost);
memcpy(out_buff[2].data() + diff2, out_buff[0].data(), sizeof(T) * new_k );
memcpy(out_buff[2].data() + new_k + diff2, out_buff[1].data(), sizeof(T) * new_k);
std::sort(out_buff[2].begin(), out_buff[2].end(), std::greater<T>());
cudaMemcpy(out, out_buff[2].data(), sizeof(T) * k, cudaMemcpyHostToDevice);
return;
}
bitonicTopK<T>(data+power_num+power_diff, buff+power_num+power_diff, power_diff2, new_k, out_buff[2]);
out_buff[3].resize(new_k * 3 + diff3);
cudaMemcpy(out_buff[3].data(), data+power_num+power_diff+power_diff2, diff3 * sizeof(T), cudaMemcpyDeviceToHost);
memcpy(out_buff[3].data() + diff3, out_buff[0].data(), sizeof(T) * new_k);
memcpy(out_buff[3].data() + diff3 + new_k, out_buff[1].data(), sizeof(T) * new_k);
memcpy(out_buff[3].data() + diff3 + new_k *2, out_buff[2].data(), sizeof(T) * new_k);
std::sort(out_buff[3].begin(), out_buff[3].end(), std::greater<T>());
cudaMemcpy(out, out_buff[3].data(), sizeof(T)*k, cudaMemcpyHostToDevice);
}
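// fastTopK splits the input into power-of-two chunks that bitonicTopK can handle and
// merges the leftovers on the host. For example, num = 5000 gives power_num = 4096 and
// diff_num = 904; since 904 < 1024, the 4096-element prefix goes through bitonicTopK and
// the 904-element tail is copied to the host and sorted together with the bitonic
// candidates (stage 2).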
template void bitonicTopK<float>(float*, float*, const size_t, const size_t, vector<float>&);
template void bitonicTopK<double>(double*, double*, const size_t, const size_t, vector<double>&);
template void bitonicTopK<int>(int*, int*, const size_t, const size_t, vector<int>&);
template void fastTopK<float>(float* , float* , const size_t , const size_t , float* );
template void fastTopK<double>(double* , double* , const size_t , const size_t , double* );
template void fastTopK<int>(int* , int* , const size_t , const size_t , int* );
#include <iostream>
#include <algorithm>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
template <typename T=float, bool fill=true>
__global__ void kernal_UpSampling_Forward(
T const *x_buf,
T *y_buf,
int c_size,
int input_h_size,
int input_w_size,
int output_h_size,
int output_w_size,
int filter_h_size,
int filter_w_size,
int frame_size,
int frame_stride
)
{
int id = threadIdx.x;
int id_step = blockDim.x;
int ix = blockIdx.y * blockDim.y + threadIdx.y;
int iy = blockIdx.z * blockDim.z + threadIdx.z;
if ( iy < input_h_size && ix < input_w_size ) {
for ( int c = 0; c < c_size; ++c ) {
int input_node = (c * input_h_size + iy) * input_w_size + ix;
T const *x_ptr = &x_buf[input_node * frame_stride];
for ( int frame = id; frame < frame_size; frame += id_step ) {
T x_val = x_ptr[frame];
for ( int fy = 0; fy < filter_h_size; ++fy) {
int oy = iy * filter_h_size + fy;
for (int fx = 0; fx < filter_w_size; ++fx) {
int ox = ix * filter_w_size + fx;
int output_node = (c * output_h_size + oy) * output_w_size + ox;
T *y_ptr = &y_buf[output_node * frame_stride];
if ( fill ) {
y_ptr[frame] = x_val;
}
else {
if ( fx == (filter_w_size / 2) && fy == (filter_h_size / 2) ) {
y_ptr[frame] = x_val;
}
else {
y_ptr[frame] = 0;
}
}
}
}
}
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_UpSampling_Forward
(
float const *dev_x_buf,
float *dev_y_buf,
int input_w_size,
int input_h_size,
int c_size,
int filter_w_size,
int filter_h_size,
int fill,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
int output_w_size = input_w_size * filter_w_size;
int output_h_size = input_h_size * filter_h_size;
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(1024, 1, 1);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= input_w_size ) { block.y /= 2; block.z *= 2; }
while ( (int)block.z / 2 >= input_h_size ) { block.z /= 2; }
dim3 grid(1, (input_w_size + (block.y - 1)) / block.y, (input_h_size + (block.z - 1)) / block.z);
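// Block shape heuristic: start from a 1024 x 1 x 1 block, halve block.x while half of it
// still covers frame_size (threads in x stride over frames inside the kernel), and hand
// the freed threads to the image x (block.y) and y (block.z) dimensions; the grid then
// tiles whatever of the input image remains.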
if ( fill ) {
kernal_UpSampling_Forward<float, true><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
c_size,
input_h_size,
input_w_size,
output_h_size,
output_w_size,
filter_h_size,
filter_w_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
else {
kernal_UpSampling_Forward<float, false><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
c_size,
input_h_size,
input_w_size,
output_h_size,
output_w_size,
filter_h_size,
filter_w_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
return 0;
}
BBCU_DLL_EXPORT int bbcu_bit_UpSampling_Forward
(
int const *dev_x_buf,
int *dev_y_buf,
int input_w_size,
int input_h_size,
int c_size,
int filter_w_size,
int filter_h_size,
int fill,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
int output_w_size = input_w_size * filter_w_size;
int output_h_size = input_h_size * filter_h_size;
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
frame_size = (frame_size + 31) / 32;
dim3 block(1024, 1, 1);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= input_w_size ) { block.y /= 2; block.z *= 2; }
while ( (int)block.z / 2 >= input_h_size ) { block.z /= 2; }
dim3 grid(1, (input_w_size + (block.y - 1)) / block.y, (input_h_size + (block.z - 1)) / block.z);
if ( fill ) {
kernal_UpSampling_Forward<int, true><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
c_size,
input_h_size,
input_w_size,
output_h_size,
output_w_size,
filter_h_size,
filter_w_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
else {
kernal_UpSampling_Forward<int, false><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
c_size,
input_h_size,
input_w_size,
output_h_size,
output_w_size,
filter_h_size,
filter_w_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
return 0;
}
//////////////////////////////
// backward
//////////////////////////////
template <typename T=float, bool fill=true>
__global__ void kernal_UpSampling_Backward(
T const *dy_buf,
T *dx_buf,
int c_size,
int input_h_size,
int input_w_size,
int output_h_size,
int output_w_size,
int filter_h_size,
int filter_w_size,
int frame_size,
int frame_stride
)
{
int id = threadIdx.x;
int id_step = blockDim.x;
int ix = blockIdx.y * blockDim.y + threadIdx.y;
int iy = blockIdx.z * blockDim.z + threadIdx.z;
if ( iy < input_h_size && ix < input_w_size ) {
for ( int c = 0; c < c_size; ++c ) {
int input_node = (c * input_h_size + iy) * input_w_size + ix;
T *dx_ptr = &dx_buf[input_node * frame_stride];
if ( fill ) {
for ( int frame = id; frame < frame_size; frame += id_step ) {
T dx_val = 0;
for ( int fy = 0; fy < filter_h_size; ++fy) {
int oy = iy * filter_h_size + fy;
for (int fx = 0; fx < filter_w_size; ++fx) {
int ox = ix * filter_w_size + fx;
int output_node = (c * output_h_size + oy) * output_w_size + ox;
T const *dy_ptr = &dy_buf[output_node * frame_stride];
dx_val += dy_ptr[frame];
}
}
dx_ptr[frame] = dx_val;
}
}
else {
int fx = (filter_w_size / 2);
int fy = (filter_h_size / 2);
int oy = iy * filter_h_size + fy;
int ox = ix * filter_w_size + fx;
int output_node = (c * output_h_size + oy) * output_w_size + ox;
T const *dy_ptr = &dy_buf[output_node * frame_stride];
for ( int frame = id; frame < frame_size; frame += id_step ) {
dx_ptr[frame] = dy_ptr[frame];
}
}
}
}
}
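// Backward pass: with fill == true every output pixel in the filter_h x filter_w patch
// received the input value, so each input gradient is the sum of the gradients over that
// patch; with fill == false only the centre tap was written, so its gradient is copied
// through directly.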
BBCU_DLL_EXPORT int bbcu_fp32_UpSampling_Backward
(
float const *dev_dy_buf,
float *dev_dx_buf,
int input_w_size,
int input_h_size,
int c_size,
int filter_w_size,
int filter_h_size,
int fill,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
int output_w_size = input_w_size * filter_w_size;
int output_h_size = input_h_size * filter_h_size;
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(1024, 1, 1);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= input_w_size ) { block.y /= 2; block.z *= 2; }
while ( (int)block.z / 2 >= input_h_size ) { block.z /= 2; }
dim3 grid(1, (input_w_size + (block.y - 1)) / block.y, (input_h_size + (block.z - 1)) / block.z);
if ( fill ) {
kernal_UpSampling_Backward<float, true><<<grid, block, 0, streamId>>>(
dev_dy_buf,
dev_dx_buf,
c_size,
input_h_size,
input_w_size,
output_h_size,
output_w_size,
filter_h_size,
filter_w_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
else {
kernal_UpSampling_Backward<float, false><<<grid, block, 0, streamId>>>(
dev_dy_buf,
dev_dx_buf,
c_size,
input_h_size,
input_w_size,
output_h_size,
output_w_size,
filter_h_size,
filter_w_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
return 0;
}
// end of file
/**
 * @file
 * @brief
 * This file contains preprocessors that convert raw images to a common format for
 * the computational kernels. It also performs color space transformations.
*/
#include "gpujpeg_colorspace.h"
#include "gpujpeg_preprocessor_common.h"
#include "gpujpeg_preprocessor.h"
#include "gpujpeg_util.h"
/**
 * Store a value into the component data buffer at the position determined by the buffer size and subsampling factors
*/
template<
unsigned int s_samp_factor_h,
unsigned int s_samp_factor_v
>
static __device__ void
gpujpeg_preprocessor_raw_to_comp_store(uint8_t value, unsigned int position_x, unsigned int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
const unsigned int samp_factor_h = ( s_samp_factor_h == GPUJPEG_DYNAMIC ) ? comp.sampling_factor.horizontal : s_samp_factor_h;
const unsigned int samp_factor_v = ( s_samp_factor_v == GPUJPEG_DYNAMIC ) ? comp.sampling_factor.vertical : s_samp_factor_v;
if ( (position_x % samp_factor_h) || (position_y % samp_factor_v) )
return;
position_x = position_x / samp_factor_h;
position_y = position_y / samp_factor_v;
const unsigned int data_position = position_y * comp.data_width + position_x;
comp.d_data[data_position] = value;
}
template<enum gpujpeg_pixel_format>
inline __device__ void raw_to_comp_load(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3);
template<>
inline __device__ void raw_to_comp_load<GPUJPEG_U8>(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
r1 = d_data_raw[image_position];
r2 = 128;
r3 = 128;
}
template<>
inline __device__ void raw_to_comp_load<GPUJPEG_444_U8_P0P1P2>(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
r1 = d_data_raw[image_position];
r2 = d_data_raw[image_width * image_height + image_position];
r3 = d_data_raw[2 * image_width * image_height + image_position];
}
template<>
inline __device__ void raw_to_comp_load<GPUJPEG_422_U8_P0P1P2>(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
r1 = d_data_raw[image_position];
r2 = d_data_raw[image_width * image_height + image_position / 2];
r3 = d_data_raw[image_width * image_height + image_height * ((image_width + 1) / 2) + image_position / 2];
}
template<>
inline __device__ void raw_to_comp_load<GPUJPEG_420_U8_P0P1P2>(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
r1 = d_data_raw[image_position];
r2 = d_data_raw[image_width * image_height + y / 2 * ((image_width + 1) / 2) + x / 2];
r3 = d_data_raw[image_width * image_height + ((image_height + 1) / 2 + y / 2) * ((image_width + 1) / 2) + x / 2];
}
template<>
inline __device__ void raw_to_comp_load<GPUJPEG_444_U8_P012>(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
const unsigned int offset = image_position * 3;
r1 = d_data_raw[offset];
r2 = d_data_raw[offset + 1];
r3 = d_data_raw[offset + 2];
}
template<>
inline __device__ void raw_to_comp_load<GPUJPEG_444_U8_P012A>(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
const unsigned int offset = image_position * 4;
r1 = d_data_raw[offset];
r2 = d_data_raw[offset + 1];
r3 = d_data_raw[offset + 2];
}
template<>
inline __device__ void raw_to_comp_load<GPUJPEG_422_U8_P1020>(const uint8_t* d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uint8_t &r1, uint8_t &r2, uint8_t &r3)
{
const unsigned int offset = image_position * 2;
r1 = d_data_raw[offset + 1];
if ( image_position % 2 == 0 ) {
r2 = d_data_raw[offset];
r3 = d_data_raw[offset + 2];
} else {
r2 = d_data_raw[offset - 2];
r3 = d_data_raw[offset];
}
}
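// The packed 4:2:2 loader above reads luma from odd byte offsets (offset + 1); the two
// chroma samples are shared by a horizontal pixel pair, so even pixels read Cb/Cr at
// offset and offset + 2 while odd pixels reach back to offset - 2 and offset
// (consistent with a UYVY-style packing).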
/**
* Kernel - Copy raw image source data into three separated component buffers
*/
typedef void (*gpujpeg_preprocessor_encode_kernel)(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift);
/**
* @note
 * In previous versions, there was an optimization that preloaded aligned data into shared memory.
 * It was removed because it no longer provided any performance improvement
 * (removing it actually yields a slight performance gain).
*/
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
enum gpujpeg_pixel_format pixel_format,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_raw_to_comp_kernel(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
// Position
int image_position = gX + x;
int image_position_y = gpujpeg_const_div_divide(image_position, width_div_mul, width_div_shift);
int image_position_x = image_position - (image_position_y * image_width);
// Load
uint8_t r1;
uint8_t r2;
uint8_t r3;
raw_to_comp_load<pixel_format>(d_data_raw, image_width, image_height, image_position, image_position_x, image_position_y, r1, r2, r3);
// Color transform
gpujpeg_color_transform<color_space, color_space_internal>::perform(r1, r2, r3);
// Store
if ( image_position < (image_width * image_height) ) {
gpujpeg_preprocessor_raw_to_comp_store<s_comp1_samp_factor_h, s_comp1_samp_factor_v>(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp2_samp_factor_h, s_comp2_samp_factor_v>(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp3_samp_factor_h, s_comp3_samp_factor_v>(r3, image_position_x, image_position_y, data.comp[2]);
}
}
/**
* Select preprocessor encode kernel
*
* @param encoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_encode_kernel
gpujpeg_preprocessor_select_encode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical
);
#define RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose >= 2 ) { \
printf("Using faster kernel for preprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012A || PIXEL_FORMAT == GPUJPEG_444_U8_P012Z ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_422_U8_P1020 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_422_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_420_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, P1, P2, P3, P4, P5, P6>; \
} else if ( PIXEL_FORMAT == GPUJPEG_U8 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_U8, P1, P2, P3, P4, P5, P6>; \
} else { \
assert(false); \
} \
}
#define RETURN_KERNEL(PIXEL_FORMAT, COLOR) \
RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 1, 1, 1) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 2, 2, 2) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 2, 1, 2) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 1, 2, 1) \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 4, 4, 4, 4) \
else { \
if ( coder->param.verbose >= 2 ) { \
printf("Using slower kernel for preprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P012A || PIXEL_FORMAT == GPUJPEG_444_U8_P012Z ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_422_U8_P1020 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_444_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_422_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_420_U8_P0P1P2 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else if ( PIXEL_FORMAT == GPUJPEG_U8 ) { \
return &gpujpeg_preprocessor_raw_to_comp_kernel<color_space_internal, COLOR, GPUJPEG_U8, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} else { \
assert(false); \
} \
    }
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_NONE);
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_RGB);
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601);
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601_256LVLS);
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT709);
}
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YUV);
}
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
static int gpujpeg_preprocessor_encode_no_transform(struct gpujpeg_coder * coder)
{
if (gpujpeg_pixel_format_is_interleaved(coder->param_image.pixel_format)) {
return 0;
}
if (coder->param_image.comp_count == 3 && coder->param_image.color_space != coder->param.color_space_internal) {
return 0;
}
const int *sampling_factors = gpujpeg_pixel_format_get_sampling_factor(coder->param_image.pixel_format);
for (int i = 0; i < coder->param_image.comp_count; ++i) {
if (coder->component[i].sampling_factor.horizontal != sampling_factors[i * 2]
|| coder->component[i].sampling_factor.vertical != sampling_factors[i * 2 + 1]) {
return 0;
}
}
return 1;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_encoder_init(struct gpujpeg_coder* coder)
{
coder->preprocessor = NULL;
if ( coder->param_image.comp_count == 1 ) {
return 0;
}
if ( gpujpeg_preprocessor_encode_no_transform(coder) ) {
if ( coder->param.verbose >= 2 ) {
printf("Matching format detected - not using preprocessor, using memcpy instead.");
}
return 0;
}
assert(coder->param_image.comp_count == 3);
if (coder->param.color_space_internal == GPUJPEG_NONE) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_NONE>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_RGB) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_RGB>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_YCBCR_BT601>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT709) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_YCBCR_BT709>(coder);
}
if ( coder->preprocessor == NULL ) {
return -1;
}
return 0;
}
int
gpujpeg_preprocessor_encode_interlaced(struct gpujpeg_encoder * encoder)
{
struct gpujpeg_coder* coder = &encoder->coder;
assert(coder->param_image.comp_count == 3);
cudaMemsetAsync(coder->d_data, 0, coder->data_size * sizeof(uint8_t), encoder->stream);
gpujpeg_cuda_check_error("Preprocessor memset failed", return -1);
// Select kernel
gpujpeg_preprocessor_encode_kernel kernel = (gpujpeg_preprocessor_encode_kernel) coder->preprocessor;
assert(kernel != NULL);
int image_width = coder->param_image.width;
int image_height = coder->param_image.height;
    // When loading 4:2:2 data with an odd width, the stored data in fact has an even width, so round it up
    // (at least the ImageMagick convert tool generates the data stream this way)
if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
image_width = (coder->param_image.width + 1) & ~1;
}
// Prepare unit size
/// @todo this stuff doesn't look correct - we multiply by unitSize and then divide by it
int unitSize = gpujpeg_pixel_format_get_unit_size(coder->param_image.pixel_format);
if (unitSize == 0) {
unitSize = 1;
}
// Prepare kernel
int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
dim3 threads (RGB_8BIT_THREADS);
dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
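    // Fold an oversized one-dimensional grid into the y dimension; the kernel linearises
    // the block index again as (blockIdx.y * gridDim.x + blockIdx.x), and its bounds check
    // on image_position discards the extra threads introduced by the rounding below.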
while ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
grid.y *= 2;
grid.x = gpujpeg_div_and_round_up(grid.x, 2);
}
// Decompose input image width for faster division using multiply-high and right shift
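    // The pair (width_div_mul, width_div_shift) encodes image_width as a multiplicative
    // inverse so the kernel can compute image_position / image_width with a multiply-high
    // and a shift, roughly ((uint64_t)image_position * width_div_mul) >> (32 + width_div_shift),
    // instead of a per-thread integer division; the exact scheme is whatever
    // gpujpeg_const_div_prepare()/gpujpeg_const_div_divide() implement.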
uint32_t width_div_mul, width_div_shift;
gpujpeg_const_div_prepare(image_width, width_div_mul, width_div_shift);
// Run kernel
struct gpujpeg_preprocessor_data data;
for ( int comp = 0; comp < 3; comp++ ) {
assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
data.comp[comp].d_data = coder->component[comp].d_data;
data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
data.comp[comp].data_width = coder->component[comp].data_width;
}
kernel<<<grid, threads, 0, encoder->stream>>>(
data,
coder->d_data_raw,
coder->d_data_raw + coder->data_raw_size,
image_width,
image_height,
width_div_mul,
width_div_shift
);
gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);
return 0;
}
/**
* Copies raw data from source image to GPU memory without running
* any preprocessor kernel.
*
* This assumes that the JPEG has same color space as input raw image and
* currently also that the component subsampling correspond between raw and
* JPEG (although at least different horizontal subsampling can be quite
* easily done).
*
* @invariant gpujpeg_preprocessor_encode_no_transform(coder) != 0
*/
static int
gpujpeg_preprocessor_encoder_copy_planar_data(struct gpujpeg_encoder * encoder)
{
struct gpujpeg_coder * coder = &encoder->coder;
assert(coder->param_image.comp_count == 1 ||
coder->param_image.comp_count == 3);
size_t data_raw_offset = 0;
bool needs_stride = false; // true if width is not divisible by MCU width
for (int i = 0; i < coder->param_image.comp_count; ++i) {
needs_stride = needs_stride || coder->component[i].width != coder->component[i].data_width;
}
if (!needs_stride) {
for (int i = 0; i < coder->param_image.comp_count; ++i) {
size_t component_size = coder->component[i].width * coder->component[i].height;
cudaMemcpyAsync(coder->component[i].d_data, coder->d_data_raw + data_raw_offset, component_size, cudaMemcpyDeviceToDevice, encoder->stream);
data_raw_offset += component_size;
}
} else {
for (int i = 0; i < coder->param_image.comp_count; ++i) {
int spitch = coder->component[i].width;
int dpitch = coder->component[i].data_width;
size_t component_size = spitch * coder->component[i].height;
cudaMemcpy2DAsync(coder->component[i].d_data, dpitch, coder->d_data_raw + data_raw_offset, spitch, spitch, coder->component[i].height, cudaMemcpyDeviceToDevice, encoder->stream);
data_raw_offset += component_size;
}
}
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_encode(struct gpujpeg_encoder * encoder)
{
struct gpujpeg_coder * coder = &encoder->coder;
if (coder->preprocessor) {
return gpujpeg_preprocessor_encode_interlaced(encoder);
} else {
return gpujpeg_preprocessor_encoder_copy_planar_data(encoder);
}
}
/* vi: set expandtab sw=4: */
__device__ half2 cH2(const half* ptr, int offset){
    // load two consecutive half values through the read-only data cache
    return __ldg( (const half2*)(ptr+offset) );
}
__forceinline__ __device__ unsigned lane_id()
{
unsigned ret;
asm volatile ("mov.u32 %0, %laneid;" : "=r"(ret));
return ret;
}
__forceinline__ __device__ unsigned warp_id()
{
// this is not equal to threadIdx.x / 32
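    // (%warpid is the hardware warp slot within the SM and is documented as volatile,
    // i.e. it may change if the warp is rescheduled, so it is intended for profiling and
    // debugging rather than for partitioning data between warps)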
unsigned ret;
asm volatile ("mov.u32 %0, %warpid;" : "=r"(ret));
return ret;
}
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
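    // note: the (blockDim.x >> 5) warp count below assumes blockDim.x is a multiple of 32;
    // with a partial last warp, its partial sum stored in shared[] would not be read back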
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
    val = warpReduceMax(val); // get max in each warp
    if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e30f;
val = warpReduceMax(val);
return val;
}
/********************** Kernels ************************/
template<typename T>
void __global__ prepareMatrixes(
T* q_buf, T* q_buf_bd, T* q_buf_ef,
T* k_buf, T* k_buf_bd, T* k_buf_ef,
const T* query_buf,
const T* key_buf, const T* k_head_r, const T* attr_seg_embed,
const T* attr_bias_Q_w, const T* attr_bias_Q_r, const T* attr_bias_Q_s,
const int off0, const int i_off1,const int o_off1, int off2){
int batch=blockIdx.y;
int seq=blockIdx.x;
int head_loc=threadIdx.x;
T tmp;
if(head_loc<i_off1){
int head=head_loc/off2;
int loc=head_loc%off2;
int index=batch*off0+seq*i_off1+head_loc;
tmp=query_buf[index];
int index_out=batch*off0+head*o_off1+seq*off2+loc;
//left matrix
q_buf[index_out]=tmp+__ldg(attr_bias_Q_w+head_loc);//tex2D(t_attr_bias_Q_w, loc, head);
q_buf_bd[index_out]=tmp+__ldg(attr_bias_Q_r+head_loc);//tex2D(t_attr_bias_Q_r, loc, head);
q_buf_ef[index_out]=tmp+__ldg(attr_bias_Q_s+head_loc);//tex2D(t_attr_bias_Q_s, loc, head);
//right matrix
k_buf[index_out]=key_buf[index];//ac
//bd
index=seq*i_off1+head_loc;//(seq, head_loc)
tmp=k_head_r[index];
index_out=index_out+batch*off0+head*o_off1;//(batch, head,seq,loc)
k_buf_bd[index_out]=tmp;
index=index+off0;//(seq+seq_len, head_loc)
tmp=k_head_r[index];
index_out=index_out+o_off1;//(batch, head,seq+seq_len,loc)
k_buf_bd[index_out]=tmp;
//ef
if(seq<=1){
index=seq*i_off1+head_loc;//(seq, head, loc)
tmp=attr_seg_embed[index];
index_out=batch*2*i_off1+(head*2+seq)*off2+loc;//(head,seq,loc)
k_buf_ef[index_out]=tmp;
}
}
}
template<>
void __global__ prepareMatrixes(
__half* q_buf, __half* q_buf_bd, __half* q_buf_ef,
__half* k_buf, __half* k_buf_bd, __half* k_buf_ef,
const __half* query_buf,
const __half* key_buf, const __half* k_head_r, const __half* attr_seg_embed,
const __half* attr_bias_Q_w, const __half* attr_bias_Q_r, const __half* attr_bias_Q_s,
const int off0, const int i_off1,const int o_off1, int off2){
int batch=blockIdx.y;
int seq=blockIdx.x;
int head_loc=threadIdx.x*2;
half2 tmp;
if(head_loc<i_off1){
int head=head_loc/off2;
int loc=head_loc%off2;
int h2_index=(batch*off0+seq*i_off1+head_loc)>>1;
tmp=((half2*)query_buf)[h2_index];
int h2_index_out=(batch*off0+head*o_off1+seq*off2+loc)>>1;
//left matrix
((half2*)q_buf)[h2_index_out]= __hadd2(tmp,cH2(attr_bias_Q_w, head_loc));
((half2*)q_buf_bd)[h2_index_out]=__hadd2(tmp,cH2(attr_bias_Q_r, head_loc));
((half2*)q_buf_ef)[h2_index_out]= __hadd2(tmp,cH2(attr_bias_Q_s, head_loc));
//right matrix
((half2*)k_buf)[h2_index_out]=((half2*)key_buf)[h2_index];//ac
//bd
h2_index=(seq*i_off1+head_loc)>>1;//(seq, head_loc)
tmp=((half2*)k_head_r)[h2_index];
h2_index_out=(batch*off0*2+head*o_off1*2+seq*off2+loc)>>1;//(batch, head,seq,loc)
((half2*)k_buf_bd)[h2_index_out]=tmp;
h2_index=(seq*i_off1+head_loc+off0)>>1;//(seq+seq_len, head_loc)
tmp=((half2*)k_head_r)[h2_index];
h2_index_out=(batch*off0*2+(head*2+1)*o_off1+seq*off2+loc)>>1;//(batch, head,seq+seq_len,loc)
((half2*)k_buf_bd)[h2_index_out]=tmp;
//ef
if(seq<=1){
h2_index=(seq*i_off1+head_loc)>>1;//(seq, head, loc)
tmp=((half2*)attr_seg_embed)[h2_index];
h2_index_out=(batch*2*i_off1+(head*2+seq)*off2+loc)>>1;//(head,seq,loc)
((half2*)k_buf_ef)[h2_index_out]=tmp;
}
}
}
template<typename T>
void __global__
transpose102(T* dst, const T* src, const int off0, const int i_off1, const int o_off1, const int off2)
{
int x[4]={0};
x[0]=blockIdx.x;//[0,7]
x[1]=blockIdx.y;//[0,11]
x[2]=threadIdx.x;//[0,127]
x[3]=threadIdx.y;//[0,1]
int input_index=x[0]*off0
+x[1]*i_off1
+x[2]*off2+x[3];// [batch, 0, 1, 2]=[d0,d1,d2,d3]
int out_index=x[0]*off0
+x[2]*o_off1
+x[1]*off2+x[3];// [batch, 1, 0, 2]=[d0,d2,d1,d3]
dst[out_index]=src[input_index];
}
template<>
void __global__ transpose102(__half* dst, const __half* src,
const int off0, const int i_off1, const int o_off1, const int off2)
{
int x[4]={0};
x[0]=blockIdx.x;//[0,7]
x[1]=blockIdx.y;//[0,11]
x[2]=threadIdx.x;//[0,127]
int input_index=(x[0]*off0
+x[1]*i_off1
+x[2]*off2)>>1;// [batch, 0, 1, 2]=[d0,d1,d2,d3]
int out_index=(x[0]*off0
+x[2]*o_off1
+x[1]*off2)>>1;// [batch, 1, 0, 2]=[d0,d2,d1,d3]
((half2*)dst)[out_index]=((half2*)src)[input_index];
}
void __global__ transpose201(float* dst, const float* src,
const int off0, const int i_off1, const int head_num, const int o_off1, const int seq_len )
{
int batch=blockIdx.x;
int d0=blockIdx.y;
int d1=threadIdx.x;
extern __shared__ float sdata[];
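    // sdata is addressed with a row stride of (head_num + 1); the extra column is the usual
    // padding intended to reduce shared-memory bank conflicts on the transposed accesses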
int i=0;
//Read data into shared memory
int index=batch*off0+d0*i_off1;//d1*i_off2+d2
int offset=d1;
src=src+index;
int row=offset/head_num;
int col=offset%head_num;
for(i=0;i<head_num;i++){
sdata[row*(head_num+1)+col]=src[offset];
offset+=seq_len;
row=offset/head_num;
col=offset%head_num;
}
__syncthreads();
index=batch*off0+d0*seq_len+d1;
offset=0;
dst=dst+index;
for(i=0;i<head_num;i++){
dst[offset]=sdata[d1*(head_num+1)+i];
offset+=o_off1;
}
}
void __global__ transpose201(__half* dst, const __half* src,
const int off0, const int i_off1, const int head_num, const int o_off1, const int seq_len )
{
int batch=blockIdx.x;
int d0=blockIdx.y;
int d1=threadIdx.x;
extern __shared__ float sdata[];
int i=0;
//Read data into shared memory
int index=batch*off0+d0*i_off1;//d1*i_off2+d2
int offset=d1;
src=src+index;
int row=offset/head_num;
int col=offset%head_num;
for(i=0;i<head_num;i++){
sdata[row*(head_num+1)+col]=__half2float(src[offset]);
offset+=seq_len;
row=offset/head_num;
col=offset%head_num;
}
__syncthreads();
index=batch*off0+d0*seq_len+d1;
offset=0;
dst=dst+index;
for(i=0;i<head_num;i++){
dst[offset]=__float2half(sdata[d1*(head_num+1)+i]);
offset+=o_off1;
}
}
/*dim3 grid_shift(batch_size, head_num, seq_len);
dim3 block_shift(seq_len*2);
int off0=head_num*seq_len*seq_len;
int off1=seq_len*seq_len; */
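/* relShiftBd appears to implement the Transformer-XL/XLNet style "relative shift":
   the flattened [seq_len, 2*seq_len] BD score matrix (with the first seq_len entries of
   row 0 skipped) is reinterpreted with a row stride of (2*seq_len - 1), and only the
   first seq_len columns of the reinterpreted matrix are written to the output. */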
template<typename T>
void __global__ relShiftBd(T* outMatrix, const T* inputMatrix, const int off0, const int off1, const int seq_len){
int batch=blockIdx.x;//[0,7]
int head=blockIdx.y;//[0,11]
int row=blockIdx.z;//[0,127]
int col=threadIdx.x; //[0,255]
int input_index=(batch*off0+head*off1+row*seq_len)*2+col;
if (col>=seq_len||row!=0){
T idata=inputMatrix[input_index];
//int tmp_index=row*(2*seq_len-1)+row+col-seq_len;
int tmp_index=row*2*seq_len+col-seq_len;
int out_row=tmp_index/(seq_len*2-1);
int out_col=tmp_index%(seq_len*2-1);
if(out_col<seq_len){
int out_index=batch*off0+ head*off1+out_row*seq_len+out_col;
outMatrix[out_index]=idata;
}
}
}
/*int threads=512;
seq_dim1=threads/seq_len
seq_dim2=seq_len/dimx
dim3 grid_rel(batch_size, head_num, seq_dim2);
dim3 block_rel(seq_dim1, seq_len);*/
template<>
void __global__ relShiftBd(__half* outMatrix, const __half* inputMatrix, const int off0, const int off1, const int seq_len){
int batch=blockIdx.x;//[0,7]
int head=blockIdx.y;//[0,11]
int row=blockIdx.z*blockDim.x+threadIdx.x;//[0,127]
int col=threadIdx.y*2; //[0,255]
int input_index=(batch*off0+head*off1+row*seq_len)*2+col;
int out_index;
int out_row;
int out_col;
int tmp_index;
half2 idata;
if (col>=seq_len||row!=0){
idata=((half2*)inputMatrix)[input_index>>1];
//int tmp_index=row*(2*seq_len-1)+row+col-seq_len;
tmp_index=row*2*seq_len+col-seq_len;
out_row=tmp_index/(seq_len*2-1);
out_col=tmp_index%(seq_len*2-1);
if(out_col<seq_len){
out_index=(batch*off0+head*off1+out_row*seq_len+out_col);
outMatrix[out_index]=__low2half(idata);
}
tmp_index+=1;
out_row=tmp_index/(seq_len*2-1);
out_col=tmp_index%(seq_len*2-1);
if(out_col<seq_len){
out_index=(batch*off0+head*off1+out_row*seq_len+out_col);
outMatrix[out_index]=__high2half(idata);
}
}
}
/*dim3 grid_score(batch_size,head_num,seq_len);
dim3 block_score(next_pow2(seq_len));
int off0=head_num*seq_len*seq_len;
int off1=seq_len*seq_len;
float p=(1/(pow(size_per_head,0.5)));
int voff0=head_num*seq_len*size_per_head;
int v_o_off1=seq_len*size_per_head;
int voff2=size_per_head;
int v_i_off1=head_num*size_per_head;*/
template<typename T>
__global__
void calAttnScore_valueBuf(T* attn_score, const T* ac, const T* bd, const T* ef, const T* attn_mask,
const int off0, const int off1,const int seq_len, const float p,
T* value_buf_trans, const T* value_buf,
const int voff0, const int v_i_off1,
const int v_o_off1, const int voff2)
{
int batch=blockIdx.x;
int head=blockIdx.y;
int seq1=blockIdx.z;
int seq2=threadIdx.x;
int offset=batch*off0+head*off1+seq1*seq_len;
int index=offset+seq2;
int out_index;
T score;
T mask;
if(seq2<seq_len){
score=ac[index]+bd[index]+ef[index];
score=score*p;
out_index=batch*off1+seq1*seq_len+seq2;
mask=attn_mask[out_index]*(-1e30);
score=score+mask;
}
//softmax(attn_score+offset,seq_len, seq2);
__shared__ float s_sum, s_max;
float tmp = seq2 < seq_len? score : -1e30f;
float max_val = blockReduceMax<float>(tmp);
if(seq2 == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = seq2 < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
__syncthreads();
if(seq2 == 0)
{
s_sum = sum_val ;
}
__syncthreads();
if(seq2 < seq_len){
attn_score[index] = (T)(qk_tmp / s_sum);
}
//end softmax
offset=seq2;
while(offset<voff2){
out_index=batch*voff0+head*v_o_off1+seq1*voff2+offset;
index=batch*voff0+seq1*v_i_off1+head*voff2+offset;
value_buf_trans[out_index]=value_buf[index];
offset+=seq_len;
}
}
void __global__ calAttnScore_valueBuf_small(__half* attn_score, const __half* ac,
const __half* bd, const __half* ef, const __half* attn_mask,
const int off0, const int off1,const int seq_len, int n_seq1,const float p,
__half* value_buf_trans, const __half* value_buf,
const int voff0, const int v_i_off1, const int v_o_off1, const int voff2)
{
int lid=lane_id();
int tid=threadIdx.x;
int wid=tid/32;
int seq2=lid<<1;
int batch=blockIdx.x;
int head=blockIdx.y;
int seq1=blockIdx.z*n_seq1+wid;
int offset=batch*off0+head*off1+seq1*seq_len;
int index=(offset+seq2)>>1;
int out_index;
float2 tmp1, tmp2;
// Data prepare section
if(seq2<seq_len){
tmp1=__half22float2(((half2*)ac)[index]);
tmp2=__half22float2(((half2*)bd)[index]);
tmp1.x+=tmp2.x;
tmp1.y+=tmp2.y;
//tmp1=__hadd2(tmp1,tmp2);
tmp2=__half22float2(((half2*)ef)[index]);
tmp1.x+=tmp2.x;
tmp1.y+=tmp2.y;
//half2 score=__hadd2(tmp1, tmp2);
tmp1.x=tmp1.x*p;
tmp1.y=tmp1.y*p;
out_index=(batch*off1+seq1*seq_len+seq2)>>1;
tmp2=__half22float2(((half2*)attn_mask)[out_index]);
tmp1.x=tmp1.x+-1e30*tmp2.x;
tmp1.y=tmp1.y+-1e30*tmp2.y;
}else{
tmp1.x=tmp1.y=-1e31f;
}
//Softmax section
float tmp=tmp1.x>tmp1.y? tmp1.x:tmp1.y;
for(int mask = 16; mask > 0; mask >>= 1){
tmp=max(tmp,__shfl_xor_sync(FINAL_MASK, tmp, mask, 32));
}
tmp= __shfl_sync(FINAL_MASK, tmp, 0);
///normalize the input
tmp1.x = seq2 < seq_len? __expf((float)(tmp1.x - tmp)) : 0.0f;
tmp1.y =seq2 < seq_len? __expf((float)(tmp1.y - tmp)) : 0.0f;
tmp=tmp1.x+tmp1.y;
/// get sum of the normalized value
for(int mask = 16; mask > 0; mask >>= 1){
tmp=tmp+__shfl_xor_sync(FINAL_MASK, tmp, mask, 32);
}
tmp= __shfl_sync(FINAL_MASK, tmp, 0);
/// set the value
if(seq2<seq_len){
tmp1.x=tmp1.x/tmp;
tmp1.y=tmp1.y/tmp;
((half2*)attn_score)[index]=__float22half2_rn(tmp1);
}
// value_buf section
offset=seq2;
while(offset<voff2){
index=(batch*voff0+seq1*v_i_off1+head*voff2+offset)>>1;
half2 v=((half2*)value_buf)[index];
out_index=(batch*voff0+head*v_o_off1+seq1*voff2+offset)>>1;
((half2*)value_buf_trans)[out_index]=v;
offset+=seq_len;
}
}
void __global__ calAttnScore_valueBuf_large(__half* attn_score, const __half* ac,
const __half* bd, const __half* ef, const __half* attn_mask,
const int off0, const int off1,const int seq_len, const float p,
__half* value_buf_trans, const __half* value_buf,
const int voff0, const int v_i_off1, const int v_o_off1, const int voff2)
{
int batch=blockIdx.x;
int head=blockIdx.y;
int seq1=blockIdx.z;
int lid=lane_id();
int tid=threadIdx.x;
int wid=tid/32;
int seq2=tid<<1;
int offset=batch*off0+head*off1+seq1*seq_len;
int index=(offset+seq2)>>1;
int out_index;
float2 tmp1, tmp2;
__shared__ float sdata[32];
__shared__ float s_max;
__shared__ float s_sum;
// Data prepare section
if(seq2<seq_len){
tmp1=__half22float2(((half2*)ac)[index]);
tmp2=__half22float2(((half2*)bd)[index]);
tmp1.x+=tmp2.x;
tmp1.y+=tmp2.y;
//tmp1=__hadd2(tmp1,tmp2);
tmp2=__half22float2(((half2*)ef)[index]);
tmp1.x+=tmp2.x;
tmp1.y+=tmp2.y;
//half2 score=__hadd2(tmp1, tmp2);
tmp1.x=tmp1.x*p;
tmp1.y=tmp1.y*p;
out_index=(batch*off1+seq1*seq_len+seq2)>>1;
tmp2=__half22float2(((half2*)attn_mask)[out_index]);
tmp1.x=tmp1.x+-1e30*tmp2.x;
tmp1.y=tmp1.y+-1e30*tmp2.y;
}else{
tmp1.x=tmp1.y=-1e30f;
}
//Softmax section
float tmp=tmp1.x>tmp1.y? tmp1.x:tmp1.y;
for(int mask = 16; mask > 0; mask/=2){
tmp=max(tmp,__shfl_xor_sync(FINAL_MASK, tmp, mask, 32));
}
if(wid==0){
sdata[lid]=-1e30f;
}
__syncthreads();
if(lid==0){
sdata[wid]=tmp;
}
__syncthreads();
if(wid==0){
tmp=sdata[lid];
for(int mask = 16; mask > 0; mask /=2){
tmp=max(tmp,__shfl_xor_sync(FINAL_MASK, tmp, mask, 32));
}
}
if(tid==0){
s_max=tmp;
}
__syncthreads();
///normalize the input
tmp1.x = seq2 < seq_len ? __expf((float)(tmp1.x - s_max)) : 0.0f;
tmp1.y =seq2 < seq_len ? __expf((float)(tmp1.y - s_max)) : 0.0f;
tmp=tmp1.x+tmp1.y;
/// get sum of the normalized value
for(int mask = 16; mask > 0; mask /=2){
tmp=tmp+__shfl_xor_sync(FINAL_MASK, tmp, mask, 32);
}
if(wid==0){
sdata[lid]=0;
}
__syncthreads();
if(lid==0){
sdata[wid]=tmp;
}
__syncthreads();
if(wid==0){
tmp=sdata[tid];
for(int mask = 16; mask > 0; mask/=2){
tmp=tmp+__shfl_xor_sync(FINAL_MASK, tmp, mask, 32);
}
}
if(tid==0){
s_sum=tmp;
}
__syncthreads();
/// set the value
if(seq2<seq_len){
tmp1.x=tmp1.x/s_sum;
tmp1.y=tmp1.y/s_sum;
((half2*)attn_score)[index]=__float22half2_rn(tmp1);
}
// value_buf section
offset=seq2;
while(offset<voff2){
index=(batch*voff0+seq1*v_i_off1+head*voff2+offset)>>1;
half2 v=((half2*)value_buf)[index];
out_index=(batch*voff0+head*v_o_off1+seq1*voff2+offset)>>1;
((half2*)value_buf_trans)[out_index]=v;
offset+=seq_len;
}
}
//dim3 grid_trans_v(batch_size,seq_len, head_num);
//dim3 block_trans_v(size_per_head);
template<typename T>
__global__
void transpose102_v2(T* dst, const T* src, const int off0, const int i_off1, const int o_off1, const int off2)
{
int x[4]={0};
x[0]=blockIdx.x;
x[1]=threadIdx.x/off2;
x[2]=blockIdx.y;//[0,128] seq_len
x[3]=threadIdx.x%off2;//[0,31] size_per_head
T tmp;
if(x[3]<off2){
int input_index=x[0]*off0
+x[1]*i_off1
+x[2]*off2+x[3];// [batch, 0, 1, 2]=[d0,d1,d2,d3]
tmp=src[input_index];
int out_index=x[0]*off0
+x[2]*o_off1
+x[1]*off2+x[3];// [batch, 1, 0, 2]=[d0,d2,d1,d3]
dst[out_index]=tmp;
}
}
template<>
__global__
void transpose102_v2(__half* dst, const __half* src,
const int off0, const int i_off1, const int o_off1, const int off2)
{
int x[4]={0};
x[0]=blockIdx.x;//[0,7] batch_size
x[1]=threadIdx.x*2/off2;//head_num
x[2]=blockIdx.y;//seq_len
x[3]=threadIdx.x*2%off2;//[0,63] size_per_head
if(x[3]<off2){
half2 tmp;
int in_index=(x[0]*off0 +x[1]*i_off1+x[2]*off2+x[3])>>1;// [batch, 0, 1, 2]=[d0,d1,d2,d3]
tmp=((half2*)src)[in_index];
int out_index=(x[0]*off0 +x[2]*o_off1 +x[1]*off2+x[3])>>1;// [batch, 1, 0, 2]=[d0,d2,d1,d3]
((half2*)dst)[out_index]=tmp;
}
}
template <typename T>
__global__
void addBias_layerNorm(T* out, const T* input, const T* bias, const T* gamma, const T* beta, int m, int n, float epsilon)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = 0.0f;
int id=blockIdx.x * n + tid;
local_out += (float)(input[id]);
local_out +=(float)(__ldg(&bias[id]));
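    // note: bias is indexed with the per-element offset `id` here (and with `id` in the
    // half specialisation below as well), whereas addBias_layerNorm2 indexes its bias with
    // the per-column offset `tid`; as written, this kernel therefore reads the bias as if
    // it were laid out over the full m x n output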
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean));
if(threadIdx.x == 0)
s_variance = variance / n+epsilon;
__syncthreads();
out[id] =
(T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
template <>
__global__
void addBias_layerNorm(__half* out, const __half* input, const __half* bias,
const __half* gamma, const __half* beta, int m, int n, float epsilon)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
half2* out_ptr = (half2*)out;
const half2* input_ptr = (const half2*)input;
const half2* bias_ptr = (const half2*)bias;//blockIdx.x * n + i
const half2* gamma_ptr= (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
float local_out = 0.0f;
int id = (blockIdx.x * n + tid*2)>>1;
local_out_fp2 = __half22float2(__hadd2(input_ptr[id], __ldg(&bias_ptr[id])));
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
variance = blockReduceSum<float>(variance);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + epsilon);
__syncthreads();
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
out_ptr[id] = __float22half2_rn(local_out_fp2);
}
/*width=hidden_dim_ff;
height=seq_len;
dim3 block(1024);
dim3 grid(batch_size, seq_len);
gelu_bias_loop<<<grid, block, 0, stream>>>(output_fc1, output_fc1, attr_fc1_bias, hidden_dim_ff,seq_len); */
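// 0.79788456f below approximates sqrt(2/pi); the loop applies the usual tanh approximation
// of GELU, 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).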
template<typename T>
__global__ void gelu_bias_loop(T *src,T* bias, int width, int height) {
int batch =blockIdx.x;
int x = blockIdx.y;
int y = threadIdx.x;
if(x<height){
int index=batch*width*height+x*width;
float v_src;
float v_bias;
float v;
for(;y<width;y=y+blockDim.x){
v_bias=bias[y];
v_src=src[index+y];
v=v_src+v_bias;
src[index+y] = (T)(0.5f * v * (1.0f + tanhf(0.79788456f * (v + 0.044715f * v * v * v))));
}
}
}
template<>
__global__ void gelu_bias_loop(__half *src,__half* bias, int width, int height) {
int batch =blockIdx.x;
int x = blockIdx.y;
int y = threadIdx.x*2;
if(x<height){
int index=batch*width*height+x*width;
half2 v_src;
half2 v_bias;
half2 v;
float2 t;
for(;y<width;y=y+blockDim.x*2){
v_bias=((half2*)bias)[y>>1];
v_src=((half2*)src)[(index+y)>>1];
v=__hadd2(v_src,v_bias);
t=__half22float2(v);
t.x = (0.5f * t.x * (1.0f + tanhf(0.79788456f * (t.x + 0.044715f * t.x * t.x * t.x))));
t.y = (0.5f * t.y * (1.0f + tanhf(0.79788456f * (t.y + 0.044715f * t.y * t.y * t.y))));
((half2*)src)[(index+y)>>1]=__float22half2_rn(t);
}
}
}
template <typename T>
__global__
void addBias_layerNorm2(T* out, const T* input, const T* add, const T* bias,
const T* gamma, const T* beta, int m, int n, float epsilon)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = 0.0f;
int id=blockIdx.x * n + tid;
local_out += (float)(input[id]+add[id]+bias[tid]);
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean));
if(threadIdx.x == 0)
s_variance = variance / n+epsilon;
__syncthreads();
out[id] =
(T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
template <>
__global__
void addBias_layerNorm2(__half* out, const __half* input, const __half* add, const __half* bias,
const __half* gamma, const __half* beta, int m, int n, float epsilon)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
half2* out_ptr = (half2*)out;
const half2* input_ptr = (const half2*)input;
const half2* add_ptr = (const half2*)add;
const half2* bias_ptr = (const half2*)bias;//blockIdx.x * n + i
const half2* gamma_ptr= (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
float local_out = 0.0f;
int id = (blockIdx.x * n + tid*2)>>1;
half2 tmp=input_ptr[id];
local_out_fp2=__half22float2(tmp);
tmp=__ldg(&bias_ptr[tid]);
local_out_fp2.x+=__half22float2(tmp).x;
local_out_fp2.y+=__half22float2(tmp).y;
tmp=__ldg(&add_ptr[id]);
local_out_fp2.x+=__half22float2(tmp).x;
local_out_fp2.y+=__half22float2(tmp).y;
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
variance = blockReduceSum<float>(variance);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + epsilon);
__syncthreads();
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
out_ptr[id] = __float22half2_rn(local_out_fp2);
}
/*********************The explicit instantiation part***********************/
template void __global__ prepareMatrixes<float>(
float* q_buf, float* q_buf_bd, float* q_buf_ef,
float* k_buf, float* k_buf_bd, float* k_buf_ef,
const float* query_buf,
const float* key_buf, const float* k_head_r, const float* attr_seg_embed,
const float* attr_bias_Q_w, const float* attr_bias_Q_r, const float* attr_bias_Q_s,
const int off0, const int i_off1,const int o_off1, int off2);
template void __global__ transpose102<float>(float* dst, const float* src, const int off0,
const int i_off1, const int o_off1, const int off2);
template void __global__ transpose102<__half>(__half* dst, const __half* src, const int off0,
const int i_off1, const int o_off1, const int off2);
template void __global__ relShiftBd<float>(float* outMatrix, const float* inputMatrix,
const int off0, const int off1, const int seq_len);
template __global__ void calAttnScore_valueBuf<float>(float* attn_score, const float* ac,
const float* bd, const float* ef, const float* attn_mask,
const int off0, const int off1,const int seq_len, const float p,
float* value_buf_trans, const float* value_buf,
const int voff0, const int v_i_off1,
const int v_o_off1, const int voff2);
template __global__ void transpose102_v2<float>(float* dst, const float* src, const int off0,
const int i_off1, const int o_off1, const int off2);
template __global__ void addBias_layerNorm<float>(float* out, const float* input, const float* bias,
const float* gamma, const float* beta, int m, int n, float epsilon);
template __global__ void gelu_bias_loop<float>(float *src,float* bias, int width, int height);
template __global__ void addBias_layerNorm2<float>(float* out, const float* input, const float* add,
const float* bias,const float* gamma, const float* beta, int m, int n, float epsilon);
#include "utils.hpp"
#include "kernels.cu"
void benchmark(
complex_t *sigma_in,
complex_t *sigma_out,
complex_t *hamiltonian,
size_t size_sigma,
size_t size_hamiltonian,
complex_t *sigma_reference,
complex_t *sigma_reference_transformed ,
const int dim,
const int num, // global_work_size
const int kernel_id,
size_t vec_length,
decltype(&transform_matrices_aos_to_aosoa) transformation_sigma,
bool scale_hamiltonian,
decltype(&transform_matrix_aos_to_soa) transformation_hamiltonian)
{
initialise_hamiltonian(hamiltonian, dim);
if (scale_hamiltonian)
transform_matrix_scale_aos(hamiltonian, dim); // pre-scale hamiltonian
if (transformation_hamiltonian)
transformation_hamiltonian(hamiltonian, dim);
initialise_sigma(sigma_in, sigma_out, dim, num);
std::memcpy(sigma_reference_transformed, sigma_reference, size_sigma * sizeof(complex_t));
// transform memory layout if a transformation is specified
if (transformation_sigma) {
// transform reference for comparison
transformation_sigma(sigma_reference_transformed, dim, num, vec_length);
// transform sigma
transformation_sigma(sigma_in, dim, num, vec_length);
}
// extract the real and imag data
real_2_t* ham = allocate_aligned<real_2_t>(size_hamiltonian);
real_2_t* sin = allocate_aligned<real_2_t>(size_sigma);
real_2_t* sout = allocate_aligned<real_2_t>(size_sigma);
for (size_t i = 0; i < size_hamiltonian; i++) {
ham[i].x = hamiltonian[i].real();
ham[i].y = hamiltonian[i].imag();
}
for (size_t i = 0; i < size_sigma; i++) {
sin[i].x = sigma_in[i].real();
sin[i].y = sigma_in[i].imag();
}
for (size_t i = 0; i < size_sigma; i++) {
sout[i].x = sigma_out[i].real();
sout[i].y = sigma_out[i].imag();
}
// allocate device memory
real_2_t *d_hamiltonian;
real_2_t *d_sigma_in;
real_2_t *d_sigma_out;
hipMalloc((void**)&d_hamiltonian, sizeof(real_2_t) * size_hamiltonian);
hipMemcpy(d_hamiltonian, ham, sizeof(real_2_t) * size_hamiltonian,
hipMemcpyHostToDevice);
hipMalloc((void**)&d_sigma_in, sizeof(real_2_t) * size_sigma);
hipMemcpy(d_sigma_in, sin, sizeof(real_2_t) * size_sigma,
hipMemcpyHostToDevice);
hipMalloc((void**)&d_sigma_out, sizeof(real_2_t) * size_sigma);
// benchmark loop
for (size_t i = 0; i < NUM_ITERATIONS; ++i) {
// clear output
hipMemcpy(d_sigma_out, sout, sizeof(real_2_t) * size_sigma,
hipMemcpyHostToDevice);
// empty kernel
switch(kernel_id) {
case 0: {
dim3 k0_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k0_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_empty, dim3(k0_gws), dim3(k0_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// initial kernel
case 1: {
dim3 k1_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k1_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_init, dim3(k1_gws), dim3(k1_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// refactored initial kernel
case 2: {
dim3 k2_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k2_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_refactor, dim3(k2_gws), dim3(k2_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// refactored initial kernel with direct store
case 3: {
dim3 k3_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k3_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_refactor_direct_store, dim3(k3_gws), dim3(k3_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range
case 4: {
dim3 k4_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k4_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_naive, dim3(k4_gws), dim3(k4_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range and compile time constants
case 5: {
dim3 k5_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k5_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_naive_constants, dim3(k5_gws), dim3(k5_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range, compile time constants, and permuted loops with temporaries
case 6: {
dim3 k6_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k6_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_naive_constants_perm, dim3(k6_gws), dim3(k6_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 1D range and direct store
case 7: {
dim3 k7_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k7_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_naive_direct, dim3(k7_gws), dim3(k7_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 1D range, compile time constants, and direct store
case 8: {
dim3 k8_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k8_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_naive_constants_direct, dim3(k8_gws), dim3(k8_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 1D range, compile time constants, direct store, and permuted loops with temporaries
case 9: {
dim3 k9_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG));
dim3 k9_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_naive_constants_direct_perm, dim3(k9_gws), dim3(k9_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D-range
case 10: {
dim3 k10_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k10_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa, dim3(k10_gws), dim3(k10_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 2D-range and compile-time constants
case 11: {
dim3 k11_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k11_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_constants, dim3(k11_gws), dim3(k11_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D-range, compile-time constants, and permuted loops with temporaries
case 12: {
dim3 k12_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k12_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_constants_perm, dim3(k12_gws), dim3(k12_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with 2D range and direct store
case 13: {
dim3 k13_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k13_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_direct, dim3(k13_gws), dim3(k13_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// vectorised kernel with 2D range, compile-time constants, and direct store
case 14: {
dim3 k14_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k14_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_constants_direct, dim3(k14_gws), dim3(k14_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// vectorised kernel with compile-time constants, direct store, and permuted loops with temporaries
case 15: {
dim3 k15_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG);
dim3 k15_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG);
hipLaunchKernelGGL(comm_aosoa_constants_direct_perm, dim3(k15_gws), dim3(k15_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel
case 16: {
dim3 k16_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k16_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa, dim3(k16_gws), dim3(k16_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// manually vectorised kernel with compile-time constants
case 17: {
dim3 k17_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k17_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa_constants, dim3(k17_gws), dim3(k17_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile-time constants and permuted loops with temporaries
case 18: {
dim3 k18_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k18_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa_constants_perm, dim3(k18_gws), dim3(k18_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile-time constants and prefetch
case 19: {
dim3 k19_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k19_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa_constants_perm_prefetch, dim3(k19_gws), dim3(k19_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with direct store
case 20: {
dim3 k20_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k20_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa_direct, dim3(k20_gws), dim3(k20_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, dim);
break;
}
// manually vectorised kernel with compile time constants and direct store
case 21: {
dim3 k21_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k21_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa_constants_direct, dim3(k21_gws), dim3(k21_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile time constants, direct store, and prefetch
case 22: {
dim3 k22_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k22_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa_constants_direct_prefetch, dim3(k22_gws), dim3(k22_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// manually vectorised kernel with compile time constants, direct store, and permuted loops with temporaries
case 23: {
dim3 k23_gws (num / (VEC_LENGTH * VEC_LENGTH));
dim3 k23_lws (VEC_LENGTH);
hipLaunchKernelGGL(comm_manual_aosoa_constants_direct_perm, dim3(k23_gws), dim3(k23_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian);
break;
}
// final GPGPU kernel optimised for an Nvidia GPU
case 24: {
size_t block_dim_x = (dim * dim + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE;
size_t block_dim_y = NUM_SUB_GROUPS;
dim3 k24_gws (num / (block_dim_y * CHUNK_SIZE), 1);
dim3 k24_lws (block_dim_x, block_dim_y);
hipLaunchKernelGGL(final_gpu_kernel, dim3(k24_gws), dim3(k24_lws), 0, 0, d_sigma_in, d_sigma_out, d_hamiltonian, num);
break;
}
default: std::cerr << "ERROR: **** benchmark kernel unavailable **** \n";
}
}
real_t deviation = 0;
// the deviation of an empty kernel does not make sense
if (kernel_id > 0) {
hipMemcpy(sout, d_sigma_out, sizeof(real_2_t) * size_sigma, hipMemcpyDeviceToHost);
for (size_t i = 0; i < size_sigma; i++) {
sigma_out[i] = {sout[i].x, sout[i].y};
}
// measure the differences between the CPU and GPU results
deviation = compare_matrices(sigma_out, sigma_reference_transformed, dim, num);
std::cout << "Deviation of kernel " << look_up(kernel_id) << ": " << deviation << std::endl;
}
hipFree(d_hamiltonian);
hipFree(d_sigma_in);
hipFree(d_sigma_out);
free(sin);
free(sout);
free(ham);
}
int main(int argc, char* argv[])
{
// debugging
print_compile_config(std::cout);
// constants
const size_t dim = DIM;
const size_t num = NUM;
// allocate host memory
size_t size_hamiltonian = dim * dim;
size_t size_sigma = size_hamiltonian * num;
size_t size_sigma_byte = sizeof(complex_t) * size_sigma;
complex_t* hamiltonian = allocate_aligned<complex_t>(size_hamiltonian);
complex_t* sigma_in = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_out = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_reference = allocate_aligned<complex_t>(size_sigma);
complex_t* sigma_reference_transformed = allocate_aligned<complex_t>(size_sigma);
// perform reference computation for correctness analysis
initialise_hamiltonian(hamiltonian, dim);
initialise_sigma(sigma_in, sigma_out, dim, num);
commutator_reference(sigma_in, sigma_out, hamiltonian, dim, num);
// copy reference results
std::memcpy(sigma_reference, sigma_out, size_sigma_byte);
// The macro "BENCHMARK(...)" is defined in utils.hpp
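    // A plausible expansion (the real definition lives in utils.hpp and may differ) is
    // something along the lines of:
    //   #define BENCHMARK(ID, VLEN, TRANSFORM, SCALE, TRANSFORM_HAM) \
    //     benchmark(sigma_in, sigma_out, hamiltonian, size_sigma, size_hamiltonian, \
    //               sigma_reference, sigma_reference_transformed, dim, num, \
    //               ID, VLEN, TRANSFORM, SCALE, TRANSFORM_HAM)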
BENCHMARK(0, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
BENCHMARK(1, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
BENCHMARK(2, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM);
BENCHMARK(3, VEC_LENGTH, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM);
BENCHMARK(4, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(5, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(6, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(7, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(8, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(9, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(10, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(11, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(12, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(13, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(14, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(15, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(16, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(17, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(18, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(19, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(20, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(21, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(22, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(23, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa);
BENCHMARK(24, 2, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM);
free(hamiltonian);
free(sigma_in);
free(sigma_out);
free(sigma_reference);
free(sigma_reference_transformed);
return 0;
}
#include "join_common_utils.hpp"
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <cub/cub.cuh>
namespace cudf {
namespace detail {
/**
* @brief Remaps a hash value to a new value if it is equal to the specified sentinel value.
*
* @param hash The hash value to potentially remap
* @param sentinel The reserved value
*/
template <typename H, typename S>
constexpr auto remap_sentinel_hash(H hash, S sentinel)
{
// Arbitrarily choose hash - 1
return (hash == sentinel) ? (hash - 1) : hash;
}
/**
* @brief Device functor to create a pair of hash value and index for a given row.
*/
class make_pair_function {
public:
CUDF_HOST_DEVICE make_pair_function(row_hash const& hash,
hash_value_type const empty_key_sentinel)
: _hash{hash}, _empty_key_sentinel{empty_key_sentinel}
{
}
__device__ __forceinline__ cudf::detail::pair_type operator()(size_type i) const noexcept
{
// Compute the hash value of row `i`
auto row_hash_value = remap_sentinel_hash(_hash(i), _empty_key_sentinel);
return cuco::make_pair(row_hash_value, i);
}
private:
row_hash _hash;
hash_value_type const _empty_key_sentinel;
};
/**
* @brief Device functor to determine if a row is valid.
*/
class row_is_valid {
public:
row_is_valid(bitmask_type const* row_bitmask) : _row_bitmask{row_bitmask} {}
__device__ __inline__ bool operator()(const size_type& i) const noexcept
{
return cudf::bit_is_set(_row_bitmask, i);
}
private:
bitmask_type const* _row_bitmask;
};
/**
* @brief Device functor to determine if two pairs are identical.
*
* This equality comparator is designed for use with cuco::static_multimap's
* pair* APIs, which will compare equality based on comparing (key, value)
* pairs. In the context of joins, these pairs are of the form
 * (row_hash, row_id). A hash probe hit indicates that the hash of a probe row is
 * equal to the hash of some row in the multimap, at which point we need an
* equality comparator that will check whether the contents of the rows are
* identical. This comparator does so by verifying key equality (i.e. that
* probe_row_hash == build_row_hash) and then using a row_equality_comparator
* to compare the contents of the row indices that are stored as the payload in
* the hash map.
*/
class pair_equality {
public:
pair_equality(table_device_view lhs,
table_device_view rhs,
nullate::DYNAMIC has_nulls,
null_equality nulls_are_equal = null_equality::EQUAL)
: _check_row_equality{has_nulls, lhs, rhs, nulls_are_equal}
{
}
__device__ __forceinline__ bool operator()(const pair_type& lhs,
const pair_type& rhs) const noexcept
{
return lhs.first == rhs.first and _check_row_equality(rhs.second, lhs.second);
}
private:
row_equality _check_row_equality;
};
/**
* @brief Computes the trivial left join operation for the case when the
* right table is empty.
*
* In this case all the valid indices of the left table
* are returned with their corresponding right indices being set to
* JoinNoneValue, i.e. -1.
*
* @param left Table of left columns to join
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the result
*
* @return Join output indices vector pair
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
get_trivial_left_join_indices(
table_view const& left,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Builds the hash table based on the given `build_table`.
*
* @tparam MultimapType The type of the hash table
*
 * @param build Table of columns used to build the join hash table.
 * @param hash_table The hash table to be built.
 * @param nulls_equal Flag denoting whether nulls are considered equal.
* @param bitmask Bitmask to denote whether a row is valid.
* @param stream CUDA stream used for device memory operations and kernel launches.
*
*/
template <typename MultimapType>
void build_join_hash_table(cudf::table_view const& build,
MultimapType& hash_table,
null_equality const nulls_equal,
[[maybe_unused]] bitmask_type const* bitmask,
rmm::cuda_stream_view stream)
{
auto build_table_ptr = cudf::table_device_view::create(build, stream);
CUDF_EXPECTS(0 != build_table_ptr->num_columns(), "Selected build dataset is empty");
CUDF_EXPECTS(0 != build_table_ptr->num_rows(), "Build side table has no rows");
row_hash hash_build{nullate::DYNAMIC{cudf::has_nulls(build)}, *build_table_ptr};
auto const empty_key_sentinel = hash_table.get_empty_key_sentinel();
make_pair_function pair_func{hash_build, empty_key_sentinel};
auto iter = cudf::detail::make_counting_transform_iterator(0, pair_func);
size_type const build_table_num_rows{build_table_ptr->num_rows()};
if (nulls_equal == cudf::null_equality::EQUAL or (not nullable(build))) {
hash_table.insert(iter, iter + build_table_num_rows, stream.value());
} else {
thrust::counting_iterator<size_type> stencil(0);
row_is_valid pred{bitmask};
// insert valid rows
hash_table.insert_if(iter, iter + build_table_num_rows, stencil, pred, stream.value());
}
}
// Convenient alias for a pair of unique pointers to device uvectors.
using VectorPair = std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>;
/**
* @brief Takes two pairs of vectors and returns a single pair where the first
* element is a vector made from concatenating the first elements of both input
* pairs and the second element is a vector made from concatenating the second
* elements of both input pairs.
*
* This function's primary use is for computing the indices of a full join by
* first performing a left join, then separately getting the complementary
* right join indices, then finally calling this function to concatenate the
* results. In this case, each input VectorPair contains the left and right
* indices from a join.
*
 * Note that this is a destructive operation: at least one of a or b will be
 * invalidated (moved from). Calling code should assume that neither input
 * VectorPair is valid after this function executes.
*
* @param a The first pair of vectors.
* @param b The second pair of vectors.
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return A pair of vectors containing the concatenated output.
*/
VectorPair concatenate_vector_pairs(VectorPair& a, VectorPair& b, rmm::cuda_stream_view stream);
/**
* @brief Creates a table containing the complement of left join indices.
*
* This table has two columns. The first one is filled with JoinNoneValue(-1)
* and the second one contains values from 0 to right_table_row_count - 1
* excluding those found in the right_indices column.
*
* @param right_indices Vector of indices
* @param left_table_row_count Number of rows of left table
* @param right_table_row_count Number of rows of right table
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned vectors.
*
* @return Pair of vectors containing the left join indices complement
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
get_left_join_indices_complement(std::unique_ptr<rmm::device_uvector<size_type>>& right_indices,
size_type left_table_row_count,
size_type right_table_row_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Device functor to determine if an index is contained in a range.
*/
template <typename T>
struct valid_range {
T start, stop;
__host__ __device__ valid_range(const T begin, const T end) : start(begin), stop(end) {}
__host__ __device__ __forceinline__ bool operator()(const T index)
{
return ((index >= start) && (index < stop));
}
};
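// Illustrative usage sketch (added; not part of the original source): because
// valid_range is an ordinary predicate, it composes directly with Thrust
// algorithms, e.g. counting how many candidate indices fall inside a range.
// The range bounds and function name below are made-up values.
#if 0
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>

inline size_type count_in_range_example(size_type num_candidates)
{
  // Counts the values in [0, num_candidates) that lie inside [10, 20).
  return thrust::count_if(thrust::device,
                          thrust::counting_iterator<size_type>(0),
                          thrust::counting_iterator<size_type>(num_candidates),
                          valid_range<size_type>(10, 20));
}
#endif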
/**
* @brief Adds a pair of indices to the shared memory cache
*
* @param[in] first The first index in the pair
* @param[in] second The second index in the pair
* @param[in,out] current_idx_shared Pointer to shared index that determines
* where in the shared memory cache the pair will be written
 * @param[in] warp_id The ID of the warp of the calling thread
* @param[out] joined_shared_l Pointer to the shared memory cache for left indices
* @param[out] joined_shared_r Pointer to the shared memory cache for right indices
*/
__inline__ __device__ void add_pair_to_cache(const size_type first,
const size_type second,
size_type* current_idx_shared,
const int warp_id,
size_type* joined_shared_l,
size_type* joined_shared_r)
{
size_type my_current_idx{atomicAdd(current_idx_shared + warp_id, size_type(1))};
// it's guaranteed to fit into the shared cache
joined_shared_l[my_current_idx] = first;
joined_shared_r[my_current_idx] = second;
}
template <int num_warps, cudf::size_type output_cache_size>
__device__ void flush_output_cache(const unsigned int activemask,
const cudf::size_type max_size,
const int warp_id,
const int lane_id,
cudf::size_type* current_idx,
cudf::size_type current_idx_shared[num_warps],
size_type join_shared_l[num_warps][output_cache_size],
size_type join_shared_r[num_warps][output_cache_size],
size_type* join_output_l,
size_type* join_output_r)
{
// count how many active threads are participating here, which could be fewer than warp_size
int num_threads = __popc(activemask);
cudf::size_type output_offset = 0;
if (0 == lane_id) { output_offset = atomicAdd(current_idx, current_idx_shared[warp_id]); }
// No warp sync is necessary here because we are assuming that ShuffleIndex
// is internally using post-CUDA 9.0 synchronization-safe primitives
// (__shfl_sync instead of __shfl). __shfl alone is not guaranteed to be safe,
// because the compiler is not required to reconverge divergent branches
// before executing it.
output_offset = cub::ShuffleIndex<detail::warp_size>(output_offset, 0, activemask);
for (int shared_out_idx = lane_id; shared_out_idx < current_idx_shared[warp_id];
shared_out_idx += num_threads) {
cudf::size_type thread_offset = output_offset + shared_out_idx;
if (thread_offset < max_size) {
join_output_l[thread_offset] = join_shared_l[warp_id][shared_out_idx];
join_output_r[thread_offset] = join_shared_r[warp_id][shared_out_idx];
}
}
}
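// Skeletal usage sketch (added; hypothetical, not the real cudf probe kernel):
// a probing kernel accumulates matching (left, right) index pairs in per-warp
// shared-memory caches via add_pair_to_cache and drains them to global memory
// with flush_output_cache. The matching loop is elided; block_size,
// output_cache_size and probe_sketch are made-up names.
#if 0
template <int block_size, int output_cache_size>
__global__ void probe_sketch(cudf::size_type num_rows,
                             size_type* join_output_l,
                             size_type* join_output_r,
                             cudf::size_type* current_idx,
                             cudf::size_type const max_size)
{
  constexpr int num_warps = block_size / detail::warp_size;
  __shared__ cudf::size_type current_idx_shared[num_warps];
  __shared__ size_type join_shared_l[num_warps][output_cache_size];
  __shared__ size_type join_shared_r[num_warps][output_cache_size];

  int const warp_id = threadIdx.x / detail::warp_size;
  int const lane_id = threadIdx.x % detail::warp_size;
  if (lane_id == 0) { current_idx_shared[warp_id] = 0; }
  __syncwarp();

  unsigned int const activemask =
    __ballot_sync(0xffffffff, threadIdx.x + blockIdx.x * blockDim.x < num_rows);
  // For each probe row assigned to this thread and each hash-table match:
  //   add_pair_to_cache(left_idx, right_idx, current_idx_shared, warp_id,
  //                     join_shared_l[warp_id], join_shared_r[warp_id]);
  // Once the warp's cache is nearly full (and again after the loop), drain it:
  flush_output_cache<num_warps, output_cache_size>(activemask, max_size, warp_id, lane_id,
                                                   current_idx, current_idx_shared,
                                                   join_shared_l, join_shared_r,
                                                   join_output_l, join_output_r);
}
#endif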
} // namespace detail
} // namespace cudf
|
the_stack
|
extern "C" {
#include <ccv.h>
#include <ccv_internal.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <nnc/ccv_nnc_internal.h>
}
#include <nnc/gpu/ccv_nnc_compat.h>
#ifdef HAVE_CUDNN
enum {
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_IMPLICIT_GEMM, // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_GEMM, // CUDNN_CONVOLUTION_FWD_ALGO_GEMM
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_DIRECT, // CUDNN_CONVOLUTION_FWD_ALGO_DIRECT
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_FFT, // CUDNN_CONVOLUTION_FWD_ALGO_FFT
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_FFT_TILING, // CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_WINOGRAD, // CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_WINOGRAD_NONFUSED, // CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED
CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_COUNT
};
static int _ccv_nnc_conv_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size >= 2);
assert(output_size == 1);
cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context);
const ccv_nnc_cudnn_tensor_view_descriptor_t a = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)inputs[0]);
const ccv_nnc_cudnn_filter_descriptor_t w = ccv_nnc_cudnn_get_filter_descriptor(stream_context, (const ccv_nnc_tensor_t*)inputs[1]);
const ccv_nnc_cudnn_tensor_view_descriptor_t b = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)outputs[0]);
const ccv_nnc_cudnn_convolution_descriptor_t conv = ccv_nnc_cudnn_get_convolution_descriptor(stream_context, hint, inputs[1]->info.datatype);
cudnnSetConvolutionGroupCount(conv.descriptor, cmd.info.convolution.groups);
cudnnConvolutionFwdAlgo_t algo;
// Choose an algorithm
switch (cmd.algorithm)
{
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_IMPLICIT_GEMM:
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
break;
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_IMPLICIT_PRECOMP_GEMM:
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
break;
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_GEMM:
algo = CUDNN_CONVOLUTION_FWD_ALGO_GEMM;
break;
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_DIRECT:
algo = CUDNN_CONVOLUTION_FWD_ALGO_DIRECT;
break;
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_FFT:
algo = CUDNN_CONVOLUTION_FWD_ALGO_FFT;
break;
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_FFT_TILING:
algo = CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING;
break;
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_WINOGRAD:
algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD;
break;
case CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_WINOGRAD_NONFUSED:
algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED;
break;
default: // -1: Using preferences to find a suitable algorithm
#if CUDNN_VERSION >= 7000
int algo_count;
cudnnConvolutionFwdAlgoPerf_t perf;
CUDNN_ENFORCE(cudnnGetConvolutionForwardAlgorithm_v7(cudnn, a.descriptor, w.descriptor, conv.descriptor, b.descriptor, 1, &algo_count, &perf));
assert(algo_count > 0);
algo = perf.algo;
#else
CUDNN_ENFORCE(cudnnGetConvolutionForwardAlgorithm(cudnn, a.descriptor, w.descriptor, conv.descriptor, b.descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
#endif
}
size_t workspace_size = 0;
CUDNN_ENFORCE(cudnnGetConvolutionForwardWorkspaceSize(cudnn, a.descriptor, w.descriptor, conv.descriptor, b.descriptor, algo, &workspace_size));
void* workspace = 0;
// TODO: If error, return OOM
if (workspace_size)
workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size, CCV_TENSOR_GPU_MEMORY);
static const float one = 1, zero = 0;
CUDNN_ENFORCE(cudnnConvolutionForward(cudnn, &one, a.descriptor, a.data.u8, w.descriptor, w.data.u8, conv.descriptor, algo, workspace, workspace_size, &zero, b.descriptor, b.data.u8));
if (input_size > 2 && inputs[2])
{
const ccv_nnc_cudnn_tensor_view_descriptor_t bias = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)inputs[2]);
CUDNN_ENFORCE(cudnnAddTensor(cudnn, &one, bias.descriptor, bias.data.u8, &one, b.descriptor, b.data.u8));
ccv_nnc_cudnn_deinit_tensor_view_descriptor(bias);
}
ccv_nnc_cudnn_deinit_tensor_view_descriptor(a);
ccv_nnc_cudnn_deinit_filter_descriptor(w);
ccv_nnc_cudnn_deinit_tensor_view_descriptor(b);
ccv_nnc_cudnn_deinit_convolution_descriptor(conv);
return CCV_NNC_EXEC_SUCCESS;
}
static int _ccv_nnc_conv_forw_autotune(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size >= 2);
assert(output_size == 1);
cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context);
void* const workmem = ccv_nnc_stream_context_get_workspace(stream_context, max_workspace_size, CCV_TENSOR_GPU_MEMORY);
if (max_workspace_size && !workmem)
return -1;
const ccv_nnc_cudnn_tensor_view_descriptor_t a = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)inputs[0]);
const ccv_nnc_cudnn_filter_descriptor_t w = ccv_nnc_cudnn_get_filter_descriptor(stream_context, (const ccv_nnc_tensor_t*)inputs[1]);
const ccv_nnc_cudnn_tensor_view_descriptor_t b = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)outputs[0]);
const ccv_nnc_cudnn_convolution_descriptor_t conv = ccv_nnc_cudnn_get_convolution_descriptor(stream_context, hint, inputs[1]->info.datatype);
cudnnSetConvolutionGroupCount(conv.descriptor, cmd.info.convolution.groups);
int count = 0;
cudnnConvolutionFwdAlgoPerf_t perfs[CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_COUNT];
CUDNN_ENFORCE(cudnnFindConvolutionForwardAlgorithmEx(cudnn, a.descriptor, a.data.u8, w.descriptor, w.data.u8, conv.descriptor, b.descriptor, b.data.u8, CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_COUNT, &count, perfs, workmem, max_workspace_size));
int i;
cudnnConvolutionFwdAlgo_t algorithm = CUDNN_CONVOLUTION_FWD_ALGO_COUNT; // falls through to -1 below if no algorithm fits
for(i = 0; i < count; i++)
if ((size_t)perfs[i].memory <= max_workspace_size && perfs[i].status == CUDNN_STATUS_SUCCESS)
{
algorithm = perfs[i].algo;
break;
}
ccv_nnc_cudnn_deinit_tensor_view_descriptor(a);
ccv_nnc_cudnn_deinit_filter_descriptor(w);
ccv_nnc_cudnn_deinit_tensor_view_descriptor(b);
ccv_nnc_cudnn_deinit_convolution_descriptor(conv);
switch (algorithm)
{
case CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_IMPLICIT_GEMM;
case CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
case CUDNN_CONVOLUTION_FWD_ALGO_GEMM:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_GEMM;
case CUDNN_CONVOLUTION_FWD_ALGO_DIRECT:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_DIRECT;
case CUDNN_CONVOLUTION_FWD_ALGO_FFT:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_FFT;
case CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_FFT_TILING;
case CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_WINOGRAD;
case CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED:
return CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_WINOGRAD_NONFUSED;
case CUDNN_CONVOLUTION_FWD_ALGO_COUNT:
break;
}
return -1; // Return -1 if no suitable algorithm was found.
}
enum {
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_0, // CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_1, // CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_FFT, // CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_3, // CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_FFT_TILING, // CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_WINOGRAD, // CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_WINOGRAD_NONFUSED, // CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED
CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT
};
enum {
CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_0, // CUDNN_CONVOLUTION_BWD_DATA_ALGO_0
CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_1, // CUDNN_CONVOLUTION_BWD_DATA_ALGO_1
CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_FFT, // CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT
CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_FFT_TILING, // CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING
CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_WINOGRAD, // CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD
CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_WINOGRAD_NONFUSED, // CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_COUNT
};
static int _ccv_nnc_conv_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
// inputs: gradient, forw prop input, [w]
// outputs: [output gradient], weight updates, bias updates
assert((input_size >= 2 && output_size >= 2));
cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context);
const ccv_nnc_cudnn_tensor_view_descriptor_t g = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)inputs[0]);
const ccv_nnc_cudnn_tensor_view_descriptor_t a = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)inputs[1]);
const ccv_nnc_cudnn_filter_descriptor_t dw = ccv_nnc_cudnn_get_filter_descriptor(stream_context, (const ccv_nnc_tensor_t*)outputs[1]);
const ccv_nnc_cudnn_convolution_descriptor_t conv = ccv_nnc_cudnn_get_convolution_descriptor(stream_context, hint, outputs[1]->info.datatype);
cudnnSetConvolutionGroupCount(conv.descriptor, cmd.info.convolution.groups);
cudnnConvolutionBwdFilterAlgo_t filter_algo;
// Choose an algorithm
switch (cmd.algorithm % CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT)
{
case CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_0:
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_1:
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_FFT:
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_3:
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_FFT_TILING:
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_WINOGRAD:
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_WINOGRAD_NONFUSED:
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED;
break;
default: // -1: Using preferences to find a suitable algorithm
#if CUDNN_VERSION >= 7000
int filter_algo_count;
cudnnConvolutionBwdFilterAlgoPerf_t filter_perf;
CUDNN_ENFORCE(cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnn, a.descriptor, g.descriptor, conv.descriptor, dw.descriptor, 1, &filter_algo_count, &filter_perf));
assert(filter_algo_count > 0);
filter_algo = filter_perf.algo;
#else
CUDNN_ENFORCE(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn, a.descriptor, g.descriptor, conv.descriptor, dw.descriptor, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &filter_algo));
#endif
}
size_t workspace_size = 0;
CUDNN_ENFORCE(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn, a.descriptor, g.descriptor, conv.descriptor, dw.descriptor, filter_algo, &workspace_size));
void* workspace = 0;
// TODO: If error, return OOM
if (workspace_size)
workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size, CCV_TENSOR_GPU_MEMORY);
static const float one = 1, zero = 0;
if ((flags & CCV_NNC_ACCUMULATE_OUTPUT)) // accumulating results to bias and dw
{
CUDNN_ENFORCE(cudnnConvolutionBackwardFilter(cudnn, &one, a.descriptor, a.data.u8, g.descriptor, g.data.u8, conv.descriptor, filter_algo, workspace, workspace_size, &one, dw.descriptor, dw.data.u8));
if (output_size > 2 && outputs[2])
{
const ccv_nnc_cudnn_tensor_view_descriptor_t bias = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)outputs[2]);
CUDNN_ENFORCE(cudnnConvolutionBackwardBias(cudnn, &one, g.descriptor, g.data.u8, &one, bias.descriptor, bias.data.u8));
ccv_nnc_cudnn_deinit_tensor_view_descriptor(bias);
}
} else {
CUDNN_ENFORCE(cudnnConvolutionBackwardFilter(cudnn, &one, a.descriptor, a.data.u8, g.descriptor, g.data.u8, conv.descriptor, filter_algo, workspace, workspace_size, &zero, dw.descriptor, dw.data.u8));
if (output_size > 2 && outputs[2])
{
const ccv_nnc_cudnn_tensor_view_descriptor_t bias = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)outputs[2]);
CUDNN_ENFORCE(cudnnConvolutionBackwardBias(cudnn, &one, g.descriptor, g.data.u8, &zero, bias.descriptor, bias.data.u8));
ccv_nnc_cudnn_deinit_tensor_view_descriptor(bias);
}
}
// If h is available, we need to propagate the gradients back
if (outputs[0])
{
assert(input_size >= 3);
const ccv_nnc_cudnn_filter_descriptor_t w = ccv_nnc_cudnn_get_filter_descriptor(stream_context, (const ccv_nnc_tensor_t*)inputs[2]);
const ccv_nnc_cudnn_tensor_view_descriptor_t h = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)outputs[0]);
cudnnConvolutionBwdDataAlgo_t data_algo;
const int data_algorithm = cmd.algorithm < 0 ? -1 : cmd.algorithm / CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT;
switch (data_algorithm)
{
case CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_0:
data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_0;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_1:
data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_FFT:
data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_FFT_TILING:
data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_WINOGRAD:
data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD;
break;
case CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_WINOGRAD_NONFUSED:
data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED;
break;
default: // -1: Using preferences to find a suitable algorithm
#if CUDNN_VERSION >= 7000
int data_algo_count;
cudnnConvolutionBwdDataAlgoPerf_t data_perf;
CUDNN_ENFORCE(cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnn, w.descriptor, g.descriptor, conv.descriptor, h.descriptor, 1, &data_algo_count, &data_perf));
assert(data_algo_count > 0);
data_algo = data_perf.algo;
#else
CUDNN_ENFORCE(cudnnGetConvolutionBackwardDataAlgorithm(cudnn, w.descriptor, g.descriptor, conv.descriptor, h.descriptor, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &data_algo));
#endif
}
size_t workspace_size = 0;
CUDNN_ENFORCE(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn, w.descriptor, g.descriptor, conv.descriptor, h.descriptor, data_algo, &workspace_size));
void* workspace = 0;
// TODO: If error, return OOM
if (workspace_size)
workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size, CCV_TENSOR_GPU_MEMORY);
CUDNN_ENFORCE(cudnnConvolutionBackwardData(cudnn, &one, w.descriptor, w.data.u8, g.descriptor, g.data.u8, conv.descriptor, data_algo, workspace, workspace_size, &zero, h.descriptor, h.data.u8));
ccv_nnc_cudnn_deinit_filter_descriptor(w);
ccv_nnc_cudnn_deinit_tensor_view_descriptor(h);
}
ccv_nnc_cudnn_deinit_tensor_view_descriptor(a);
ccv_nnc_cudnn_deinit_tensor_view_descriptor(g);
ccv_nnc_cudnn_deinit_filter_descriptor(dw);
ccv_nnc_cudnn_deinit_convolution_descriptor(conv);
return CCV_NNC_EXEC_SUCCESS;
}
static int _ccv_nnc_conv_back_autotune(const ccv_nnc_cmd_t cmd, const size_t max_workspace_size, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
// inputs: gradient, forw prop input, w
// outputs: output gradient, weight updates, bias updates [unused]
assert(input_size >= 2 && output_size >= 2);
cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context);
void* const workmem = ccv_nnc_stream_context_get_workspace(stream_context, max_workspace_size, CCV_TENSOR_GPU_MEMORY);
if (max_workspace_size && !workmem)
return -1;
const ccv_nnc_cudnn_tensor_view_descriptor_t g = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)inputs[0]);
const ccv_nnc_cudnn_tensor_view_descriptor_t a = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)inputs[1]);
const ccv_nnc_cudnn_filter_descriptor_t dw = ccv_nnc_cudnn_get_filter_descriptor(stream_context, (const ccv_nnc_tensor_t*)outputs[1]);
const ccv_nnc_cudnn_convolution_descriptor_t conv = ccv_nnc_cudnn_get_convolution_descriptor(stream_context, hint, outputs[1]->info.datatype);
cudnnSetConvolutionGroupCount(conv.descriptor, cmd.info.convolution.groups);
int count = 0;
cudnnConvolutionBwdFilterAlgoPerf_t filter_perfs[CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT];
CUDNN_ENFORCE(cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnn, a.descriptor, a.data.u8, g.descriptor, g.data.u8, conv.descriptor, dw.descriptor, dw.data.u8, CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT, &count, filter_perfs, workmem, max_workspace_size));
int i;
cudnnConvolutionBwdFilterAlgo_t filter_algorithm = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT;
for(i = 0; i < count; i++)
if ((size_t)filter_perfs[i].memory <= max_workspace_size && filter_perfs[i].status == CUDNN_STATUS_SUCCESS)
{
filter_algorithm = filter_perfs[i].algo;
break;
}
cudnnConvolutionBwdDataAlgo_t data_algorithm = CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT;
if (outputs[0])
{
const ccv_nnc_cudnn_filter_descriptor_t w = ccv_nnc_cudnn_get_filter_descriptor(stream_context, (const ccv_nnc_tensor_t*)inputs[2]);
const ccv_nnc_cudnn_tensor_view_descriptor_t h = ccv_nnc_cudnn_get_tensor_view_descriptor(stream_context, (const ccv_nnc_tensor_view_t*)outputs[0]);
cudnnConvolutionBwdDataAlgoPerf_t data_perfs[CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_COUNT];
CUDNN_ENFORCE(cudnnFindConvolutionBackwardDataAlgorithmEx(cudnn, w.descriptor, w.data.u8, g.descriptor, g.data.u8, conv.descriptor, h.descriptor, h.data.u8, CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_COUNT, &count, data_perfs, workmem, max_workspace_size));
for(i = 0; i < count; i++)
if ((size_t)data_perfs[i].memory <= max_workspace_size && data_perfs[i].status == CUDNN_STATUS_SUCCESS)
{
data_algorithm = data_perfs[i].algo;
break;
}
ccv_nnc_cudnn_deinit_filter_descriptor(w);
ccv_nnc_cudnn_deinit_tensor_view_descriptor(h);
}
ccv_nnc_cudnn_deinit_tensor_view_descriptor(a);
ccv_nnc_cudnn_deinit_tensor_view_descriptor(g);
ccv_nnc_cudnn_deinit_filter_descriptor(dw);
ccv_nnc_cudnn_deinit_convolution_descriptor(conv);
int filter = -1, data = -1;
switch (filter_algorithm)
{
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0:
filter = CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_0;
break;
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1:
filter = CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_1;
break;
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT:
filter = CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_FFT;
break;
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3:
filter = CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_3;
break;
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING:
filter = CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_FFT_TILING;
break;
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD:
filter = CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_WINOGRAD;
break;
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED:
filter = CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_WINOGRAD_NONFUSED;
break;
case CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT:
break;
}
switch (data_algorithm)
{
case CUDNN_CONVOLUTION_BWD_DATA_ALGO_0:
data = CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_0;
break;
case CUDNN_CONVOLUTION_BWD_DATA_ALGO_1:
data = CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_1;
break;
case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT:
data = CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_FFT;
break;
case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING:
data = CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_FFT_TILING;
break;
case CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD:
data = CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_WINOGRAD;
break;
case CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED:
data = CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_WINOGRAD_NONFUSED;
break;
case CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT:
break;
}
return data * CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT + filter;
}
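// Worked example of the algorithm encoding used above (added for clarity, not
// part of the original source): the backward pass packs two choices into one
// integer as data * CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT + filter,
// where the filter-algorithm count is 7. For instance data = 3 (FFT_TILING)
// and filter = 1 (ALGO_1) encode to 3 * 7 + 1 = 22; _ccv_nnc_conv_back decodes
// it with 22 % 7 = 1 for the filter algorithm and 22 / 7 = 3 for the data
// algorithm, matching the switch statements in that function.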
#endif
REGISTER_COMMAND_BACKEND(CCV_NNC_CONVOLUTION_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDNN
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
registry->tensor_datatypes = CCV_32F | CCV_16F;
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = CCV_NNC_CMD_CUDNN_CONV_FWD_ALGO_COUNT;
registry->exec = _ccv_nnc_conv_forw;
registry->autotune = _ccv_nnc_conv_forw_autotune;
#endif
}
REGISTER_COMMAND_BACKEND(CCV_NNC_CONVOLUTION_BACKWARD, CCV_NNC_BACKEND_GPU_CUDNN)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDNN
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
registry->tensor_datatypes = CCV_32F | CCV_16F;
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = CCV_NNC_CMD_CUDNN_CONV_BWD_DATA_ALGO_COUNT * CCV_NNC_CMD_CUDNN_CONV_BWD_FILTER_ALGO_COUNT;
registry->exec = _ccv_nnc_conv_back;
registry->autotune = _ccv_nnc_conv_back_autotune;
#endif
}
|
the_stack
|
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace pyrlk
{
__constant__ int c_winSize_x;
__constant__ int c_winSize_y;
__constant__ int c_halfWin_x;
__constant__ int c_halfWin_y;
__constant__ int c_iters;
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_If(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_If4(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_Ib(false, cudaFilterModePoint, cudaAddressModeClamp);
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_Jf(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_Jf4(false, cudaFilterModeLinear, cudaAddressModeClamp);
template <int cn> struct Tex_I;
template <> struct Tex_I<1>
{
static __device__ __forceinline__ float read(float x, float y)
{
return tex2D(tex_If, x, y);
}
};
template <> struct Tex_I<4>
{
static __device__ __forceinline__ float4 read(float x, float y)
{
return tex2D(tex_If4, x, y);
}
};
template <int cn> struct Tex_J;
template <> struct Tex_J<1>
{
static __device__ __forceinline__ float read(float x, float y)
{
return tex2D(tex_Jf, x, y);
}
};
template <> struct Tex_J<4>
{
static __device__ __forceinline__ float4 read(float x, float y)
{
return tex2D(tex_Jf4, x, y);
}
};
__device__ __forceinline__ void accum(float& dst, float val)
{
dst += val;
}
__device__ __forceinline__ void accum(float& dst, const float4& val)
{
dst += val.x + val.y + val.z;
}
__device__ __forceinline__ float abs_(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ float4 abs_(const float4& a)
{
return abs(a);
}
template <int cn, int PATCH_X, int PATCH_Y, bool calcErr>
__global__ void sparseKernel(const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols)
{
#if __CUDA_ARCH__ <= 110
const int BLOCK_SIZE = 128;
#else
const int BLOCK_SIZE = 256;
#endif
__shared__ float smem1[BLOCK_SIZE];
__shared__ float smem2[BLOCK_SIZE];
__shared__ float smem3[BLOCK_SIZE];
const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
float2 prevPt = prevPts[blockIdx.x];
prevPt.x *= (1.0f / (1 << level));
prevPt.y *= (1.0f / (1 << level));
if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
prevPt.x -= c_halfWin_x;
prevPt.y -= c_halfWin_y;
// extract the patch from the first image, compute covariance matrix of derivatives
float A11 = 0;
float A12 = 0;
float A22 = 0;
typedef typename TypeVec<float, cn>::vec_type work_type;
work_type I_patch [PATCH_Y][PATCH_X];
work_type dIdx_patch[PATCH_Y][PATCH_X];
work_type dIdy_patch[PATCH_Y][PATCH_X];
for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i)
{
for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j)
{
float x = prevPt.x + xBase + 0.5f;
float y = prevPt.y + yBase + 0.5f;
I_patch[i][j] = Tex_I<cn>::read(x, y);
// Scharr Deriv
work_type dIdx = 3.0f * Tex_I<cn>::read(x+1, y-1) + 10.0f * Tex_I<cn>::read(x+1, y) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
(3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x-1, y) + 3.0f * Tex_I<cn>::read(x-1, y+1));
work_type dIdy = 3.0f * Tex_I<cn>::read(x-1, y+1) + 10.0f * Tex_I<cn>::read(x, y+1) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
(3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x, y-1) + 3.0f * Tex_I<cn>::read(x+1, y-1));
dIdx_patch[i][j] = dIdx;
dIdy_patch[i][j] = dIdy;
accum(A11, dIdx * dIdx);
accum(A12, dIdx * dIdy);
accum(A22, dIdy * dIdy);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = A11;
smem2[0] = A12;
smem3[0] = A22;
}
#endif
__syncthreads();
A11 = smem1[0];
A12 = smem2[0];
A22 = smem3[0];
float D = A11 * A22 - A12 * A12;
if (D < numeric_limits<float>::epsilon())
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
D = 1.f / D;
A11 *= D;
A12 *= D;
A22 *= D;
float2 nextPt = nextPts[blockIdx.x];
nextPt.x *= 2.f;
nextPt.y *= 2.f;
nextPt.x -= c_halfWin_x;
nextPt.y -= c_halfWin_y;
for (int k = 0; k < c_iters; ++k)
{
if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
float b1 = 0;
float b2 = 0;
for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = (J_val - I_val) * 32.0f;
accum(b1, diff * dIdx_patch[i][j]);
accum(b2, diff * dIdy_patch[i][j]);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = b1;
smem2[0] = b2;
}
#endif
__syncthreads();
b1 = smem1[0];
b2 = smem2[0];
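// Gauss-Newton / Lucas-Kanade update (clarifying note added to the original):
// with A11, A12, A22 already scaled by 1/det, the two lines below compute
// delta = -A^{-1} * b for A = [[A11, A12], [A12, A22]] and b = (b1, b2).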
float2 delta;
delta.x = A12 * b2 - A22 * b1;
delta.y = A12 * b1 - A11 * b2;
nextPt.x += delta.x;
nextPt.y += delta.y;
if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
break;
}
float errval = 0;
if (calcErr)
{
for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = J_val - I_val;
accum(errval, abs_(diff));
}
}
reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>());
}
if (tid == 0)
{
nextPt.x += c_halfWin_x;
nextPt.y += c_halfWin_y;
nextPts[blockIdx.x] = nextPt;
if (calcErr)
err[blockIdx.x] = static_cast<float>(errval) / (cn * c_winSize_x * c_winSize_y);
}
}
template <int cn, int PATCH_X, int PATCH_Y>
void sparse_caller(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream)
{
dim3 grid(ptcount);
if (level == 0 && err)
sparseKernel<cn, PATCH_X, PATCH_Y, true><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
else
sparseKernel<cn, PATCH_X, PATCH_Y, false><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <bool calcErr>
__global__ void denseKernel(PtrStepf u, PtrStepf v, const PtrStepf prevU, const PtrStepf prevV, PtrStepf err, const int rows, const int cols)
{
extern __shared__ int smem[];
const int patchWidth = blockDim.x + 2 * c_halfWin_x;
const int patchHeight = blockDim.y + 2 * c_halfWin_y;
int* I_patch = smem;
int* dIdx_patch = I_patch + patchWidth * patchHeight;
int* dIdy_patch = dIdx_patch + patchWidth * patchHeight;
const int xBase = blockIdx.x * blockDim.x;
const int yBase = blockIdx.y * blockDim.y;
for (int i = threadIdx.y; i < patchHeight; i += blockDim.y)
{
for (int j = threadIdx.x; j < patchWidth; j += blockDim.x)
{
float x = xBase - c_halfWin_x + j + 0.5f;
float y = yBase - c_halfWin_y + i + 0.5f;
I_patch[i * patchWidth + j] = tex2D(tex_Ib, x, y);
// Scharr Deriv
dIdx_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x+1, y-1) + 10 * tex2D(tex_Ib, x+1, y) + 3 * tex2D(tex_Ib, x+1, y+1) -
(3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x-1, y) + 3 * tex2D(tex_Ib, x-1, y+1));
dIdy_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x-1, y+1) + 10 * tex2D(tex_Ib, x, y+1) + 3 * tex2D(tex_Ib, x+1, y+1) -
(3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x, y-1) + 3 * tex2D(tex_Ib, x+1, y-1));
}
}
__syncthreads();
const int x = xBase + threadIdx.x;
const int y = yBase + threadIdx.y;
if (x >= cols || y >= rows)
return;
int A11i = 0;
int A12i = 0;
int A22i = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
A11i += dIdx * dIdx;
A12i += dIdx * dIdy;
A22i += dIdy * dIdy;
}
}
float A11 = A11i;
float A12 = A12i;
float A22 = A22i;
float D = A11 * A22 - A12 * A12;
if (D < numeric_limits<float>::epsilon())
{
if (calcErr)
err(y, x) = numeric_limits<float>::max();
return;
}
D = 1.f / D;
A11 *= D;
A12 *= D;
A22 *= D;
float2 nextPt;
nextPt.x = x + prevU(y/2, x/2) * 2.0f;
nextPt.y = y + prevV(y/2, x/2) * 2.0f;
for (int k = 0; k < c_iters; ++k)
{
if (nextPt.x < 0 || nextPt.x >= cols || nextPt.y < 0 || nextPt.y >= rows)
{
if (calcErr)
err(y, x) = numeric_limits<float>::max();
return;
}
int b1 = 0;
int b2 = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
int diff = (J - I) * 32;
int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
b1 += diff * dIdx;
b2 += diff * dIdy;
}
}
float2 delta;
delta.x = A12 * b2 - A22 * b1;
delta.y = A12 * b1 - A11 * b2;
nextPt.x += delta.x;
nextPt.y += delta.y;
if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
break;
}
u(y, x) = nextPt.x - x;
v(y, x) = nextPt.y - y;
if (calcErr)
{
int errval = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
errval += ::abs(J - I);
}
}
err(y, x) = static_cast<float>(errval) / (c_winSize_x * c_winSize_y);
}
}
void loadConstants(int2 winSize, int iters)
{
cudaSafeCall( cudaMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) );
int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) );
}
void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream)
{
typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream);
static const func_t funcs[5][5] =
{
{sparse_caller<1, 1, 1>, sparse_caller<1, 2, 1>, sparse_caller<1, 3, 1>, sparse_caller<1, 4, 1>, sparse_caller<1, 5, 1>},
{sparse_caller<1, 1, 2>, sparse_caller<1, 2, 2>, sparse_caller<1, 3, 2>, sparse_caller<1, 4, 2>, sparse_caller<1, 5, 2>},
{sparse_caller<1, 1, 3>, sparse_caller<1, 2, 3>, sparse_caller<1, 3, 3>, sparse_caller<1, 4, 3>, sparse_caller<1, 5, 3>},
{sparse_caller<1, 1, 4>, sparse_caller<1, 2, 4>, sparse_caller<1, 3, 4>, sparse_caller<1, 4, 4>, sparse_caller<1, 5, 4>},
{sparse_caller<1, 1, 5>, sparse_caller<1, 2, 5>, sparse_caller<1, 3, 5>, sparse_caller<1, 4, 5>, sparse_caller<1, 5, 5>}
};
bindTexture(&tex_If, I);
bindTexture(&tex_Jf, J);
funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
level, block, stream);
}
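// Illustrative host-side call order (added; hypothetical, not part of the
// original source): the constant-memory parameters must be uploaded with
// loadConstants before launching sparse1/sparse4/dense. The window size,
// iteration count, block and patch dimensions below are made-up values.
#if 0
void pyrlk_level_sketch(PtrStepSzf I, PtrStepSzf J,
                        const float2* prevPts, float2* nextPts,
                        uchar* status, float* err, int ptcount, int level,
                        cudaStream_t stream)
{
    const int2 winSize = make_int2(21, 21);
    loadConstants(winSize, /*iters=*/30);
    // One block per tracked point; the patch grid tiles the window with the block.
    dim3 block(8, 8);
    dim3 patch((winSize.x + block.x - 1) / block.x,
               (winSize.y + block.y - 1) / block.y);
    sparse1(I, J, prevPts, nextPts, status, err, ptcount, level, block, patch, stream);
}
#endif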
void sparse4(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream)
{
typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream);
static const func_t funcs[5][5] =
{
{sparse_caller<4, 1, 1>, sparse_caller<4, 2, 1>, sparse_caller<4, 3, 1>, sparse_caller<4, 4, 1>, sparse_caller<4, 5, 1>},
{sparse_caller<4, 1, 2>, sparse_caller<4, 2, 2>, sparse_caller<4, 3, 2>, sparse_caller<4, 4, 2>, sparse_caller<4, 5, 2>},
{sparse_caller<4, 1, 3>, sparse_caller<4, 2, 3>, sparse_caller<4, 3, 3>, sparse_caller<4, 4, 3>, sparse_caller<4, 5, 3>},
{sparse_caller<4, 1, 4>, sparse_caller<4, 2, 4>, sparse_caller<4, 3, 4>, sparse_caller<4, 4, 4>, sparse_caller<4, 5, 4>},
{sparse_caller<4, 1, 5>, sparse_caller<4, 2, 5>, sparse_caller<4, 3, 5>, sparse_caller<4, 4, 5>, sparse_caller<4, 5, 5>}
};
bindTexture(&tex_If4, I);
bindTexture(&tex_Jf4, J);
funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
level, block, stream);
}
void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, PtrStepSzf err, int2 winSize, cudaStream_t stream)
{
dim3 block(16, 16);
dim3 grid(divUp(I.cols, block.x), divUp(I.rows, block.y));
bindTexture(&tex_Ib, I);
bindTexture(&tex_Jf, J);
int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
const int patchWidth = block.x + 2 * halfWin.x;
const int patchHeight = block.y + 2 * halfWin.y;
size_t smem_size = 3 * patchWidth * patchHeight * sizeof(int);
if (err.data)
{
denseKernel<true><<<grid, block, smem_size, stream>>>(u, v, prevU, prevV, err, I.rows, I.cols);
cudaSafeCall( cudaGetLastError() );
}
else
{
denseKernel<false><<<grid, block, smem_size, stream>>>(u, v, prevU, prevV, PtrStepf(), I.rows, I.cols);
cudaSafeCall( cudaGetLastError() );
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
#endif /* CUDA_DISABLER */
|
the_stack
|
#include "CUDA_backend.hpp"
#include "../util/Macros.hpp"
#include "../util/ensure.hpp"
#include <cudnn.h>
#include <cublas_v2.h>
#include <stdio.h>
#include <sstream>
#include <iostream>
////////////////////////////////////////////////////////////
/// NAMESPACE UTIL MACROS
////////////////////////////////////////////////////////////
#define FatalError(s) do { \
std::stringstream _where, _message; \
_where << __FILE__ << ':' << __LINE__; \
_message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \
std::cerr << _message.str() << "\nAborting...\n"; \
cudaDeviceReset(); \
exit(1); \
} while(0)
#define checkCUDNN(status) do { \
std::stringstream _error; \
if (status != CUDNN_STATUS_SUCCESS) { \
_error << "CUDNN failure: " << cudnnGetErrorString(status); \
FatalError(_error.str()); \
} \
} while(0)
////////////////////////////////////////////////////////////
/// NAMESPACE AI
////////////////////////////////////////////////////////////
namespace ai
{
////////////////////////////////////////////////////////////
/// NAMESPACE CUDNN
////////////////////////////////////////////////////////////
namespace cudnn
{
////////////////////////////////////////////////////////////
/// FRAMEWORK INITIALIZATION
////////////////////////////////////////////////////////////
static cudnnHandle_t cudnnHandle = NULL;
static cublasHandle_t cublasHandle = NULL;
static int init_id = init();
////////////////////////////////////////////////////////////
int init()
{
checkCUDNN(cudnnCreate(&cudnnHandle));
cublasCreate(&cublasHandle);
printf("CUDA backend initialized!\n");
return 0;
}
////////////////////////////////////////////////////////////
void destroy()
{
checkCUDNN(cudnnDestroy(cudnnHandle));
cublasDestroy(cublasHandle);
}
////////////////////////////////////////////////////////////
/// TENSOR DESCRIPTION
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
TensorDescription::TensorDescription()
{
_tensor_description = NULL;
}
////////////////////////////////////////////////////////////
TensorDescription::TensorDescription(const int width, const int height, const int depth, const int batch_size, const DataType type)
{
create(width, height, depth, batch_size, type);
}
////////////////////////////////////////////////////////////
void TensorDescription::create(const int width, const int height, const int depth,
const int batch_size, const DataType type)
{
clear();
_tensor_description = operator new(sizeof(cudnnTensorDescriptor_t));
checkCUDNN(cudnnCreateTensorDescriptor((cudnnTensorDescriptor_t*)_tensor_description));
cudnnDataType_t datatype;
switch (type)
{
case DATA_FLOAT:
datatype = CUDNN_DATA_FLOAT;
break;
case DATA_DOUBLE:
datatype = CUDNN_DATA_DOUBLE;
break;
case DATA_HALF:
datatype = CUDNN_DATA_HALF;
break;
case DATA_INT8:
datatype = CUDNN_DATA_INT8;
break;
case DATA_INT32:
datatype = CUDNN_DATA_INT32;
break;
};
checkCUDNN(cudnnSetTensor4dDescriptor(*(cudnnTensorDescriptor_t*)_tensor_description, CUDNN_TENSOR_NCHW,
datatype, batch_size, depth, height, width));
}
////////////////////////////////////////////////////////////
TensorDescription::~TensorDescription()
{
clear();
}
////////////////////////////////////////////////////////////
void TensorDescription::clear()
{
if (_tensor_description != NULL) {
checkCUDNN(cudnnDestroyTensorDescriptor(*(cudnnTensorDescriptor_t*)_tensor_description));
operator delete(_tensor_description);
_tensor_description = NULL; // avoid double destruction if clear() is called again
}
}
////////////////////////////////////////////////////////////
void* TensorDescription::get()
{
return _tensor_description;
}
////////////////////////////////////////////////////////////
/// ACTIVATION FUNCTION
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
Activation::Activation()
{
_activation_description = NULL;
}
////////////////////////////////////////////////////////////
Activation::Activation(const int size, const int batch_size, const ActivationType type)
{
create(size, batch_size, type);
}
////////////////////////////////////////////////////////////
void Activation::create(const int size, const int batch_size, const ActivationType type)
{
clear();
_size_description.create(size, 1, 1, batch_size, DATA_FLOAT);
_activation_description = operator new(sizeof(cudnnActivationDescriptor_t));
cudnnActivationMode_t activationtype;
switch (type)
{
case ACTIVATION_SIGMOID:
activationtype = CUDNN_ACTIVATION_SIGMOID;
break;
case ACTIVATION_RELU:
activationtype = CUDNN_ACTIVATION_RELU;
break;
case ACTIVATION_TANH:
activationtype = CUDNN_ACTIVATION_TANH;
break;
case ACTIVATION_CLIPPED_RELU:
activationtype = CUDNN_ACTIVATION_CLIPPED_RELU;
break;
case ACTIVATION_ELU:
activationtype = CUDNN_ACTIVATION_ELU;
break;
};
checkCUDNN(cudnnCreateActivationDescriptor((cudnnActivationDescriptor_t*)_activation_description));
checkCUDNN(cudnnSetActivationDescriptor(*(cudnnActivationDescriptor_t*)_activation_description, activationtype, CUDNN_PROPAGATE_NAN, 0.0));
}
////////////////////////////////////////////////////////////
Activation::~Activation()
{
clear();
}
////////////////////////////////////////////////////////////
void Activation::foreward(void* input, void* output)
{
const float alpha = 1.0;
const float beta = 0.0;
checkCUDNN(cudnnActivationForward(cudnnHandle, *(cudnnActivationDescriptor_t*)_activation_description,
&alpha, *(cudnnTensorDescriptor_t*)_size_description.get(), input, &beta,
*(cudnnTensorDescriptor_t*)_size_description.get(), output));
}
////////////////////////////////////////////////////////////
void Activation::backward(void* input, void* output, void* errors, void* output_errors)
{
const float alpha = 1.0;
const float beta = 0.0;
checkCUDNN(cudnnActivationBackward(cudnnHandle, *(cudnnActivationDescriptor_t*)_activation_description,
&alpha, *(cudnnTensorDescriptor_t*)_size_description.get(), output,
*(cudnnTensorDescriptor_t*)_size_description.get(), errors, *(cudnnTensorDescriptor_t*)_size_description.get(),
input, &beta, *(cudnnTensorDescriptor_t*)_size_description.get(), output_errors));
}
////////////////////////////////////////////////////////////
void Activation::clear()
{
if (_activation_description != NULL) {
checkCUDNN(cudnnDestroyActivationDescriptor(*(cudnnActivationDescriptor_t*)_activation_description));
operator delete(_activation_description);
_activation_description = NULL; // avoid double destruction if clear() is called again
}
}
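////////////////////////////////////////////////////////////
// Illustrative usage sketch (added; hypothetical, not part of the original
// source): an Activation only needs device buffers of size * batch_size floats.
// The sizes, buffer names and function name below are made up.
#if 0
void activation_example()
{
const int size = 128, batch = 32;
Activation relu(size, batch, ACTIVATION_RELU);
float *d_in = NULL, *d_out = NULL;
cudaMalloc(&d_in, sizeof(float) * size * batch);
cudaMalloc(&d_out, sizeof(float) * size * batch);
relu.foreward(d_in, d_out); // element-wise y = max(0, x)
cudaFree(d_in);
cudaFree(d_out);
}
#endif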
////////////////////////////////////////////////////////////
/// CONVOLUTION
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
Convolution::Convolution()
{
_filter_description = NULL;
_convolution_description = NULL;
_fwd_algorithm_description = NULL;
_bwd_filter_algorithm_description = NULL;
_bwd_data_algorithm_description = NULL;
_workspace_size = 0;
}
////////////////////////////////////////////////////////////
Convolution::Convolution(const int input_width, const int input_height, const int input_depth,
const int batch_size, const int filter_width, const int filter_height, const int filter_count,
const int padding_w, const int padding_h, const int stride_u,
const int stride_v, const bool backward_errors)
{
create(input_width, input_height, input_depth, batch_size, filter_width, filter_height,
filter_count, padding_w, padding_h, stride_u, stride_v, backward_errors);
}
////////////////////////////////////////////////////////////
Convolution::~Convolution()
{
clear();
}
////////////////////////////////////////////////////////////
void Convolution::create(const int input_width, const int input_height, const int input_depth,
const int batch_size, const int filter_width, const int filter_height, const int filter_count,
const int padding_w, const int padding_h, const int stride_u,
const int stride_v, const bool backward_errors)
{
clear();
_input_description.create(input_width, input_height, input_depth, batch_size, DATA_FLOAT);
_bias_description.create(1, 1, filter_count, 1, DATA_FLOAT);
_filter_description = operator new(sizeof(cudnnFilterDescriptor_t));
_convolution_description = operator new(sizeof(cudnnConvolutionDescriptor_t));
_fwd_algorithm_description = operator new(sizeof(cudnnConvolutionFwdAlgo_t));
_bwd_filter_algorithm_description = operator new(sizeof(cudnnConvolutionBwdFilterAlgo_t));
if (backward_errors) _bwd_data_algorithm_description = operator new(sizeof(cudnnConvolutionBwdDataAlgo_t));
else _bwd_data_algorithm_description = NULL;
checkCUDNN(cudnnCreateFilterDescriptor((cudnnFilterDescriptor_t*)_filter_description));
checkCUDNN(cudnnCreateConvolutionDescriptor((cudnnConvolutionDescriptor_t*)_convolution_description));
checkCUDNN(cudnnSetFilter4dDescriptor(*(cudnnFilterDescriptor_t*)_filter_description,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filter_count, input_depth, filter_height, filter_width));
checkCUDNN(cudnnSetConvolution2dDescriptor(*(cudnnConvolutionDescriptor_t*)_convolution_description,
padding_h, padding_w, stride_v, stride_u, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
int output_w, output_h, output_c, output_n;
cudnnGetConvolution2dForwardOutputDim( *(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnTensorDescriptor_t*)_input_description.get(), *(cudnnFilterDescriptor_t*)_filter_description,
&output_n, &output_c, &output_h, &output_w);
_output_description.create(output_w, output_h, output_c, output_n, DATA_FLOAT);
_output_width = output_w;
_output_height = output_h;
_output_depth = output_c;
_weights_size = filter_width * filter_height * filter_count * input_depth;
_bias_size = filter_count;
//Forward algorithm
checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle,
*(cudnnTensorDescriptor_t*)_input_description.get(),
*(cudnnFilterDescriptor_t*)_filter_description,
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnTensorDescriptor_t*)_output_description.get(),
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
(cudnnConvolutionFwdAlgo_t*)_fwd_algorithm_description));
//Update workspace size
size_t tmp_workspace_size;
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
*(cudnnTensorDescriptor_t*)_input_description.get(),
*(cudnnFilterDescriptor_t*)_filter_description,
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnTensorDescriptor_t*)_output_description.get(),
*(cudnnConvolutionFwdAlgo_t*)_fwd_algorithm_description,
&tmp_workspace_size));
_workspace_size = max((int)tmp_workspace_size, _workspace_size);
//Filter weights gradient calculation algorithm
checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(
cudnnHandle,
*(cudnnTensorDescriptor_t*)_input_description.get(),
*(cudnnTensorDescriptor_t*)_output_description.get(),
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnFilterDescriptor_t*)_filter_description,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
0,
(cudnnConvolutionBwdFilterAlgo_t*)_bwd_filter_algorithm_description));
//Update workspace size
tmp_workspace_size = 0;
checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnnHandle,
*(cudnnTensorDescriptor_t*)_input_description.get(),
*(cudnnTensorDescriptor_t*)_output_description.get(),
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnFilterDescriptor_t*)_filter_description,
*(cudnnConvolutionBwdFilterAlgo_t*)_bwd_filter_algorithm_description,
&tmp_workspace_size));
_workspace_size = max((int)tmp_workspace_size, _workspace_size);
//Backpropagate gradients algorithm
if (_bwd_data_algorithm_description != NULL) {
checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm(
cudnnHandle,
*(cudnnFilterDescriptor_t*)_filter_description,
*(cudnnTensorDescriptor_t*)_output_description.get(),
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnTensorDescriptor_t*)_input_description.get(),
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
0,
(cudnnConvolutionBwdDataAlgo_t*)_bwd_data_algorithm_description));
tmp_workspace_size = 0;
checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnnHandle,
*(cudnnFilterDescriptor_t*)_filter_description,
*(cudnnTensorDescriptor_t*)_output_description.get(),
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnTensorDescriptor_t*)_input_description.get(),
*(cudnnConvolutionBwdDataAlgo_t*)_bwd_data_algorithm_description,
&tmp_workspace_size));
_workspace_size = max((int)tmp_workspace_size, _workspace_size);
}
}
////////////////////////////////////////////////////////////
void Convolution::foreward(void* input, void* output, void* weights, void* bias, void* workspace)
{
const float alpha = 1.0;
const float beta = 0.0;
//Convolution forward
checkCUDNN(cudnnConvolutionForward(cudnnHandle,
&alpha,
*(cudnnTensorDescriptor_t*)_input_description.get(),
input,
*(cudnnFilterDescriptor_t*)_filter_description,
weights,
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnConvolutionFwdAlgo_t*)_fwd_algorithm_description,
workspace,
(size_t)_workspace_size,
&beta,
*(cudnnTensorDescriptor_t*)_output_description.get(),
output));
//Add bias
checkCUDNN(cudnnAddTensor(cudnnHandle,
&alpha,
*(cudnnTensorDescriptor_t*)_bias_description.get(),
bias,
&alpha,
*(cudnnTensorDescriptor_t*)_output_description.get(),
output));
}
////////////////////////////////////////////////////////////
void Convolution::accumulate_deltas(void* input, void* output, void* errors, void* filter_deltas,
void* bias_deltas, void* workspace)
{
const float alpha = 1.0;
const float beta = 1.0;
checkCUDNN(cudnnConvolutionBackwardBias(
cudnnHandle,
&alpha,
*(cudnnTensorDescriptor_t*)_output_description.get(),
errors,
&beta,
*(cudnnTensorDescriptor_t*)_bias_description.get(),
bias_deltas));
checkCUDNN(cudnnConvolutionBackwardFilter(
cudnnHandle,
&alpha,
*(cudnnTensorDescriptor_t*)_input_description.get(),
input,
*(cudnnTensorDescriptor_t*)_output_description.get(),
errors,
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnConvolutionBwdFilterAlgo_t*)_bwd_filter_algorithm_description,
workspace,
(size_t)_workspace_size,
&beta,
*(cudnnFilterDescriptor_t*)_filter_description,
filter_deltas));
}
////////////////////////////////////////////////////////////
void Convolution::backward(void* errors, void* output_errors, void* weights, void* workspace)
{
const float alpha = 1.0;
const float beta = 1.0;
checkCUDNN(cudnnConvolutionBackwardData(
cudnnHandle,
&alpha,
*(cudnnFilterDescriptor_t*)_filter_description,
weights,
*(cudnnTensorDescriptor_t*)_output_description.get(),
errors,
*(cudnnConvolutionDescriptor_t*)_convolution_description,
*(cudnnConvolutionBwdDataAlgo_t*)_bwd_data_algorithm_description,
workspace,
_workspace_size,
&beta,
*(cudnnTensorDescriptor_t*)_input_description.get(),
output_errors));
}
////////////////////////////////////////////////////////////
void Convolution::update_weights(void* weights, void* filter_deltas, void* bias, void* bias_deltas, const float learningrate)
{
const float alpha = learningrate;
cublasSaxpy(cublasHandle, _weights_size, &alpha, (float*)filter_deltas, 1, (float*)weights, 1);
cublasSaxpy(cublasHandle, _bias_size, &alpha, (float*)bias_deltas, 1, (float*)bias, 1);
}
////////////////////////////////////////////////////////////
void Convolution::clear()
{
if (_filter_description != NULL && _convolution_description != NULL && _fwd_algorithm_description != NULL) {
cudnnDestroyFilterDescriptor(*(cudnnFilterDescriptor_t*)_filter_description);
cudnnDestroyConvolutionDescriptor(*(cudnnConvolutionDescriptor_t*)_convolution_description);
operator delete(_filter_description);
operator delete(_convolution_description);
operator delete(_fwd_algorithm_description);
operator delete(_bwd_filter_algorithm_description);
if (_bwd_data_algorithm_description != NULL)
operator delete(_bwd_data_algorithm_description);
_workspace_size = 0;
}
_filter_description = NULL;
_convolution_description = NULL;
_fwd_algorithm_description = NULL;
_bwd_filter_algorithm_description = NULL;
_bwd_data_algorithm_description = NULL;
_workspace_size = 0;
}
////////////////////////////////////////////////////////////
void Convolution::getOutputSize(int* output_width, int* output_height, int* output_depth)
{
*output_width = _output_width;
*output_height = _output_height;
*output_depth = _output_depth;
}
////////////////////////////////////////////////////////////
int Convolution::getWorkspaceSize()
{
return _workspace_size;
}
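////////////////////////////////////////////////////////////
// Illustrative usage sketch (added; hypothetical, not part of the original
// source): the caller owns the input, output, weight, bias and workspace
// buffers; every size other than the getWorkspaceSize() query is a made-up
// placeholder value.
#if 0
void convolution_example()
{
// 32x32x3 input, batch 16, 5x5 filters, 8 output maps, no padding, stride 1.
Convolution conv(32, 32, 3, 16, 5, 5, 8, 0, 0, 1, 1, /*backward_errors=*/false);
int out_w, out_h, out_c;
conv.getOutputSize(&out_w, &out_h, &out_c);
float *d_in = NULL, *d_out = NULL, *d_weights = NULL, *d_bias = NULL;
void* d_workspace = NULL;
cudaMalloc(&d_in, sizeof(float) * 32 * 32 * 3 * 16);
cudaMalloc(&d_out, sizeof(float) * out_w * out_h * out_c * 16);
cudaMalloc(&d_weights, sizeof(float) * 5 * 5 * 3 * 8);
cudaMalloc(&d_bias, sizeof(float) * 8);
if (conv.getWorkspaceSize() > 0)
cudaMalloc(&d_workspace, conv.getWorkspaceSize());
conv.foreward(d_in, d_out, d_weights, d_bias, d_workspace);
// ... free the buffers when done
}
#endif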
////////////////////////////////////////////////////////////
/// POOLING
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
Pooling::Pooling()
{
_pooling_description = NULL;
}
////////////////////////////////////////////////////////////
Pooling::Pooling(const int input_width, const int input_height, const int input_count,
const int batch_size, const int pooling_width, const int pooling_height, const PoolingType type)
{
create(input_width, input_height, input_count, batch_size, pooling_width, pooling_height, type);
}
////////////////////////////////////////////////////////////
Pooling::~Pooling()
{
clear();
}
////////////////////////////////////////////////////////////
void Pooling::clear()
{
if (_pooling_description != NULL) {
checkCUDNN(cudnnDestroyPoolingDescriptor(*(cudnnPoolingDescriptor_t*)_pooling_description));
operator delete(_pooling_description);
_pooling_description = NULL; // avoid double destruction if clear() is called again
}
}
////////////////////////////////////////////////////////////
void Pooling::create(const int input_width, const int input_height, const int input_count,
const int batch_size, const int pooling_width, const int pooling_height, const PoolingType type)
{
clear();
_input_description.create(input_width, input_height, input_count, batch_size, DATA_FLOAT);
_pooling_description = operator new(sizeof(cudnnPoolingDescriptor_t));
cudnnPoolingMode_t poolingmode;
switch (type)
{
case POOLING_MAX:
poolingmode = CUDNN_POOLING_MAX;
break;
case POOLING_AVERAGE:
poolingmode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
break;
}
checkCUDNN(cudnnCreatePoolingDescriptor((cudnnPoolingDescriptor_t*)_pooling_description));
checkCUDNN(cudnnSetPooling2dDescriptor(*(cudnnPoolingDescriptor_t*)_pooling_description,
poolingmode,
CUDNN_PROPAGATE_NAN,
pooling_width, pooling_height,
0, 0,
pooling_width, pooling_height));
int n, c, h, w;
checkCUDNN(cudnnGetPooling2dForwardOutputDim(
*(cudnnPoolingDescriptor_t*)_pooling_description,
*(cudnnTensorDescriptor_t*)_input_description.get(),
&n,
&c,
&h,
&w
));
_output_description.create(w, h, c, n, DATA_FLOAT);
}
////////////////////////////////////////////////////////////
void Pooling::foreward(void* input, void* output)
{
const float alpha = 1.0;
const float beta = 0.0;
checkCUDNN(cudnnPoolingForward(
cudnnHandle,
*(cudnnPoolingDescriptor_t*)_pooling_description,
&alpha,
*(cudnnTensorDescriptor_t*)_input_description.get(),
input,
&beta,
*(cudnnTensorDescriptor_t*)_output_description.get(),
output));
}
////////////////////////////////////////////////////////////
void Pooling::backward(void* input, void* outputs, void* errors, void* out_errors)
{
const float alpha = 1.0;
const float beta = 1.0;
checkCUDNN(cudnnPoolingBackward(cudnnHandle,
*(cudnnPoolingDescriptor_t*)_pooling_description,
&alpha,
*(cudnnTensorDescriptor_t*)_output_description.get(),
outputs,
*(cudnnTensorDescriptor_t*)_output_description.get(),
errors,
*(cudnnTensorDescriptor_t*)_input_description.get(),
input,
&beta,
*(cudnnTensorDescriptor_t*)_input_description.get(),
out_errors));
}
////////////////////////////////////////////////////////////
/// DROPOUT
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
Dropout::Dropout()
{
_dropout_description = NULL;
_states_size = 0;
_reserve_space_size = 0;
}
////////////////////////////////////////////////////////////
Dropout::Dropout(const int input_size, const float dropout_probability, void* state_buffer)
{
create(input_size, dropout_probability, state_buffer);
}
////////////////////////////////////////////////////////////
Dropout::~Dropout()
{
clear();
}
////////////////////////////////////////////////////////////
void Dropout::clear()
{
if (_dropout_description != NULL) {
checkCUDNN(cudnnDestroyDropoutDescriptor(*(cudnnDropoutDescriptor_t*)_dropout_description));
operator delete(_dropout_description);
_states_size = 0;
_reserve_space_size = 0;
_dropout_description = NULL;
}
}
////////////////////////////////////////////////////////////
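//Note: state_buffer must point to device memory of at least getStatesSize() bytes and must stay
//allocated for the lifetime of this descriptor; cuDNN keeps its RNG state there (the seed is fixed to 1 below).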
void Dropout::create(const int input_size, const float dropout_probability, void* state_buffer)
{
clear();
_input_description.create(input_size, 1, 1, 1, DATA_FLOAT);
_states_size = getStatesSize();
_reserve_space_size = getReserveSpaceSize(input_size);
_dropout_description = operator new(sizeof(cudnnDropoutDescriptor_t));
checkCUDNN(cudnnCreateDropoutDescriptor((cudnnDropoutDescriptor_t*)_dropout_description));
checkCUDNN(cudnnSetDropoutDescriptor(
*(cudnnDropoutDescriptor_t*)_dropout_description,
cudnnHandle,
dropout_probability,
state_buffer,
(size_t)_states_size,
1));
}
////////////////////////////////////////////////////////////
void Dropout::foreward(void* input, void* output, void* reserve_space_buffer)
{
checkCUDNN(cudnnDropoutForward(
cudnnHandle,
*(cudnnDropoutDescriptor_t*)_dropout_description,
*(cudnnTensorDescriptor_t*)_input_description.get(),
input,
*(cudnnTensorDescriptor_t*)_input_description.get(),
output,
reserve_space_buffer,
(size_t)_reserve_space_size));
}
////////////////////////////////////////////////////////////
void Dropout::backward(void* errors, void* out_errors, void* reserve_space_buffer)
{
checkCUDNN(cudnnDropoutBackward(
cudnnHandle,
*(cudnnDropoutDescriptor_t*)_dropout_description,
*(cudnnTensorDescriptor_t*)_input_description.get(),
errors,
*(cudnnTensorDescriptor_t*)_input_description.get(),
out_errors,
reserve_space_buffer,
(size_t)_reserve_space_size));
}
////////////////////////////////////////////////////////////
size_t Dropout::getStatesSize()
{
size_t bytes;
checkCUDNN(cudnnDropoutGetStatesSize(cudnnHandle, &bytes));
return bytes;
}
////////////////////////////////////////////////////////////
size_t Dropout::getReserveSpaceSize(const int input_size)
{
cudnn::TensorDescription input;
input.create(input_size, 1, 1, 1, DATA_FLOAT);
size_t bytes;
checkCUDNN(cudnnDropoutGetReserveSpaceSize(*(cudnnTensorDescriptor_t*)input.get(), &bytes));
return bytes;
}
} /* namespace cudnn */
////////////////////////////////////////////////////////////
/// NAMESPACE CUDA
////////////////////////////////////////////////////////////
namespace cuda
{
////////////////////////////////////////////////////////////
/// CUDA KERNELS
////////////////////////////////////////////////////////////
__device__ float knl_tmp_float_buf[1024];
__device__ int knl_tmp_int_buf[1024];
__constant__ float _selu_alpha = 1.6732632423543772;
__constant__ float _selu_scale = 1.0507009873554804;
////////////////////////////////////////////////////////////
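//Grid mapping: blockIdx.x = output position, blockIdx.y = output filter. The threads of a block walk the
//filter_area * input_count products of one output neuron, accumulate partial sums in shared memory and
//reduce them to a single value (the reduction assumes blockDim.x is a power of two, which the launcher
//guarantees through the power-of-two helpers). out_in_map holds, for every (output position, filter offset)
//pair, the input index to read, or -1 when no input contributes (padding).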
__global__ void knl_conv_foreward(float* weights, float* bias, float* inputs, float* outputs,
int* out_in_map, int input_count, int output_size, int input_size, int filter_area, int filters_count)
{
extern __shared__ float cache[]; //size of blockDim.x
cache[threadIdx.x] = 0; //clear my cache position
//REM
//blockIdx.x -> output pos
//blockIdx.y -> filter id
//Shortcuts
#define WEIGHT_OFFSET (blockIdx.y * input_count * filter_area)
#define INPUT_OFFSET ((tid / filter_area) * input_size)
int tid = threadIdx.x;
int i;
//Compute partial neuron output and put it inside the cache
while (tid < filter_area * input_count) {
const int filter_id = tid % filter_area;
if (out_in_map[blockIdx.x * filter_area + filter_id] != -1)
cache[threadIdx.x] += inputs[INPUT_OFFSET + out_in_map[blockIdx.x * filter_area + filter_id]]
* weights[WEIGHT_OFFSET + tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial neuron outputs to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Finally, store the single value to the outputs buffer
if (threadIdx.x == 0) outputs[blockIdx.y * output_size + blockIdx.x] = cache[0] + bias[blockIdx.y];
}
/*
////////////////////////////////////////////////////////////
__global__ void knl_conv_foreward(float* weights, float* bias, float* inputs, float* outputs,
int* out_in_map, int input_count, int output_size, int input_size, int filter_area, int filters_count)
{
extern __shared__ float cache[]; //size of blockDim.x
//REM
//blockIdx.x -> output pos
//blockIdx.y -> filter id
//Shortcuts
#define OUTPUT_FILTER (tid / output_size)
#define OUTPUT_X (tid % output_width)
#define OUTPUT_Y ((tid / output_width) % output_height)
#define WEIGHT_OFFSET (OUTPUT_FILTER * input_count * filter_area)
#define INPUT_OFFSET ((i / filter_area) * input_size)
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = 0;
while (tid < output_size * filters_count) {
//Clear output
cache[threadIdx.x] = 0;
//Compute partial neuron output and put it inside the cache
i = 0;
while (i < filter_area * input_count) {
const int filter_id = i % filter_area;
if (out_in_map[(tid % output_size) * filter_area + filter_id] != -1)
cache[threadIdx.x] += inputs[INPUT_OFFSET + out_in_map[(tid % output_size) * filter_area + filter_id]]
* weights[WEIGHT_OFFSET + i];
i++;
}
//Finally, store the single value to the outputs buffer
outputs[tid] = cache[threadIdx.x] + bias[OUTPUT_FILTER];
//Update output pos
tid += blockDim.x * gridDim.x;
}
}
*/
////////////////////////////////////////////////////////////
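//Grid mapping: one block per input element (blockIdx.x = x, blockIdx.y = y, blockIdx.z = channel).
//in_weight_map / in_out_map give, for each filter offset, the weight and output this input contributed to
//(-1 if none); each block reduces those weight * error products into a single out_errors value.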
__global__ void knl_conv_backward(float* weights, float* out_errors, float* errors,
int* in_weight_map, int* in_out_map, int input_count, int output_size, int input_width,
int input_height, int filter_area, int filters_count)
{
extern __shared__ float cache[];
cache[threadIdx.x] = 0; //clear my cache position
//Shortcuts
const int x = blockIdx.x; //inputx
const int y = blockIdx.y; //inputy
const int z = blockIdx.z; //input_id
int tid = threadIdx.x;
int i;
const int input_local_id = (y * input_width + x);
#define __INPUT_GLOBAL_ID (z * input_width * input_height + input_local_id)
while (tid < filter_area * filters_count) {
//Get weight and output index from
const int w_id = in_weight_map[input_local_id * filter_area + (tid % filter_area)];
const int o_id = in_out_map[input_local_id * filter_area + (tid % filter_area)];
//Check if this weight and output exists
if (w_id != -1 && o_id != -1) {
//Update cache
cache[threadIdx.x] += weights[(tid / filter_area) * filter_area * input_count
+ z * filter_area + w_id] * errors[(tid / filter_area) * output_size + o_id];
}
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial errors to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Finally, store the single error value in the out_errors buffer
if (threadIdx.x == 0) out_errors[__INPUT_GLOBAL_ID] += cache[0];
}
////////////////////////////////////////////////////////////
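//Grid mapping: blockIdx.x = filter, blockIdx.y = weight index inside that filter's weight stack;
//the extra row blockIdx.y == filter_area * input_count accumulates the bias delta of the filter.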
__global__ void knl_conv_accumulate_deltas(float* weights_deltas, float* bias_deltas,
float* errors, float* inputs, float* outputs, int* out_in_map, int input_count,
int input_size, int output_size, int filter_area, int filters_count)
{
extern __shared__ float cache[]; //size of blockDim.x
cache[threadIdx.x] = 0; //clear my cache position
//Shortcuts
const int x = blockIdx.x; //filter id
const int y = blockIdx.y; //weight id
int tid = threadIdx.x;
int i;
if (y == filter_area * input_count) //bias
{
//Shortcuts
#define __DELTAS_BIAS_OFFSET x
#define __OUTPUT_ID x * output_size + tid //the global output position
//Compute the partial bias delta and put it inside the cache
while (tid < output_size) {
cache[threadIdx.x] += errors[__OUTPUT_ID];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial deltas to a single deltas value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Finally, store the single delta value in the deltas buffer
if (threadIdx.x == 0) bias_deltas[__DELTAS_BIAS_OFFSET] += cache[0];
}
else //normal weight
{
//Shortcuts
#define __INPUT_OFFSET (y / filter_area) * input_size //Where my input begins
#define __OUTPUT_ID x * output_size + tid //the global output position
#define __WEIGHT_ID x * filter_area * input_count + y //the global weight position
#define __WEIGHT_FILTER_ID y % filter_area //the local weight position inside the filter in one input
//Compute the partial weight delta and put it inside the cache
while (tid < output_size) {
if (out_in_map[tid * filter_area + __WEIGHT_FILTER_ID] != -1)
cache[threadIdx.x] += inputs[__INPUT_OFFSET + out_in_map[tid * filter_area + __WEIGHT_FILTER_ID]] * errors[__OUTPUT_ID];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial deltas to a single deltas value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Finally, store the single delta value in the deltas buffer
if (threadIdx.x == 0) weights_deltas[__WEIGHT_ID] += cache[0];
}
}
////////////////////////////////////////////////////////////
__global__ void knl_conv_update_parameters(float* weights, float* bias, float* weights_deltas,
float* bias_deltas, int filter_area, int input_count, int filter_count, float learningrate)
{
//Update weights
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < filter_area * input_count * filter_count) {
weights[tid] += weights_deltas[tid] * learningrate;
tid += blockDim.x * gridDim.x;
}
//Update bias
//after the weight loop, tid - offset maps each thread to a distinct index in [0, blockDim.x * gridDim.x),
//so the same grid-stride loop can walk the bias vector
int offset = filter_area * input_count * filter_count;
tid -= offset;
while (tid < filter_count) {
bias[tid] += bias_deltas[tid] * learningrate;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
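//One thread per output element; maxbuffer records the input index of the selected maximum
//so the backward pass can route the error straight to it.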
__global__ void knl_maxpooling_foreward(float* inputs, float* outputs, int* maxbuffer,
int input_width, int input_height, int input_count, int stride, int filter_size,
int output_width, int output_height)
{
extern __shared__ float cache[]; //size of blockDim.x
//output id
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= output_width * output_height * input_count) return;
cache[threadIdx.x] = -3.402823466e+38f; //initialize the running maximum to -FLT_MAX
const int z = tid / (output_width * output_height);
const int x = tid % output_width;
const int y = (tid / output_width) % output_height;
const int stopX = (x * stride + filter_size > input_width) ? input_width - x * stride : filter_size;
const int stopY = (y * stride + filter_size > input_height) ? input_height - y * stride : filter_size;
int index, sx, sy;
for (sx = 0; sx < stopX; sx++) {
for (sy = 0; sy < stopY; sy++) {
index = z * input_width * input_height + input_width * (y * stride + sy) + x * stride + sx;
if (inputs[index] > cache[threadIdx.x]) {
cache[threadIdx.x] = inputs[index];
maxbuffer[tid] = index;
}
}
}
outputs[tid] = cache[threadIdx.x];
}
////////////////////////////////////////////////////////////
__global__ void knl_maxpooling_backward(float* out_errors, float* errors, int* maxbuffer,
int input_width, int input_height, int input_count, int stride, int filter_size,
int output_width, int output_height)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= output_width * output_height * input_count) return;
out_errors[maxbuffer[tid]] += errors[tid];
}
////////////////////////////////////////////////////////////
__global__ void knl_averagepooling_foreward(float* inputs, float* outputs,
int input_width, int input_height, int input_count, int stride, int filter_size,
int output_width, int output_height)
{
extern __shared__ float cache[]; //size of blockDim.x
//output id
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= output_width * output_height * input_count) return;
cache[threadIdx.x] = 0; //clear my cache position
const int z = tid / (output_width * output_height);
const int x = tid % output_width;
const int y = (tid / output_width) % output_height;
const int stopX = (x * stride + filter_size > input_width) ? input_width - x * stride : filter_size;
const int stopY = (y * stride + filter_size > input_height) ? input_height - y * stride : filter_size;
int sx, sy;
for (sx = 0; sx < stopX; sx++) {
for (sy = 0; sy < stopY; sy++) {
#define INPUT_INDEX (z * input_width * input_height + input_width * (y * stride + sy) + x * stride + sx)
cache[threadIdx.x] += inputs[INPUT_INDEX];
}
}
outputs[tid] = cache[threadIdx.x] / (float)(stopX * stopY);
}
////////////////////////////////////////////////////////////
__global__ void knl_averagepooling_backward(float* out_errors, float* errors, int input_width,
int input_height, int input_count, int stride, int filter_size, int output_width,
int output_height)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= output_width * output_height * input_count) return;
const int z = tid / (output_width * output_height);
const int x = tid % output_width;
const int y = (tid / output_width) % output_height;
const int stopX = (x * stride + filter_size > input_width) ? input_width - x * stride : filter_size;
const int stopY = (y * stride + filter_size > input_height) ? input_height - y * stride : filter_size;
int sx, sy;
for (sx = 0; sx < stopX; sx++) {
for (sy = 0; sy < stopY; sy++) {
#define INPUT_INDEX (z * input_width * input_height + input_width * (y * stride + sy) + x * stride + sx)
//distribute the error evenly over the pooled window, matching the 1/(stopX * stopY) scaling of the forward pass
out_errors[INPUT_INDEX] += errors[tid] / (float)(stopX * stopY);
}
}
}
////////////////////////////////////////////////////////////
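//One block per output neuron; weights are stored as [input][output], i.e. weights[input * output_size + output].
//Each block reduces its input * weight products in shared memory; the reduction assumes blockDim.x is a power of two.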
__global__ void knl_linear_foreward(float* weights, float* bias, float* inputs, float* outputs,
int input_size, int output_size, bool accumulate, bool use_bias)
{
extern __shared__ float cache[];
//only thread 0 seeds the cache with the previous output, otherwise the reduction would add it blockDim.x times
if (accumulate && threadIdx.x == 0) cache[threadIdx.x] = outputs[blockIdx.x];
else cache[threadIdx.x] = 0;
int tid = threadIdx.x;
int i;
//Compute partial neuron output and put it inside the cache
while (tid < input_size) {
cache[threadIdx.x] += inputs[tid] * weights[tid * output_size + blockIdx.x];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial neuron outputs to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Finally, store the single value to the outputs buffer
if (threadIdx.x == 0) {
outputs[blockIdx.x] = cache[0];
if (use_bias == true) outputs[blockIdx.x] += bias[blockIdx.x];
}
}
////////////////////////////////////////////////////////////
__global__ void knl_linear_backward(float* weights, float* out_errors, float* errors, int input_size, int output_size)
{
extern __shared__ float cache[];
cache[threadIdx.x] = 0; //Clear my cache position
int tid = threadIdx.x;
int i;
//Compute partial leaving errors and put it inside the cache
while (tid < output_size) {
cache[threadIdx.x] += errors[tid] * weights[blockIdx.x * output_size + tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial leaving errors to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Finally, store the single value to the out_errors buffer
if (threadIdx.x == 0) out_errors[blockIdx.x] += cache[0];
}
////////////////////////////////////////////////////////////
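//deltas layout: input_size * output_size weight deltas followed by output_size bias deltas.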
__global__ void knl_linear_accumulate_deltas(float* deltas, float* inputs, float* errors, int input_size, int output_size, bool use_bias)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < input_size * output_size) {
deltas[tid] += inputs[tid / output_size] * errors[tid % output_size];
tid += blockDim.x * gridDim.x;
}
//Update bias
if (use_bias == false) return;
tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < output_size) {
deltas[input_size * output_size + tid] += errors[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_linear_update_parameters(float* weights, float* bias, float* deltas, float learningrate, int input_size, int output_size)
{
//Update weights
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < input_size * output_size) {
weights[tid] += deltas[tid] * learningrate;
tid += blockDim.x * gridDim.x;
}
//Update bias
tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < output_size) {
bias[tid] += deltas[input_size * output_size + tid] * learningrate;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_sigmoid_foreward(float* inputs, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
outputs[tid] = 1.0 / (1.0 + expf(-inputs[tid]));
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_sigmoid_backward(float* errors, float* out_errors, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
out_errors[tid] = outputs[tid] * (1.f - outputs[tid]) * errors[tid]; //derivative * error
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_relu_foreward(float* inputs, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
outputs[tid] = inputs[tid] * (inputs[tid] > 0);
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_relu_backward(float* errors, float* out_errors, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
out_errors[tid] = (outputs[tid] > 0) * errors[tid]; //derivative * error
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_selu_foreward(float* inputs, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
if (inputs[tid] >= 0.0) outputs[tid] = _selu_scale * inputs[tid];
else outputs[tid] = _selu_scale * (_selu_alpha * expf(inputs[tid]) - _selu_alpha);
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_selu_backward(float* errors, float* out_errors, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
if (outputs[tid] >= 0.0) out_errors[tid] = _selu_scale * errors[tid];
else out_errors[tid] = errors[tid] * (outputs[tid] + _selu_scale * _selu_alpha);
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tanh_foreward(float* inputs, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
outputs[tid] = tanh(inputs[tid]);
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tanh_backward(float* errors, float* out_errors, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
out_errors[tid] = (1.f - outputs[tid] * outputs[tid]) * errors[tid]; //derivative * error
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
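//Training-time dropout with a per-thread xorshift-style generator seeded from `seed` and the element index;
//at inference (training == false) or with drop_probability == 0 the input is copied through unchanged.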
__global__ void knl_dropout_foreward(float* inputs, float* outputs, unsigned int seed, float drop_probability, bool training, int size)
{
extern __shared__ unsigned int tseeds[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//seed the per-thread generator (shared memory is uninitialized, so assign instead of accumulate)
tseeds[threadIdx.x] = seed * (tid + 1);
//grid-stride loop so the kernel also covers sizes larger than one launch-worth of threads
while (tid < size) {
if (drop_probability != 0 && training == true)
{
//xorshift32 pseudo random number generator, or magic for short
tseeds[threadIdx.x] ^= tseeds[threadIdx.x] << 13;
tseeds[threadIdx.x] ^= tseeds[threadIdx.x] >> 17;
tseeds[threadIdx.x] ^= tseeds[threadIdx.x] << 5;
//drop?
if ((float)tseeds[threadIdx.x] / ((unsigned int) UINT_MAX) < drop_probability) outputs[tid] = 0.f;
else outputs[tid] = inputs[tid];
}
else
{
outputs[tid] = inputs[tid];
}
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_dropout_backward(float* errors, float* out_errors, float* outputs, float drop_probability, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
if (outputs[tid] == 0) out_errors[tid] = 0;
else out_errors[tid] = errors[tid] * (1 - drop_probability);
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
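//Softmax over the whole buffer; the launcher runs it in a single block so the shared-memory reduction
//sees every element. `scale` acts as an inverse-temperature factor and `epsilon` guards the division.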
__global__ void knl_softmax_foreward(float* inputs, float* outputs, float scale, int size, float epsilon)
{
extern __shared__ float cache[];
int tid = threadIdx.x;
int i;
cache[threadIdx.x] = 0;
while (tid < size) {
outputs[tid] = exp(inputs[tid] * scale);
cache[threadIdx.x] += outputs[tid];
tid += blockDim.x;
}
__syncthreads();
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
tid = threadIdx.x;
while (tid < size) {
outputs[tid] /= (cache[0] + epsilon);
tid += blockDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_softmax_backward(float* errors, float* out_errors, float* outputs, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
out_errors[tid] = outputs[tid] * (1.f - outputs[tid]) * errors[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_cost_crossentropy(float* prediction, float* target, float* errors, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ float predictions[];
float denominator;
while (tid < size) {
predictions[threadIdx.x] = prediction[tid];
denominator = predictions[threadIdx.x] - predictions[threadIdx.x] * predictions[threadIdx.x];
if (denominator < 1e-6) denominator = 1e-6;
errors[tid] = (target[tid] - predictions[threadIdx.x]) / denominator;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
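//Normalization forward pass is split over five kernels: 1-2 reduce the mean, 3-4 reduce the variance,
//5 normalizes and applies gamma/beta. Per-block partial sums are staged in the fixed-size knl_tmp_float_buf,
//which bounds how many blocks these kernels can be launched with.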
__global__ void knl_normalization_foreward_1(float* inputs, int size)
{
extern __shared__ float cache[];
cache[threadIdx.x] = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
//calculate mean
while (tid < size) {
cache[threadIdx.x] += inputs[tid];
tid += blockDim.x * gridDim.x;
}
__syncthreads();
//Reduce all the partial mean sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) knl_tmp_float_buf[blockIdx.x] = cache[0] / (float)size;
if (blockIdx.x == 0 && threadIdx.x == 0) knl_tmp_int_buf[0] = gridDim.x;
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_foreward_2(float* inputs, float* deviation, int size)
{
extern __shared__ float cache[];
cache[threadIdx.x] = 0;
int tid = threadIdx.x;
int i;
//combine the per-block partial means
while (tid < knl_tmp_int_buf[0]) {
cache[threadIdx.x] += knl_tmp_float_buf[tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial mean sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) knl_tmp_float_buf[0] = cache[0];
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_foreward_3(float* inputs, float* deviation, int size)
{
extern __shared__ float cache[];
cache[threadIdx.x] = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
while (tid < size) {
deviation[tid] = inputs[tid] - knl_tmp_float_buf[0];
cache[threadIdx.x] += deviation[tid] * deviation[tid];
tid += blockDim.x * gridDim.x;
}
__syncthreads();
//Reduce all the partial squared-deviation sums to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) knl_tmp_float_buf[blockIdx.x] = cache[0] / (float)size;
if (threadIdx.x == 0 && blockIdx.x == 0) knl_tmp_int_buf[0] = gridDim.x;
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_foreward_4(float* variance, int size)
{
extern __shared__ float cache[];
cache[threadIdx.x] = 0;
int tid = threadIdx.x;
int i;
//combine the per-block partial variances
while (tid < knl_tmp_int_buf[0]) {
cache[threadIdx.x] += knl_tmp_float_buf[tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial mean sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) *variance = cache[0];
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_foreward_5(float* inputs, float* deviation, float* normalized,
float* outputs, float* variance, float* gamma, float* beta, float epsilon, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
normalized[tid] = deviation[tid] / sqrt(*variance + epsilon);
outputs[tid] = normalized[tid] * (*gamma) + *beta;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_backward_1(float* errors, float* deviation, int size)
{
//Allocate and clear cache
extern __shared__ float cache[];
cache[threadIdx.x] = 0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
//Sum all errors
while (tid < size) {
cache[threadIdx.x] += errors[tid];
tid += blockDim.x * gridDim.x;
}
__syncthreads();
//Reduce all the partial errors sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Store results
if (threadIdx.x == 0) knl_tmp_float_buf[blockIdx.x] = cache[0];
//reset cache
cache[threadIdx.x] = 0;
//Sum all errors deviations
tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
cache[threadIdx.x] += errors[tid] * deviation[tid];
tid += blockDim.x * gridDim.x;
}
__syncthreads();
//Reduce all the partial errors deviations sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Store results
if (threadIdx.x == 0) knl_tmp_float_buf[gridDim.x + blockIdx.x] = cache[0];
//Store grid dimension for later
if (blockIdx.x == 0 && threadIdx.x == 0) knl_tmp_int_buf[0] = gridDim.x;
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_backward_2()
{
//Allocate and clear cache
extern __shared__ float cache[];
cache[threadIdx.x] = 0;
int tid = threadIdx.x;
int i;
//Sum all errors
while (tid < knl_tmp_int_buf[0]) {
cache[threadIdx.x] += knl_tmp_float_buf[tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial errors sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) { knl_tmp_float_buf[0] = cache[0]; }
cache[threadIdx.x] = 0; //clear cache
//Sum all errors
tid = threadIdx.x;
while (tid < knl_tmp_int_buf[0]) {
cache[threadIdx.x] += knl_tmp_float_buf[knl_tmp_int_buf[0] + tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial errors sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) { knl_tmp_float_buf[1] = cache[0]; }
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_backward_3(float* errors, float* out_errors, float* deviation,
float* gamma, float* beta, float* variance, float epsilon, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
out_errors[tid] = 1.0 / (float)size * (*gamma) / sqrt(*variance + epsilon) *
((float)size * errors[tid] - knl_tmp_float_buf[0] - deviation[tid] / ((*variance) + epsilon)
* knl_tmp_float_buf[1]);
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_accumulate_deltas(float* errors, float* deviation, float* variance,
float epsilon, float* d_beta, float* d_gamma, int size)
{
//Allocate and clear cache
extern __shared__ float cache[];
cache[threadIdx.x] = 0;
int tid = threadIdx.x;
int i;
//Calculate d_beta delta
while (tid < size) {
cache[threadIdx.x] += errors[tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial deltas sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) *d_beta += cache[0];
__syncthreads();
//reset cache
cache[threadIdx.x] = 0;
//Calculate d_gamma delta
tid = threadIdx.x;
while (tid < size) {
//d_gamma accumulates error * normalized input, i.e. deviation / sqrt(variance + epsilon)
cache[threadIdx.x] += deviation[tid] / sqrt(*variance + epsilon) * errors[tid];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial deltas sum to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
if (threadIdx.x == 0) *d_gamma += cache[0];
}
////////////////////////////////////////////////////////////
__global__ void knl_normalization_update_parameters(float* beta, float* gamma, float* d_beta,
float* d_gamma, float momentum, int _size, float learningrate)
{
*beta += ((double)*d_beta / (double)_size) * learningrate;
*gamma += ((double)*d_gamma / (double)_size) * learningrate;
*d_beta *= momentum;
*d_gamma *= momentum;
}
////////////////////////////////////////////////////////////
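//Single-block compaction: each thread scans one chunk of the input, collects the indices of its non-zero
//elements into tmp_indices, then the per-thread counts are prefix-summed to pack them into `indices`.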
__global__ void knl_sparse_indices(float* inputs, int input_size, int* indices, int* tmp_indices, int* indices_count, int chunck_size)
{
extern __shared__ unsigned int temp[];
//clear counters
temp[threadIdx.x * 2] = 0;
temp[threadIdx.x * 2 + 1] = 0;
#define CHUNCK_OFFSET threadIdx.x * chunck_size
//Each thread should process a chunck
int tid = CHUNCK_OFFSET;
while (tid < CHUNCK_OFFSET + chunck_size && tid < input_size) {
//Store index of non-zero inputs
if (inputs[tid] != 0) {
tmp_indices[CHUNCK_OFFSET + temp[threadIdx.x * 2]] = tid;
temp[threadIdx.x * 2]++;
}
//Check the next element
tid++;
}
__syncthreads();
//calculate offset
for (tid = 0; tid < threadIdx.x; tid++)
temp[threadIdx.x * 2 + 1] += temp[tid * 2];
//store indices
for (tid = 0; tid < temp[threadIdx.x * 2]; tid++)
indices[temp[threadIdx.x * 2 + 1] + tid] = tmp_indices[CHUNCK_OFFSET + tid];
//store indices count
if (threadIdx.x == blockDim.x -1)
*indices_count = temp[threadIdx.x * 2 + 1] + temp[threadIdx.x * 2];
}
////////////////////////////////////////////////////////////
__global__ void knl_linear_sparse_foreward(float* weights, float* bias, float* inputs, float* outputs, int* indices, int* indices_count, int input_size, int output_size)
{
extern __shared__ float cache[];
cache[threadIdx.x] = 0; //Clear my cache position
int tid = threadIdx.x;
int i;
//Compute partial neuron output and put it inside the cache
while (tid < *indices_count) {
cache[threadIdx.x] += inputs[indices[tid]] * weights[indices[tid] * output_size + blockIdx.x];
tid += blockDim.x;
}
__syncthreads();
//Reduce all the partial neuron outputs to a single value
for (i = blockDim.x/2; i > 0; i /= 2) {
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
__syncthreads();
}
//Finally, store the single value to the outputs buffer
if (threadIdx.x == 0) outputs[blockIdx.x] = cache[0] + bias[blockIdx.x];
}
////////////////////////////////////////////////////////////
__global__ void knl_linear_sparse_accumulate_deltas(float* deltas, float* inputs, float* errors, int* indices, int* indices_count, int input_size, int output_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < *indices_count * output_size) {
const int input = indices[tid % *indices_count];
const int output = tid / *indices_count;
deltas[output_size * input + output] += inputs[input] * errors[output];
tid += blockDim.x * gridDim.x;
}
//Update bias
tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < output_size) {
deltas[input_size * output_size + tid] += errors[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
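//Copies several input buffers back to back into one output; `sizes` holds the length of each input and
//`offset` is walked across them to find which input the current output element belongs to.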
__global__ void knl_concatenate_foreward(float** inputs, float* outputs, int* sizes, int input_count)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int offset = tid;
int input_id = 0;
//Update offset and input index
while (input_id < input_count && offset >= sizes[input_id]) {
offset -= sizes[input_id];
input_id++;
}
while (input_id < input_count) {
//Update offset and input index
while (offset >= sizes[input_id]) {
offset -= sizes[input_id];
input_id++;
if (input_id >= input_count) return;
}
outputs[tid] = inputs[input_id][offset];
tid += gridDim.x * blockDim.x;
offset += gridDim.x * blockDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_concatenate_backward(float* errors, float** out_errors, int* sizes, int input_count)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int offset = tid;
int input_id = 0;
//Update offset and input index
while (offset >= sizes[input_id]) {
offset -= sizes[input_id];
input_id++;
if (input_id >= input_count) return;
}
while (input_id < input_count) {
//Update offset and input index
while (offset >= sizes[input_id]) {
offset -= sizes[input_id];
input_id++;
if (input_id >= input_count) return;
}
out_errors[input_id][offset] += errors[tid];
tid += gridDim.x * blockDim.x;
offset += gridDim.x * blockDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_gradient_clipping(float* deltas, int size, const float clipping_deviation)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size) return;
if (deltas[tid] > clipping_deviation) deltas[tid] = clipping_deviation;
else if (deltas[tid] < -clipping_deviation) deltas[tid] = -clipping_deviation;
}
////////////////////////////////////////////////////////////
__global__ void knl_l1_regularization(float* weights, const float l1_factor, const float learningrate, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size) return;
weights[tid] += (weights[tid] > 0 ? -1.f : 1.f) * l1_factor * learningrate;
}
////////////////////////////////////////////////////////////
__global__ void knl_l2_regularization(float* weights, const float l2_factor, const float learningrate, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size) return;
weights[tid] += (0 - weights[tid]) * l2_factor * learningrate;
}
////////////////////////////////////////////////////////////
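//Image augmentation kernels below assume channel-major layout: pixel (x, y) of channel c lives at
//c * width * height + y * width + x. Pixels that map outside the source image are written as 0
//(translate, rotate and scale).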
__global__ void knl_image_translate(float* image, float* result_buffer, const int width, const int height, const int channels, const int by_x, const int by_y)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= width * height * channels) return;
const int c = tid / (width * height);
const int x = (tid % (width * height)) % width;
const int y = (tid % (width * height)) / width;
if (y - by_y < 0 || y - by_y >= height || x - by_x < 0 || x - by_x >= width)
result_buffer[y * width + x + c * width * height] = 0;
else
result_buffer[y * width + x + c * width * height] = image[(y - by_y) * width + x - by_x + c * width * height];
}
////////////////////////////////////////////////////////////
__global__ void knl_image_horizontal_flip(float* image, const int width, const int height, const int channels)
{
extern __shared__ float cache[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int halfw = width / 2;
if (tid >= halfw * height * channels) return;
const int c = tid / (halfw * height);
const int x = (tid % (halfw * height)) % halfw;
const int y = (tid % (halfw * height)) / halfw;
cache[threadIdx.x] = image[c * width * height + y * width + x];
image[c * width * height + y * width + x] = image[c * width * height + y * width + (width - 1 - x)];
image[c * width * height + y * width + (width - 1 -x)] = cache[threadIdx.x];
}
////////////////////////////////////////////////////////////
__global__ void knl_image_vertical_flip(float* image, const int width, const int height, const int channels)
{
extern __shared__ float cache[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int halfh = height / 2;
if (tid >= width * halfh * channels) return;
const int c = tid / (width * halfh);
const int x = (tid % (width * halfh)) % width;
const int y = (tid % (width * halfh)) / width;
cache[threadIdx.x] = image[c * width * height + y * width + x];
image[c * width * height + y * width + x] = image[c * width * height + (height -1 -y) * width + x];
image[c * width * height + (height -1 -y) * width + x] = cache[threadIdx.x];
}
////////////////////////////////////////////////////////////
__global__ void knl_image_rotate(float* image, float* result_buffer, const int width, const int height,
const int channels, const float a, const float b, const int xoffset, const int yoffset)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= width * height * channels) return;
const int c = tid / (width * height);
const int x = (tid % (width * height)) % width;
const int y = (tid % (width * height)) / width;
const int nx = x * a - y * b + xoffset;
const int ny = x * b + y * a + yoffset;
if (nx < 0 || nx >= width || ny < 0 || ny >= height) result_buffer[y * width + x + c * width * height] = 0;
else result_buffer[y * width + x + c * width * height] = image[ny * width + nx + c * width * height];
}
////////////////////////////////////////////////////////////
__global__ void knl_image_scale(float* image, float* result_buffer, const int width,
const int height, const int channels, const float scale, const int center_x, const int center_y)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= width * height * channels) return;
const int c = tid / (width * height);
const int x = (tid % (width * height)) % width;
const int y = (tid % (width * height)) / width;
const int nx = center_x + (x - center_x) * scale;
const int ny = center_y + (y - center_y) * scale;
if (nx < 0 || nx >= width || ny < 0 || ny >= height) result_buffer[y * width + x + c * width * height] = 0;
else result_buffer[y * width + x + c * width * height] = image[ny * width + nx + c * width * height];
}
////////////////////////////////////////////////////////////
__global__ void knl_image_add_noise(float* image, const int width, const int height, const int channels, const unsigned int seed, const float noise_probability)
{
extern __shared__ unsigned int tseeds[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= width * height * channels) return;
//seed the per-thread generator (shared memory is uninitialized, so assign instead of accumulate)
tseeds[threadIdx.x] = seed * (tid + 1);
//xorshift32 pseudo random number generator, or magic for short
tseeds[threadIdx.x] ^= tseeds[threadIdx.x] << 13;
tseeds[threadIdx.x] ^= tseeds[threadIdx.x] >> 17;
tseeds[threadIdx.x] ^= tseeds[threadIdx.x] << 5;
const int c = tid / (width * height);
const int x = (tid % (width * height)) % width;
const int y = (tid % (width * height)) / width;
//Apply random data flipping
if ((float)tseeds[threadIdx.x] / ((unsigned int) UINT_MAX) < noise_probability)
image[c * width * height + y * width + x] = 1.f - image[c * width * height + y * width + x];
}
////////////////////////////////////////////////////////////
/// INTERFACE
////////////////////////////////////////////////////////////
//Shortcut
static unsigned int blocks;
static unsigned int threads;
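//The shared-memory reductions in the kernels above assume blockDim.x is a power of two,
//so the launchers below pick their thread counts through the power-of-two helpers that follow.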
//Get nearest lower power of two
unsigned int low_pow2 (unsigned int x)
{
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
return x - (x >> 1);
}
//Get nearest higher power of two
unsigned long high_pow2(unsigned long v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
unsigned long bestmatch_pow2(unsigned long x)
{
unsigned long lp2 = low_pow2(x);
int mismatch_lp2 = x - lp2;
int mismatch_hp2 = lp2*2 - x;
if (mismatch_lp2 < mismatch_hp2) return lp2;
else return lp2 * 2;
}
////////////////////////////////////////////////////////////
void conv_foreward(float* weights, float* bias, float* inputs, float* outputs,
int* out_in_map, int input_width, int input_height, int input_count, int stride,
int output_width, int output_height, int filters_count, int filter_area)
{
dim3 numBlocks(output_width * output_height, filters_count);
threads = min((int)bestmatch_pow2(filter_area * input_count), CUDA_MAX_THREADS) / 4;
if (threads < 2) threads = 2;
knl_conv_foreward<<<numBlocks, threads, threads * sizeof(float)>>>(weights, bias,
inputs, outputs, out_in_map, input_count, output_width * output_height,
input_width * input_height, filter_area, filters_count);
/*
threads = min((int)bestmatch_pow2(output_width * output_height * filters_count), CUDA_MAX_THREADS) / 4;
if (threads < 2) threads = 2;
if (output_width * output_height * filters_count % threads == 0)
blocks = (output_width * output_height * filters_count) / threads;
else
blocks = (output_width * output_height * filters_count) / threads + 1;
knl_conv_foreward<<<blocks, threads, threads * sizeof(float)>>>(weights, bias,
inputs, outputs, out_in_map, input_count, output_width * output_height,
input_width * input_height, filter_area, filters_count);
*/
}
////////////////////////////////////////////////////////////
void conv_backward(float* weights, float* out_errors, float* errors,
int* in_weight_map, int* in_out_map, int input_count, int output_size, int input_width,
int input_height, int filter_area, int filters_count)
{
if (input_width == 0 || input_height == 0) return;
dim3 numBlocks(input_width, input_height, input_count);
threads = min((int)bestmatch_pow2(filter_area * filters_count), CUDA_MAX_THREADS) / 4;
if (threads < 2) threads = 2;
knl_conv_backward<<<numBlocks, threads, threads * sizeof(float)>>>(weights, out_errors,
errors, in_weight_map, in_out_map, input_count, output_size, input_width, input_height,
filter_area, filters_count);
}
////////////////////////////////////////////////////////////
void conv_accumulate_deltas(float* weights_deltas, float* bias_deltas, float* errors,
float* inputs, float* outputs, int* out_in_map, int input_count, int input_width,
int input_height, int output_size, int filter_area, int filters_count)
{
dim3 numBlocks(filters_count, filter_area * input_count + 1);
threads = min((int)bestmatch_pow2(output_size), CUDA_MAX_THREADS) / 4;
if (threads < 2) threads = 2;
knl_conv_accumulate_deltas<<<numBlocks, threads, threads * sizeof(float)>>>(weights_deltas,
bias_deltas, errors, inputs, outputs, out_in_map, input_count, input_width * input_height,
output_size, filter_area, filters_count);
}
////////////////////////////////////////////////////////////
void conv_update_parameters(float* weights, float* bias, float* weights_deltas, float* bias_deltas,
int filter_area, int input_count, int filter_count, float learningrate)
{
threads = min((int)low_pow2((filter_area * input_count + 1) * filter_count), CUDA_MAX_THREADS);
blocks = min((filter_area * input_count + 1) * filter_count / threads + 1, CUDA_MAX_CORES);
knl_conv_update_parameters<<<blocks, threads>>>(weights, bias, weights_deltas,
bias_deltas, filter_area, input_count, filter_count, learningrate);
}
////////////////////////////////////////////////////////////
void maxpooling_foreward(float* inputs, float* outputs, int* maxbuffer, int input_width,
int input_height, int input_count, int stride, int filter_size, int output_width,
int output_height)
{
threads = min((int)low_pow2(output_width * output_height * input_count), CUDA_MAX_THREADS);
blocks = min((output_width * output_height * input_count) / threads + 1, CUDA_MAX_CORES);
knl_maxpooling_foreward<<<blocks, threads, threads * sizeof(float)>>>(inputs, outputs, maxbuffer, input_width,
input_height, input_count, stride, filter_size, output_width, output_height);
}
////////////////////////////////////////////////////////////
void maxpooling_backward(float* out_errors, float* errors, int* maxbuffer, int input_width,
int input_height, int input_count, int stride, int filter_size, int output_width,
int output_height)
{
threads = min((int)low_pow2(output_width * output_height * input_count), CUDA_MAX_THREADS);
blocks = min((output_width * output_height * input_count) / threads + 1, CUDA_MAX_CORES);
knl_maxpooling_backward<<<blocks, threads>>>(out_errors, errors, maxbuffer, input_width,
input_height, input_count, stride, filter_size, output_width, output_height);
}
////////////////////////////////////////////////////////////
void averagepooling_foreward(float* inputs, float* outputs, int input_width, int input_height,
int input_count, int stride, int filter_size, int output_width, int output_height)
{
threads = min((int)low_pow2(output_width * output_height * input_count), CUDA_MAX_THREADS);
blocks = min((output_width * output_height * input_count) / threads + 1, CUDA_MAX_CORES);
knl_averagepooling_foreward<<<blocks, threads, threads * sizeof(float)>>>(inputs, outputs, input_width,
input_height, input_count, stride, filter_size, output_width, output_height);
}
////////////////////////////////////////////////////////////
void averagepooling_backward(float* out_errors, float* errors, int input_width, int input_height,
int input_count, int stride, int filter_size, int output_width, int output_height)
{
threads = min((int)low_pow2(output_width * output_height * input_count), CUDA_MAX_THREADS);
blocks = min((output_width * output_height * input_count) / threads + 1, CUDA_MAX_CORES);
knl_averagepooling_backward<<<blocks, threads>>>(out_errors, errors, input_width,
input_height, input_count, stride, filter_size, output_width, output_height);
}
////////////////////////////////////////////////////////////
void linear_foreward(float* weights, float* bias, float* inputs, float* outputs, int input_size,
int output_size, bool accumulate, bool use_bias)
{
blocks = min(output_size, CUDA_MAX_CORES);
threads = min((int)low_pow2(input_size), CUDA_MAX_THREADS);
knl_linear_foreward<<<blocks, threads, threads * sizeof(float)>>>(weights, bias, inputs,
outputs, input_size, output_size, accumulate, use_bias);
}
////////////////////////////////////////////////////////////
void linear_backward(float* weights, float* out_errors, float* errors, int input_size, int output_size)
{
if (input_size == 0) return;
blocks = min(input_size, CUDA_MAX_CORES);
threads = min((int)low_pow2(output_size), CUDA_MAX_THREADS);
knl_linear_backward<<<blocks, threads, threads * sizeof(float)>>>(weights, out_errors, errors, input_size, output_size);
}
////////////////////////////////////////////////////////////
void linear_accumulate_deltas(float* deltas, float* inputs, float* errors, int input_size, int output_size, bool use_bias)
{
blocks = min(output_size, CUDA_MAX_CORES);
threads = min((int)low_pow2(input_size), CUDA_MAX_THREADS);
knl_linear_accumulate_deltas<<<blocks, threads>>>(deltas, inputs, errors, input_size,
output_size, use_bias);
}
////////////////////////////////////////////////////////////
void linear_update_parameters(float* weights, float* bias, float* deltas, float learningrate, int input_size, int output_size)
{
threads = min((int)low_pow2(input_size * output_size), CUDA_MAX_THREADS);
blocks = (input_size * output_size) / threads + 1;
knl_linear_update_parameters<<<blocks, threads>>>(weights, bias, deltas, learningrate, input_size, output_size);
}
////////////////////////////////////////////////////////////
void sigmoid_foreward(float* inputs, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_sigmoid_foreward<<<blocks, threads>>>(inputs, outputs, size);
}
////////////////////////////////////////////////////////////
void sigmoid_backward(float* errors, float* out_errors, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_sigmoid_backward<<<blocks, threads>>>(errors, out_errors, outputs, size);
}
////////////////////////////////////////////////////////////
void relu_foreward(float* inputs, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_relu_foreward<<<blocks, threads>>>(inputs, outputs, size);
}
////////////////////////////////////////////////////////////
void relu_backward(float* errors, float* out_errors, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_relu_backward<<<blocks, threads>>>(errors, out_errors, outputs, size);
}
////////////////////////////////////////////////////////////
void selu_foreward(float* inputs, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_selu_foreward<<<blocks, threads>>>(inputs, outputs, size);
}
////////////////////////////////////////////////////////////
void selu_backward(float* errors, float* out_errors, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_selu_backward<<<blocks, threads>>>(errors, out_errors, outputs, size);
}
////////////////////////////////////////////////////////////
void tanh_foreward(float* inputs, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_tanh_foreward<<<blocks, threads>>>(inputs, outputs, size);
}
////////////////////////////////////////////////////////////
void tanh_backward(float* errors, float* out_errors, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_tanh_backward<<<blocks, threads>>>(errors, out_errors, outputs, size);
}
////////////////////////////////////////////////////////////
void dropout_foreward(float* inputs, float* outputs, unsigned int seed, float dropout_probability, bool training, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_dropout_foreward<<<blocks, threads, threads * sizeof(unsigned int)>>>(inputs, outputs, seed, dropout_probability, training, size);
}
////////////////////////////////////////////////////////////
void dropout_backward(float* errors, float* out_errors, float* outputs, float dropout_probability, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_dropout_backward<<<blocks, threads>>>(errors, out_errors, outputs, dropout_probability, size);
}
////////////////////////////////////////////////////////////
void softmax_foreward(float* inputs, float* outputs, float scale, int size, float epsilon)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = 1;
knl_softmax_foreward<<<blocks, threads, threads * sizeof(float)>>>(inputs, outputs, scale, size, epsilon);
}
////////////////////////////////////////////////////////////
void softmax_backward(float* errors, float* out_errors, float* outputs, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_softmax_backward<<<blocks, threads>>>(errors, out_errors, outputs, size);
}
////////////////////////////////////////////////////////////
void cost_crossentropy(float* prediction, float* target, float* errors, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_cost_crossentropy<<<blocks, threads, threads * sizeof(float)>>>(prediction, target, errors, size);
}
////////////////////////////////////////////////////////////
void normalization_foreward(float* inputs, float* deviation, float* normalized,
float* outputs, float* variance, float* gamma, float* beta, float epsilon, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_normalization_foreward_1<<<blocks, threads, threads * sizeof(float)>>>(inputs, size);
knl_normalization_foreward_2<<<1, threads, threads * sizeof(float)>>>(inputs, deviation, size);
knl_normalization_foreward_3<<<blocks, threads, threads * sizeof(float)>>>(inputs, deviation, size);
knl_normalization_foreward_4<<<1, threads, threads * sizeof(float)>>>(variance, size);
knl_normalization_foreward_5<<<blocks, threads>>>(inputs, deviation, normalized, outputs, variance, gamma, beta, epsilon, size);
}
////////////////////////////////////////////////////////////
void normalization_backward(float* errors, float* out_errors, float* deviation,
float* variance, float* gamma, float* beta, float epsilon, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_normalization_backward_1<<<blocks, threads, threads * sizeof(float)>>>(errors, deviation, size);
knl_normalization_backward_2<<<1, threads, threads * sizeof(float)>>>();
knl_normalization_backward_3<<<blocks, threads>>>(errors, out_errors, deviation, gamma, beta, variance, epsilon, size);
}
////////////////////////////////////////////////////////////
void normalization_accumulate_deltas(float* errors, float* deviation, float* variance, float* d_gamma, float* d_beta, float epsilon, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = 1;
knl_normalization_accumulate_deltas<<<blocks, threads, threads * sizeof(float)>>>(errors, deviation, variance, epsilon, d_beta, d_gamma, size);
}
////////////////////////////////////////////////////////////
void normalization_update_parameters(float* gamma, float* beta, float* d_gamma, float* d_beta, float momentum, int size, float learningrate)
{
threads = 1;
blocks = 1;
knl_normalization_update_parameters<<<blocks, threads>>>(beta, gamma, d_beta, d_gamma, momentum, size, learningrate);
}
////////////////////////////////////////////////////////////
void sparse_indices(float* inputs, int input_size, int* indices, int* tmp_indices, int* indices_count)
{
threads = 16;
blocks = 1;
int chunck_size = input_size / threads + 1;
knl_sparse_indices<<<blocks, threads, (threads * 2) * sizeof(unsigned int)>>>(inputs, input_size, indices, tmp_indices, indices_count, chunck_size);
}
////////////////////////////////////////////////////////////
void linear_sparse_foreward(float* weights, float* bias, float* inputs, float* outputs, int* indices, int* indices_count, int input_size, int output_size)
{
int host_indices_count;
cudaMemcpy(&host_indices_count, indices_count, sizeof(int), cudaMemcpyDeviceToHost);
blocks = min(output_size, CUDA_MAX_CORES);
threads = min((int)low_pow2(host_indices_count), CUDA_MAX_THREADS);
if (threads < 2) threads = 2; //low_pow2 returns 0 when no indices are active
knl_linear_sparse_foreward<<<blocks, threads, threads * sizeof(float)>>>(weights, bias, inputs, outputs, indices, indices_count, input_size, output_size);
}
////////////////////////////////////////////////////////////
void linear_sparse_accumulate_deltas(float* deltas, float* inputs, float* errors, int* indices, int* indices_count, int input_size, int output_size)
{
int host_indices_count;
cudaMemcpy(&host_indices_count, indices_count, sizeof(int), cudaMemcpyDeviceToHost);
blocks = min(output_size, CUDA_MAX_CORES);
threads = min((int)low_pow2(host_indices_count), CUDA_MAX_THREADS);
if (threads < 2) threads = 2; //low_pow2 returns 0 when no indices are active
knl_linear_sparse_accumulate_deltas<<<blocks, threads>>>(deltas, inputs, errors, indices, indices_count, input_size, output_size);
}
////////////////////////////////////////////////////////////
void concatenate_foreward(float** inputs, float* outputs, int* sizes, int input_count, int total_size)
{
threads = min((int)low_pow2(total_size), CUDA_MAX_THREADS);
blocks = min(total_size / threads + 1, CUDA_MAX_CORES);
knl_concatenate_foreward<<<blocks, threads>>>(inputs, outputs, sizes, input_count);
}
////////////////////////////////////////////////////////////
void concatenate_backward(float* errors, float** out_errors, int* sizes, int input_count, int total_size)
{
threads = min((int)low_pow2(total_size), CUDA_MAX_THREADS);
blocks = min(total_size / threads + 1, CUDA_MAX_CORES);
knl_concatenate_backward<<<blocks, threads>>>(errors, out_errors, sizes, input_count);
}
////////////////////////////////////////////////////////////
void gradient_clipping(float* deltas, int size, const float clipping_deviation)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_gradient_clipping<<<blocks, threads>>>(deltas, size, clipping_deviation);
}
////////////////////////////////////////////////////////////
void l1_regularization(float* weights, const float l1_factor, const float learningrate, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_l1_regularization<<<blocks, threads>>>(weights, l1_factor, learningrate, size);
}
////////////////////////////////////////////////////////////
void l2_regularization(float* weights, const float l2_factor, const float learningrate, int size)
{
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_l2_regularization<<<blocks, threads>>>(weights, l2_factor, learningrate, size);
}
////////////////////////////////////////////////////////////
void image_translate(float* image, float* result_buffer, const int width, const int height, const int channels, const int by_x, const int by_y)
{
int size = height * width * channels;
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_image_translate<<<blocks, threads>>>(image, result_buffer, width, height, channels, by_x, by_y);
}
////////////////////////////////////////////////////////////
void image_vertical_flip(float* image, const int width, const int height, const int channels)
{
int size = (height / 2) * width * channels;
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_image_vertical_flip<<<blocks, threads, threads * sizeof(float)>>>(image, width, height, channels);
}
////////////////////////////////////////////////////////////
void image_horizontal_flip(float* image, const int width, const int height, const int channels)
{
int size = (width / 2) * height * channels;
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_image_horizontal_flip<<<blocks, threads, threads * sizeof(float)>>>(image, width, height, channels);
}
////////////////////////////////////////////////////////////
void image_rotate(float* image, float* result_buffer, const int width, const int height, const int channels, const float degrees)
{
int size = height * width * channels;
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
const float angle_rad = degrees * (3.14159/180.f);
const float a = cos(angle_rad);
const float b = sin(angle_rad);
const int wh = width/2.f;
const int hh = height/2.f;
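// Offsets chosen so the rotation is about the image centre (wh, hh): the kernel
// is expected to map each pixel (x, y) to (a*x - b*y + xoffset, b*x + a*y + yoffset),
// i.e. rotate (x - wh, y - hh) by `degrees` and shift back by (wh, hh).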
const int xoffset = wh - (wh * a - hh * b);
const int yoffset = hh - (wh * b + hh * a);
knl_image_rotate<<<blocks, threads>>>(image, result_buffer, width, height, channels, a, b, xoffset, yoffset);
}
////////////////////////////////////////////////////////////
void image_scale(float* image, float* result_buffer, const int width, const int height, const int channels, const float scale_factor)
{
const int size = height * width * channels;
const float scale = 1.f / scale_factor;
const int center_x = width / 2;
const int center_y = height / 2;
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
knl_image_scale<<<blocks, threads>>>(image, result_buffer, width, height, channels, scale, center_x, center_y);
}
////////////////////////////////////////////////////////////
void image_add_noise(float* image, const int width, const int height, const int channels, const float noise_probability)
{
int size = width * height * channels;
threads = min((int)low_pow2(size), CUDA_MAX_THREADS);
blocks = min(size / threads + 1, CUDA_MAX_CORES);
const unsigned int seed = rand();
knl_image_add_noise<<<blocks, threads, threads * sizeof(unsigned int)>>>(image, width, height, channels, seed, noise_probability);
}
} //namespace cuda
} //namespace ai
// ====================  next source file  ====================
#if ( MODEL == HYDRO )
/********************************************************
1. Ideal gas EoS with a constant adiabatic index (EOS_GAMMA)
2. This file is shared by both CPU and GPU
GPU_EoS_Gamma.cu -> CPU_EoS_Gamma.cpp
3. Three steps are required to implement an EoS
I. Set EoS auxiliary arrays
II. Implement EoS conversion functions
III. Set EoS initialization functions
********************************************************/
// =============================================
// I. Set EoS auxiliary arrays
// =============================================
//-------------------------------------------------------------------------------------------------------
// Function : EoS_SetAuxArray_Gamma
// Description : Set the auxiliary arrays AuxArray_Flt/Int[]
//
// AuxArray_Flt[0] = gamma
// AuxArray_Flt[1] = gamma-1
// AuxArray_Flt[2] = 1/(gamma-1)
// AuxArray_Flt[3] = 1/gamma
// AuxArray_Flt[4] = (mean molecular weight)*(atomic mass unit)/(Boltzmann constant)*(UNIT_E/UNIT_M)
// AuxArray_Flt[5] = 1/AuxArray_Flt[4]
//
// Note : 1. Invoked by EoS_Init_Gamma()
// 2. AuxArray_Flt/Int[] have the size of EOS_NAUX_MAX defined in Macro.h (default = 20)
// 3. Add "#ifndef __CUDACC__" since this routine is only useful on CPU
// 4. Physical constants such as Const_amu/Const_kB should be set to unity when disabling OPT__UNIT
// 5. Do not change the order of AuxArray_Flt[]
// --> For example, the dual-energy routines assume AuxArray_Flt[0]=GAMMA
//
// Parameter : AuxArray_Flt/Int : Floating-point/Integer arrays to be filled up
//
// Return : AuxArray_Flt/Int[]
//-------------------------------------------------------------------------------------------------------
#ifndef __CUDACC__
void EoS_SetAuxArray_Gamma( double AuxArray_Flt[], int AuxArray_Int[] )
{
AuxArray_Flt[0] = GAMMA;
AuxArray_Flt[1] = GAMMA - 1.0;
AuxArray_Flt[2] = 1.0 / ( GAMMA - 1.0 );
AuxArray_Flt[3] = 1.0 / GAMMA;
AuxArray_Flt[4] = ( OPT__UNIT ) ? MOLECULAR_WEIGHT * Const_amu / Const_kB * (UNIT_E/UNIT_M)
: MOLECULAR_WEIGHT;
AuxArray_Flt[5] = 1.0 / AuxArray_Flt[4];
} // FUNCTION : EoS_SetAuxArray_Gamma
#endif // #ifndef __CUDACC__
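// Worked example (illustration only, not part of GAMER): for a monatomic ideal
// gas with GAMMA = 5/3 and OPT__UNIT disabled, the arrays above become
//    AuxArray_Flt[0..3] = { 5/3, 2/3, 3/2, 3/5 }
//    AuxArray_Flt[4]    = MOLECULAR_WEIGHT,  AuxArray_Flt[5] = 1/MOLECULAR_WEIGHT
// so that, e.g., Pres = (2/3)*Eint and Cs^2 = (5/3)*Pres/Dens in code units.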
// =============================================
// II. Implement EoS conversion functions
// (1) EoS_DensEint2Pres_*
// (2) EoS_DensPres2Eint_*
// (3) EoS_DensPres2CSqr_*
// (4) EoS_DensEint2Temp_* [OPTIONAL]
// (5) EoS_DensTemp2Pres_* [OPTIONAL]
// (6) EoS_General_* [OPTIONAL]
// =============================================
//-------------------------------------------------------------------------------------------------------
// Function : EoS_DensEint2Pres_Gamma
// Description : Convert gas mass density and internal energy density to gas pressure
//
// Note : 1. Internal energy density here is per unit volume instead of per unit mass
// 2. See EoS_SetAuxArray_Gamma() for the values stored in AuxArray_Flt/Int[]
//
// Parameter : Dens : Gas mass density
// Eint : Gas internal energy density
// Passive : Passive scalars (must not be used here)
// AuxArray_* : Auxiliary arrays (see the Note above)
// Table : EoS tables
//
// Return : Gas pressure
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE_NOINLINE
static real EoS_DensEint2Pres_Gamma( const real Dens, const real Eint, const real Passive[],
const double AuxArray_Flt[], const int AuxArray_Int[],
const real *const Table[EOS_NTABLE_MAX] )
{
// check
# ifdef GAMER_DEBUG
if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ );
if ( Hydro_CheckNegative(Dens) )
printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Dens, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Eint) )
printf( "ERROR : invalid input internal energy density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Eint, __FILE__, __LINE__, __FUNCTION__ );
# endif // GAMER_DEBUG
const real Gamma_m1 = (real)AuxArray_Flt[1];
real Pres;
Pres = Eint * Gamma_m1;
return Pres;
} // FUNCTION : EoS_DensEint2Pres_Gamma
//-------------------------------------------------------------------------------------------------------
// Function : EoS_DensPres2Eint_Gamma
// Description : Convert gas mass density and pressure to gas internal energy density
//
// Note : 1. See EoS_DensEint2Pres_Gamma()
//
// Parameter : Dens : Gas mass density
// Pres : Gas pressure
// Passive : Passive scalars (must not be used here)
// AuxArray_* : Auxiliary arrays (see the Note above)
// Table : EoS tables
//
// Return : Gas internal energy density
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE_NOINLINE
static real EoS_DensPres2Eint_Gamma( const real Dens, const real Pres, const real Passive[],
const double AuxArray_Flt[], const int AuxArray_Int[],
const real *const Table[EOS_NTABLE_MAX] )
{
// check
# ifdef GAMER_DEBUG
if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ );
if ( Hydro_CheckNegative(Dens) )
printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Dens, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Pres) )
printf( "ERROR : invalid input pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Pres, __FILE__, __LINE__, __FUNCTION__ );
# endif // GAMER_DEBUG
const real _Gamma_m1 = (real)AuxArray_Flt[2];
real Eint;
Eint = Pres * _Gamma_m1;
return Eint;
} // FUNCTION : EoS_DensPres2Eint_Gamma
//-------------------------------------------------------------------------------------------------------
// Function : EoS_DensPres2CSqr_Gamma
// Description : Convert gas mass density and pressure to sound speed squared
//
// Note : 1. See EoS_DensEint2Pres_Gamma()
//
// Parameter : Dens : Gas mass density
// Pres : Gas pressure
// Passive : Passive scalars (must not be used here)
// AuxArray_* : Auxiliary arrays (see the Note above)
// Table : EoS tables
//
// Return : Sound speed squared
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE_NOINLINE
static real EoS_DensPres2CSqr_Gamma( const real Dens, const real Pres, const real Passive[],
const double AuxArray_Flt[], const int AuxArray_Int[],
const real *const Table[EOS_NTABLE_MAX] )
{
// check
# ifdef GAMER_DEBUG
if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ );
if ( Hydro_CheckNegative(Dens) )
printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Dens, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Pres) )
printf( "ERROR : invalid input pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Pres, __FILE__, __LINE__, __FUNCTION__ );
# endif // GAMER_DEBUG
const real Gamma = (real)AuxArray_Flt[0];
real Cs2;
Cs2 = Gamma * Pres / Dens;
return Cs2;
} // FUNCTION : EoS_DensPres2CSqr_Gamma
//-------------------------------------------------------------------------------------------------------
// Function : EoS_DensEint2Temp_Gamma
// Description : Convert gas mass density and internal energy density to gas temperature
//
// Note : 1. Internal energy density here is per unit volume instead of per unit mass
// 2. See EoS_SetAuxArray_Gamma() for the values stored in AuxArray_Flt/Int[]
// 3. Temperature is in kelvin
//
// Parameter : Dens : Gas mass density
// Eint : Gas internal energy density
// Passive : Passive scalars (must not be used here)
// AuxArray_* : Auxiliary arrays (see the Note above)
// Table : EoS tables
//
// Return : Gas temperature in kelvin
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE_NOINLINE
static real EoS_DensEint2Temp_Gamma( const real Dens, const real Eint, const real Passive[],
const double AuxArray_Flt[], const int AuxArray_Int[],
const real *const Table[EOS_NTABLE_MAX] )
{
// check
# ifdef GAMER_DEBUG
if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ );
if ( Hydro_CheckNegative(Dens) )
printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Dens, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Eint) )
printf( "ERROR : invalid input internal energy density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Eint, __FILE__, __LINE__, __FUNCTION__ );
# endif // GAMER_DEBUG
const real Gamma_m1 = (real)AuxArray_Flt[1];
const real m_kB = (real)AuxArray_Flt[4];
real Pres, Temp;
Pres = Eint * Gamma_m1;
Temp = m_kB * Pres / Dens;
return Temp;
} // FUNCTION : EoS_DensEint2Temp_Gamma
//-------------------------------------------------------------------------------------------------------
// Function : EoS_DensTemp2Pres_Gamma
// Description : Convert gas mass density and temperature to gas pressure
//
// Note : 1. See EoS_SetAuxArray_Gamma() for the values stored in AuxArray_Flt/Int[]
// 2. Temperature is in kelvin
//
// Parameter : Dens : Gas mass density
// Temp : Gas temperature in kelvin
// Passive : Passive scalars (must not be used here)
// AuxArray_* : Auxiliary arrays (see the Note above)
// Table : EoS tables
//
// Return : Gas pressure
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE_NOINLINE
static real EoS_DensTemp2Pres_Gamma( const real Dens, const real Temp, const real Passive[],
const double AuxArray_Flt[], const int AuxArray_Int[],
const real *const Table[EOS_NTABLE_MAX] )
{
// check
# ifdef GAMER_DEBUG
if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ );
if ( Hydro_CheckNegative(Dens) )
printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Dens, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Temp) )
printf( "ERROR : invalid input temperature (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Temp, __FILE__, __LINE__, __FUNCTION__ );
# endif // GAMER_DEBUG
const real _m_kB = (real)AuxArray_Flt[5];
real Pres;
Pres = Temp * Dens * _m_kB;
return Pres;
} // FUNCTION : EoS_DensTemp2Pres_Gamma
//-------------------------------------------------------------------------------------------------------
// Function : EoS_General_Gamma
// Description : General EoS converter: In_*[] -> Out[]
//
// Note : 1. See EoS_DensEint2Pres_Gamma()
// 2. In_*[] and Out[] must NOT overlap
// 3. Useless for this EoS
//
// Parameter : Mode : To support multiple modes in this general converter
// Out : Output array
// In_* : Input array
// AuxArray_* : Auxiliary arrays (see the Note above)
// Table : EoS tables
//
// Return : Out[]
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE_NOINLINE
static void EoS_General_Gamma( const int Mode, real Out[], const real In_Flt[], const int In_Int[],
const double AuxArray_Flt[], const int AuxArray_Int[],
const real *const Table[EOS_NTABLE_MAX] )
{
// not used by this EoS
} // FUNCTION : EoS_General_Gamma
// =============================================
// III. Set EoS initialization functions
// =============================================
#ifdef __CUDACC__
# define FUNC_SPACE __device__ static
#else
# define FUNC_SPACE static
#endif
FUNC_SPACE EoS_DE2P_t EoS_DensEint2Pres_Ptr = EoS_DensEint2Pres_Gamma;
FUNC_SPACE EoS_DP2E_t EoS_DensPres2Eint_Ptr = EoS_DensPres2Eint_Gamma;
FUNC_SPACE EoS_DP2C_t EoS_DensPres2CSqr_Ptr = EoS_DensPres2CSqr_Gamma;
FUNC_SPACE EoS_DE2T_t EoS_DensEint2Temp_Ptr = EoS_DensEint2Temp_Gamma;
FUNC_SPACE EoS_DT2P_t EoS_DensTemp2Pres_Ptr = EoS_DensTemp2Pres_Gamma;
FUNC_SPACE EoS_GENE_t EoS_General_Ptr = EoS_General_Gamma;
//-----------------------------------------------------------------------------------------
// Function : EoS_SetCPU/GPUFunc_Gamma
// Description : Return the function pointers of the CPU/GPU EoS routines
//
// Note : 1. Invoked by EoS_Init_Gamma()
// 2. Must obtain the CPU and GPU function pointers by **separate** routines
// since CPU and GPU functions are compiled completely separately in GAMER
// --> In other words, a unified routine like the following won't work
//
// EoS_SetFunc_Gamma( CPU_FuncPtr, GPU_FuncPtr );
//
// 3. Call-by-reference
//
// Parameter : EoS_DensEint2Pres_CPU/GPUPtr : CPU/GPU function pointers to be set
// EoS_DensPres2Eint_CPU/GPUPtr : ...
// EoS_DensPres2CSqr_CPU/GPUPtr : ...
// EoS_DensEint2Temp_CPU/GPUPtr : ...
// EoS_DensTemp2Pres_CPU/GPUPtr : ...
// EoS_General_CPU/GPUPtr : ...
//
// Return : EoS_DensEint2Pres_CPU/GPUPtr, EoS_DensPres2Eint_CPU/GPUPtr,
// EoS_DensPres2CSqr_CPU/GPUPtr, EoS_DensEint2Temp_CPU/GPUPtr,
// EoS_DensTemp2Pres_CPU/GPUPtr, EoS_General_CPU/GPUPtr
//-----------------------------------------------------------------------------------------
#ifdef __CUDACC__
__host__
void EoS_SetGPUFunc_Gamma( EoS_DE2P_t &EoS_DensEint2Pres_GPUPtr,
EoS_DP2E_t &EoS_DensPres2Eint_GPUPtr,
EoS_DP2C_t &EoS_DensPres2CSqr_GPUPtr,
EoS_DE2T_t &EoS_DensEint2Temp_GPUPtr,
EoS_DT2P_t &EoS_DensTemp2Pres_GPUPtr,
EoS_GENE_t &EoS_General_GPUPtr )
{
CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensEint2Pres_GPUPtr, EoS_DensEint2Pres_Ptr, sizeof(EoS_DE2P_t) ) );
CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensPres2Eint_GPUPtr, EoS_DensPres2Eint_Ptr, sizeof(EoS_DP2E_t) ) );
CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensPres2CSqr_GPUPtr, EoS_DensPres2CSqr_Ptr, sizeof(EoS_DP2C_t) ) );
CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensEint2Temp_GPUPtr, EoS_DensEint2Temp_Ptr, sizeof(EoS_DE2T_t) ) );
CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensTemp2Pres_GPUPtr, EoS_DensTemp2Pres_Ptr, sizeof(EoS_DT2P_t) ) );
CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_General_GPUPtr, EoS_General_Ptr, sizeof(EoS_GENE_t) ) );
}
#else // #ifdef __CUDACC__
void EoS_SetCPUFunc_Gamma( EoS_DE2P_t &EoS_DensEint2Pres_CPUPtr,
EoS_DP2E_t &EoS_DensPres2Eint_CPUPtr,
EoS_DP2C_t &EoS_DensPres2CSqr_CPUPtr,
EoS_DE2T_t &EoS_DensEint2Temp_CPUPtr,
EoS_DT2P_t &EoS_DensTemp2Pres_CPUPtr,
EoS_GENE_t &EoS_General_CPUPtr )
{
EoS_DensEint2Pres_CPUPtr = EoS_DensEint2Pres_Ptr;
EoS_DensPres2Eint_CPUPtr = EoS_DensPres2Eint_Ptr;
EoS_DensPres2CSqr_CPUPtr = EoS_DensPres2CSqr_Ptr;
EoS_DensEint2Temp_CPUPtr = EoS_DensEint2Temp_Ptr;
EoS_DensTemp2Pres_CPUPtr = EoS_DensTemp2Pres_Ptr;
EoS_General_CPUPtr = EoS_General_Ptr;
}
#endif // #ifdef __CUDACC__ ... else ...
#ifndef __CUDACC__
// local function prototypes
void EoS_SetAuxArray_Gamma( double [], int [] );
void EoS_SetCPUFunc_Gamma( EoS_DE2P_t &, EoS_DP2E_t &, EoS_DP2C_t &, EoS_DE2T_t &, EoS_DT2P_t &, EoS_GENE_t & );
#ifdef GPU
void EoS_SetGPUFunc_Gamma( EoS_DE2P_t &, EoS_DP2E_t &, EoS_DP2C_t &, EoS_DE2T_t &, EoS_DT2P_t &, EoS_GENE_t & );
#endif
//-----------------------------------------------------------------------------------------
// Function : EoS_Init_Gamma
// Description : Initialize EoS
//
// Note : 1. Set auxiliary arrays by invoking EoS_SetAuxArray_*()
// --> It will be copied to GPU automatically in CUAPI_SetConstMemory()
// 2. Set the CPU/GPU EoS routines by invoking EoS_SetCPU/GPUFunc_*()
// 3. Invoked by EoS_Init()
// --> Enable it by linking to the function pointer "EoS_Init_Ptr"
// 4. Add "#ifndef __CUDACC__" since this routine is only useful on CPU
//
// Parameter : None
//
// Return : None
//-----------------------------------------------------------------------------------------
void EoS_Init_Gamma()
{
EoS_SetAuxArray_Gamma( EoS_AuxArray_Flt, EoS_AuxArray_Int );
EoS_SetCPUFunc_Gamma( EoS_DensEint2Pres_CPUPtr, EoS_DensPres2Eint_CPUPtr,
EoS_DensPres2CSqr_CPUPtr, EoS_DensEint2Temp_CPUPtr,
EoS_DensTemp2Pres_CPUPtr, EoS_General_CPUPtr );
# ifdef GPU
EoS_SetGPUFunc_Gamma( EoS_DensEint2Pres_GPUPtr, EoS_DensPres2Eint_GPUPtr,
EoS_DensPres2CSqr_GPUPtr, EoS_DensEint2Temp_GPUPtr,
EoS_DensTemp2Pres_GPUPtr, EoS_General_GPUPtr );
# endif
} // FUNCTION : EoS_Init_Gamma
#endif // #ifndef __CUDACC__
#endif // #if ( MODEL == HYDRO )
// ====================  next source file  ====================
#include <stdio.h>
#include "helper_math.h"
#include "matrix_math.h"
#include "atmosphere/constants.h"
#include "atmosphere/definitions.h"
//#include <assert.h>
#define COMBINED_SCATTERING_TEXTURES
__device__ float ClampCosine(float mu)
{
return clamp(mu, float(-1.0), float(1.0));
}
__device__ float ClampDistance(float d)
{
return fmaxf(d, 0.0 * m);
}
__device__ float ClampRadius(const AtmosphereParameters atmosphere, float r)
{
return clamp(r, atmosphere.bottom_radius, atmosphere.top_radius);
}
__device__ float SafeSqrt(float a)
{
return sqrtf(fmaxf(a, 0.0 * m2()));
}
__device__ float DistanceToTopAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu)
{
float discriminant = r * r * (mu * mu - 1.0) + atmosphere.top_radius * atmosphere.top_radius;
return ClampDistance(-r * mu + SafeSqrt(discriminant));
}
__device__ float DistanceToBottomAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu)
{
float discriminant = r * r * (mu * mu - 1.0) + atmosphere.bottom_radius * atmosphere.bottom_radius;
return ClampDistance(-r * mu - SafeSqrt(discriminant));
}
__device__ bool RayIntersectsGround(const AtmosphereParameters atmosphere, float r, float mu)
{
return mu < 0.0 && r * r * (mu * mu - 1.0) +
atmosphere.bottom_radius * atmosphere.bottom_radius >= 0.0 * m2();
}
__device__ float GetLayerDensity(const DensityProfileLayer layer, float altitude)
{
float density = layer.exp_term * exp(layer.exp_scale * altitude) +
layer.linear_term * altitude + layer.const_term;
return clamp(density, float(0.0), float(1.0));
}
__device__ float GetProfileDensity(const DensityProfile profile, float altitude)
{
return altitude < profile.layers[0].width ? GetLayerDensity(profile.layers[0], altitude) : GetLayerDensity(profile.layers[1], altitude);
}
__device__ float ComputeOpticalLengthToTopAtmosphereBoundary( const AtmosphereParameters atmosphere, const DensityProfile profile, float r, float mu)
{
// Number of intervals for the numerical integration.
const int SAMPLE_COUNT = 500;
// The integration step, i.e. the length of each integration interval.
float dx = DistanceToTopAtmosphereBoundary(atmosphere, r, mu) / float(SAMPLE_COUNT);
// Integration loop.
float result = 0.0 * m;
for (int i = 0; i <= SAMPLE_COUNT; ++i) {
float d_i = float(i) * dx;
// Distance between the current sample point and the planet center.
float r_i = sqrt(d_i * d_i + 2.0 * r * mu * d_i + r * r);
// Number density at the current sample point (divided by the number density
// at the bottom of the atmosphere, yielding a dimensionless number).
float y_i = GetProfileDensity(profile, r_i - atmosphere.bottom_radius);
// Sample weight (from the trapezoidal rule).
float weight_i = i == 0 || i == SAMPLE_COUNT ? 0.5 : 1.0;
result += y_i * weight_i * dx;
}
return result;
}
__device__ float3 ComputeTransmittanceToTopAtmosphereBoundary( const AtmosphereParameters atmosphere, float r, float mu)
{
return expf(-(
atmosphere.rayleigh_scattering *
ComputeOpticalLengthToTopAtmosphereBoundary(
atmosphere, atmosphere.rayleigh_density, r, mu) +
atmosphere.mie_extinction *
ComputeOpticalLengthToTopAtmosphereBoundary(
atmosphere, atmosphere.mie_density, r, mu) +
atmosphere.absorption_extinction *
ComputeOpticalLengthToTopAtmosphereBoundary(
atmosphere, atmosphere.absorption_density, r, mu)));
}
__device__ float GetTextureCoordFromUnitRange(float x, int texture_size)
{
return 0.5 / float(texture_size) + x * (1.0 - 1.0 / float(texture_size));
}
__device__ float GetUnitRangeFromTextureCoord(float u, int texture_size)
{
return (u - 0.5 / float(texture_size)) / (1.0 - 1.0 / float(texture_size));
}
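// Example of the half-texel inset implemented above (illustration only): with
// texture_size = 8, GetTextureCoordFromUnitRange() maps 0.0 -> 0.0625 and
// 1.0 -> 0.9375, i.e. the centres of the first and last texels, and
// GetUnitRangeFromTextureCoord() is its exact inverse, so lookups never sample
// the texture border.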
__device__ float2 GetTransmittanceTextureUvFromRMu(const AtmosphereParameters atmosphere, float r, float mu)
{
// Distance to top atmosphere boundary for a horizontal ray at ground level.
float H = sqrtf(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius);
// Distance to the horizon.
float rho = SafeSqrt(r * r - atmosphere.bottom_radius * atmosphere.bottom_radius);
// Distance to the top atmosphere boundary for the ray (r,mu), and its minimum
// and maximum values over all mu - obtained for (r,1) and (r,mu_horizon).
float d = DistanceToTopAtmosphereBoundary(atmosphere, r, mu);
float d_min = atmosphere.top_radius - r;
float d_max = rho + H;
float x_mu = (d - d_min) / (d_max - d_min);
float x_r = rho / H;
return make_float2(GetTextureCoordFromUnitRange(x_mu, TRANSMITTANCE_TEXTURE_WIDTH), GetTextureCoordFromUnitRange(x_r, TRANSMITTANCE_TEXTURE_HEIGHT));
}
__device__ void GetRMuFromTransmittanceTextureUv(const AtmosphereParameters atmosphere, float2 uv, float &r, float &mu)
{
float x_mu = GetUnitRangeFromTextureCoord(uv.x, TRANSMITTANCE_TEXTURE_WIDTH);
float x_r = GetUnitRangeFromTextureCoord(uv.y, TRANSMITTANCE_TEXTURE_HEIGHT);
// Distance to top atmosphere boundary for a horizontal ray at ground level.
float H = sqrt(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius);
// Distance to the horizon, from which we can compute r:
float rho = H * x_r;
r = sqrt(rho * rho + atmosphere.bottom_radius * atmosphere.bottom_radius);
// Distance to the top atmosphere boundary for the ray (r,mu), and its minimum
// and maximum values over all mu - obtained for (r,1) and (r,mu_horizon) -
// from which we can recover mu:
float d_min = atmosphere.top_radius - r;
float d_max = rho + H;
float d = d_min + x_mu * (d_max - d_min);
mu = d == 0.0 * m ? float(1.0) : (H * H - rho * rho - d * d) / (2.0 * r * d);
mu = ClampCosine(mu);
}
__device__ float3 ComputeTransmittanceToTopAtmosphereBoundaryTexture(const AtmosphereParameters atmosphere, float2 frag_coord)
{
const float2 TRANSMITTANCE_TEXTURE_SIZE = make_float2(TRANSMITTANCE_TEXTURE_WIDTH, TRANSMITTANCE_TEXTURE_HEIGHT);
float r;
float mu;
GetRMuFromTransmittanceTextureUv(atmosphere, (frag_coord / TRANSMITTANCE_TEXTURE_SIZE), r, mu);
return ComputeTransmittanceToTopAtmosphereBoundary(atmosphere, r, mu);
}
__device__ float3 GetTransmittanceToTopAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu)
{
float2 uv = GetTransmittanceTextureUvFromRMu(atmosphere, r, mu);
int x = int(floor(uv.x * TRANSMITTANCE_TEXTURE_WIDTH));
int y = int(floor(uv.y * TRANSMITTANCE_TEXTURE_HEIGHT));
int idx = (y * TRANSMITTANCE_TEXTURE_WIDTH) + x;
idx = clamp(idx, 0, TRANSMITTANCE_TEXTURE_WIDTH*TRANSMITTANCE_TEXTURE_HEIGHT - 1);
const float3 texval = make_float3(atmosphere.transmittance_buffer[idx]);
return texval;
}
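// The segment transmittance below is computed as a ratio of transmittances to
// the top of the atmosphere: since T = exp(-optical depth) and optical depths
// add along a ray, T(r -> r_d) = T(r -> top) / T(r_d -> top), with the ray
// mirrored through -mu when it hits the ground. The fminf() guards against
// ratios slightly above 1 caused by interpolation error.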
__device__ float3 GetTransmittance(const AtmosphereParameters atmosphere, float r, float mu, float d, bool ray_r_mu_intersects_ground)
{
float r_d = ClampRadius(atmosphere, sqrt(d * d + 2.0 * r * mu * d + r * r));
float mu_d = ClampCosine((r * mu + d) / r_d);
if (ray_r_mu_intersects_ground) {
return fminf(GetTransmittanceToTopAtmosphereBoundary( atmosphere, r_d, -mu_d) / GetTransmittanceToTopAtmosphereBoundary( atmosphere, r, -mu), make_float3(1.0f));
}
else {
return fminf(GetTransmittanceToTopAtmosphereBoundary(atmosphere, r, mu) / GetTransmittanceToTopAtmosphereBoundary( atmosphere, r_d, mu_d), make_float3(1.0));
}
}
__device__ float3 GetTransmittanceToSun(const AtmosphereParameters atmosphere, float r, float mu_s)
{
float sin_theta_h = atmosphere.bottom_radius / r;
float cos_theta_h = -sqrt(max(1.0 - sin_theta_h * sin_theta_h, 0.0));
return GetTransmittanceToTopAtmosphereBoundary(
atmosphere, r, mu_s) *
smoothstep(-sin_theta_h * atmosphere.sun_angular_radius / rad,
sin_theta_h * atmosphere.sun_angular_radius / rad,
mu_s - cos_theta_h);
}
__device__ void ComputeSingleScatteringIntegrand(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, float d, bool ray_r_mu_intersects_ground, float3 &rayleigh, float3 &mie)
{
float r_d = ClampRadius(atmosphere, sqrt(d * d + 2.0 * r * mu * d + r * r));
float mu_s_d = ClampCosine((r * mu_s + d * nu) / r_d);
float3 transmittance = GetTransmittance( atmosphere, r, mu, d, ray_r_mu_intersects_ground) * GetTransmittanceToSun( atmosphere, r_d, mu_s_d);
rayleigh = transmittance * GetProfileDensity(atmosphere.rayleigh_density, r_d - atmosphere.bottom_radius);
mie = transmittance * GetProfileDensity(atmosphere.mie_density, r_d - atmosphere.bottom_radius);
}
__device__ float DistanceToNearestAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu, bool ray_r_mu_intersects_ground)
{
if (ray_r_mu_intersects_ground) {
return DistanceToBottomAtmosphereBoundary(atmosphere, r, mu);
}
else {
return DistanceToTopAtmosphereBoundary(atmosphere, r, mu);
}
}
__device__ void ComputeSingleScattering(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, bool ray_r_mu_intersects_ground, float3 &rayleigh, float3 &mie)
{
// Number of intervals for the numerical integration.
const int SAMPLE_COUNT = 50;
// The integration step, i.e. the length of each integration interval.
float dx =
DistanceToNearestAtmosphereBoundary(atmosphere, r, mu, ray_r_mu_intersects_ground) / float(SAMPLE_COUNT);
// Integration loop.
float3 rayleigh_sum = make_float3(0.0f);
float3 mie_sum = make_float3(0.0f);
for (int i = 0; i <= SAMPLE_COUNT; ++i) {
float d_i = float(i) * dx;
// The Rayleigh and Mie single scattering at the current sample point.
float3 rayleigh_i;
float3 mie_i;
ComputeSingleScatteringIntegrand(atmosphere, r, mu, mu_s, nu, d_i, ray_r_mu_intersects_ground, rayleigh_i, mie_i);
// Sample weight (from the trapezoidal rule).
float weight_i = (i == 0 || i == SAMPLE_COUNT) ? 0.5 : 1.0;
rayleigh_sum += rayleigh_i * weight_i;
mie_sum += mie_i * weight_i;
}
rayleigh = rayleigh_sum * dx * atmosphere.solar_irradiance * atmosphere.rayleigh_scattering;
mie = mie_sum * dx * atmosphere.solar_irradiance * atmosphere.mie_scattering;
}
__device__ float RayleighPhaseFunction(float nu)
{
float k = 3.0 / (16.0 * PI * sr);
return k * (1.0 + nu * nu);
}
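// Normalisation note: integrating k * (1 + nu * nu) over the whole sphere gives
// (3 / (16 * PI)) * (4 * PI + 4 * PI / 3) = 1, so the Rayleigh phase function
// above is normalised per steradian.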
__device__ float MiePhaseFunction(float g, float nu)
{
float k = 3.0 / (8.0 * PI * sr) * (1.0 - g * g) / (2.0 + g * g);
return k * (1.0 + nu * nu) / pow(1.0 + g * g - 2.0 * g * nu, 1.5);
}
__device__ float4 GetScatteringTextureUvwzFromRMuMuSNu(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, bool ray_r_mu_intersects_ground)
{
// Distance to top atmosphere boundary for a horizontal ray at ground level.
float H = sqrt(atmosphere.top_radius * atmosphere.top_radius -
atmosphere.bottom_radius * atmosphere.bottom_radius);
// Distance to the horizon.
float rho =
SafeSqrt(r * r - atmosphere.bottom_radius * atmosphere.bottom_radius);
float u_r = GetTextureCoordFromUnitRange(rho / H, SCATTERING_TEXTURE_R_SIZE);
// Discriminant of the quadratic equation for the intersections of the ray
// (r,mu) with the ground (see RayIntersectsGround).
float r_mu = r * mu;
float discriminant =
r_mu * r_mu - r * r + atmosphere.bottom_radius * atmosphere.bottom_radius;
float u_mu;
if (ray_r_mu_intersects_ground) {
// Distance to the ground for the ray (r,mu), and its minimum and maximum
// values over all mu - obtained for (r,-1) and (r,mu_horizon).
float d = -r_mu - SafeSqrt(discriminant);
float d_min = r - atmosphere.bottom_radius;
float d_max = rho;
u_mu = 0.5 - 0.5 * GetTextureCoordFromUnitRange(d_max == d_min ? 0.0 :
(d - d_min) / (d_max - d_min), SCATTERING_TEXTURE_MU_SIZE / 2);
}
else {
// Distance to the top atmosphere boundary for the ray (r,mu), and its
// minimum and maximum values over all mu - obtained for (r,1) and
// (r,mu_horizon).
float d = -r_mu + SafeSqrt(discriminant + H * H);
float d_min = atmosphere.top_radius - r;
float d_max = rho + H;
u_mu = 0.5 + 0.5 * GetTextureCoordFromUnitRange(
(d - d_min) / (d_max - d_min), SCATTERING_TEXTURE_MU_SIZE / 2);
}
float d = DistanceToTopAtmosphereBoundary(
atmosphere, atmosphere.bottom_radius, mu_s);
float d_min = atmosphere.top_radius - atmosphere.bottom_radius;
float d_max = H;
float a = (d - d_min) / (d_max - d_min);
float A =
-2.0 * atmosphere.mu_s_min * atmosphere.bottom_radius / (d_max - d_min);
float u_mu_s = GetTextureCoordFromUnitRange(
max(1.0 - a / A, 0.0) / (1.0 + a), SCATTERING_TEXTURE_MU_S_SIZE);
float u_nu = (nu + 1.0) / 2.0;
return make_float4(u_nu, u_mu_s, u_mu, u_r);
}
__device__ void GetRMuMuSNuFromScatteringTextureUvwz(const AtmosphereParameters atmosphere, float4 uvwz, float &r, float &mu, float &mu_s, float &nu, bool &ray_r_mu_intersects_ground)
{
// Distance to top atmosphere boundary for a horizontal ray at ground level.
float H = sqrt(atmosphere.top_radius * atmosphere.top_radius -
atmosphere.bottom_radius * atmosphere.bottom_radius);
// Distance to the horizon.
float rho =
H * GetUnitRangeFromTextureCoord(uvwz.w, SCATTERING_TEXTURE_R_SIZE);
r = sqrt(rho * rho + atmosphere.bottom_radius * atmosphere.bottom_radius);
if (uvwz.z < 0.5) {
// Distance to the ground for the ray (r,mu), and its minimum and maximum
// values over all mu - obtained for (r,-1) and (r,mu_horizon) - from which
// we can recover mu:
float d_min = r - atmosphere.bottom_radius;
float d_max = rho;
float d = d_min + (d_max - d_min) * GetUnitRangeFromTextureCoord(
1.0 - 2.0 * uvwz.z, SCATTERING_TEXTURE_MU_SIZE / 2);
mu = d == 0.0 * m ? float(-1.0) :
ClampCosine(-(rho * rho + d * d) / (2.0 * r * d));
ray_r_mu_intersects_ground = true;
}
else {
// Distance to the top atmosphere boundary for the ray (r,mu), and its
// minimum and maximum values over all mu - obtained for (r,1) and
// (r,mu_horizon) - from which we can recover mu:
float d_min = atmosphere.top_radius - r;
float d_max = rho + H;
float d = d_min + (d_max - d_min) * GetUnitRangeFromTextureCoord(
2.0 * uvwz.z - 1.0, SCATTERING_TEXTURE_MU_SIZE / 2);
mu = d == 0.0 * m ? float(1.0) :
ClampCosine((H * H - rho * rho - d * d) / (2.0 * r * d));
ray_r_mu_intersects_ground = false;
}
float x_mu_s =
GetUnitRangeFromTextureCoord(uvwz.y, SCATTERING_TEXTURE_MU_S_SIZE);
float d_min = atmosphere.top_radius - atmosphere.bottom_radius;
float d_max = H;
float A =
-2.0 * atmosphere.mu_s_min * atmosphere.bottom_radius / (d_max - d_min);
float a = (A - x_mu_s * A) / (1.0 + x_mu_s * A);
float d = d_min + min(a, A) * (d_max - d_min);
mu_s = d == 0.0 * m ? float(1.0) :
ClampCosine((H * H - d * d) / (2.0 * atmosphere.bottom_radius * d));
nu = ClampCosine(uvwz.x * 2.0 - 1.0);
}
__device__ void GetRMuMuSNuFromScatteringTextureFragCoord(const AtmosphereParameters atmosphere, float3 frag_coord, float& r, float& mu, float& mu_s, float& nu, bool& ray_r_mu_intersects_ground) {
const float4 SCATTERING_TEXTURE_SIZE = make_float4(SCATTERING_TEXTURE_NU_SIZE - 1, SCATTERING_TEXTURE_MU_S_SIZE, SCATTERING_TEXTURE_MU_SIZE, SCATTERING_TEXTURE_R_SIZE);
float frag_coord_nu = floor(frag_coord.x / float(SCATTERING_TEXTURE_MU_S_SIZE));
float frag_coord_mu_s = fmodf(frag_coord.x, float(SCATTERING_TEXTURE_MU_S_SIZE));
float4 uvwz = make_float4(frag_coord_nu, frag_coord_mu_s, frag_coord.y, frag_coord.z) / SCATTERING_TEXTURE_SIZE;
GetRMuMuSNuFromScatteringTextureUvwz(atmosphere, uvwz, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
// Clamp nu to its valid range of values, given mu and mu_s.
nu = clamp(nu, mu * mu_s - sqrt((1.0 - mu * mu) * (1.0 - mu_s * mu_s)), mu * mu_s + sqrt((1.0 - mu * mu) * (1.0 - mu_s * mu_s)));
}
__device__ void ComputeSingleScatteringTexture(const AtmosphereParameters atmosphere, float3 frag_coord, float3& rayleigh, float3& mie) {
float r;
float mu;
float mu_s;
float nu;
bool ray_r_mu_intersects_ground;
GetRMuMuSNuFromScatteringTextureFragCoord(atmosphere, frag_coord, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
ComputeSingleScattering(atmosphere, r, mu, mu_s, nu, ray_r_mu_intersects_ground, rayleigh, mie);
}
__device__ float3 GetScattering(const AtmosphereParameters atmosphere, float4 *scattering_buffer, float r, float mu, float mu_s, float nu, bool ray_r_mu_intersects_ground)
{
float4 uvwz = GetScatteringTextureUvwzFromRMuMuSNu(atmosphere, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
float tex_coord_x = uvwz.x * float(SCATTERING_TEXTURE_NU_SIZE - 1);
float tex_x = floor(tex_coord_x);
float lerp = tex_coord_x - tex_x;
float3 uvw0 = make_float3((tex_x + uvwz.y) / float(SCATTERING_TEXTURE_NU_SIZE), uvwz.z, uvwz.w);
float3 uvw1 = make_float3((tex_x + 1.0 + uvwz.y) / float(SCATTERING_TEXTURE_NU_SIZE), uvwz.z, uvwz.w);
int3 uvw0_i = make_int3(uvw0.x * SCATTERING_TEXTURE_WIDTH, uvw0.y * SCATTERING_TEXTURE_HEIGHT, uvw0.z * SCATTERING_TEXTURE_DEPTH);
int3 uvw1_i = make_int3(uvw1.x * SCATTERING_TEXTURE_WIDTH, uvw1.y * SCATTERING_TEXTURE_HEIGHT, uvw1.z * SCATTERING_TEXTURE_DEPTH);
int index0 = uvw0_i.x + SCATTERING_TEXTURE_WIDTH * (uvw0_i.y + SCATTERING_TEXTURE_HEIGHT * uvw0_i.z);
int index1 = uvw1_i.x + SCATTERING_TEXTURE_WIDTH * (uvw1_i.y + SCATTERING_TEXTURE_HEIGHT * uvw1_i.z);
const float4 val1 = scattering_buffer[index0];
const float4 val2 = scattering_buffer[index1];
return float3(make_float3(val1) * (1.0 - lerp) + make_float3(val2) * lerp);
}
__device__ float3 GetScattering(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, bool ray_r_mu_intersects_ground, int scattering_order)
{
if (scattering_order == 1) {
float3 rayleigh = GetScattering(atmosphere, atmosphere.delta_rayleigh_scattering_buffer, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
float3 mie = GetScattering(atmosphere, atmosphere.delta_mie_scattering_buffer, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
return rayleigh * RayleighPhaseFunction(nu) + mie * MiePhaseFunction(atmosphere.mie_phase_function_g, nu);
}
else {
return GetScattering(atmosphere, atmosphere.scattering_buffer, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
}
}
__device__ float3 GetIrradiance(const AtmosphereParameters atmosphere, float r, float mu_s);
__device__ float3 ComputeScatteringDensity(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, int scattering_order)
{
// Compute unit direction vectors for the zenith, the view direction omega and
// the sun direction omega_s, such that the cosine of the view-zenith
// angle is mu, the cosine of the sun-zenith angle is mu_s, and the cosine of
// the view-sun angle is nu. The goal is to simplify computations below.
float3 zenith_direction = make_float3(0.0, 0.0, 1.0);
float3 omega = make_float3(sqrt(1.0f - mu * mu), 0.0, mu);
float sun_dir_x = omega.x == 0.0 ? 0.0 : (nu - mu * mu_s) / omega.x;
float sun_dir_y = sqrt(max(1.0 - sun_dir_x * sun_dir_x - mu_s * mu_s, 0.0));
float3 omega_s = make_float3(sun_dir_x, sun_dir_y, mu_s);
const int SAMPLE_COUNT = 16;
const float dphi = pi() / float(SAMPLE_COUNT);
const float dtheta = pi() / float(SAMPLE_COUNT);
float3 rayleigh_mie =
make_float3(0.0f * watt_per_cubic_meter_per_sr_per_nm());
// Nested loops for the integral over all the incident directions omega_i.
for (int l = 0; l < SAMPLE_COUNT; ++l) {
float theta = (float(l) + 0.5) * dtheta;
float cos_theta = cos(theta);
float sin_theta = sin(theta);
bool ray_r_theta_intersects_ground =
RayIntersectsGround(atmosphere, r, cos_theta);
// The distance and transmittance to the ground only depend on theta, so we
// can compute them in the outer loop for efficiency.
float distance_to_ground = 0.0 * m;
float3 transmittance_to_ground = make_float3(0.0f);
float3 ground_albedo = make_float3(0.0f);
if (ray_r_theta_intersects_ground) {
distance_to_ground = DistanceToBottomAtmosphereBoundary(atmosphere, r, cos_theta);
transmittance_to_ground = GetTransmittance(atmosphere, r, cos_theta, distance_to_ground, true);
ground_albedo = atmosphere.ground_albedo;
}
for (int m = 0; m < 2 * SAMPLE_COUNT; ++m) {
float phi = (float(m) + 0.5) * dphi;
float3 omega_i = make_float3(cos(phi) * sin_theta, sin(phi) * sin_theta, cos_theta);
float domega_i = (dtheta / rad) * (dphi / rad) * sin(theta) * sr;
// The radiance L_i arriving from direction omega_i after n-1 bounces is
// the sum of a term given by the precomputed scattering texture for the
// (n-1)-th order:
float nu1 = dot(omega_s, omega_i);
float3 incident_radiance = GetScattering(atmosphere, r, omega_i.z, mu_s, nu1, ray_r_theta_intersects_ground, scattering_order - 1);
// and of the contribution from the light paths with n-1 bounces and whose
// last bounce is on the ground. This contribution is the product of the
// transmittance to the ground, the ground albedo, the ground BRDF, and
// the irradiance received on the ground after n-2 bounces.
float3 ground_normal = normalize(zenith_direction * r + omega_i * distance_to_ground);
float3 ground_irradiance = GetIrradiance( atmosphere, atmosphere.bottom_radius, dot(ground_normal, omega_s));
incident_radiance += transmittance_to_ground * ground_albedo * (1.0 / (PI * sr)) * ground_irradiance;
// The radiance finally scattered from direction omega_i towards direction
// -omega is the product of the incident radiance, the scattering
// coefficient, and the phase function for directions omega and omega_i
// (all this summed over all particle types, i.e. Rayleigh and Mie).
float nu2 = dot(omega, omega_i);
float rayleigh_density = GetProfileDensity( atmosphere.rayleigh_density, r - atmosphere.bottom_radius);
float mie_density = GetProfileDensity( atmosphere.mie_density, r - atmosphere.bottom_radius);
rayleigh_mie += incident_radiance * (
atmosphere.rayleigh_scattering * rayleigh_density *
RayleighPhaseFunction(nu2) +
atmosphere.mie_scattering * mie_density *
MiePhaseFunction(atmosphere.mie_phase_function_g, nu2)) *
domega_i;
}
}
return rayleigh_mie;
}
__device__ float3 ComputeMultipleScattering(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, bool ray_r_mu_intersects_ground)
{
// Number of intervals for the numerical integration.
const int SAMPLE_COUNT = 50;
// The integration step, i.e. the length of each integration interval.
float dx =
DistanceToNearestAtmosphereBoundary(
atmosphere, r, mu, ray_r_mu_intersects_ground) /
float(SAMPLE_COUNT);
// Integration loop.
float3 rayleigh_mie_sum =
make_float3(0.0f * watt_per_square_meter_per_sr_per_nm());
for (int i = 0; i <= SAMPLE_COUNT; ++i) {
float d_i = float(i) * dx;
// The r, mu and mu_s parameters at the current integration point (see the
// single scattering section for a detailed explanation).
float r_i =
ClampRadius(atmosphere, sqrt(d_i * d_i + 2.0 * r * mu * d_i + r * r));
float mu_i = ClampCosine((r * mu + d_i) / r_i);
float mu_s_i = ClampCosine((r * mu_s + d_i * nu) / r_i);
// The Rayleigh and Mie multiple scattering at the current sample point.
float3 rayleigh_mie_i =
GetScattering( atmosphere, atmosphere.delta_scattering_density_buffer, r_i, mu_i, mu_s_i, nu, ray_r_mu_intersects_ground) *
GetTransmittance( atmosphere, r, mu, d_i, ray_r_mu_intersects_ground) * dx;
// Sample weight (from the trapezoidal rule).
float weight_i = (i == 0 || i == SAMPLE_COUNT) ? 0.5 : 1.0;
rayleigh_mie_sum += rayleigh_mie_i * weight_i;
}
return rayleigh_mie_sum;
}
__device__ float3 ComputeScatteringDensityTexture(const AtmosphereParameters atmosphere, float3 frag_coord, int scattering_order) {
float r;
float mu;
float mu_s;
float nu;
bool ray_r_mu_intersects_ground;
GetRMuMuSNuFromScatteringTextureFragCoord(atmosphere, frag_coord, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
return ComputeScatteringDensity(atmosphere, r, mu, mu_s, nu, scattering_order);
}
__device__ float3 ComputeMultipleScatteringTexture(const AtmosphereParameters atmosphere, float3 frag_coord, float& nu) {
float r;
float mu;
float mu_s;
bool ray_r_mu_intersects_ground;
GetRMuMuSNuFromScatteringTextureFragCoord(atmosphere, frag_coord,
r, mu, mu_s, nu, ray_r_mu_intersects_ground);
return ComputeMultipleScattering(atmosphere, r, mu, mu_s, nu, ray_r_mu_intersects_ground);
}
__device__ float3 ComputeDirectIrradiance(const AtmosphereParameters atmosphere, float r, float mu_s) {
float alpha_s = atmosphere.sun_angular_radius / rad;
// Approximate average of the cosine factor mu_s over the visible fraction of
// the Sun disc.
float average_cosine_factor = mu_s < -alpha_s ? 0.0 : (mu_s > alpha_s ? mu_s : (mu_s + alpha_s) * (mu_s + alpha_s) / (4.0 * alpha_s));
return atmosphere.solar_irradiance * GetTransmittanceToTopAtmosphereBoundary( atmosphere, r, mu_s) * average_cosine_factor;
}
__device__ float3 ComputeIndirectIrradiance(const AtmosphereParameters atmosphere, float r, float mu_s, int scattering_order)
{
const int SAMPLE_COUNT = 32;
const float dphi = pi() / float(SAMPLE_COUNT);
const float dtheta = pi() / float(SAMPLE_COUNT);
float3 result = make_float3(0.0f * watt_per_square_meter_per_nm());
float3 omega_s = make_float3(sqrt(1.0 - mu_s * mu_s), 0.0, mu_s);
for (int j = 0; j < SAMPLE_COUNT / 2; ++j) {
float theta = (float(j) + 0.5) * dtheta;
for (int i = 0; i < 2 * SAMPLE_COUNT; ++i) {
float phi = (float(i) + 0.5) * dphi;
float3 omega =
make_float3(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta));
float domega = (dtheta / rad) * (dphi / rad) * sin(theta) * sr;
float nu = dot(omega, omega_s);
result += GetScattering(atmosphere, r, omega.z, mu_s, nu, false, scattering_order) * omega.z * domega;
}
}
return result;
}
__device__ float2 GetIrradianceTextureUvFromRMuS(const AtmosphereParameters atmosphere, float r, float mu_s) {
float x_r = (r - atmosphere.bottom_radius) /
(atmosphere.top_radius - atmosphere.bottom_radius);
float x_mu_s = mu_s * 0.5 + 0.5;
return make_float2(GetTextureCoordFromUnitRange(x_mu_s, IRRADIANCE_TEXTURE_WIDTH),
GetTextureCoordFromUnitRange(x_r, IRRADIANCE_TEXTURE_HEIGHT));
}
__device__ void GetRMuSFromIrradianceTextureUv(const AtmosphereParameters atmosphere, float2 uv, float& r, float& mu_s) {
float x_mu_s = GetUnitRangeFromTextureCoord(uv.x, IRRADIANCE_TEXTURE_WIDTH);
float x_r = GetUnitRangeFromTextureCoord(uv.y, IRRADIANCE_TEXTURE_HEIGHT);
r = atmosphere.bottom_radius + x_r * (atmosphere.top_radius - atmosphere.bottom_radius);
mu_s = ClampCosine(2.0 * x_mu_s - 1.0);
}
__device__ float3 ComputeDirectIrradianceTexture(const AtmosphereParameters atmosphere, float2 frag_coord) {
float r;
float mu_s;
GetRMuSFromIrradianceTextureUv( atmosphere, (frag_coord / make_float2(IRRADIANCE_TEXTURE_WIDTH, IRRADIANCE_TEXTURE_HEIGHT)), r, mu_s);
return ComputeDirectIrradiance(atmosphere, r, mu_s);
}
__device__ float3 ComputeIndirectIrradianceTexture(const AtmosphereParameters atmosphere, float2 frag_coord, int scattering_order) {
float r;
float mu_s;
GetRMuSFromIrradianceTextureUv( atmosphere, frag_coord / make_float2(IRRADIANCE_TEXTURE_WIDTH, IRRADIANCE_TEXTURE_HEIGHT), r, mu_s);
return ComputeIndirectIrradiance(atmosphere, r, mu_s, scattering_order);
}
__device__ float3 GetIrradiance(const AtmosphereParameters atmosphere, float r, float mu_s)
{
float2 uv = GetIrradianceTextureUvFromRMuS(atmosphere, r, mu_s);
int x = int(floor(uv.x * IRRADIANCE_TEXTURE_WIDTH));
int y = int(floor(uv.y * IRRADIANCE_TEXTURE_HEIGHT));
int idx = (y * IRRADIANCE_TEXTURE_WIDTH) + x;
idx = clamp(idx, 0, IRRADIANCE_TEXTURE_WIDTH*IRRADIANCE_TEXTURE_HEIGHT - 1);
const float3 val = make_float3(atmosphere.irradiance_buffer[idx]);
return val;
}
// KERNEL ACCESSORS
//**************************************************************************************************************************************
extern "C" __global__ void calculate_transmittance(const AtmosphereParameters atmosphere) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= TRANSMITTANCE_TEXTURE_WIDTH || y >= TRANSMITTANCE_TEXTURE_HEIGHT) return;
const unsigned int idx = y * TRANSMITTANCE_TEXTURE_WIDTH + x;
float2 frag_coord = make_float2(x, y);
frag_coord += make_float2(0.5f, 0.5f);
atmosphere.transmittance_buffer[idx] = make_float4(ComputeTransmittanceToTopAtmosphereBoundaryTexture(atmosphere, frag_coord));
}
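// Hypothetical host-side launch sketch (the real driver code is not part of
// this file, and the block size below is illustrative only). Every kernel in
// this section guards its indices against the texture extent, so any grid that
// covers the full texture is valid.
void launch_transmittance_example(const AtmosphereParameters atmosphere)
{
    dim3 block(8, 8);
    dim3 grid((TRANSMITTANCE_TEXTURE_WIDTH + block.x - 1) / block.x,
              (TRANSMITTANCE_TEXTURE_HEIGHT + block.y - 1) / block.y);
    calculate_transmittance<<<grid, block>>>(atmosphere);
    cudaDeviceSynchronize();
}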
extern "C" __global__ void calculate_direct_irradiance(const AtmosphereParameters atmosphere, const int blend){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= IRRADIANCE_TEXTURE_WIDTH || y >= IRRADIANCE_TEXTURE_HEIGHT) return;
const unsigned int idx = y * IRRADIANCE_TEXTURE_WIDTH + x;
float2 frag_coord = make_float2(x, y);
frag_coord += make_float2(0.5f, 0.5f);
if(!blend) atmosphere.irradiance_buffer[idx] = make_float4(.0f);
float4 temp_val = atmosphere.irradiance_buffer[idx];
atmosphere.delta_irradience_buffer[idx] = make_float4( ComputeDirectIrradianceTexture(atmosphere, frag_coord));
if(blend) atmosphere.irradiance_buffer[idx] += temp_val;
}
extern "C" __global__ void calculate_indirect_irradiance(const AtmosphereParameters atmosphere, const int blend, mat3 luminance_from_radiance , const int scattering_order) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= IRRADIANCE_TEXTURE_WIDTH || y >= IRRADIANCE_TEXTURE_HEIGHT) return;
const unsigned int idx = y * IRRADIANCE_TEXTURE_WIDTH + x;
float2 frag_coord = make_float2(x, y);
frag_coord += make_float2(0.5f, 0.5f);
float3 delta_irradiance_value = ComputeIndirectIrradianceTexture(atmosphere, frag_coord, scattering_order-1);
float4 temp_val = atmosphere.irradiance_buffer[idx];
atmosphere.irradiance_buffer[idx] = make_float4( luminance_from_radiance * delta_irradiance_value);
atmosphere.delta_irradience_buffer[idx] = atmosphere.irradiance_buffer[idx];
if (blend) atmosphere.irradiance_buffer[idx] += temp_val;
}
extern "C" __global__ void calculate_multiple_scattering(const AtmosphereParameters atmosphere, const int blend, mat3 luminance_from_radiance, const int scattering_order){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= SCATTERING_TEXTURE_WIDTH || y >= SCATTERING_TEXTURE_HEIGHT || z >= SCATTERING_TEXTURE_DEPTH) return;
const unsigned int idx = x + SCATTERING_TEXTURE_WIDTH * (y + SCATTERING_TEXTURE_HEIGHT * z);
float3 frag_coord = make_float3(x, y, z);
frag_coord += make_float3(0.5f, 0.5f, 0.5f);
float4 temp_val = atmosphere.scattering_buffer[idx];
float nu;
float3 delta_multiple_scattering_value = ComputeMultipleScatteringTexture(atmosphere, frag_coord, nu);
atmosphere.delta_multiple_scattering_buffer[idx] = make_float4(delta_multiple_scattering_value, 1.0f);
atmosphere.scattering_buffer[idx] = make_float4((luminance_from_radiance * delta_multiple_scattering_value) / RayleighPhaseFunction(nu), .0f);
if (blend) atmosphere.scattering_buffer[idx] += temp_val;
}
extern "C" __global__ void calculate_scattering_density(const AtmosphereParameters atmosphere, const float4 blend, const int scattering_order){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= SCATTERING_TEXTURE_WIDTH || y >= SCATTERING_TEXTURE_HEIGHT || z >= SCATTERING_TEXTURE_DEPTH) return;
const unsigned int idx = x + SCATTERING_TEXTURE_WIDTH * (y + SCATTERING_TEXTURE_HEIGHT * z);
float3 frag_coord = make_float3(x, y, z);
frag_coord += make_float3(0.5f, 0.5f, 0.5f);
float3 scattering_density = ComputeScatteringDensityTexture(atmosphere, frag_coord, scattering_order);
atmosphere.delta_scattering_density_buffer[idx] = make_float4(scattering_density, 1.0f);
}
extern "C" __global__ void calculate_single_scattering(const AtmosphereParameters atmosphere, const float4 blend, mat3 luminance_from_radiance){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= SCATTERING_TEXTURE_WIDTH || y >= SCATTERING_TEXTURE_HEIGHT || z>= SCATTERING_TEXTURE_DEPTH) return;
const unsigned int idx = x + SCATTERING_TEXTURE_WIDTH * (y + SCATTERING_TEXTURE_HEIGHT * z);
float3 frag_coord = make_float3(x, y, z);
frag_coord += make_float3(0.5f, 0.5f, 0.5f);
float3 delta_rayleigh, delta_mie;
float4 temp_scatter, temp_single_scatter;
temp_scatter = atmosphere.scattering_buffer[idx];
temp_single_scatter = atmosphere.optional_mie_single_scattering_buffer[idx];
ComputeSingleScatteringTexture(atmosphere, frag_coord, delta_rayleigh, delta_mie);
atmosphere.delta_rayleigh_scattering_buffer[idx] = make_float4(delta_rayleigh, 1.0f);
atmosphere.delta_mie_scattering_buffer[idx] = make_float4(delta_mie, 1.0f);
atmosphere.scattering_buffer[idx] = make_float4(luminance_from_radiance * delta_rayleigh, (luminance_from_radiance * delta_mie).x);
atmosphere.optional_mie_single_scattering_buffer[idx] = make_float4(delta_mie, 1.0f);
if (blend.z) atmosphere.scattering_buffer[idx] += temp_scatter;
if (blend.w) atmosphere.optional_mie_single_scattering_buffer[idx] += temp_single_scatter;
}
// Buffer Cleaners
extern "C" __global__ void clear_transmittance_buffers(const AtmosphereParameters atmosphere) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= TRANSMITTANCE_TEXTURE_WIDTH || y >= TRANSMITTANCE_TEXTURE_HEIGHT) return;
const unsigned int idx = y * TRANSMITTANCE_TEXTURE_WIDTH + x;
atmosphere.transmittance_buffer[idx] = make_float4(.0f);
}
extern "C" __global__ void clear_irradiance_buffers(const AtmosphereParameters atmosphere) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= IRRADIANCE_TEXTURE_WIDTH || y >= IRRADIANCE_TEXTURE_HEIGHT) return;
const unsigned int idx = y * IRRADIANCE_TEXTURE_WIDTH + x;
atmosphere.delta_irradience_buffer[idx] = make_float4(.0f);
atmosphere.irradiance_buffer[idx] = make_float4(.0f);
}
extern "C" __global__ void clear_scattering_buffers(const AtmosphereParameters atmosphere) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= SCATTERING_TEXTURE_WIDTH || y >= SCATTERING_TEXTURE_HEIGHT || z >= SCATTERING_TEXTURE_DEPTH) return;
const unsigned int idx = x + SCATTERING_TEXTURE_WIDTH * (y + SCATTERING_TEXTURE_HEIGHT * z);
atmosphere.scattering_buffer[idx] = make_float4(.0f);
atmosphere.delta_multiple_scattering_buffer[idx] = make_float4(.0f);
atmosphere.delta_scattering_density_buffer[idx] = make_float4(.0f);
atmosphere.optional_mie_single_scattering_buffer[idx] = make_float4(.0f);
atmosphere.delta_rayleigh_scattering_buffer[idx] = make_float4(.0f);
atmosphere.delta_mie_scattering_buffer[idx] = make_float4(.0f);
}
// ====================  next source file  ====================
THC_API accreal
THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THArgCheck(THCTensor_(nElement)(state, self) ==
THCTensor_(nElement)(state, src), 2, "sizes do not match");
self = THCTensor_(newContiguous)(state, self);
src = THCTensor_(newContiguous)(state, src);
#ifdef THC_REAL_IS_FLOAT
accreal result = THCudaBlas_Sdot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#elif defined(THC_REAL_IS_DOUBLE)
accreal result = THCudaBlas_Ddot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#elif defined(THC_REAL_IS_HALF)
accreal result = THCudaBlas_Hdot(state,
THCTensor_(nElement)(state, self),
THCTensor_(data)(state, self), 1,
THCTensor_(data)(state, src), 1);
#endif
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
#else
THError("unimplemented data type");
return ScalarConvert<int, accreal>::to(0);
#endif
}
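// Background note: this is a THC "generic" file, so THCTensor_(dot) is expanded
// once per real type by the TH macro machinery (e.g. roughly THCudaTensor_dot
// for float and THCudaDoubleTensor_dot for double), and accreal is the
// accumulation type associated with that real type.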
THC_API void
THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec));
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected");
if( mat->size[1] != vec->size[0] )
THError("size mismatch");
if(t->nDimension != 1)
THError("size mismatch");
if(t->size[0] != mat->size[0])
THError("size mismatch");
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
if(r_ != t)
{
THCTensor_(resizeAs)(state, r_, t);
THCTensor_(copy)(state, r_, t);
}
if(mat->stride[0] == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 'n', mat->size[0], mat->size[1],
alpha, THCTensor_(data)(state, mat), mat->stride[1],
THCTensor_(data)(state, vec), vec->stride[0],
beta, THCTensor_(data)(state, r_), r_->stride[0]);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 'n', mat->size[0], mat->size[1],
alpha, THCTensor_(data)(state, mat), mat->stride[1],
THCTensor_(data)(state, vec), vec->stride[0],
beta, THCTensor_(data)(state, r_), r_->stride[0]);
#endif
}
else if(mat->stride[1] == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0],
alpha, THCTensor_(data)(state, mat), mat->stride[0],
THCTensor_(data)(state, vec), vec->stride[0],
beta, THCTensor_(data)(state, r_), r_->stride[0]);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0],
alpha, THCTensor_(data)(state, mat), mat->stride[0],
THCTensor_(data)(state, vec), vec->stride[0],
beta, THCTensor_(data)(state, r_), r_->stride[0]);
#endif
}
else
{
THCTensor *cmat = THCTensor_(newContiguous)(state, mat);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(state, 't', mat->size[1], mat->size[0],
alpha, THCTensor_(data)(state, cmat), cmat->stride[0],
THCTensor_(data)(state, vec), vec->stride[0],
beta, THCTensor_(data)(state, r_), r_->stride[0]);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(state, 't', mat->size[1], mat->size[0],
alpha, THCTensor_(data)(state, cmat), cmat->stride[0],
THCTensor_(data)(state, vec), vec->stride[0],
beta, THCTensor_(data)(state, r_), r_->stride[0]);
#endif
THCTensor_(free)(state, cmat);
}
#elif defined(THC_REAL_IS_HALF)
// Currently no Hgemv/SgemvEx in Cublas
THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec);
THCTensor_(resize2d)(state, vecAsMatrix, vecAsMatrix->size[0], 1);
THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t);
THCTensor_(resize2d)(state, tAsMatrix, tAsMatrix->size[0], 1);
THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix);
// r_ will have answer as matrix, need to return a vector
THCTensor_(resize1d)(state, r_, r_->size[0]);
THCTensor_(free)(state, vecAsMatrix);
THCTensor_(free)(state, tAsMatrix);
#endif
#else
THError("unimplemented data type");
#endif
}
THC_API void
THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2));
if ( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) {
THError("vector and vector expected");
}
if (t->nDimension != 2) {
THError("size mismatch");
}
if ( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
THError("size mismatch");
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
if (r_ != t) {
THCTensor_(resizeAs)(state, r_, t);
THCTensor_(copy)(state, r_, t);
}
if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) {
THCTensor_(zero)(state, r_);
} else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) {
THCTensor_(mul)(state, r_, r_, beta);
}
if(r_->stride[0] == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec1->size[0], vec2->size[0],
alpha, THCTensor_(data)(state, vec1), vec1->stride[0],
THCTensor_(data)(state, vec2), vec2->stride[0],
THCTensor_(data)(state, r_), r_->stride[1]);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec1->size[0], vec2->size[0],
alpha, THCTensor_(data)(state, vec1), vec1->stride[0],
THCTensor_(data)(state, vec2), vec2->stride[0],
THCTensor_(data)(state, r_), r_->stride[1]);
#endif
}
else if(r_->stride[1] == 1)
{
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec2->size[0], vec1->size[0],
alpha, THCTensor_(data)(state, vec2), vec2->stride[0],
THCTensor_(data)(state, vec1), vec1->stride[0],
THCTensor_(data)(state, r_), r_->stride[0]);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec2->size[0], vec1->size[0],
alpha, THCTensor_(data)(state, vec2), vec2->stride[0],
THCTensor_(data)(state, vec1), vec1->stride[0],
THCTensor_(data)(state, r_), r_->stride[0]);
#endif
}
else
{
THCTensor *cr = THCTensor_(newClone)(state, r_);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sger(state, vec2->size[0], vec1->size[0],
alpha, THCTensor_(data)(state, vec2), vec2->stride[0],
THCTensor_(data)(state, vec1), vec1->stride[0],
THCTensor_(data)(state, cr), cr->stride[0]);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dger(state, vec2->size[0], vec1->size[0],
alpha, THCTensor_(data)(state, vec2), vec2->stride[0],
THCTensor_(data)(state, vec1), vec1->stride[0],
THCTensor_(data)(state, cr), cr->stride[0]);
#endif
THCTensor_(freeCopyTo)(state, cr, r_);
}
#elif defined(THC_REAL_IS_HALF)
// currently no Hger/SgerEx in Cublas.
THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2);
THCTensor_(resize2d)(state, vec2T, vec2T->size[0], 1);
THCTensor_(transpose)(state, vec2T, NULL, 0, 1);
THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1);
THCTensor_(resize2d)(state, vec1M, vec1M->size[0], 1);
THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T);
THCTensor_(free)(state, vec2T);
THCTensor_(free)(state, vec1M);
#endif
#else
THError("unimplemented data type");
#endif
}
THC_API void
THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2));
char transpose_r, transpose_m1, transpose_m2;
THCTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2) )
THError("matrix and matrix expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) )
THError("size mismatch");
if(t != r_)
{
THCTensor_(resizeAs)(state, r_, t);
if (ScalarConvert<real, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, r_, t);
}
}
/* r_ */
if(r_->stride[0] == 1 &&
r_->stride[1] != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 &&
r_->stride[0] != 0)
{
THCTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1);
r__ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THCTensor_(newContiguous)(state, m1);
}
/* m2 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THCTensor_(newContiguous)(state, m2);
}
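  /* At this point r__, m1_, and m2_ have been arranged so that a single
     column-major GEMM (as cuBLAS expects) computes
     r__ = alpha * op(m1_) * op(m2_) + beta * r__; the leading dimensions passed
     below are taken from the tensors' strides. */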
#ifdef THC_REAL_IS_HALF
THCudaBlas_Hgemm(state,
transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THCTensor_(data)(state, r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
#elif defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgemm(state,
transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THCTensor_(data)(state, r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(state,
transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THCTensor_(data)(state, m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THCTensor_(data)(state, m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THCTensor_(data)(state, r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
#endif
/* free intermediate variables */
if(m1_ != m1) {
THCTensor_(free)(state, m1_);
}
if(m2_ != m2) {
THCTensor_(free)(state, m2_);
}
if(r__ != r_) {
THCTensor_(freeCopyTo)(state, r__, r_);
}
#else
THError("unimplemented data type");
#endif
}
THC_API void
THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t,
real alpha, THCTensor *batch1, THCTensor *batch2) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimension)(state, t) == 2, 4, "expected 2D tensor");
THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor");
long batchnum = THCTensor_(size)(state, batch1, 0);
long m1d1 = THCTensor_(size)(state, batch1, 1);
long innerdim = THCTensor_(size)(state, batch1, 2);
long m2d2 = THCTensor_(size)(state, batch2, 2);
THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
// M is t, as listed in the docs under addbmm
THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6,
"first dimension must match first dimension of M");
THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7,
"second dimension must match second dimension of M");
THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6,
"second dimension must match first dimension of batch2");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<real, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
THCTensor *slice1 = THCTensor_(new)(state);
THCTensor *slice2 = THCTensor_(new)(state);
for (long i=0; i<batchnum; i++) {
THCTensor_(select)(state, slice1, batch1, 0, i);
THCTensor_(select)(state, slice2, batch2, 0, i);
THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2);
beta = ScalarConvert<int, real>::to(1);
}
THCTensor_(free)(state, slice1);
THCTensor_(free)(state, slice2);
#else
THError("unimplemented data type");
#endif
}
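// Fills `buffer` with a pointer to the start of each matrix in a batched tensor
// (buffer[idx] = data + idx * stride), so the pointer-array variants of the
// batched cuBLAS routines (gemmBatched, getrf, getrs) can be used.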
__global__ void createBatchGemmBuffer(const real** buffer, real* data,
long stride, long num_batches) {
const long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches) {
buffer[idx] = data + idx * stride;
}
}
THC_API void
THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t,
real alpha, THCTensor *batch1, THCTensor *batch2) {
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2));
THArgCheck(THCTensor_(nDimension)(state, t) == 3, 4, "expected 3D tensor");
THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6,
"equal number of batches expected");
THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7,
"equal number of batches expected");
THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7,
"wrong matrix size");
THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6,
"wrong matrix size");
if (t != result) {
THCTensor_(resizeAs)(state, result, t);
if (ScalarConvert<real, double>::to(beta) != 0.0) {
THCTensor_(copy)(state, result, t);
}
}
bool transpose_result;
char transpose_batch1, transpose_batch2;
long lda, ldb, ldc;
THCTensor *result_, *batch1_, *batch2_;
if (result->stride[1] == 1)
{
transpose_result = false;
result_ = result;
ldc = result_->stride[2];
}
else if (result->stride[2] == 1)
{
transpose_result = true;
THCTensor *swap = batch2;
batch2 = batch1;
batch1 = swap;
result_ = result;
ldc = result_->stride[1];
}
else
{
transpose_result = false;
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2);
result_ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, result_, NULL, 1, 2);
ldc = result_->stride[2];
}
if (batch1->stride[transpose_result ? 2 : 1] == 1 &&
batch1->stride[transpose_result ? 1 : 2] != 0)
{
transpose_batch1 = 'n';
batch1_ = batch1;
lda = batch1_->stride[transpose_result ? 1 : 2];
}
else if (batch1->stride[transpose_result ? 1 : 2] == 1 &&
batch1->stride[transpose_result ? 2 : 1] != 0)
{
transpose_batch1 = 't';
batch1_ = batch1;
lda = batch1_->stride[transpose_result ? 2 : 1];
}
else
{
transpose_batch1 = transpose_result ? 'n' : 't';
batch1_ = THCTensor_(newContiguous)(state, batch1);
lda = batch1_->stride[1];
}
if (batch2->stride[transpose_result ? 2 : 1] == 1 &&
batch2->stride[transpose_result ? 1 : 2] != 0)
{
transpose_batch2 = 'n';
batch2_ = batch2;
ldb = batch2_->stride[transpose_result ? 1 : 2];
}
else if (batch2->stride[transpose_result ? 1 : 2] == 1 &&
batch2->stride[transpose_result ? 2 : 1] != 0)
{
transpose_batch2 = 't';
batch2_ = batch2;
ldb = batch2_->stride[transpose_result ? 2 : 1];
}
else
{
transpose_batch2 = transpose_result ? 'n' : 't';
batch2_ = THCTensor_(newContiguous)(state, batch2);
ldb = batch2_->stride[1];
}
long num_batches = result_->size[0];
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
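  // CUDA < 8.0: build device arrays of per-matrix pointers and call gemmBatched.
  // CUDA >= 8.0: gemmStridedBatched is used instead and needs no pointer buffers.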
// Compute pointers to matrices in each batch.
#if CUDA_VERSION < 8000
size_t matrices_size = num_batches * sizeof(real*);
// Copy pointers to device.
const real **d_matrices1, **d_matrices2;
real **d_result_matrices;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_result_matrices, matrices_size));
const long block = 512;
const long grid = (num_batches + block - 1) / block;
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
d_matrices1, THCTensor_(data)(state, batch1_), batch1_->stride[0],
num_batches);
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
d_matrices2, THCTensor_(data)(state, batch2_), batch2_->stride[0],
num_batches);
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
(const real**)d_result_matrices, THCTensor_(data)(state,result_),
result_->stride[0], num_batches);
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size[transpose_result ? 2 : 1],
result_->size[transpose_result ? 1 : 2],
batch1_->size[transpose_result ? 1 : 2],
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size[transpose_result ? 2 : 1],
result_->size[transpose_result ? 1 : 2],
batch1_->size[transpose_result ? 1 : 2],
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
#endif
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCudaFree(state, d_result_matrices);
#else
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_SgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size[transpose_result ? 2 : 1],
result_->size[transpose_result ? 1 : 2],
batch1_->size[transpose_result ? 1 : 2],
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride[0],
THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0],
beta,
THCTensor_(data)(state, result_), ldc, result_->stride[0],
num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_DgemmStridedBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size[transpose_result ? 2 : 1],
result_->size[transpose_result ? 1 : 2],
batch1_->size[transpose_result ? 1 : 2],
alpha,
THCTensor_(data)(state, batch1_), lda, batch1_->stride[0],
THCTensor_(data)(state, batch2_), ldb, batch2_->stride[0],
beta,
THCTensor_(data)(state, result_), ldc, result_->stride[0],
num_batches);
#endif
#endif
#elif defined(THC_REAL_IS_HALF)
// Currently no HgemmBatched in Cublas
for (long i = 0; i < num_batches; ++i) {
THCudaBlas_Hgemm(
state,
transpose_batch1,
transpose_batch2,
result_->size[transpose_result ? 2 : 1],
result_->size[transpose_result ? 1 : 2],
batch1_->size[transpose_result ? 1 : 2],
alpha,
THCTensor_(data)(state, batch1_) + i * batch1_->stride[0], lda,
THCTensor_(data)(state, batch2_) + i * batch2_->stride[0], ldb,
beta,
THCTensor_(data)(state, result_) + i * result_->stride[0], ldc);
}
#endif
if (batch1_ != batch1) {
THCTensor_(free)(state, batch1_);
}
if (batch2_ != batch2) {
THCTensor_(free)(state, batch2_);
}
if (result_ != result) {
THCTensor_(freeCopyTo)(state, result_, result);
}
#else
THError("unimplemented data type");
#endif
}
THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THAssert(THCTensor_(checkGPU)(state, 2, ra_, a));
THArgCheck(THCTensor_(nDimension)(state, a) == 3, 3, "expected 3D tensor");
THArgCheck(THCTensor_(size)(state, a, 1) ==
THCTensor_(size)(state, a, 2), 3, "matrices must be square");
if (ra_ != a) {
THCTensor_(resizeAs)(state, ra_, a);
// not sure if this is kosher, but things are nicer if we return in column major
if (ra_->stride[0] == 1) {
THCTensor_(transpose)(state, ra_, NULL, 1, 0);
} else if (ra_->stride[2] == 1) {
THCTensor_(transpose)(state, ra_, NULL, 1, 2);
}
THCTensor_(copy)(state, ra_, a);
}
int n = a->size[1];
int lda;
THCTensor *ra__;
if (ra_->stride[1] == 1) {
// column ordered, what BLAS wants
lda = ra_->stride[2];
ra__ = ra_;
} else {
// not column ordered, need to make it such (requires copy)
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, ra_, 1, 2);
ra__ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, ra__, NULL, 1, 2);
lda = ra__->stride[2];
}
long num_batches = ra__->size[0];
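  // When pivoting is disabled, getrf below is called with a NULL pivot array;
  // rpivots_ is simply filled with the identity permutation 1..n for every batch
  // so callers still receive a well-formed pivot tensor.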
if (!pivot) {
THCudaIntTensor *t = THCudaIntTensor_new(state);
THCudaIntTensor_range(state, t, 1, n, 1);
THCudaIntTensor_unsqueeze1d(state, t, t, 0);
THCudaIntTensor** ptrs = (THCudaIntTensor**) THAlloc(sizeof(THCudaIntTensor*)*num_batches);
for (long i=0; i<num_batches; i++) {
ptrs[i] = t;
}
THCudaIntTensor_catArray(state, rpivots_, ptrs, num_batches, 0);
THCudaIntTensor_free(state, t);
THFree(ptrs);
} else {
THCudaIntTensor_resize2d(state, rpivots_, num_batches, n);
}
bool free_rinfo_ = !rinfo_;
if (rinfo_ == NULL) rinfo_ = THCudaIntTensor_new(state);
THCudaIntTensor_resize1d(state, rinfo_, num_batches);
int *info_gpu = THCudaIntTensor_data(state, rinfo_);
// Copy pointers to device.
real **d_result;
size_t matrices_size = num_batches * sizeof(real*);
THCudaCheck(THCudaMalloc(state, (void**)&d_result, matrices_size));
const long block = 512;
const long grid = (num_batches + block - 1) / block;
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
(const real**)d_result, THCTensor_(data)(state, ra__),
ra__->stride[0], num_batches);
int *pivots_gpu = NULL;
if (pivot) {
pivots_gpu = THCudaIntTensor_data(state, rpivots_);
}
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches);
#endif
THCudaFree(state, d_result);
if (ra__ != ra_) {
THCTensor_(freeCopyTo)(state, ra__, ra_);
}
if (free_rinfo_) {
    int min = THCudaIntTensor_minall(state, rinfo_);
    int max = THCudaIntTensor_maxall(state, rinfo_);
THCudaIntTensor_free(state, rinfo_);
if (min != 0 || max != 0) {
THError("failed to factorize some batch elements (min info == %d, max info == %d)",
min, max);
}
}
#else
THError("unimplemented data type");
#endif
}
THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b,
THCTensor *atf, THCudaIntTensor *pivots)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THAssert(THCTensor_(checkGPU)(state, 3, rb_, atf, b));
THArgCheck(THCTensor_(nDimension)(state, atf) == 3, 3, "expected 3D tensor");
THArgCheck(THCTensor_(nDimension)(state, b) == 3 ||
THCTensor_(nDimension)(state, b) == 2, 4, "expected 2D or 3D tensor");
THArgCheck(THCTensor_(size)(state, atf, 0) ==
THCTensor_(size)(state, b, 0), 3, "number of batches must be equal");
THArgCheck(THCTensor_(size)(state, atf, 1) ==
THCTensor_(size)(state, atf, 2), 3, "A matrices must be square");
THArgCheck(THCTensor_(size)(state, atf, 1) ==
THCTensor_(size)(state, b, 1), 3, "dimensions of A and b must be equal");
if (rb_ != b) {
THCTensor_(resizeAs)(state, rb_, b);
THCTensor_(copy)(state, rb_, b);
}
int n = atf->size[1];
int nrhs = rb_->nDimension > 2 ? rb_->size[2] : 1;
THCTensor *atf_;
THCTensor *rb__;
int lda, ldb;
// correct ordering of A_tf
if (atf->stride[1] == 1) {
// column ordered, what BLAS wants
lda = atf->stride[2];
atf_ = atf;
} else {
// not column ordered, need to make it such (requires copy)
// it would be nice if we could use the op(A) flags to automatically
// transpose A if needed, but this leads to unpredictable behavior if the
// user clones A_tf later with a different ordering
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, atf, 1, 2);
atf_ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, atf_, NULL, 1, 2);
lda = atf_->stride[2];
}
// correct ordering of B
if (rb_->stride[1] == 1) {
// column ordered
if (rb_->nDimension == 2 || rb_->size[2] == 1) {
ldb = n;
} else {
ldb = rb_->stride[2];
}
rb__ = rb_;
} else {
// make column ordered
if (rb_->nDimension > 2) {
THCTensor *transp_r_ = THCTensor_(newTranspose)(state, rb_, 1, 2);
rb__ = THCTensor_(newClone)(state, transp_r_);
THCTensor_(free)(state, transp_r_);
THCTensor_(transpose)(state, rb__, NULL, 1, 2);
ldb = rb__->stride[2];
} else {
rb__ = THCTensor_(newClone)(state, rb_);
ldb = n;
}
}
long num_batches = rb_->size[0];
size_t matrices_size = num_batches * sizeof(real*);
// Copy pointers to device.
real **d_result;
const real **d_atf;
THCudaCheck(THCudaMalloc(state, (void**)&d_result, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_atf, matrices_size));
const long block = 512;
const long grid = (num_batches + block - 1) / block;
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
(const real**)d_result, THCTensor_(data)(state, rb__),
rb__->stride[0], num_batches);
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
d_atf, THCTensor_(data)(state, atf_),
atf_->stride[0], num_batches);
if (!THCudaIntTensor_isContiguous(state, pivots)) {
THError("Error: pivots is not contiguous.");
}
int *pivots_data = THCudaIntTensor_data(state, pivots);
int info;
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches);
#endif
if (info < 0) {
THError("Illegal arg %d", -info);
}
THCudaFree(state, d_result);
THCudaFree(state, d_atf);
if (atf_ != atf) {
THCTensor_(free)(state, atf_);
}
if (rb__ != rb_) {
THCTensor_(freeCopyTo)(state, rb__, rb_);
}
#else
THError("unimplemented data type");
#endif
}
#endif
typedef float d_type;
#define DATA_TYPE CUDNN_DATA_FLOAT
int GROUP_COUNT = 1;
int INPUT_CHANNELS_PER_GROUP = 128;
int choose_algo = -1;
cudnnMathType_t MATH_TYPE = CUDNN_DEFAULT_MATH;
cudnnTensorFormat_t INPUT_TENSOR_FORMAT = CUDNN_TENSOR_NCHW;
int INPUT_BATCH_SIZE = 16;
int INPUT_CHANNELS = 0;
int INPUT_HEIGHT = 112;
int INPUT_WIDTH = 112;
cudnnTensorFormat_t OUTPUT_TENSOR_FORMAT = CUDNN_TENSOR_NCHW;
int OUTPUT_BATCH_SIZE = 0, OUTPUT_CHANNELS = 0, OUTPUT_HEIGHT = 0,
OUTPUT_WIDTH = 0;
cudnnTensorFormat_t KERNEL_TENSOR_FORMAT = CUDNN_TENSOR_NCHW;
int KERNEL_OUT_CHANNELS = 256; // #kernels = #output.channels
int KERNEL_HEIGHT = 3;
int KERNEL_WIDTH = 3;
int PAD_HEIGHT = 0;
int PAD_WIDTH = 0;
int VERTICAL_STRIDE = 1;
int HORIZONTAL_STRIDE = 1;
int DILATION_HEIGHT = 1;
int DILATION_WIDTH = 1;
#define CONV_MODE CUDNN_CROSS_CORRELATION
cudnnConvolutionFwdAlgo_t CONV_ALGO = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
#define CONV_ALGO_PREFER CUDNN_CONVOLUTION_FWD_PREFER_FASTEST
#define MEMORY_LIMIT 0
int ROUNDS = 10;
namespace ch {
using namespace std::chrono;
}
const char algo_name[8][50] = {
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_DIRECT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED",
};
const char math_types[3][50] = {
"CUDNN_DEFAULT_MATH",
"CUDNN_TENSOR_OP_MATH",
"CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION",
};
//const cudnnConvolutionFwdAlgo_t total_conv_algo[] = {
//CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
//CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM,
//CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
//CUDNN_CONVOLUTION_FWD_ALGO_DIRECT,
//CUDNN_CONVOLUTION_FWD_ALGO_FFT,
//CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING,
//CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD,
//CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
//};
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status(expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
double get_durtime(struct timeval t1, struct timeval t2);
bool isdigit(const char *str) {
    for (size_t i = 0; i < strlen(str); ++i)
        if (str[i] < '0' || str[i] > '9')
            return false;
    return true;
}
void get_args(int argc, const char *argv[]) {
int pos = 1;
while (pos < argc) {
if (pos + 1 < argc && !strcmp(argv[pos], "-g") &&
isdigit(argv[pos + 1])) {
GROUP_COUNT = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-ca") &&
isdigit(argv[pos + 1])) {
if (atoi(argv[pos + 1]) < 8)
choose_algo = atoi(argv[pos + 1]);
pos += 2;
continue;
}
        /*if (pos + 1 < argc && !strcmp(argv[pos], "-cdt") &&
            isdigit(argv[pos + 1])) {
            if (atoi(argv[pos + 1]) == 16) choose_data_type = 16;
            if (atoi(argv[pos + 1]) == 32) choose_data_type = 32;
            pos += 2;
            continue;
        }*/
if (pos + 1 < argc && !strcmp(argv[pos], "-n") &&
isdigit(argv[pos + 1])) {
INPUT_BATCH_SIZE = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-c") &&
isdigit(argv[pos + 1])) {
INPUT_CHANNELS_PER_GROUP = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-f") &&
isdigit(argv[pos + 1])) {
KERNEL_OUT_CHANNELS = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-insize") &&
isdigit(argv[pos + 1])) {
INPUT_HEIGHT = INPUT_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-h") &&
isdigit(argv[pos + 1])) {
INPUT_HEIGHT = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-w") &&
isdigit(argv[pos + 1])) {
INPUT_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-kers") &&
isdigit(argv[pos + 1])) {
KERNEL_HEIGHT = KERNEL_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-r") &&
isdigit(argv[pos + 1])) {
KERNEL_HEIGHT = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-s") &&
isdigit(argv[pos + 1])) {
KERNEL_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-pad") &&
isdigit(argv[pos + 1])) {
PAD_HEIGHT = atoi(argv[pos + 1]);
PAD_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-ph") &&
isdigit(argv[pos + 1])) {
PAD_HEIGHT = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-pw") &&
isdigit(argv[pos + 1])) {
PAD_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-rounds") &&
isdigit(argv[pos + 1])) {
ROUNDS = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (!strcmp(argv[pos], "-nhwc")) {
INPUT_TENSOR_FORMAT = OUTPUT_TENSOR_FORMAT = KERNEL_TENSOR_FORMAT =
CUDNN_TENSOR_NHWC;
pos += 1;
continue;
}
if (!strcmp(argv[pos], "-nchw")) {
INPUT_TENSOR_FORMAT = OUTPUT_TENSOR_FORMAT = KERNEL_TENSOR_FORMAT =
CUDNN_TENSOR_NCHW;
pos += 1;
continue;
}
if (!strcmp(argv[pos], "-default-math")) {
MATH_TYPE = CUDNN_DEFAULT_MATH;
pos += 1;
continue;
}
if (!strcmp(argv[pos], "-tensor-op-math")) {
MATH_TYPE = CUDNN_TENSOR_OP_MATH;
pos += 1;
continue;
}
if (!strcmp(argv[pos], "-tensor-op-math-allow-conversion")) {
MATH_TYPE = CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION;
pos += 1;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-dh") &&
isdigit(argv[pos + 1])) {
DILATION_HEIGHT = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-dw") &&
isdigit(argv[pos + 1])) {
DILATION_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-dilate") &&
isdigit(argv[pos + 1])) {
DILATION_HEIGHT = atoi(argv[pos + 1]);
DILATION_WIDTH = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-sh") &&
isdigit(argv[pos + 1])) {
VERTICAL_STRIDE = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-sw") &&
isdigit(argv[pos + 1])) {
HORIZONTAL_STRIDE = atoi(argv[pos + 1]);
pos += 2;
continue;
}
if (pos + 1 < argc && !strcmp(argv[pos], "-stride") &&
isdigit(argv[pos + 1])) {
VERTICAL_STRIDE = atoi(argv[pos + 1]);
HORIZONTAL_STRIDE = atoi(argv[pos + 1]);
pos += 2;
continue;
}
pos += 1;
}
INPUT_CHANNELS = INPUT_CHANNELS_PER_GROUP * GROUP_COUNT;
}
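// Example invocation (the binary name "conv_bench" is only a placeholder for
// whatever this file is built into):
//   ./conv_bench -n 16 -c 128 -f 256 -insize 112 -kers 3 -pad 1 -stride 1 -nchw -rounds 10
// benchmarks a 3x3, stride-1, pad-1 convolution over a 16x128x112x112 NCHW input
// for all eight forward algorithms and reports the best time.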
int main(int argc, const char *argv[]) {
get_args(argc, argv);
int KERNEL_IN_CHANNELS = INPUT_CHANNELS_PER_GROUP;
struct timeval t1, t2;
struct timeval total_t1, total_t2;
double time_memcpy_htod = 0.0, time_memcpy_dtoh = 0.0;
double time_conv = 0.0;
cudaSetDevice(0);
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
gettimeofday(&total_t1, 0);
int expr = ROUNDS;
// input
d_type *c_input;
unsigned int input_size =
INPUT_BATCH_SIZE * INPUT_CHANNELS * INPUT_HEIGHT * INPUT_WIDTH;
size_t input_bytes = INPUT_BATCH_SIZE * INPUT_CHANNELS * INPUT_HEIGHT *
INPUT_WIDTH * sizeof(d_type);
c_input = (d_type *)malloc(input_bytes);
srand((unsigned)time(0));
for (int j = 0; j < input_size; ++j)
c_input[j] = (d_type)rand() / RAND_MAX;
d_type *d_input{nullptr};
cudaMalloc(&d_input, input_bytes);
gettimeofday(&t1, 0);
cudaMemcpy(d_input, c_input, input_bytes, cudaMemcpyHostToDevice);
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&t2, 0);
time_memcpy_htod += get_durtime(t1, t2);
// kernel
d_type *c_kernel;
unsigned int kernel_size =
KERNEL_OUT_CHANNELS * INPUT_CHANNELS * KERNEL_HEIGHT * KERNEL_WIDTH;
size_t kernel_bytes = KERNEL_OUT_CHANNELS * INPUT_CHANNELS * KERNEL_HEIGHT *
KERNEL_WIDTH * sizeof(d_type);
c_kernel = (d_type *)malloc(kernel_bytes);
for (int j = 0; j < kernel_size; ++j)
c_kernel[j] = (d_type)rand() / RAND_MAX;
d_type *d_kernel{nullptr};
cudaMalloc(&d_kernel, kernel_bytes);
gettimeofday(&t1, 0);
cudaMemcpy(d_kernel, c_kernel, kernel_bytes, cudaMemcpyHostToDevice);
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&t2, 0);
time_memcpy_htod += get_durtime(t1, t2);
gettimeofday(&t1, 0);
// descriptor
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/INPUT_TENSOR_FORMAT,
/*dataType=*/DATA_TYPE,
/*batch_size=*/INPUT_BATCH_SIZE,
/*channels=*/INPUT_CHANNELS,
/*height=*/INPUT_HEIGHT,
/*width=*/INPUT_WIDTH));
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/DATA_TYPE,
/*format=*/KERNEL_TENSOR_FORMAT,
/*out_channels=*/KERNEL_OUT_CHANNELS,
/*in_channels=*/KERNEL_IN_CHANNELS,
/*kernel_height=*/KERNEL_HEIGHT,
/*kernel_width=*/KERNEL_WIDTH));
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(
cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/PAD_HEIGHT,
/*pad_width=*/PAD_WIDTH,
/*vertical_stride=*/VERTICAL_STRIDE,
/*horizontal_stride=*/HORIZONTAL_STRIDE,
/*dilation_height=*/DILATION_HEIGHT,
/*dilation_width=*/DILATION_WIDTH,
/*mode=*/CONV_MODE,
/*computeType=*/DATA_TYPE));
if (GROUP_COUNT > 1)
checkCUDNN(cudnnSetConvolutionGroupCount(convolution_descriptor,
/*group_count=*/GROUP_COUNT));
checkCUDNN(cudnnSetConvolutionMathType(convolution_descriptor, MATH_TYPE));
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(
convolution_descriptor, input_descriptor, kernel_descriptor,
&OUTPUT_BATCH_SIZE, &OUTPUT_CHANNELS, &OUTPUT_HEIGHT, &OUTPUT_WIDTH));
std::cout << "Rounds: " << expr << std::endl;
std::cout << "Group count: " << GROUP_COUNT << ", "
<< "Math type: " << math_types[MATH_TYPE] << std::endl;
std::cout << "Input dims: " << INPUT_BATCH_SIZE << ", " << INPUT_CHANNELS
<< ", " << INPUT_HEIGHT << ", " << INPUT_WIDTH << std::endl;
std::cout << "Kernel dims: " << KERNEL_IN_CHANNELS << ", "
<< KERNEL_OUT_CHANNELS << ", " << KERNEL_HEIGHT << ", "
<< KERNEL_WIDTH << std::endl;
std::cout << "Output dims: " << OUTPUT_BATCH_SIZE << ", " << OUTPUT_CHANNELS
<< ", " << OUTPUT_HEIGHT << ", " << OUTPUT_WIDTH << std::endl;
size_t output_bytes = OUTPUT_BATCH_SIZE * OUTPUT_CHANNELS * OUTPUT_HEIGHT *
OUTPUT_WIDTH * sizeof(d_type);
d_type *d_output{nullptr};
cudaMalloc(&d_output, output_bytes);
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/OUTPUT_TENSOR_FORMAT,
/*dataType=*/DATA_TYPE,
/*batch_size=*/OUTPUT_BATCH_SIZE,
/*channels=*/OUTPUT_CHANNELS,
/*height=*/OUTPUT_HEIGHT,
/*width=*/OUTPUT_WIDTH));
// warm-up is handled inside the timing loop below (see the `warmup` counter)
//for (int kk = 0; kk < 3; ++kk) {
float best_time = 10000;
//cudnnConvolutionFwdAlgo_t convolution_algorithm;
//if (choose_algo == -1) {
//// checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn,
//// input_descriptor,
//// kernel_descriptor,
//// convolution_descriptor,
//// output_descriptor,
//// CONV_ALGO_PREFER,
////[>memoryLimitInByptes=<]MEMORY_LIMIT,
////&convolution_algorithm));
//const int convAlgoCnt = 8;
//// get default workspace
//size_t workspaceSize = (size_t)1024 * 1024 * 1024;
//float *workspace;
//HANDLE_ERROR(cudaMalloc(&workspace, workspaceSize));
//int cnt = 0;
//cudnnConvolutionFwdAlgoPerf_t perfResults[convAlgoCnt];
//checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
//cudnn, input_descriptor, d_input, kernel_descriptor, d_kernel,
//convolution_descriptor, output_descriptor, d_output,
//convAlgoCnt, &cnt, perfResults, workspace, workspaceSize));
//assert(cnt > 0);
//checkCUDNN(perfResults[0].status);
//convolution_algorithm = perfResults[0].algo;
//// for (int i = 0; i < cnt; ++i) {
//// std::cout << "algo name: " << algo_name[perfResults[i].algo]
//// << ", time: " << perfResults[i].time << std::endl;
//// }
//HANDLE_ERROR(cudaDeviceSynchronize());
//} else
//convolution_algorithm = total_conv_algo[choose_algo];
//std::cout << "Chosen algorithm: " << algo_name[convolution_algorithm]
//<< std::endl;
for (int algoid = 0; algoid < 8; ++algoid) {
size_t workspace_bytes = 0;
auto status((cudnnGetConvolutionForwardWorkspaceSize(
cudnn, input_descriptor, kernel_descriptor, convolution_descriptor,
output_descriptor, (cudnnConvolutionFwdAlgo_t)algoid, &workspace_bytes)));
if (status != CUDNN_STATUS_SUCCESS)
continue;
//std::cout << "Workspace size: " << (workspace_bytes) << "B"
//<< std::endl;
void *d_workspace{nullptr};
if (workspace_bytes > 0)
cudaMalloc(&d_workspace, workspace_bytes);
const float alpha = 1, beta = 0;
ch::time_point<ch::high_resolution_clock, ch::nanoseconds> beg, end;
time_conv = 0;
int warmup = 200;
for (int i = 0; i < warmup + expr; ++i) {
HANDLE_ERROR(cudaDeviceSynchronize());
beg = ch::high_resolution_clock::now();
auto status(cudnnConvolutionForward(
cudnn, &alpha, input_descriptor, d_input, kernel_descriptor,
d_kernel, convolution_descriptor,
//convolution_algorithm,
(cudnnConvolutionFwdAlgo_t)algoid,
d_workspace, workspace_bytes, &beta, output_descriptor,
d_output));
if (status != CUDNN_STATUS_SUCCESS)
break;
HANDLE_ERROR(cudaDeviceSynchronize());
end = ch::high_resolution_clock::now();
if (i >= warmup)
time_conv +=
ch::duration_cast<ch::duration<double>>(end - beg).count() *
1000;
}
time_conv /= expr;
std::cout << algo_name[algoid] << ": " << time_conv << std::endl;
if (time_conv < best_time)
best_time = time_conv;
cudaFree(d_workspace);
}
//}
std::cout << "best time: " << best_time << std::endl;
d_type *h_output = new d_type[output_bytes / sizeof(d_type)];
gettimeofday(&t1, 0);
cudaMemcpy(h_output, d_output, output_bytes, cudaMemcpyDeviceToHost);
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&t2, 0);
time_memcpy_dtoh += get_durtime(t1, t2);
delete[] h_output;
free(c_kernel);
free(c_input);
cudaFree(d_kernel);
cudaFree(d_input);
cudaFree(d_output);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
HANDLE_ERROR(cudaDeviceSynchronize());
gettimeofday(&total_t2, 0);
//time_total = get_durtime(total_t1, total_t2);
//printf("TFlops: %.2lf tflops\n",
//2.0 * INPUT_BATCH_SIZE * INPUT_CHANNELS_PER_GROUP * OUTPUT_HEIGHT *
//OUTPUT_WIDTH * KERNEL_OUT_CHANNELS * KERNEL_HEIGHT *
//KERNEL_WIDTH / VERTICAL_STRIDE / HORIZONTAL_STRIDE / 1e9 /
//time_conv * expr);
//printf("memcpy_htod: %.6lf ms, memcpy_dtoh: %.6lf ms\n",
//time_memcpy_htod / expr, time_memcpy_dtoh / expr);
//printf("choose: %.6lf ms, convolution: %.6lf ms, convtotal: %.6lf ms, "
//"total: %.6lf ms\n",
//time_choose / expr, time_conv / expr, time_conv_total / expr,
//time_total / expr);
return 0;
}
double get_durtime(struct timeval t1, struct timeval t2) {
return (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) /
1000.0;
}
#pragma once
#ifdef BOOST_FOUND
// Boost includes for CPU Dijkstra SSSP reference algorithms
#include <boost/config.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <boost/property_map/property_map.hpp>
#else
#include <queue>
#include <vector>
#include <utility>
#endif
namespace gunrock {
namespace app {
namespace sssp {
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
/**
* @brief Displays the SSSP result (i.e., distance from source)
* @tparam T Type of values to display
* @tparam SizeT Type of size counters
 * @param[in] array Array of values to display (e.g., distances from the source).
 * @param[in] length Number of elements to display (at most 40 are printed).
*/
template <typename T, typename SizeT>
void DisplaySolution(T *array, SizeT length) {
if (length > 40) length = 40;
util::PrintMsg("[", true, false);
for (SizeT i = 0; i < length; ++i) {
util::PrintMsg(std::to_string(i) + ":" + std::to_string(array[i]) + " ",
true, false);
}
util::PrintMsg("]");
}
/******************************************************************************
* SSSP Testing Routines
*****************************************************************************/
/**
* @brief Simple CPU-based reference SSSP implementations
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] graph Input graph
* @param[out] distances Computed distances from the source to each vertex
* @param[out] preds Computed predecessors for each vertex
* @param[in] src The source vertex
* @param[in] quiet Whether to print out anything to stdout
* @param[in] mark_preds Whether to compute predecessor info
* \return double Time taken for the SSSP
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double CPU_Reference(const GraphT &graph, ValueT *distances,
typename GraphT::VertexT *preds,
typename GraphT::VertexT src, bool quiet,
bool mark_preds) {
#ifdef BOOST_FOUND
using namespace boost;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::ValueT GValueT;
typedef typename GraphT::CsrT CsrT;
// Prepare Boost Datatype and Data structure
typedef adjacency_list<vecS, vecS, directedS, no_property,
property<edge_weight_t, GValueT> >
BGraphT;
typedef typename graph_traits<BGraphT>::vertex_descriptor vertex_descriptor;
typedef typename graph_traits<BGraphT>::edge_descriptor edge_descriptor;
typedef std::pair<VertexT, VertexT> EdgeT;
EdgeT *edges = (EdgeT *)malloc(sizeof(EdgeT) * graph.edges);
GValueT *weight = (GValueT *)malloc(sizeof(GValueT) * graph.edges);
for (VertexT v = 0; v < graph.nodes; ++v) {
SizeT edge_start = graph.CsrT::GetNeighborListOffset(v);
SizeT num_neighbors = graph.CsrT::GetNeighborListLength(v);
for (SizeT e = 0; e < num_neighbors; ++e) {
edges[e + edge_start] = EdgeT(v, graph.CsrT::GetEdgeDest(e + edge_start));
weight[e + edge_start] = graph.CsrT::edge_values[e + edge_start];
}
}
BGraphT g(edges, edges + graph.edges, weight, graph.nodes);
std::vector<ValueT> d(graph.nodes);
std::vector<vertex_descriptor> p(graph.nodes);
vertex_descriptor s = vertex(src, g);
typename property_map<BGraphT, vertex_index_t>::type indexmap =
get(vertex_index, g);
//
// Perform SSSP
//
util::CpuTimer cpu_timer;
cpu_timer.Start();
if (mark_preds) {
dijkstra_shortest_paths(
g, s,
predecessor_map(boost::make_iterator_property_map(
p.begin(), get(boost::vertex_index, g)))
.distance_map(boost::make_iterator_property_map(
d.begin(), get(boost::vertex_index, g))));
} else {
dijkstra_shortest_paths(g, s,
distance_map(boost::make_iterator_property_map(
d.begin(), get(boost::vertex_index, g))));
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
// util::PrintMsg("CPU SSSP finished in " + std::to_string(elapsed)
// + " msec.", !quiet);
typedef std::pair<VertexT, ValueT> PairT;
PairT *sort_dist = new PairT[graph.nodes];
typename graph_traits<BGraphT>::vertex_iterator vi, vend;
for (tie(vi, vend) = vertices(g); vi != vend; ++vi) {
sort_dist[(*vi)].first = (*vi);
sort_dist[(*vi)].second = d[(*vi)];
}
std::stable_sort(
sort_dist, sort_dist + graph.nodes,
// RowFirstTupleCompare<Coo<Value, Value> >);
[](const PairT &a, const PairT &b) -> bool { return a.first < b.first; });
for (VertexT v = 0; v < graph.nodes; ++v) distances[v] = sort_dist[v].second;
delete[] sort_dist;
sort_dist = NULL;
if (mark_preds) {
typedef std::pair<VertexT, VertexT> VPairT;
VPairT *sort_pred = new VPairT[graph.nodes];
for (tie(vi, vend) = vertices(g); vi != vend; ++vi) {
sort_pred[(*vi)].first = (*vi);
sort_pred[(*vi)].second = p[(*vi)];
}
std::stable_sort(sort_pred, sort_pred + graph.nodes,
// RowFirstTupleCompare< Coo<VertexId, VertexId> >);
[](const VPairT &a, const VPairT &b) -> bool {
return a.first < b.first;
});
for (VertexT v = 0; v < graph.nodes; ++v) preds[v] = sort_pred[v].second;
delete[] sort_pred;
sort_pred = NULL;
}
return elapsed;
#else
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef std::pair<VertexT, ValueT> PairT;
struct GreaterT {
bool operator()(const PairT &lhs, const PairT &rhs) {
return lhs.second > rhs.second;
}
};
typedef std::priority_queue<PairT, std::vector<PairT>, GreaterT> PqT;
for (VertexT v = 0; v < graph.nodes; v++) {
distances[v] = util::PreDefinedValues<ValueT>::MaxValue;
if (mark_preds && preds != NULL)
preds[v] = util::PreDefinedValues<VertexT>::InvalidValue;
}
distances[src] = 0;
if (mark_preds && preds != NULL) preds[src] = src;
PqT pq;
pq.push(std::make_pair(src, 0));
util::CpuTimer cpu_timer;
cpu_timer.Start();
while (!pq.empty()) {
auto pair = pq.top();
pq.pop();
VertexT v = pair.first;
ValueT v_distance = pair.second;
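    // Lazy deletion: the queue may hold stale entries for vertices whose distance
    // has since improved; skip them instead of doing a decrease-key operation.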
if (v_distance > distances[v]) continue;
SizeT e_start = graph.GetNeighborListOffset(v);
SizeT e_end = e_start + graph.GetNeighborListLength(v);
for (SizeT e = e_start; e < e_end; e++) {
VertexT u = graph.GetEdgeDest(e);
ValueT u_distance = v_distance + graph.edge_values[e];
if (u_distance < distances[u]) {
distances[u] = u_distance;
if (mark_preds && preds != NULL) preds[u] = v;
pq.push(std::make_pair(u, u_distance));
}
}
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
// util::PrintMsg("CPU SSSP finished in " + std::to_string(elapsed)
// + " msec.", !quiet);
return elapsed;
#endif
}
/**
* @brief Validation of SSSP results
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] src The source vertex
* @param[in] h_distances Computed distances from the source to each vertex
* @param[in] h_preds Computed predecessors for each vertex
* @param[in] ref_distances Reference distances from the source to each vertex
* @param[in] ref_preds Reference predecessors for each vertex
 * @param[in] verbose Whether to output detailed comparisons
* \return GraphT::SizeT Number of errors
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
typename GraphT::SizeT Validate_Results(
    util::Parameters &parameters, GraphT &graph, typename GraphT::VertexT src,
ValueT *h_distances, typename GraphT::VertexT *h_preds,
ValueT *ref_distances = NULL, typename GraphT::VertexT *ref_preds = NULL,
bool verbose = true) {
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
SizeT num_errors = 0;
// bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
bool mark_pred = parameters.Get<bool>("mark-pred");
// Verify the result
if (ref_distances != NULL) {
for (VertexT v = 0; v < graph.nodes; v++) {
if (!util::isValid(ref_distances[v]))
ref_distances[v] = util::PreDefinedValues<ValueT>::MaxValue;
}
util::PrintMsg("Distance Validity: ", !quiet, false);
SizeT errors_num = util::CompareResults(h_distances, ref_distances,
graph.nodes, true, quiet);
if (errors_num > 0) {
util::PrintMsg(std::to_string(errors_num) + " errors occurred.", !quiet);
num_errors += errors_num;
}
  } else {
util::PrintMsg("Distance Validity: ", !quiet, false);
SizeT errors_num = 0;
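    // No reference distances were supplied, so check self-consistency instead:
    // the result is accepted only if no edge can still relax a vertex, i.e.
    // h_distances[v] + w(v,u) >= h_distances[u] for every edge (v,u).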
for (VertexT v = 0; v < graph.nodes; v++) {
ValueT v_distance = h_distances[v];
if (!util::isValid(v_distance)) continue;
SizeT e_start = graph.CsrT::GetNeighborListOffset(v);
SizeT num_neighbors = graph.CsrT::GetNeighborListLength(v);
SizeT e_end = e_start + num_neighbors;
for (SizeT e = e_start; e < e_end; e++) {
VertexT u = graph.CsrT::GetEdgeDest(e);
ValueT u_distance = h_distances[u];
ValueT e_value = graph.CsrT::edge_values[e];
if (v_distance + e_value >= u_distance) continue;
errors_num++;
if (errors_num > 1) continue;
util::PrintMsg("FAIL: v[" + std::to_string(v) + "] (" +
std::to_string(v_distance) + ") + e[" +
std::to_string(e) + "] (" + std::to_string(e_value) +
") < u[" + std::to_string(u) + "] (" +
std::to_string(u_distance) + ")",
!quiet);
}
}
if (errors_num > 0) {
util::PrintMsg(std::to_string(errors_num) + " errors occurred.", !quiet);
num_errors += errors_num;
} else {
util::PrintMsg("PASS", !quiet);
}
}
if (!quiet && verbose) {
// Display Solution
util::PrintMsg("First 40 distances of the GPU result:");
DisplaySolution(h_distances, graph.nodes);
if (ref_distances != NULL) {
util::PrintMsg("First 40 distances of the reference CPU result.");
DisplaySolution(ref_distances, graph.nodes);
}
util::PrintMsg("");
}
if (mark_pred) {
util::PrintMsg("Predecessors Validity: ", !quiet, false);
SizeT errors_num = 0;
for (VertexT v = 0; v < graph.nodes; v++) {
VertexT pred = h_preds[v];
if (!util::isValid(pred) || v == src) continue;
ValueT v_distance = h_distances[v];
if (v_distance == util::PreDefinedValues<ValueT>::MaxValue) continue;
ValueT pred_distance = h_distances[pred];
bool edge_found = false;
SizeT edge_start = graph.CsrT::GetNeighborListOffset(pred);
SizeT num_neighbors = graph.CsrT::GetNeighborListLength(pred);
for (SizeT e = edge_start; e < edge_start + num_neighbors; e++) {
if (v == graph.CsrT::GetEdgeDest(e) &&
std::abs((pred_distance + graph.CsrT::edge_values[e] - v_distance) *
1.0) < 1e-6) {
edge_found = true;
break;
}
}
if (edge_found) continue;
errors_num++;
if (errors_num > 1) continue;
util::PrintMsg("FAIL: [" + std::to_string(pred) + "] (" +
std::to_string(pred_distance) + ") -> [" +
std::to_string(v) + "] (" +
std::to_string(v_distance) +
") can't find the corresponding edge.",
!quiet);
}
if (errors_num > 0) {
util::PrintMsg(std::to_string(errors_num) + " errors occurred.", !quiet);
num_errors += errors_num;
} else {
util::PrintMsg("PASS", !quiet);
}
}
if (!quiet && mark_pred && verbose) {
util::PrintMsg("First 40 preds of the GPU result:");
DisplaySolution(h_preds, graph.nodes);
if (ref_preds != NULL) {
util::PrintMsg(
"First 40 preds of the reference CPU result "
"(could be different because the paths are not unique):");
DisplaySolution(ref_preds, graph.nodes);
}
util::PrintMsg("");
}
return num_errors;
}
} // namespace sssp
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
namespace dart {
// -=-=-=-=-=-=-=-=-=- helper -=-=-=-=-=-=-=-=-=-
static inline __host__ __device__ unsigned char clamp(int c) {
return min(max(0,c),255);
}
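// Standard hue-sector HSV -> RGB conversion: h is in degrees [0,360),
// s and v are in [0,1], and the result is packed as 8-bit RGB.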
inline __host__ __device__ uchar3 hsv2rgb(float h, float s, float v) {
float c = v*s;
float hPrime = h/60.0f;
float x = c*(1 - fabs(fmodf(hPrime,2) - 1));
float m = v-c;
int hPrimeInt = hPrime;
switch (hPrimeInt) {
case 0:
return make_uchar3(255*(c+m),255*(x+m),255*(m));
case 1:
return make_uchar3(255*(x+m),255*(c+m),255*(m));
case 2:
return make_uchar3(255*(m),255*(c+m),255*(x+m));
case 3:
return make_uchar3(255*(m),255*(x+m),255*(c+m));
case 4:
return make_uchar3(255*(x+m),255*(m),255*(c+m));
case 5:
return make_uchar3(255*(c+m),255*(m),255*(x+m));
}
return make_uchar3(0,0,0);
}
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
__global__ void gpu_colorRampHeatMap(uchar3 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
uchar3 & imgVal = colored[index];
if (isnan(vals[index])) {
imgVal = make_uchar3(0,0,0);
return;
}
const float normVal = (vals[index] - minVal)/(maxVal-minVal);
if (normVal < 0.25) { imgVal = make_uchar3(0,clamp(255*(normVal/0.25)),255); }
else if (normVal < 0.5) { imgVal = make_uchar3(0,255,clamp(255*((0.5-normVal)/0.25))); }
else if (normVal < 0.75) { imgVal = make_uchar3(clamp(255*((normVal - 0.5)/0.25)),255,0); }
else { imgVal = make_uchar3(255,clamp(255*(1.0-normVal)/0.25),0); }
}
__global__ void gpu_colorRampHeatMap(uchar4 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
uchar4 & imgVal = colored[index];
if (isnan(vals[index])) {
imgVal = make_uchar4(0,0,0,0);
return;
}
const float normVal = (vals[index] - minVal)/(maxVal-minVal);
if (normVal < 0.25) { imgVal = make_uchar4(0,clamp(255*(normVal/0.25)),255,255); }
else if (normVal < 0.5) { imgVal = make_uchar4(0,255,clamp(255*((0.5-normVal)/0.25)),255); }
else if (normVal < 0.75) { imgVal = make_uchar4(clamp(255*((normVal - 0.5)/0.25)),255,0,255); }
else { imgVal = make_uchar4(255,clamp(255*(1.0-normVal)/0.25),0,255); }
}
__global__ void gpu_colorRampHeatMapUnsat(uchar3 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
uchar3 & imgVal = colored[index];
if (isnan(vals[index])) {
imgVal = make_uchar3(255,255,255);
return;
}
const float normVal = fmaxf(0,fminf((vals[index] - minVal)/(maxVal-minVal),1));
const float t = normVal == 1.0 ? 1.0 : fmodf(normVal,0.25)*4;
uchar3 a, b;
if (normVal < 0.25) { b = make_uchar3(32,191,139); a = make_uchar3(0x18,0x62,0x93); }
else if (normVal < 0.5) { b = make_uchar3(241,232,137); a = make_uchar3(32,191,139); }
else if (normVal < 0.75) { b = make_uchar3(198,132,63); a = make_uchar3(241,232,137); }
else { b = make_uchar3(0xc0,0x43,0x36); a = make_uchar3(198,132,63); }
imgVal = make_uchar3((1-t)*a.x + t*b.x,
(1-t)*a.y + t*b.y,
(1-t)*a.z + t*b.z);
}
__global__ void gpu_colorRampHeatMapUnsat(uchar4 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
uchar4 & imgVal = colored[index];
if (isnan(vals[index])) {
imgVal = make_uchar4(0,0,0,0);
return;
}
const float normVal = fmaxf(0,fminf((vals[index] - minVal)/(maxVal-minVal),1));
const float t = normVal == 1.0 ? 1.0 : fmodf(normVal,0.25)*4;
uchar3 a, b;
if (normVal < 0.25) { b = make_uchar3(32,191,139); a = make_uchar3(0x18,0x62,0x93); }
else if (normVal < 0.5) { b = make_uchar3(241,232,137); a = make_uchar3(32,191,139); }
else if (normVal < 0.75) { b = make_uchar3(198,132,63); a = make_uchar3(241,232,137); }
else { b = make_uchar3(0xc0,0x43,0x36); a = make_uchar3(198,132,63); }
imgVal = make_uchar4((1-t)*a.x + t*b.x,
(1-t)*a.y + t*b.y,
(1-t)*a.z + t*b.z,255);
}
template <bool showZeroLevel>
__global__ void gpu_colorRampTopographic(uchar4 * colored,
const float * vals,
const int width,
const int height,
const float lineThickness,
const float lineSpacing) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
uchar4 & imgVal = colored[index];
if (fabs(vals[index]) < 1.5*lineThickness) {
if (showZeroLevel) {
float g = clamp(2*255*(fabs(vals[index])-lineThickness)/lineThickness);
imgVal = make_uchar4(g,g,g,255);
} else {
imgVal = make_uchar4(255,255,255,255);
}
} else {
float c = fabs(fmodf(fabs(vals[index])+lineSpacing/2,lineSpacing)-lineSpacing/2);
if (c < lineThickness ) {
float g;
if (showZeroLevel) {
g = clamp(192+64*c/lineThickness);
} else {
g = clamp(64+192*c/lineThickness);
}
imgVal = make_uchar4(g,g,g,255);
}
else {
imgVal = make_uchar4(255,255,255,255);
}
}
}
template <bool norm>
__global__ void gpu_colorRamp2DGradient(uchar4 * colored,
const float2 * grad,
const int width,
const int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
uchar4 &imgVal = colored[index];
float2 g = grad[index];
if (norm) { float len = sqrtf(g.x*g.x + g.y*g.y); g = make_float2(g.x/len,g.y/len); }
uchar3 rgb = hsv2rgb(180+180*atan2(g.x,g.y)/M_PI,1,1);
imgVal = make_uchar4(rgb.x,rgb.y,rgb.z,255);
}
template <bool norm>
__global__ void gpu_colorRamp3DGradient(uchar4 * colored,
const float3 * grad,
const int width,
const int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
uchar4 & imgVal = colored[index];
float3 g = grad[index];
if (norm) { float len = sqrtf(g.x*g.x+g.y*g.y+g.z*g.z); g = make_float3(g.x/len,g.y/len,g.z/len); }
imgVal = make_uchar4(clamp(128-128*g.x),clamp(128-128*g.y),clamp(128-128*g.z),255);
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
void colorRampHeatMap(uchar3 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
gpu_colorRampHeatMap<<<grid,block>>>(colored,vals,width,height,minVal,maxVal);
}
void colorRampHeatMap(uchar4 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
gpu_colorRampHeatMap<<<grid,block>>>(colored,vals,width,height,minVal,maxVal);
}
void colorRampHeatMapUnsat(uchar3 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
gpu_colorRampHeatMapUnsat<<<grid,block>>>(colored,vals,width,height,minVal,maxVal);
}
void colorRampHeatMapUnsat(uchar4 * colored,
const float * vals,
const int width,
const int height,
const float minVal,
const float maxVal) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
gpu_colorRampHeatMapUnsat<<<grid,block>>>(colored,vals,width,height,minVal,maxVal);
}
void colorRampTopographic(uchar4 * colored,
const float * vals,
const int width,
const int height,
const float lineThickness,
const float lineSpacing,
const bool showZeroLevel) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
if (showZeroLevel) {
gpu_colorRampTopographic<true><<<grid,block>>>(colored,vals,width,height,lineThickness,lineSpacing);
} else {
gpu_colorRampTopographic<false><<<grid,block>>>(colored,vals,width,height,lineThickness,lineSpacing);
}
}
void colorRamp2DGradient(uchar4 * color,
const float2 * grad,
const int width,
const int height,
const bool normalize) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
if (normalize) {
gpu_colorRamp2DGradient<true><<<grid,block>>>(color,grad,width,height);
} else {
gpu_colorRamp2DGradient<false><<<grid,block>>>(color,grad,width,height);
}
}
void colorRamp3DGradient(uchar4 * color,
const float3 * grad,
const int width,
const int height,
const bool normalize) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
if (normalize) {
gpu_colorRamp3DGradient<true><<<grid,block>>>(color,grad,width,height);
} else {
gpu_colorRamp3DGradient<false><<<grid,block>>>(color,grad,width,height);
}
}
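/*
Minimal host-side usage sketch for the wrappers above (illustrative only: the d_image and
d_field names, the value range and the contour settings are assumptions, and error checking
is omitted):

    uchar4 *d_image;  float *d_field;
    cudaMalloc(&d_image, width*height*sizeof(uchar4));
    cudaMalloc(&d_field, width*height*sizeof(float));
    // ... fill d_field on the device ...
    colorRampHeatMap(d_image, d_field, width, height, 0.0f, 1.0f);
    colorRampTopographic(d_image, d_field, width, height, 0.01f, 0.1f, true);
    cudaDeviceSynchronize();
*/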
}
#define BSZ 16
/**
* \namespace kernels
* \brief Contains all custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Computes u-component of the vector resulting from the explicit terms of
* the discretized momentum equation, and an element of the explicit convective terms.
*
* \param rn explicit terms of the discretized momentum equation
* \param H explicit convective terms of the discretized momentum equation
* \param q velocity flux vector
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param dx cell-widths in the x-direction
* \param dy cell-widths in the y-direction
* \param dt time-increment
* \param gamma coefficient of the convection term at the current time-step
* \param zeta coefficient of the convection term at the previous time-step
* \param alpha coefficient of the explicit diffusion term
* \param nu viscosity
*/
__global__
void convectionTermU(real *rn, real *H, real *q,
int nx, int ny, real *dx, real *dy,
real dt, real gamma, real zeta, real alpha, real nu)
{
int bx = blockIdx.x,
by = blockIdx.y,
i = threadIdx.x,
j = threadIdx.y;
// work out global index of first point in block
int I = (BSZ-2)*bx + i,
J = (BSZ-2)*by + j;
if (I >= nx-1 || J >= ny) {
return;
}
int N_u = (nx-1)*ny,
Gidx_x = J*(nx-1) + I,
Gidx_y = (J-1)*nx + I + N_u;
real cTerm, dTerm, Hxn;
__shared__ real u[BSZ][BSZ],
v[BSZ][BSZ],
Hx[BSZ][BSZ];
__shared__ real Dx[BSZ][BSZ], Dy[BSZ][BSZ];
Dy[j][i] = dy[J];
Dx[j][i] = dx[I];
/// transfer from global to shared memory
u[j][i] = q[Gidx_x]/Dy[j][i];
v[j][i] = q[Gidx_y]/Dx[j][i];
__syncthreads();
/// check bounds for convective term in the x-direction
int global_check = ( I==0 || I==(nx-2) || J==0 || J==(ny-1) ), ///< true if the point lies on the domain boundary (not computed here)
block_check = ( i==0 || i==(BSZ-1) || j==0 || j==(BSZ-1) ); ///< true if the thread lies on the block's halo ring (not computed here)
/// X-component
if( !(global_check || block_check) )
{
// copy the previous value of Hx into the register Hxn
Hxn = H[Gidx_x];
// apply stencil
Hx[j][i] = - ( (u[j][i+1]+u[j][i])*(u[j][i+1]+u[j][i]) - (u[j][i]+u[j][i-1])*(u[j][i]+u[j][i-1]) )/( 2.0 * (Dx[j][i]+Dx[j][i+1]) ) \
- ( (u[j+1][i]+u[j][i])*(v[j+1][i]+v[j+1][i+1]) - (u[j][i]+u[j-1][i])*(v[j][i]+v[j][i+1]) )/( 4.0 * Dy[j][i] );
H[Gidx_x] = Hx[j][i];
// rN for u
cTerm = gamma*Hx[j][i] + zeta*Hxn;
//cTerm = Hx[j][i]; // 1st order Euler
dTerm = alpha*nu*2.0*( \
( Dx[j][i]*u[j][i+1] - (Dx[j][i]+Dx[j][i+1])*u[j][i] + Dx[j][i+1]*u[j][i-1] )/( Dx[j][i]*Dx[j][i+1]*(Dx[j][i]+Dx[j][i+1]) ) \
+ 4.0*( (Dy[j][i]+Dy[j-1][i])*u[j+1][i] - (Dy[j-1][i] + 2.0*Dy[j][i] + Dy[j+1][i])*u[j][i] + (Dy[j][i]+Dy[j+1][i])*u[j-1][i] ) \
/( (Dy[j][i]+Dy[j-1][i]) * (Dy[j-1][i] + 2.0*Dy[j][i] + Dy[j+1][i]) * (Dy[j][i]+Dy[j+1][i]) ) \
);
rn[Gidx_x] = (u[j][i]/dt + cTerm + dTerm) * 0.5*(Dx[j][i]+Dx[j][i+1]);
}
} // convectionTermU
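/*
Launch sketch for convectionTermU (illustrative only; the host-side variable names and the
grid-sizing formula are assumptions based on the indexing above). Each block updates only its
(BSZ-2)x(BSZ-2) interior points and uses its outer ring of threads to load halo values, so the
grid is sized with a stride of BSZ-2:

    dim3 block(BSZ, BSZ);
    dim3 grid( (nx-1 + BSZ-3)/(BSZ-2), (ny + BSZ-3)/(BSZ-2) );
    kernels::convectionTermU<<<grid, block>>>(rn, H, q, nx, ny, dx, dy,
                                              dt, gamma, zeta, alpha, nu);
*/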
/**
* \brief Computes v-component of the vector resulting from the explicit terms of
* the discretized momentum equation, and an element of the explicit convective terms.
*
* \param rn explicit terms of the discretized momentum equation
* \param H explicit convective terms of the discretized momentum equation
* \param q velocity flux vector
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param dx cell-widths in the x-direction
* \param dy cell-widths in the y-direction
* \param dt time-increment
* \param gamma coefficient of the convection term at the current time-step
* \param zeta coefficient of the convection term at the previous time-step
* \param alpha coefficient of the explicit diffusion term
* \param nu viscosity
*/
__global__
void convectionTermV(real *rn, real *H, real *q,
int nx, int ny, real *dx, real *dy,
real dt, real gamma, real zeta, real alpha, real nu)
{
int bx = blockIdx.x,
by = blockIdx.y,
i = threadIdx.x,
j = threadIdx.y;
// work out global index of first point in block
int I = (BSZ-2)*bx + i,
J = (BSZ-2)*by + j;
if (I >= nx || J >= ny-1) {
return;
}
int N_u = (nx-1)*ny,
Gidx_x = J*(nx-1) + I,
Gidx_y = J*nx + I + N_u;
real cTerm, dTerm, Hyn;
__shared__ real u[BSZ][BSZ],
v[BSZ][BSZ],
Hy[BSZ][BSZ];
__shared__ real Dx[BSZ][BSZ], Dy[BSZ][BSZ];
Dy[j][i] = dy[J];
Dx[j][i] = dx[I];
/// transfer from global to shared memory
u[j][i] = q[Gidx_x]/Dy[j][i];
v[j][i] = q[Gidx_y]/Dx[j][i];
__syncthreads();
/// check bounds for convective term in the y-direction
int global_check = ( I==0 || I==(nx-1) || J==0 || J==(ny-2) ), ///< true if the point lies on the domain boundary (not computed here)
block_check = ( i==0 || i==(BSZ-1) || j==0 || j==(BSZ-1) ); ///< true if the thread lies on the block's halo ring (not computed here)
/// Y-component
if( !(global_check || block_check) )
{
// Y-component
// copy global data to the register (to store previous value)
Hyn = H[Gidx_y];
// apply stencil
Hy[j][i] = - ( (u[j+1][i]+u[j][i])*(v[j][i]+v[j][i+1]) - (u[j+1][i-1]+u[j][i-1])*(v[j][i]+v[j][i-1]) )/(4.0 * Dx[j][i]) \
- ( (v[j+1][i]+v[j][i])*(v[j+1][i]+v[j][i]) - (v[j-1][i]+v[j][i])*(v[j-1][i]+v[j][i]) )/( 2.0 * (Dy[j+1][i]+Dy[j][i]) );
// store newly calculated value in global memory (to be used in the next time step)
H[Gidx_y] = Hy[j][i];
// rN for v
cTerm = gamma*Hy[j][i] + zeta*Hyn;
//cTerm = Hy[j][i]; // 1st order Euler
dTerm = alpha*nu*2.0*( \
4.0*( (Dx[j][i-1]+Dx[j][i])*v[j][i+1] - (Dx[j][i-1]+2.0*Dx[j][i]+Dx[j][i+1])*v[j][i] + (Dx[j][i]+Dx[j][i+1])*v[j][i-1] ) \
/( (Dx[j][i-1]+Dx[j][i]) * (Dx[j][i]+Dx[j][i+1]) * (Dx[j][i-1]+2.0*Dx[j][i]+Dx[j][i+1]) ) \
+ ( Dy[j][i]*v[j+1][i] - (Dy[j][i]+Dy[j+1][i])*v[j][i] + Dy[j+1][i]*v[j-1][i] )/( Dy[j][i]*Dy[j+1][i]*(Dy[j][i]+Dy[j+1][i]) ) \
);
rn[Gidx_y] = (v[j][i]/dt + cTerm + dTerm) * 0.5*(Dy[j][i]+Dy[j+1][i]);
}
} // convectionTermV
/**
* \brief Computes v-component of the vector resulting from the explicit terms of
* the discretized momentum equation, and an element of the explicit convective terms
* on the bottom or top boundaries.
*
* \param rn explicit terms of the discretized momentum equation
* \param H explicit convective terms of the discretized momentum equation
* \param q velocity flux vector
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param dx cell-widths in the x-direction
* \param dy cell-widths in the y-direction
* \param dt time-increment
* \param gamma coefficient of the convection term at the current time-step
* \param zeta coefficient of the convection term at the previous time-step
* \param alpha coefficient of the explicit diffusion term
* \param nu viscosity
* \param bcBottom bottom-boundary velocity
* \param bcTop top-boundary velocity
*/
__global__
void convectionTermVBottomTop(real *rn, real *H, real *q,
int nx, int ny, real *dx, real *dy,
real dt, real gamma, real zeta, real alpha, real nu,
real *bcBottom, real *bcTop)
{
int I = blockIdx.x*blockDim.x + threadIdx.x;
if(I==0 || I >= nx-1) return;
/// Boundary Conditions for v at the Bottom ***********************************************************************************
int Iu, Iv = I + (nx-1)*ny;
real Hn, cTerm, dTerm;
/// Convection Term
Hn = H[Iv];
H[Iv] = -( \
( 0.5*(q[Iv]/dx[I]+q[Iv+1]/dx[I+1]) ) * ( 0.5*(q[I]/dy[0]+q[I+(nx-1)]/dy[1]) ) \
- ( 0.5*(q[Iv-1]/dx[I-1]+q[Iv]/dx[I]) ) * ( 0.5*(q[I-1]/dy[0]+q[I-1+(nx-1)]/dy[1]) ) \
)/dx[I] \
-( \
(0.5*(q[Iv] + q[Iv+nx])/dx[I]) * (0.5*(q[Iv] + q[Iv+nx])/dx[I]) \
- (0.5*(bcBottom[I+nx-1] + q[Iv]/dx[I])) * (0.5*(bcBottom[I+nx-1] + q[Iv]/dx[I])) \
)/(0.5*(dy[0] + dy[1]));
cTerm = gamma*H[Iv] + zeta*Hn; /// 2nd order Adams-Bashforth
//cTerm = H[Iv]; /// 1st order Euler
/// Diffusion Term
dTerm = alpha*nu*2.0*( \
4.0 * ( (dx[I-1]+dx[I])*q[Iv+1]/dx[I+1] - (dx[I-1]+2.0*dx[I]+dx[I+1])*q[Iv]/dx[I] + (dx[I]+dx[I+1])*q[Iv-1]/dx[I-1] ) \
/( (dx[I-1]+dx[I]) * (dx[I]+dx[I+1]) * (dx[I-1]+2.0*dx[I]+dx[I+1]) ) \
+ ( dy[0]*q[Iv+nx]/dx[I] - (dy[0]+dy[1])*q[Iv]/dx[I] + dy[1]*bcBottom[I+nx-1] )/( dy[0]*dy[1]*(dy[0]+dy[1]) ) \
);
/// Calculate rn
rn[Iv] = ( q[Iv]/(dx[I]*dt) + cTerm + dTerm ) * 0.5*(dy[0] + dy[1]);
/// Boundary conditions for v at the Top **************************************************************************************
Iu = I + (ny-2)*(nx-1);
Iv = I + (nx-1)*ny + (ny-2)*nx;
/// Convection Term
Hn = H[Iv];
H[Iv] = -(
( 0.5*(q[Iv]/dx[I] + q[Iv+1]/dx[I+1]) )*( 0.5*(q[Iu]/dy[ny-2] + q[Iu+(nx-1)]/dy[ny-1]) ) \
- ( 0.5*(q[Iv-1]/dx[I-1] + q[Iv]/dx[I]) )*( 0.5*(q[Iu-1]/dy[ny-2] + q[Iu-1+(nx-1)]/dy[ny-1]) )
)/(dx[I])
-(
( 0.5*(q[Iv]/dx[I] + bcTop[I+nx-1]) )*( 0.5*(q[Iv]/dx[I] + bcTop[I+nx-1]) ) \
- ( 0.5*(q[Iv-nx] + q[Iv])/dx[I] )*( 0.5*(q[Iv-nx] + q[Iv])/dx[I] )
)/(0.5*(dy[ny-2] + dy[ny-1]));
cTerm = gamma*H[Iv] + zeta*Hn; /// 2nd order Adams-Bashforth
//cTerm = H[Iv]; /// 1st order Euler
/// Diffusion Term
dTerm = alpha*nu*2.0*( \
4.0*( (dx[I-1]+dx[I])*q[Iv+1]/dx[I+1] - (dx[I-1]+2.0*dx[I]+dx[I+1])*q[Iv]/dx[I] + (dx[I]+dx[I+1])*q[Iv-1]/dx[I-1] ) \
/( (dx[I-1]+dx[I]) * (dx[I]+dx[I+1]) * (dx[I-1]+2.0*dx[I]+dx[I+1]) ) \
+ ( dy[ny-2]*bcTop[I+nx-1] - (dy[ny-1]+dy[ny-2])*q[Iv]/dx[I] + dy[ny-1]*q[Iv-nx]/dx[I] )/( dy[ny-2]*dy[ny-1]*(dy[ny-2]+dy[ny-1]) ) \
);
/// Calculate rn
rn[Iv] = ( q[Iv]/(dx[I]*dt) + cTerm + dTerm ) * 0.5*(dy[ny-2] + dy[ny-1]);
} // convectionTermVBottomTop
/**
* \brief Computes u-component of the vector resulting from the explicit terms of
* the discretized momentum equation, and an element of the explicit convective terms
* on the bottom or top boundaries.
*
* \param rn explicit terms of the discretized momentum equation
* \param H explicit convective terms of the discretized momentum equation
* \param q velocity flux vector
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param dx cell-widths in the x-direction
* \param dy cell-widths in the y-direction
* \param dt time-increment
* \param gamma coefficient of the convection term at the current time-step
* \param zeta coefficient of the convection term at the previous time-step
* \param alpha coefficient of the explicit diffusion term
* \param nu viscosity
* \param bcBottom bottom-boundary velocity
* \param bcTop top-boundary velocity
* \param bcLeft left-boundary velocity
* \param bcRight right-boundary velocity
*/
__global__
void convectionTermUBottomTop(real *rn, real *H, real *q,
int nx, int ny, real *dx, real *dy,
real dt, real gamma, real zeta, real alpha, real nu,
real *bcBottom, real *bcTop, real *bcLeft, real *bcRight)
{
int I = blockIdx.x*blockDim.x + threadIdx.x;
if(I >= nx-1) return;
/// Boundary Conditions for u at the Bottom ***********************************************************************************
int Iu,
Iv = I + (nx-1)*ny;
real u = q[I]/dy[0],
u0x, u1x,
ul, ur,
Hn, cTerm, dTerm;
/// Calculate the Diffusion Term and Velocity Components for the convection term
if(I==0){
u0x = 0.5*(bcLeft[0] + u);
u1x = 0.5*(u + q[I+1]/dy[0]);
ul = bcLeft[0];
ur = q[I+1]/dy[0];
}
else if(I==nx-2){
u0x = 0.5*(q[I-1]/dy[0] + u);
u1x = 0.5*(u + bcRight[0]);
ul = q[I-1]/dy[0];
ur = bcRight[0];
}
else{
u0x = 0.5*(q[I-1]/dy[0] + u);
u1x = 0.5*(u + q[I+1]/dy[0]);
ul = q[I-1]/dy[0];
ur = q[I+1]/dy[0];
}
/// Convection Term
Hn = H[I];
H[I] = - ( u1x*u1x - u0x*u0x )/( 0.5*(dx[I]+dx[I+1]) ) \
- ( 0.5*(q[Iv]/dx[I] + q[Iv+1]/dx[I+1]) * 0.5*(u + q[I+(nx-1)]/dy[1]) - 0.5*(bcBottom[I+(nx-1)] + bcBottom[I+1+(nx-1)])*bcBottom[I] )/(dy[0]);
cTerm = gamma*H[I] + zeta*Hn;
//cTerm = H[I]; // 1st order Euler **************************** DID NOT CHANGE I TO Iu HERE
// Diffusion Term
dTerm = alpha*nu*2.0*( \
( dx[I]*ur - (dx[I]+dx[I+1])*u + dx[I+1]*ul )/( dx[I] * (dx[I]+dx[I+1]) * dx[I+1] ) \
+ 4.0*( dy[0]*q[I+(nx-1)]/dy[1] - (2.0*dy[0]+dy[1])*u + (dy[0]+dy[1])*bcBottom[I] ) \
/( dy[0] * (2.0*dy[0]+dy[1]) * (dy[0]+dy[1]) )
);
/// Calculate rn
rn[I] = ( u/dt + cTerm + dTerm ) * 0.5*(dx[I] + dx[I+1]);
/// Boundary conditions for u at the Top **************************************************************************************
Iu = I + (ny-1)*(nx-1);
Iv = I + (ny-2)*nx + (nx-1)*ny;
u = q[Iu]/dy[ny-1];
/// Calculate the Diffusion Term and Velocity Components for the convection term
if(I==0){
u0x = 0.5*(bcLeft[ny-1] + u);
u1x = 0.5*(u + q[Iu+1]/dy[ny-1]);
ul = bcLeft[ny-1];
ur = q[Iu+1]/dy[ny-1];
}
else if(I==nx-2){
u0x = 0.5*(q[Iu-1]/dy[ny-1] + u);
u1x = 0.5*(u + bcRight[ny-1]);
ul = q[Iu-1]/dy[ny-1];
ur = bcRight[ny-1];
}
else{
u0x = 0.5*(q[Iu-1]/dy[ny-1] + u);
u1x = 0.5*(u + q[Iu+1]/dy[ny-1]);
ul = q[Iu-1]/dy[ny-1];
ur = q[Iu+1]/dy[ny-1];
}
/// Convection Term
Hn = H[Iu];
H[Iu] = - ( u1x*u1x - u0x*u0x )/( 0.5*(dx[I]+dx[I+1]) ) \
- ( bcTop[I]*0.5*(bcTop[I+(nx-1)]+bcTop[I+1+(nx-1)]) - 0.5*(q[Iv]/dx[I] + q[Iv+1]/dx[I+1])*0.5*(u + q[Iu-(nx-1)]/dy[ny-2]) )/(dy[ny-1]);
cTerm = gamma*H[Iu] + zeta*Hn;
//cTerm = H[Iu]; /// 1st order Euler ************************* CHANGED I TO Iu HERE
// Diffusion Term
dTerm = alpha*nu*2.0*( \
( dx[I]*ur - (dx[I]+dx[I+1])*u + dx[I+1]*ul )/( dx[I] * (dx[I]+dx[I+1]) * dx[I+1] ) \
+ 4.0*( (dy[ny-2]+dy[ny-1])*bcTop[I] - (dy[ny-2]+2.0*dy[ny-1])*u + (dy[ny-1])*q[Iu-(nx-1)]/dy[ny-2] ) \
/( (dy[ny-2]+dy[ny-1])*(dy[ny-1])*(dy[ny-2]+2.0*dy[ny-1]) )
);
/// Calculate rn
rn[Iu] = ( u/dt + cTerm + dTerm) * 0.5*(dx[I] + dx[I+1]);
} // convectionTermUBottomTop
/**
* \brief Computes u-component of the vector resulting from the explicit terms of
* the discretized momentum equation, and an element of the explicit convective terms
* on the left or right boundaries.
*
* \param rn explicit terms of the discretized momentum equation
* \param H explicit convective terms of the discretized momentum equation
* \param q velocity flux vector
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param dx cell-widths in the x-direction
* \param dy cell-widths in the y-direction
* \param dt time-increment
* \param gamma coefficient of the convection term at the current time-step
* \param zeta coefficient of the convection term at the previous time-step
* \param alpha coefficient of the explicit diffusion term
* \param nu viscosity
* \param bcLeft left-boundary velocity
* \param bcRight right-boundary velocity
*/
__global__
void convectionTermULeftRight(real *rn, real *H, real *q,
int nx, int ny, real *dx, real *dy,
real dt, real gamma, real zeta, real alpha, real nu,
real *bcLeft, real *bcRight)
{
int I = blockIdx.x*blockDim.x + threadIdx.x;
if(I==0 || I >= ny-1) return;
/// Boundary Conditions for u at the Left *************************************************************************************
int Iu = I*(nx-1),
Iv = I*(nx) + (nx-1)*ny;
real Hn, cTerm, dTerm;
/// Convection Term
Hn = H[Iu];
H[Iu] = -( \
( 0.5*(q[Iu] + q[Iu+1])/dy[I] ) * ( 0.5*(q[Iu] + q[Iu+1])/dy[I] ) \
- ( 0.5*(bcLeft[I] + q[Iu]/dy[I]) ) * ( 0.5*(bcLeft[I] + q[Iu]/dy[I]) ) \
)/(0.5*(dx[0]+dx[1])) \
-( \
( 0.5*(q[Iv]/dx[0] + q[Iv+1]/dx[1]) ) * ( 0.5*(q[Iu]/dy[I] + q[Iu+(nx-1)]/dy[I+1]) ) \
- ( 0.5*(q[Iv-nx]/dx[0] + q[Iv+1-nx]/dx[1]) ) * ( 0.5*(q[Iu-(nx-1)]/dy[I-1] + q[Iu]/dy[I]) ) \
)/(dy[I]);
cTerm = gamma*H[Iu] + zeta*Hn;
//cTerm = H[Iu]; /// 1st order Euler
/// Diffusion Term
dTerm = alpha*nu*2.0*( \
( dx[0]*q[Iu+1]/dy[I] - (dx[0]+dx[1])*q[Iu]/dy[I] + dx[1]*bcLeft[I] )/( dx[0]*dx[1]*(dx[0]+dx[1]) ) \
+ 4.0 * ( (dy[I-1]+dy[I])*q[Iu+(nx-1)]/dy[I+1] - (dy[I-1]+2.0*dy[I]+dy[I+1])*q[Iu]/dy[I] + (dy[I]+dy[I+1])*q[Iu-(nx-1)]/dy[I-1] ) \
/( (dy[I-1]+dy[I]) * (dy[I]+dy[I+1]) * (dy[I-1]+2.0*dy[I]+dy[I+1]) ) \
);
/// Calculate rn
rn[Iu] = ( q[Iu]/(dy[I]*dt) + cTerm + dTerm ) * 0.5*(dx[0] + dx[1]);
/// Boundary conditions for u at the Right ************************************************************************************
Iu = I*(nx-1) + (nx-2);
Iv = I*(nx) + (nx-1)*ny + (nx-2);
/// Convection Term
H[Iu] = -( \
( 0.5*(q[Iu]/dy[I] + bcRight[I]) ) * ( 0.5*(q[Iu]/dy[I] + bcRight[I]) ) \
- ( 0.5*(q[Iu-1] + q[Iu])/dy[I] ) * ( 0.5*(q[Iu-1] + q[Iu])/dy[I] ) \
)/(0.5*(dx[nx-2]+dx[nx-1])) \
-( \
( 0.5*(q[Iv]/dx[nx-2] + q[Iv+1]/dx[nx-1]) ) * ( 0.5*(q[Iu]/dy[I] + q[Iu+(nx-1)]/dy[I+1]) ) \
- ( 0.5*(q[Iv-nx]/dx[nx-2] + q[Iv+1-nx]/dx[nx-1]) ) * ( 0.5*(q[Iu-(nx-1)]/dy[I-1] + q[Iu]/dy[I]) ) \
)/(dy[I]);
cTerm = gamma*H[Iu] + zeta*Hn;
//cTerm = H[Iu]; /// 1st order Euler
/// Diffusion Term
dTerm = alpha*nu*2.0*( \
( dx[nx-2]*bcRight[I] - (dx[nx-2]+dx[nx-1])*q[Iu]/dy[I] + dx[nx-1]*q[Iu-1]/dy[I] )/( dx[nx-2]*dx[nx-1]*(dx[nx-2]+dx[nx-1]) ) \
+ 4.0 * ( (dy[I-1]+dy[I])*q[Iu+(nx-1)]/dy[I+1] - (dy[I-1]+2.0*dy[I]+dy[I+1])*q[Iu]/dy[I] + (dy[I]+dy[I+1])*q[Iu-(nx-1)]/dy[I-1] ) \
/( (dy[I-1]+dy[I]) * (dy[I]+dy[I+1]) * (dy[I-1]+2.0*dy[I]+dy[I+1]) ) \
);
/// Calculate rn
rn[Iu] = ( q[Iu]/(dy[I]*dt) + cTerm + dTerm ) * 0.5*(dx[nx-2] + dx[nx-1]);
} // convectionTermULeftRight
/**
* \brief Computes v-component of the vector resulting from the explicit terms of
* the discretized momentum equation, and an element of the explicit convective terms
* on the left or right boundaries.
*
* \param rn explicit terms of the discretized momentum equation
* \param H explicit convective terms of the discretized momentum equation
* \param q velocity flux vector
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param dx cell-widths in the x-direction
* \param dy cell-widths in the y-direction
* \param dt time-increment
* \param gamma coefficient of the convection term at the current time-step
* \param zeta coefficient of the convection term at the previous time-step
* \param alpha coefficient of the explicit diffusion term
* \param nu viscosity
* \param bcBottom bottom-boundary velocity
* \param bcTop top-boundary velocity
* \param bcLeft left-boundary velocity
* \param bcRight right-boundary velocity
*/
__global__
void convectionTermVLeftRight(real *rn, real *H, real *q,
int nx, int ny, real *dx, real *dy,
real dt, real gamma, real zeta, real alpha, real nu,
real *bcBottom, real *bcTop, real *bcLeft, real *bcRight)
{
int I = blockIdx.x*blockDim.x + threadIdx.x;
if(I > ny-2) return;
/// Boundary Conditions for v at the Left *************************************************************************************
int Iu = I*(nx-1),
Iv = I*nx + (nx-1)*ny;
real vb, vt, v0y, v1y, v,
Hn, cTerm, dTerm;
v = q[Iv]/dx[0];
if(I==0){
v0y = 0.5*(bcBottom[nx-1] + v);
v1y = 0.5*(v + q[Iv+nx]/dx[0]);
vb = bcBottom[nx-1];
vt = q[Iv+nx]/dx[0];
}
else if(I==ny-2){
v0y = 0.5*(q[Iv-nx]/dx[0] + v);
v1y = 0.5*(q[Iv]/dx[0] + bcTop[nx-1]);
vb = q[Iv-nx]/dx[0];
vt = bcTop[nx-1];
}
else{
v0y = 0.5*(q[Iv-nx]/dx[0] + v);
v1y = 0.5*(v + q[Iv+nx]/dx[0]);
vb = q[Iv-nx]/dx[0];
vt = q[Iv+nx]/dx[0];
}
__syncthreads();
/// Convection Term
Hn = H[Iv];
H[Iv] = -( 0.5*(v + q[Iv+1]/dx[1])*0.5*(q[Iu]/dy[I] + q[Iu+(nx-1)]/dy[I+1]) - bcLeft[I+ny]*0.5*(bcLeft[I] + bcLeft[I+1]) )/dx[0] \
-( v1y*v1y - v0y*v0y )/(0.5*(dy[I] + dy[I+1]));
cTerm = gamma*H[Iv] + zeta*Hn;
//cTerm = H[Iv]; /// 1st order Euler
/// Diffusion Term
dTerm = alpha*nu*2.0*( \
4.0 * ( (dx[0])*q[Iv+1]/dx[1] - (2.0*dx[0]+dx[1])*v + (dx[0]+dx[1])*bcLeft[I+ny] ) \
/( (dx[0]) * (dx[0]+dx[1]) * (2.0*dx[0]+dx[1]) ) \
+ ( dy[I]*vt - (dy[I]+dy[I+1])*v + dy[I+1]*vb )/( dy[I]*dy[I+1]*(dy[I]+dy[I+1]) ) \
);
/// Calculate rn
rn[Iv] = ( v/dt + cTerm + dTerm ) * 0.5*(dy[I] + dy[I+1]);
/// Boundary Conditions for v at the Right ************************************************************************************
Iu = I*(nx-1) + (nx-1);
Iv = I*nx + (nx-1)*ny + (nx-1);
v = q[Iv]/dx[nx-1];
if(I==0){
v0y = 0.5*(bcBottom[nx-1+(nx-1)] + v);
v1y = 0.5*(v + q[Iv+nx]/dx[nx-1]);
vb = bcBottom[nx-1+(nx-1)];
vt = q[Iv+nx]/dx[nx-1];
}
else if(I==ny-2){
v0y = 0.5*(q[Iv-nx]/dx[nx-1] + v);
v1y = 0.5*(q[Iv]/dx[nx-1] + bcTop[nx-1+(nx-1)]);
vb = q[Iv-nx]/dx[nx-1];
vt = bcTop[nx-1+(nx-1)];
}
else{
v0y = 0.5*(q[Iv-nx]/dx[nx-1] + v);
v1y = 0.5*(v + q[Iv+nx]/dx[nx-1]);
vb = q[Iv-nx]/dx[nx-1];
vt = q[Iv+nx]/dx[nx-1];
}
__syncthreads();
/// Convection Term
Hn = H[Iv];
H[Iv] = -( bcRight[I+ny]*0.5*(bcRight[I]+bcRight[I+1]) - 0.5*(q[Iv-1]/dx[nx-2] + v)*0.5*(q[Iu-1]/dy[I] + q[Iu-1+(nx-1)]/dy[I+1]) )/dx[nx-1] \
-( v1y*v1y - v0y*v0y )/(0.5*(dy[I] + dy[I+1]));
cTerm = gamma*H[Iv] + zeta*Hn;
//cTerm = H[Iv]; /// 1st order Euler
/// Diffusion Term
dTerm = alpha*nu*2.0*( \
4.0 * ( (dx[nx-1]+dx[nx-2])*bcRight[I+ny] - (2.0*dx[nx-1]+dx[nx-2])*v + (dx[nx-1])*q[Iv-1]/dx[nx-2] ) \
/( (dx[nx-1]) * (dx[nx-1]+dx[nx-2]) * (2.0*dx[nx-1]+dx[nx-2]) ) \
+ ( dy[I]*vt - (dy[I]+dy[I+1])*v + dy[I+1]*vb )/( dy[I]*dy[I+1]*(dy[I]+dy[I+1]) ) \
);
/// Calculate rn
rn[Iv] = ( v/dt + cTerm + dTerm ) * 0.5*(dy[I] + dy[I+1]);
} // convectionTermVLeftRight
} // End of namespace kernels
#include "nvcomp_cub.cuh"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cmath>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
#if defined(_WIN32)
#include <malloc.h>
#endif
namespace nvcomp
{
namespace highlevel
{
namespace
{
/**************************************************************************
* Device Functions
*************************************************************************/
// Device function to perform RLE on a buffer per block
template <typename VALUE, typename COUNT, int BLOCK_SIZE, int TILE_SIZE>
__device__ void deviceRLEKernel(
COUNT* const runBuffer,
VALUE* const valBuffer,
COUNT* const prefix,
size_t const num,
int ITEMS_PER_THREAD)
{
COUNT sum = 0;
{
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
VALUE nextVal;
if (tid < num) {
nextVal = valBuffer[tid + 1];
sum += nextVal != val;
val = nextVal;
}
}
}
__syncthreads();
// prefixsum bit mask
{
typedef cub::BlockScan<COUNT, BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveSum(sum, sum);
prefix[threadIdx.x + 1] = sum;
}
__syncthreads();
{
int outIdx = prefix[threadIdx.x];
VALUE val = valBuffer[threadIdx.x * ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; i++) {
const int tid = threadIdx.x * ITEMS_PER_THREAD + i;
const VALUE nextVal = valBuffer[tid + 1];
if (nextVal != val) {
runBuffer[outIdx + 1] = tid + 1;
val = nextVal;
++outIdx;
}
}
}
}
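// Small worked example of the per-block RLE above (values assumed for illustration): given
// valBuffer = [5, 5, 5, 2, 2, 9, ...], each thread counts the value changes inside its
// ITEMS_PER_THREAD chunk, the cub::BlockScan turns those counts into write offsets in prefix[],
// and the second pass writes each change position (3 and 5 here) into runBuffer, so the
// differences of consecutive runBuffer entries give the run lengths and valBuffer[runBuffer[i]]
// gives the value of run i.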
// Compute min and max of a buffer
template <typename VALUE, typename COUNT, int BLOCK_SIZE, int TILE_SIZE>
__device__ void deviceFindMinMax(
const size_t num,
COUNT* const prefix,
COUNT* const runBuffer,
VALUE* const valBuffer,
VALUE* const valBuffer2,
bool const prevValFlag,
COUNT* localRunMin,
COUNT* localRunMax,
VALUE* localValMin,
VALUE* localValMax,
uint64_t* const maxConnectedRun,
bool const connectedFlag,
bool const firstLayer)
{
COUNT numCompacted = prefix[BLOCK_SIZE];
if (threadIdx.x == 0 && blockIdx.x == gridDim.x - 1) {
COUNT remain = num % TILE_SIZE;
runBuffer[numCompacted] = (remain == 0) ? (TILE_SIZE) : remain;
}
__syncthreads();
COUNT cur_run = runBuffer[1] - runBuffer[0];
COUNT cur_run_min = cur_run;
COUNT cur_run_max = cur_run;
VALUE cur_val = valBuffer[0];
VALUE cur_val_min = valBuffer[0];
VALUE cur_val_max = cur_val_min;
for (int tid = threadIdx.x; tid < numCompacted; tid += BLOCK_SIZE) {
cur_run = runBuffer[tid + 1] - runBuffer[tid];
if (firstLayer && connectedFlag) {
if (tid == 0) {
if (prevValFlag)
atomicAdd(
(unsigned long long*)(&(maxConnectedRun[blockIdx.x])),
(unsigned long long)(cur_run));
else
atomicAdd((unsigned long long*)(&(maxConnectedRun[blockIdx.x])), 0);
}
}
int validx = runBuffer[tid];
cur_val = valBuffer[validx];
cur_run_min = (tid == threadIdx.x) ? cur_run : min(cur_run_min, cur_run);
cur_run_max = (tid == threadIdx.x) ? cur_run : max(cur_run_max, cur_run);
if (firstLayer)
valBuffer2[tid] = cur_val;
cur_val_min = (tid == threadIdx.x) ? cur_val : min(cur_val_min, cur_val);
cur_val_max = (tid == threadIdx.x) ? cur_val : max(cur_val_max, cur_val);
}
__syncthreads();
{
typedef cub::BlockScan<COUNT, BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveScan(cur_run_min, cur_run_min, cub::Min());
BlockScan(temp_storage).InclusiveScan(cur_run_max, cur_run_max, cub::Max());
}
__syncthreads();
{
typedef cub::BlockScan<VALUE, BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).InclusiveScan(cur_val_min, cur_val_min, cub::Min());
BlockScan(temp_storage).InclusiveScan(cur_val_max, cur_val_max, cub::Max());
}
__syncthreads();
if (threadIdx.x == 0) {
*localRunMin = cur_run_min;
*localValMin = cur_val_min;
}
if (threadIdx.x == (BLOCK_SIZE - 1)) {
*localRunMax = cur_run_max;
*localValMax = cur_val_max;
}
__syncthreads();
}
/**************************************************************
* Kernels
*************************************************************/
// Kernel to perform the fused compression on a sample of the input
template <typename VALUE, typename COUNT, int BLOCK_SIZE, int TILE_SIZE>
__global__ void SampleFusedKernel(
const VALUE* const in,
size_t* const sample_offsets,
const size_t num,
unsigned long long int* const sizeBuffer
)
{
__shared__ size_t s_offset;
if (threadIdx.x == 0) {
s_offset = sample_offsets[blockIdx.x];
}
__syncthreads();
constexpr const int ITEMS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
__shared__ COUNT prefix[BLOCK_SIZE + 1];
__shared__ COUNT runBuffer[TILE_SIZE + 1];
__shared__ VALUE valBuffer1stRLE[TILE_SIZE + 1];
__shared__ VALUE valBuffer2ndRLE[TILE_SIZE + 1];
__shared__ COUNT localRunMinFor1stRLE;
__shared__ COUNT localRunMaxFor1stRLE;
__shared__ VALUE localValMinFor1stRLE;
__shared__ VALUE localValMaxFor1stRLE;
__shared__ COUNT localRunMinFor2ndRLE;
__shared__ COUNT localRunMaxFor2ndRLE;
__shared__ VALUE localValMinFor2ndRLE;
__shared__ VALUE localValMaxFor2ndRLE;
// load data
__syncthreads();
for (int tid = threadIdx.x; tid < TILE_SIZE; tid += BLOCK_SIZE) {
int gTid = tid + s_offset;
if (tid < num) {
valBuffer1stRLE[tid] = in[gTid];
} else {
int maxTid = num + s_offset - 1;
valBuffer1stRLE[tid] = in[maxTid];
}
}
if (threadIdx.x == 0) {
prefix[0] = 0;
runBuffer[0] = 0;
}
__syncthreads();
// Have each block run RLE on a tile of sampled input
size_t tile_size = TILE_SIZE;
deviceRLEKernel<VALUE, COUNT, BLOCK_SIZE, TILE_SIZE>(
runBuffer, valBuffer1stRLE, prefix, tile_size, ITEMS_PER_THREAD);
__syncthreads();
// Find min and max values used to estimate compression ratio
deviceFindMinMax<VALUE, COUNT, BLOCK_SIZE, TILE_SIZE>(
num,
prefix,
runBuffer,
valBuffer1stRLE,
valBuffer2ndRLE,
false,
&localRunMinFor1stRLE,
&localRunMaxFor1stRLE,
&localValMinFor1stRLE,
&localValMaxFor1stRLE,
NULL,
false,
true);
__syncthreads();
// Initialize input for 2nd RLE stage testing
size_t numOutFor1stRLE = prefix[BLOCK_SIZE];
size_t num2ndRLE = prefix[BLOCK_SIZE];
const int ITEMS_PER_THREAD2 = ceil((double)num2ndRLE / (double)BLOCK_SIZE);
for (int ph = 0; ph < ITEMS_PER_THREAD2; ph++) {
int tid = ph * BLOCK_SIZE + threadIdx.x;
if (tid == 0) {
valBuffer1stRLE[tid] = in[0];
} else {
valBuffer1stRLE[tid] = valBuffer2ndRLE[tid] - valBuffer2ndRLE[tid - 1];
}
}
__syncthreads();
if (threadIdx.x == 0) {
prefix[0] = 0;
runBuffer[0] = 0;
}
__syncthreads();
// Run 2nd RLE on output from first tested stage
deviceRLEKernel<VALUE, COUNT, BLOCK_SIZE, TILE_SIZE>(
runBuffer, valBuffer1stRLE, prefix, (size_t)num2ndRLE, ITEMS_PER_THREAD2);
__syncthreads();
size_t numOutFor2ndRLE = prefix[BLOCK_SIZE];
// Compute min and max for 2nd RLE stage
deviceFindMinMax<VALUE, COUNT, BLOCK_SIZE, TILE_SIZE>(
num2ndRLE,
prefix,
runBuffer,
valBuffer1stRLE,
valBuffer2ndRLE,
false,
&localRunMinFor2ndRLE,
&localRunMaxFor2ndRLE,
&localValMinFor2ndRLE,
&localValMaxFor2ndRLE,
NULL,
false,
false);
__syncthreads();
// Thread 0 computes the statistics of each compression scheme
// for the data the block processed
if (threadIdx.x == 0) {
VALUE valRangeFor1stRLE = localValMaxFor1stRLE - (localValMinFor1stRLE);
COUNT runRangeFor1stRLE = localRunMaxFor1stRLE - (localRunMinFor1stRLE);
size_t valBitsFor1stRLE;
size_t runBitsFor1stRLE;
size_t valBitsFor2ndRLE;
size_t runBitsFor2ndRLE;
// Compute number of bits needed for VALS if using 1 RLE
if (sizeof(valRangeFor1stRLE) > sizeof(int)) {
valBitsFor1stRLE
= sizeof(long long int) * 8
- __clzll(static_cast<long long int>(valRangeFor1stRLE));
} else {
valBitsFor1stRLE
= sizeof(int) * 8 - __clz(static_cast<int>(valRangeFor1stRLE));
}
// Compute number of bits needed for RUNS if using 1 RLE
if (sizeof(runRangeFor1stRLE) > sizeof(int)) {
runBitsFor1stRLE
= sizeof(long long int) * 8
- __clzll(static_cast<long long int>(runRangeFor1stRLE));
} else {
runBitsFor1stRLE
= sizeof(int) * 8 - __clz(static_cast<int>(runRangeFor1stRLE));
}
VALUE valRangeFor2ndRLE = localValMaxFor2ndRLE - (localValMinFor2ndRLE);
COUNT runRangeFor2ndRLE = localRunMaxFor2ndRLE - (localRunMinFor2ndRLE);
// Compute number of bits needed for VALS if using 2 RLEs
if (sizeof(valRangeFor2ndRLE) > sizeof(int)) {
valBitsFor2ndRLE
= sizeof(long long int) * 8
- __clzll(static_cast<long long int>(valRangeFor2ndRLE));
} else {
valBitsFor2ndRLE
= sizeof(int) * 8 - __clz(static_cast<int>(valRangeFor2ndRLE));
}
// Compute number of bits needed for RUNS if using 2 RLEs
if (sizeof(runRangeFor2ndRLE) > sizeof(int)) {
runBitsFor2ndRLE
= sizeof(long long int) * 8
- __clzll(static_cast<long long int>(runRangeFor2ndRLE));
} else {
runBitsFor2ndRLE
= sizeof(int) * 8 - __clz(static_cast<int>(runRangeFor2ndRLE));
}
// Ensure that the number of bits will be at least 1
valBitsFor1stRLE
= max((unsigned long long)valBitsFor1stRLE, (unsigned long long)1);
valBitsFor2ndRLE
= max((unsigned long long)valBitsFor2ndRLE, (unsigned long long)1);
runBitsFor1stRLE
= max((unsigned long long)runBitsFor1stRLE, (unsigned long long)1);
runBitsFor2ndRLE
= max((unsigned long long)runBitsFor2ndRLE, (unsigned long long)1);
// Compute sizes of each compressed output of vals and runs for each stage
size_t run1stRLE = runBitsFor1stRLE * numOutFor1stRLE;
run1stRLE = roundUpTo(roundUpDiv(run1stRLE, 8ULL), sizeof(COUNT));
run1stRLE = roundUpTo(run1stRLE, sizeof(size_t));
size_t val1stRLE = valBitsFor1stRLE * numOutFor1stRLE;
val1stRLE = roundUpTo(roundUpDiv(val1stRLE, 8ULL), sizeof(VALUE));
val1stRLE = roundUpTo(val1stRLE, sizeof(size_t));
size_t val1stDelta = valBitsFor2ndRLE * numOutFor1stRLE;
val1stDelta = roundUpTo(roundUpDiv(val1stDelta, 8ULL), sizeof(VALUE));
val1stDelta = roundUpTo(val1stDelta, sizeof(size_t));
size_t run2ndRLE = runBitsFor2ndRLE * numOutFor2ndRLE;
run2ndRLE = roundUpTo(roundUpDiv(run2ndRLE, 8ULL), sizeof(COUNT));
run2ndRLE = roundUpTo(run2ndRLE, sizeof(size_t));
size_t val2ndRLE = valBitsFor2ndRLE * numOutFor2ndRLE;
val2ndRLE = roundUpTo(roundUpDiv(val2ndRLE, 8ULL), sizeof(VALUE));
val2ndRLE = roundUpTo(val2ndRLE, sizeof(size_t));
// Compute the final compressed size of each compression scheme
size_t R0D0B1 = valBitsFor1stRLE * num;
R0D0B1 = roundUpTo(roundUpDiv(R0D0B1, 8ULL), sizeof(VALUE));
R0D0B1 = roundUpTo(R0D0B1, sizeof(size_t));
size_t R0D1B1 = valBitsFor2ndRLE * num;
R0D1B1 = roundUpTo(roundUpDiv(R0D1B1, 8ULL), sizeof(VALUE));
R0D1B1 = roundUpTo(R0D1B1, sizeof(size_t));
size_t R1D0B1 = run1stRLE + val1stRLE;
size_t R1D1B1 = run1stRLE + val1stDelta;
size_t R2D1B1 = run1stRLE + run2ndRLE + val2ndRLE;
// Add total compression for block to overall sum of all samples
atomicAdd(&(sizeBuffer[0]), static_cast<unsigned long long int>(R0D0B1));
atomicAdd(&(sizeBuffer[1]), static_cast<unsigned long long int>(R0D1B1));
atomicAdd(&(sizeBuffer[2]), static_cast<unsigned long long int>(R1D0B1));
atomicAdd(&(sizeBuffer[3]), static_cast<unsigned long long int>(R1D1B1));
atomicAdd(&(sizeBuffer[4]), static_cast<unsigned long long int>(R2D1B1));
}
}
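// Worked example of the bit-width computation above (illustrative numbers): a value range of
// 1000 stored in a 32-bit type has __clz(1000) = 22 leading zeros, so valBits = 32 - 22 = 10
// bits per element; packing numOut = 256 such values needs 10 * 256 = 2560 bits, which
// roundUpDiv(2560, 8) turns into 320 bytes before the roundUpTo() alignment to sizeof(VALUE)
// and sizeof(size_t).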
/******************************************************************************
* Internal function calls
******************************************************************************/
/*
*@brief Select a cascaded compression scheme using the Sampling Fast Selector
*@param in The input memory location on the GPU
*@param sample_ptrs The input data offsets for each sample
*@param workspace The workspace memory location on the GPU
*@param workspaceSize The size of the workspace memory in bytes
*@param maxNum The number of elements in a sample
*@param outsizeBuffer The buffer for the sizes of the compressed data for all
*schemes in bytes (output)
*@param numSamples The number of samples
*@param NUM_SCHEMES The number of cascaded schemes
*@param stream The stream to execute the kernel on
*/
template <typename VALUE, typename COUNT>
void SampleFusedInternal(
const void* const in,
size_t* const sample_ptrs,
void* const workspace,
const size_t workspaceSize,
const size_t maxNum,
size_t* const outsizeBuffer,
size_t const numSamples,
const int NUM_SCHEMES,
cudaStream_t stream)
{
constexpr const int BLOCK_SIZE = 128;
constexpr const int SAMPLE_TILE_SIZE = 1024;
if (NUM_SCHEMES != 5) {
throw std::runtime_error("Number of schemes should be 5\n");
}
const size_t grid_size = numSamples;
const dim3 grid(grid_size);
const dim3 block(BLOCK_SIZE);
unsigned long long int* d_sizeBuffer;
TempSpaceBroker tempSpace(workspace, workspaceSize);
tempSpace.reserve(&d_sizeBuffer, NUM_SCHEMES);
cudaMemsetAsync(d_sizeBuffer, 0, sizeof(*d_sizeBuffer) * NUM_SCHEMES, stream);
const VALUE* const inTyped = static_cast<const VALUE*>(in);
SampleFusedKernel<VALUE, COUNT, BLOCK_SIZE, SAMPLE_TILE_SIZE>
<<<grid, block, 0, stream>>>(inTyped, sample_ptrs, maxNum, d_sizeBuffer);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
throw std::runtime_error(
"Fail to launch SampleFusedKernel: " + std::to_string(err));
}
std::vector<unsigned long long int> size_buffer(NUM_SCHEMES);
err = cudaMemcpyAsync(
size_buffer.data(),
d_sizeBuffer,
sizeof(unsigned long long int) * NUM_SCHEMES,
cudaMemcpyDeviceToHost,
stream);
err = cudaStreamSynchronize(stream);
if (err != cudaSuccess) {
throw std::runtime_error("size buffer cuda memcpy failed\n");
}
for (int i = 0; i < NUM_SCHEMES; i++) {
outsizeBuffer[i] = (size_t)size_buffer[i];
}
}
/*
*@brief Select a cascaded compression scheme using the Sampling Fast Selector
*
*@param in The input memory location on the GPU
*@param sample_ptrs The input data offsets for each sample
*@param in_bytes The size of a sample in bytes
*@param temp_ptr The workspace memory location on the GPU
*@param temp_bytes The size of the workspace memory in bytes
*@param outsize The buffer for the sizes of the compressed data for all schemes
*in bytes (output)
*@param numSamples The number of samples
*@param num_schemes The number of cascaded schemes
*@param stream The stream to execute the kernel on
*/
template <typename valT, typename runT>
void SampleFusedOption_internal(
const void* const in,
size_t* const sample_ptrs,
const size_t in_bytes,
void* const temp_ptr,
const size_t temp_bytes,
size_t* outsize,
size_t const numSamples,
const int num_schemes,
cudaStream_t stream)
{
const size_t maxNum = in_bytes / sizeof(valT);
SampleFusedInternal<valT, runT>(
in,
sample_ptrs,
temp_ptr,
temp_bytes,
maxNum,
outsize,
numSamples,
num_schemes,
stream);
}
} // namespace
void SamplingFastOption(
const void* const in,
size_t* const sample_offsets,
const size_t sample_bytes,
const size_t num_samples,
const nvcompType_t in_type,
void* const workspace,
const size_t workspaceSize,
size_t* outsizeBuffer,
int num_schemes,
cudaStream_t stream)
{
const nvcompType_t countType
= selectRunsType(sample_bytes / sizeOfnvcompType(in_type));
NVCOMP_TYPE_TWO_SWITCH(
in_type,
countType,
SampleFusedOption_internal,
in,
sample_offsets,
sample_bytes,
workspace,
workspaceSize,
outsizeBuffer,
num_samples,
num_schemes,
stream);
}
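/*
Host-side usage sketch for SamplingFastOption (illustrative only: the buffer names, sample
count, sizes and element type are assumptions, and error checking is omitted):

    size_t sizes[5];                      // one estimated compressed size per cascaded scheme
    SamplingFastOption(d_input, d_sample_offsets, sample_bytes, num_samples,
                       NVCOMP_TYPE_INT, d_workspace, workspace_bytes,
                       sizes, 5, stream);
    // pick the scheme with the smallest sizes[i] and configure the cascaded compressor with it
*/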
} // namespace highlevel
} // namespace nvcomp
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC argmax_all
#define EIGEN_USE_GPU
// this is needed on Beignet 1.2.1, Intel HD5500 (as far as I can tell?)
// #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int32_t
// #define CALL_SUBTEST_1(expr) expr
// #define CALL_SUBTEST_2(expr) expr
// #define CALL_SUBTEST_3(expr) expr
#include <unsupported/Eigen/CXX11/Tensor>
#include "main.h"
#include <iostream>
using Eigen::Tensor;
void test_cuda_simple_argmax_tiny_rowmajor()
{
std::cout << "test" << std::endl;
#define N 5
Tensor<float, 1, RowMajor> in(Eigen::array<DenseIndex, 1>(N));
Tensor<DenseIndex, 1, RowMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in(0) = 3.0f;
in(1) = 4.0f;
in(2) = 7.0f;
in(3) = 5.0f;
in(4) = 1.0f;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_max), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 1>(N));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 2);
cudaFree(d_in);
cudaFree(d_out_max);
}
void test_cuda_simple_argmax_rowmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, RowMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, RowMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_max), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 37*53*97 + 43*97 + 88);
cudaFree(d_in);
cudaFree(d_out_max);
}
void test_cuda_simple_argmin_rowmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, RowMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, RowMajor> out_min(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_min.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_min;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_min), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_min(d_out_min, Eigen::array<DenseIndex, 1>(1));
gpu_out_min.device(gpu_device) = gpu_in.argmin();
assert(cudaMemcpyAsync(out_min.data(), d_out_min, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 21*53*97 + 29*97 + 76);
cudaFree(d_in);
cudaFree(d_out_min);
}
void test_cuda_simple_argmax_colmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, ColMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, ColMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_max), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, ColMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, ColMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 88*53*72 + 43*72 + 37);
cudaFree(d_in);
cudaFree(d_out_max);
}
void test_cuda_simple_argmin_colmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, ColMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, ColMajor> out_min(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_min.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_min;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_min), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, ColMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, ColMajor>, Aligned > gpu_out_min(d_out_min, Eigen::array<DenseIndex, 1>(1));
gpu_out_min.device(gpu_device) = gpu_in.argmin();
assert(cudaMemcpyAsync(out_min.data(), d_out_min, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 76*53*72 + 29*72 + 21);
cudaFree(d_in);
cudaFree(d_out_min);
}
template <int DataLayout>
void test_cuda_argmax_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
for (int dim = 0; dim < 4; ++dim) {
tensor.setRandom();
tensor = (tensor + tensor.constant(0.5)).log();
array<DenseIndex, 3> out_shape;
for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
array<DenseIndex, 4> ix;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != 0) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0
tensor(ix) = 10.0;
}
}
}
}
std::size_t in_bytes = tensor.size() * sizeof(float);
std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
size_t(2*3*5*7 / tensor.dimension(dim)));
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != tensor.dimension(dim) - 1) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0
tensor(ix) = 20.0;
}
}
}
}
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
cudaFree(d_in);
cudaFree(d_out);
}
}
template <int DataLayout>
void test_cuda_argmin_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
for (int dim = 0; dim < 4; ++dim) {
tensor.setRandom();
tensor = (tensor + tensor.constant(0.5)).log();
array<DenseIndex, 3> out_shape;
for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
array<DenseIndex, 4> ix;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != 0) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0
tensor(ix) = -10.0;
}
}
}
}
std::size_t in_bytes = tensor.size() * sizeof(float);
std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
2*3*5*7 / tensor.dimension(dim));
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect min to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != tensor.dimension(dim) - 1) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0
tensor(ix) = -20.0;
}
}
}
}
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect min to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
cudaFree(d_in);
cudaFree(d_out);
}
}
void test_argmax_all()
{
std::cout << "test_argmax_all()" << std::endl;
test_cuda_simple_argmax_tiny_rowmajor();
test_cuda_simple_argmax_rowmajor();
test_cuda_simple_argmin_rowmajor();
test_cuda_simple_argmax_colmajor();
test_cuda_simple_argmin_colmajor();
test_cuda_argmax_dim<RowMajor>();
test_cuda_argmax_dim<ColMajor>();
test_cuda_argmin_dim<RowMajor>();
test_cuda_argmin_dim<ColMajor>();
}
/**
* \file
* cub::BlockRadixRank provides operations for ranking unsigned integer types within a CUDA thread block
*/
#pragma once
#include <stdint.h>
#include "../thread/thread_reduce.cuh"
#include "../thread/thread_scan.cuh"
#include "../block/block_scan.cuh"
#include "../block/radix_rank_sort_operations.cuh"
#include "../config.cuh"
#include "../util_ptx.cuh"
#include "../util_type.cuh"
CUB_NAMESPACE_BEGIN
/**
* \brief Radix ranking algorithm, the algorithm used to implement stable ranking of the
* keys from a single tile. Note that different ranking algorithms require different
* initial arrangements of keys to function properly.
*/
enum RadixRankAlgorithm
{
/** Ranking using the BlockRadixRank algorithm with MEMOIZE_OUTER_SCAN == false. It
* uses thread-private histograms, and thus uses more shared memory. Requires blocked
* arrangement of keys. Does not support count callbacks. */
RADIX_RANK_BASIC,
/** Ranking using the BlockRadixRank algorithm with MEMOIZE_OUTER_SCAN ==
* true. Similar to RADIX_RANK_BASIC, it requires blocked arrangement of
* keys and does not support count callbacks.*/
RADIX_RANK_MEMOIZE,
/** Ranking using the BlockRadixRankMatch algorithm. It uses warp-private
* histograms and matching for ranking the keys in a single warp. Therefore,
* it uses less shared memory compared to RADIX_RANK_BASIC. It requires
* warp-striped key arrangement and supports count callbacks. */
RADIX_RANK_MATCH,
/** Ranking using the BlockRadixRankMatchEarlyCounts algorithm with
* MATCH_ALGORITHM == WARP_MATCH_ANY. An alternative implementation of
* match-based ranking that computes bin counts early. Because of this, it
* works better with onesweep sorting, which requires bin counts for
* decoupled look-back. Assumes warp-striped key arrangement and supports
* count callbacks.*/
RADIX_RANK_MATCH_EARLY_COUNTS_ANY,
/** Ranking using the BlockRadixRankEarlyCounts algorithm with
* MATCH_ALGORITHM == WARP_MATCH_ATOMIC_OR. It uses extra space in shared
* memory to generate warp match masks using atomicOr(). This is faster when
* there are few matches, but can lead to slowdowns if the number of
* matching keys among warp lanes is high. Assumes warp-striped key
* arrangement and supports count callbacks. */
RADIX_RANK_MATCH_EARLY_COUNTS_ATOMIC_OR
};
/** Empty callback implementation */
template <int BINS_PER_THREAD>
struct BlockRadixRankEmptyCallback
{
__device__ __forceinline__ void operator()(int (&bins)[BINS_PER_THREAD]) {}
};
/**
* \brief BlockRadixRank provides operations for ranking unsigned integer types within a CUDA thread block.
* \ingroup BlockModule
*
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam RADIX_BITS The number of radix bits per digit place
* \tparam IS_DESCENDING Whether or not the sorted-order is high-to-low
* \tparam MEMOIZE_OUTER_SCAN <b>[optional]</b> Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure (default: true for architectures SM35 and newer, false otherwise). See BlockScanAlgorithm::BLOCK_SCAN_RAKING_MEMOIZE for more details.
* \tparam INNER_SCAN_ALGORITHM <b>[optional]</b> The cub::BlockScanAlgorithm algorithm to use (default: cub::BLOCK_SCAN_WARP_SCANS)
* \tparam SMEM_CONFIG <b>[optional]</b> Shared memory bank mode (default: \p cudaSharedMemBankSizeFourByte)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* BlockRadixRank computes, for the current digit place, the rank (output offset) of each key in a tile of keys partitioned across the threads of a thread block.
* - Keys must be in a form suitable for radix ranking (i.e., unsigned bits).
* - \blocked
*
* \par Performance Considerations
* - \granularity
*
* \par Examples
* \par
* - <b>Example 1:</b> Simple radix rank of 32-bit integer keys (illustrative sketch;
*   see the RankKeys() overloads below for the exact digit-extraction arguments)
* \code
* #include <cub/cub.cuh>
*
* template <int BLOCK_THREADS>
* __global__ void ExampleKernel(...)
* {
*     // Specialize BlockRadixRank for a 1D block of BLOCK_THREADS threads, 4 bits per digit place
*     typedef cub::BlockRadixRank<BLOCK_THREADS, 4, false> BlockRadixRank;
*     // Allocate shared memory for BlockRadixRank
*     __shared__ typename BlockRadixRank::TempStorage temp_storage;
*     // Rank a blocked arrangement of one key (and one rank) per thread
*     unsigned int thread_keys[1]; int thread_ranks[1];
*     ...
*     BlockRadixRank(temp_storage).RankKeys(thread_keys, thread_ranks, ...);
* }
* \endcode
*
* \par Re-using dynamically allocated shared memory
* The following example under the examples/block folder illustrates usage of
* dynamically shared memory with BlockReduce and how to re-purpose
* the same memory region:
* <a href="../../examples/block/example_block_reduce_dyn_smem.cu">example_block_reduce_dyn_smem.cu</a>
*
* This example can be easily adapted to the storage required by BlockRadixRank.
*/
template <
int BLOCK_DIM_X,
int RADIX_BITS,
bool IS_DESCENDING,
bool MEMOIZE_OUTER_SCAN = (CUB_PTX_ARCH >= 350) ? true : false,
BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
cudaSharedMemConfig SMEM_CONFIG = cudaSharedMemBankSizeFourByte,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockRadixRank
{
private:
/******************************************************************************
* Type definitions and constants
******************************************************************************/
// Integer type for digit counters (to be packed into words of type PackedCounters)
typedef unsigned short DigitCounter;
// Integer type for packing DigitCounters into columns of shared memory banks
typedef typename If<(SMEM_CONFIG == cudaSharedMemBankSizeEightByte),
unsigned long long,
unsigned int>::Type PackedCounter;
enum
{
// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
RADIX_DIGITS = 1 << RADIX_BITS,
LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH),
WARP_THREADS = 1 << LOG_WARP_THREADS,
WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
BYTES_PER_COUNTER = sizeof(DigitCounter),
LOG_BYTES_PER_COUNTER = Log2<BYTES_PER_COUNTER>::VALUE,
PACKING_RATIO = static_cast<int>(sizeof(PackedCounter) / sizeof(DigitCounter)),
LOG_PACKING_RATIO = Log2<PACKING_RATIO>::VALUE,
LOG_COUNTER_LANES = CUB_MAX((int(RADIX_BITS) - int(LOG_PACKING_RATIO)), 0), // Always at least one lane
COUNTER_LANES = 1 << LOG_COUNTER_LANES,
// The number of packed counters per thread (plus one for padding)
PADDED_COUNTER_LANES = COUNTER_LANES + 1,
RAKING_SEGMENT = PADDED_COUNTER_LANES,
};
public:
enum
{
/// Number of bin-starting offsets tracked per thread
BINS_TRACKED_PER_THREAD = CUB_MAX(1, (RADIX_DIGITS + BLOCK_THREADS - 1) / BLOCK_THREADS),
};
private:
/// BlockScan type
typedef BlockScan<
PackedCounter,
BLOCK_DIM_X,
INNER_SCAN_ALGORITHM,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
PTX_ARCH>
BlockScan;
/// Shared memory storage layout type for BlockRadixRank
struct __align__(16) _TempStorage
{
union Aliasable
{
DigitCounter digit_counters[PADDED_COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO];
PackedCounter raking_grid[BLOCK_THREADS][RAKING_SEGMENT];
} aliasable;
// Storage for scanning local ranks
typename BlockScan::TempStorage block_scan;
};
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
/// Copy of raking segment, promoted to registers
PackedCounter cached_segment[RAKING_SEGMENT];
/******************************************************************************
* Utility methods
******************************************************************************/
/**
* Internal storage allocator
*/
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/**
* Performs upsweep raking reduction, returning the aggregate
*/
__device__ __forceinline__ PackedCounter Upsweep()
{
PackedCounter *smem_raking_ptr = temp_storage.aliasable.raking_grid[linear_tid];
PackedCounter *raking_ptr;
if (MEMOIZE_OUTER_SCAN)
{
// Copy data into registers
#pragma unroll
for (int i = 0; i < RAKING_SEGMENT; i++)
{
cached_segment[i] = smem_raking_ptr[i];
}
raking_ptr = cached_segment;
}
else
{
raking_ptr = smem_raking_ptr;
}
return internal::ThreadReduce<RAKING_SEGMENT>(raking_ptr, Sum());
}
/// Performs exclusive downsweep raking scan
__device__ __forceinline__ void ExclusiveDownsweep(
PackedCounter raking_partial)
{
PackedCounter *smem_raking_ptr = temp_storage.aliasable.raking_grid[linear_tid];
PackedCounter *raking_ptr = (MEMOIZE_OUTER_SCAN) ?
cached_segment :
smem_raking_ptr;
// Exclusive raking downsweep scan
internal::ThreadScanExclusive<RAKING_SEGMENT>(raking_ptr, raking_ptr, Sum(), raking_partial);
if (MEMOIZE_OUTER_SCAN)
{
// Copy data back to smem
#pragma unroll
for (int i = 0; i < RAKING_SEGMENT; i++)
{
smem_raking_ptr[i] = cached_segment[i];
}
}
}
/**
* Reset shared memory digit counters
*/
__device__ __forceinline__ void ResetCounters()
{
// Reset shared memory digit counters
#pragma unroll
for (int LANE = 0; LANE < PADDED_COUNTER_LANES; LANE++)
{
*((PackedCounter*) temp_storage.aliasable.digit_counters[LANE][linear_tid]) = 0;
}
}
/**
* Block-scan prefix callback
*/
struct PrefixCallBack
{
__device__ __forceinline__ PackedCounter operator()(PackedCounter block_aggregate)
{
PackedCounter block_prefix = 0;
// Propagate totals in packed fields
#pragma unroll
for (int PACKED = 1; PACKED < PACKING_RATIO; PACKED++)
{
block_prefix += block_aggregate << (sizeof(DigitCounter) * 8 * PACKED);
}
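            // e.g., with 16-bit DigitCounters packed two per 32-bit word (PACKING_RATIO == 2),
            // this produces block_prefix = block_aggregate << 16, seeding the scan of the upper
            // sub-counter with the block-wide total of the lower sub-counter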
return block_prefix;
}
};
/**
* Scan shared memory digit counters.
*/
__device__ __forceinline__ void ScanCounters()
{
// Upsweep scan
PackedCounter raking_partial = Upsweep();
// Compute exclusive sum
PackedCounter exclusive_partial;
PrefixCallBack prefix_call_back;
BlockScan(temp_storage.block_scan).ExclusiveSum(raking_partial, exclusive_partial, prefix_call_back);
// Downsweep scan with exclusive partial
ExclusiveDownsweep(exclusive_partial);
}
public:
/// \smemstorage{BlockScan}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockRadixRank()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockRadixRank(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Raking
*********************************************************************/
//@{
/**
* \brief Rank keys.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD,
typename DigitExtractorT>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile
DigitExtractorT digit_extractor) ///< [in] The digit extractor
{
DigitCounter thread_prefixes[KEYS_PER_THREAD]; // For each key, the count of previous keys in this tile having the same digit
DigitCounter* digit_counters[KEYS_PER_THREAD]; // For each key, the byte-offset of its corresponding digit counter in smem
// Reset shared memory digit counters
ResetCounters();
#pragma unroll
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
{
// Get digit
unsigned int digit = digit_extractor.Digit(keys[ITEM]);
// Get sub-counter
unsigned int sub_counter = digit >> LOG_COUNTER_LANES;
// Get counter lane
unsigned int counter_lane = digit & (COUNTER_LANES - 1);
if (IS_DESCENDING)
{
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
// Pointer to smem digit counter
digit_counters[ITEM] = &temp_storage.aliasable.digit_counters[counter_lane][linear_tid][sub_counter];
// Load thread-exclusive prefix
thread_prefixes[ITEM] = *digit_counters[ITEM];
// Store inclusive prefix
*digit_counters[ITEM] = thread_prefixes[ITEM] + 1;
}
CTA_SYNC();
// Scan shared memory counters
ScanCounters();
CTA_SYNC();
// Extract the local ranks of each key
#pragma unroll
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
{
// Add in thread block exclusive prefix
ranks[ITEM] = thread_prefixes[ITEM] + *digit_counters[ITEM];
}
}
/**
* \brief Rank keys. For the lower \p RADIX_DIGITS threads, digit counts for each digit are provided for the corresponding thread.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD,
typename DigitExtractorT>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile (out parameter)
DigitExtractorT digit_extractor, ///< [in] The digit extractor
int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD]) ///< [out] The exclusive prefix sum for the digits [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1]
{
// Rank keys
RankKeys(keys, ranks, digit_extractor);
// Get the inclusive and exclusive digit totals corresponding to the calling thread.
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
if (IS_DESCENDING)
bin_idx = RADIX_DIGITS - bin_idx - 1;
// Obtain ex/inclusive digit counts. (Unfortunately these all reside in the
// first counter column, resulting in unavoidable bank conflicts.)
unsigned int counter_lane = (bin_idx & (COUNTER_LANES - 1));
unsigned int sub_counter = bin_idx >> (LOG_COUNTER_LANES);
exclusive_digit_prefix[track] = temp_storage.aliasable.digit_counters[counter_lane][0][sub_counter];
}
}
}
};
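/*
 * Illustrative sketch only (not part of CUB): RankKeys above accepts any DigitExtractorT that
 * exposes a Digit(key) member returning the radix digit currently being ranked. A minimal
 * functor satisfying that concept for unsigned 32-bit keys could look like this:
 *
 *     template <int RADIX_BITS>
 *     struct ExampleDigitExtractor
 *     {
 *         int bit_start;  // least-significant bit position of the digit being ranked
 *         __device__ unsigned int Digit(unsigned int key) const
 *         {
 *             return (key >> bit_start) & ((1u << RADIX_BITS) - 1);
 *         }
 *     };
 */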
/**
* Radix-rank using match.any
*/
template <
int BLOCK_DIM_X,
int RADIX_BITS,
bool IS_DESCENDING,
BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockRadixRankMatch
{
private:
/******************************************************************************
* Type definitions and constants
******************************************************************************/
typedef int32_t RankT;
typedef int32_t DigitCounterT;
enum
{
// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
RADIX_DIGITS = 1 << RADIX_BITS,
LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH),
WARP_THREADS = 1 << LOG_WARP_THREADS,
WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
PADDED_WARPS = ((WARPS & 0x1) == 0) ?
WARPS + 1 :
WARPS,
COUNTERS = PADDED_WARPS * RADIX_DIGITS,
RAKING_SEGMENT = (COUNTERS + BLOCK_THREADS - 1) / BLOCK_THREADS,
PADDED_RAKING_SEGMENT = ((RAKING_SEGMENT & 0x1) == 0) ?
RAKING_SEGMENT + 1 :
RAKING_SEGMENT,
};
public:
enum
{
/// Number of bin-starting offsets tracked per thread
BINS_TRACKED_PER_THREAD = CUB_MAX(1, (RADIX_DIGITS + BLOCK_THREADS - 1) / BLOCK_THREADS),
};
private:
/// BlockScan type
typedef BlockScan<
DigitCounterT,
BLOCK_THREADS,
INNER_SCAN_ALGORITHM,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
PTX_ARCH>
BlockScanT;
/// Shared memory storage layout type for BlockRadixRank
struct __align__(16) _TempStorage
{
typename BlockScanT::TempStorage block_scan;
union __align__(16) Aliasable
{
volatile DigitCounterT warp_digit_counters[RADIX_DIGITS][PADDED_WARPS];
DigitCounterT raking_grid[BLOCK_THREADS][PADDED_RAKING_SEGMENT];
} aliasable;
};
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
/// \smemstorage{BlockScan}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockRadixRankMatch(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Raking
*********************************************************************/
//@{
    /** \brief Computes the count of keys for each digit value, and calls the
     * callback with the array of key counts.
     * @tparam CountsCallback The callback type. It should implement an instance
     * overload of operator()(int (&bins)[BINS_TRACKED_PER_THREAD]), where bins
     * is an array of key counts for each digit value, distributed in blocked
     * arrangement among the threads of the thread block. Key counts can be
     * used to update other data structures in global or shared
     * memory. Depending on the implementation of the ranking algorithm
     * (see BlockRadixRankMatchEarlyCounts), key counts may become available
     * early; therefore, they are returned through a callback rather than a
     * separate output parameter of RankKeys().
     */
template <int KEYS_PER_THREAD, typename CountsCallback>
__device__ __forceinline__ void CallBack(CountsCallback callback)
{
int bins[BINS_TRACKED_PER_THREAD];
// Get count for each digit
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track;
const int TILE_ITEMS = KEYS_PER_THREAD * BLOCK_THREADS;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
if (IS_DESCENDING)
{
bin_idx = RADIX_DIGITS - bin_idx - 1;
bins[track] = (bin_idx > 0 ?
temp_storage.aliasable.warp_digit_counters[bin_idx - 1][0] : TILE_ITEMS) -
temp_storage.aliasable.warp_digit_counters[bin_idx][0];
}
else
{
bins[track] = (bin_idx < RADIX_DIGITS - 1 ?
temp_storage.aliasable.warp_digit_counters[bin_idx + 1][0] : TILE_ITEMS) -
temp_storage.aliasable.warp_digit_counters[bin_idx][0];
}
}
}
callback(bins);
}
/**
* \brief Rank keys.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD,
typename DigitExtractorT,
typename CountsCallback>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile
DigitExtractorT digit_extractor, ///< [in] The digit extractor
CountsCallback callback)
{
// Initialize shared digit counters
#pragma unroll
for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM)
temp_storage.aliasable.raking_grid[linear_tid][ITEM] = 0;
CTA_SYNC();
// Each warp will strip-mine its section of input, one strip at a time
volatile DigitCounterT *digit_counters[KEYS_PER_THREAD];
uint32_t warp_id = linear_tid >> LOG_WARP_THREADS;
uint32_t lane_mask_lt = LaneMaskLt();
#pragma unroll
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
{
// My digit
uint32_t digit = digit_extractor.Digit(keys[ITEM]);
if (IS_DESCENDING)
digit = RADIX_DIGITS - digit - 1;
// Mask of peers who have same digit as me
uint32_t peer_mask = MatchAny<RADIX_BITS>(digit);
// Pointer to smem digit counter for this key
digit_counters[ITEM] = &temp_storage.aliasable.warp_digit_counters[digit][warp_id];
// Number of occurrences in previous strips
DigitCounterT warp_digit_prefix = *digit_counters[ITEM];
// Warp-sync
WARP_SYNC(0xFFFFFFFF);
// Number of peers having same digit as me
int32_t digit_count = __popc(peer_mask);
// Number of lower-ranked peers having same digit seen so far
int32_t peer_digit_prefix = __popc(peer_mask & lane_mask_lt);
if (peer_digit_prefix == 0)
{
// First thread for each digit updates the shared warp counter
*digit_counters[ITEM] = DigitCounterT(warp_digit_prefix + digit_count);
}
// Warp-sync
WARP_SYNC(0xFFFFFFFF);
// Number of prior keys having same digit
ranks[ITEM] = warp_digit_prefix + DigitCounterT(peer_digit_prefix);
}
CTA_SYNC();
// Scan warp counters
DigitCounterT scan_counters[PADDED_RAKING_SEGMENT];
#pragma unroll
for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM)
scan_counters[ITEM] = temp_storage.aliasable.raking_grid[linear_tid][ITEM];
BlockScanT(temp_storage.block_scan).ExclusiveSum(scan_counters, scan_counters);
#pragma unroll
for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM)
temp_storage.aliasable.raking_grid[linear_tid][ITEM] = scan_counters[ITEM];
CTA_SYNC();
if (!Equals<CountsCallback, BlockRadixRankEmptyCallback<BINS_TRACKED_PER_THREAD>>::VALUE)
{
CallBack<KEYS_PER_THREAD>(callback);
}
// Seed ranks with counter values from previous warps
#pragma unroll
for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM)
ranks[ITEM] += *digit_counters[ITEM];
}
template <
typename UnsignedBits,
int KEYS_PER_THREAD,
typename DigitExtractorT>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD],
DigitExtractorT digit_extractor)
{
RankKeys(keys, ranks, digit_extractor,
BlockRadixRankEmptyCallback<BINS_TRACKED_PER_THREAD>());
}
/**
* \brief Rank keys. For the lower \p RADIX_DIGITS threads, digit counts for each digit are provided for the corresponding thread.
*/
template <
typename UnsignedBits,
int KEYS_PER_THREAD,
typename DigitExtractorT,
typename CountsCallback>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile (out parameter)
DigitExtractorT digit_extractor, ///< [in] The digit extractor
int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD], ///< [out] The exclusive prefix sum for the digits [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1]
CountsCallback callback)
{
RankKeys(keys, ranks, digit_extractor, callback);
// Get exclusive count for each digit
#pragma unroll
for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
{
int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track;
if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS))
{
if (IS_DESCENDING)
bin_idx = RADIX_DIGITS - bin_idx - 1;
exclusive_digit_prefix[track] = temp_storage.aliasable.warp_digit_counters[bin_idx][0];
}
}
}
template <
typename UnsignedBits,
int KEYS_PER_THREAD,
typename DigitExtractorT>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile
int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile (out parameter)
DigitExtractorT digit_extractor,
int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD]) ///< [out] The exclusive prefix sum for the digits [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1]
{
RankKeys(keys, ranks, digit_extractor, exclusive_digit_prefix,
BlockRadixRankEmptyCallback<BINS_TRACKED_PER_THREAD>());
}
};
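/*
 * Illustrative sketch only (not part of CUB): the CountsCallback described above can be any type
 * with an instance overload of operator()(int (&bins)[BINS_TRACKED_PER_THREAD]). For example, a
 * hypothetical callback that accumulates the per-digit counts into a global histogram (assuming
 * d_histogram points to at least RADIX_DIGITS ints and bin_base is the first digit value tracked
 * by the calling thread) could look like this:
 *
 *     template <int BINS_TRACKED_PER_THREAD>
 *     struct ExampleCountsCallback
 *     {
 *         int *d_histogram;
 *         int  bin_base;
 *         __device__ void operator()(int (&bins)[BINS_TRACKED_PER_THREAD])
 *         {
 *             for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track)
 *                 atomicAdd(d_histogram + bin_base + track, bins[track]);
 *         }
 *     };
 */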
enum WarpMatchAlgorithm
{
WARP_MATCH_ANY,
WARP_MATCH_ATOMIC_OR
};
/**
* Radix-rank using matching which computes the counts of keys for each digit
* value early, at the expense of doing more work. This may be useful e.g. for
* decoupled look-back, where it reduces the time other thread blocks need to
* wait for digit counts to become available.
*/
template <int BLOCK_DIM_X, int RADIX_BITS, bool IS_DESCENDING,
BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS,
WarpMatchAlgorithm MATCH_ALGORITHM = WARP_MATCH_ANY, int NUM_PARTS = 1>
struct BlockRadixRankMatchEarlyCounts
{
// constants
enum
{
BLOCK_THREADS = BLOCK_DIM_X,
RADIX_DIGITS = 1 << RADIX_BITS,
BINS_PER_THREAD = (RADIX_DIGITS + BLOCK_THREADS - 1) / BLOCK_THREADS,
BINS_TRACKED_PER_THREAD = BINS_PER_THREAD,
FULL_BINS = BINS_PER_THREAD * BLOCK_THREADS == RADIX_DIGITS,
WARP_THREADS = CUB_PTX_WARP_THREADS,
BLOCK_WARPS = BLOCK_THREADS / WARP_THREADS,
WARP_MASK = ~0,
NUM_MATCH_MASKS = MATCH_ALGORITHM == WARP_MATCH_ATOMIC_OR ? BLOCK_WARPS : 0,
// Guard against declaring zero-sized array:
MATCH_MASKS_ALLOC_SIZE = NUM_MATCH_MASKS < 1 ? 1 : NUM_MATCH_MASKS,
};
// types
typedef cub::BlockScan<int, BLOCK_THREADS, INNER_SCAN_ALGORITHM> BlockScan;
// temporary storage
struct TempStorage
{
union
{
int warp_offsets[BLOCK_WARPS][RADIX_DIGITS];
int warp_histograms[BLOCK_WARPS][RADIX_DIGITS][NUM_PARTS];
};
int match_masks[MATCH_MASKS_ALLOC_SIZE][RADIX_DIGITS];
typename BlockScan::TempStorage prefix_tmp;
};
TempStorage& temp_storage;
// internal ranking implementation
template <typename UnsignedBits, int KEYS_PER_THREAD, typename DigitExtractorT,
typename CountsCallback>
struct BlockRadixRankMatchInternal
{
TempStorage& s;
DigitExtractorT digit_extractor;
CountsCallback callback;
int warp;
int lane;
__device__ __forceinline__ int Digit(UnsignedBits key)
{
int digit = digit_extractor.Digit(key);
return IS_DESCENDING ? RADIX_DIGITS - 1 - digit : digit;
}
__device__ __forceinline__ int ThreadBin(int u)
{
int bin = threadIdx.x * BINS_PER_THREAD + u;
return IS_DESCENDING ? RADIX_DIGITS - 1 - bin : bin;
}
__device__ __forceinline__
void ComputeHistogramsWarp(UnsignedBits (&keys)[KEYS_PER_THREAD])
{
//int* warp_offsets = &s.warp_offsets[warp][0];
int (&warp_histograms)[RADIX_DIGITS][NUM_PARTS] = s.warp_histograms[warp];
// compute warp-private histograms
#pragma unroll
for (int bin = lane; bin < RADIX_DIGITS; bin += WARP_THREADS)
{
#pragma unroll
for (int part = 0; part < NUM_PARTS; ++part)
{
warp_histograms[bin][part] = 0;
}
}
if (MATCH_ALGORITHM == WARP_MATCH_ATOMIC_OR)
{
int* match_masks = &s.match_masks[warp][0];
#pragma unroll
for (int bin = lane; bin < RADIX_DIGITS; bin += WARP_THREADS)
{
match_masks[bin] = 0;
}
}
WARP_SYNC(WARP_MASK);
// compute private per-part histograms
int part = lane % NUM_PARTS;
#pragma unroll
for (int u = 0; u < KEYS_PER_THREAD; ++u)
{
atomicAdd(&warp_histograms[Digit(keys[u])][part], 1);
}
// sum different parts;
// no extra work is necessary if NUM_PARTS == 1
if (NUM_PARTS > 1)
{
WARP_SYNC(WARP_MASK);
// TODO: handle RADIX_DIGITS % WARP_THREADS != 0 if it becomes necessary
const int WARP_BINS_PER_THREAD = RADIX_DIGITS / WARP_THREADS;
int bins[WARP_BINS_PER_THREAD];
#pragma unroll
for (int u = 0; u < WARP_BINS_PER_THREAD; ++u)
{
int bin = lane + u * WARP_THREADS;
bins[u] = internal::ThreadReduce(warp_histograms[bin], Sum());
}
CTA_SYNC();
// store the resulting histogram in shared memory
int* warp_offsets = &s.warp_offsets[warp][0];
#pragma unroll
for (int u = 0; u < WARP_BINS_PER_THREAD; ++u)
{
int bin = lane + u * WARP_THREADS;
warp_offsets[bin] = bins[u];
}
}
}
__device__ __forceinline__
void ComputeOffsetsWarpUpsweep(int (&bins)[BINS_PER_THREAD])
{
// sum up warp-private histograms
#pragma unroll
for (int u = 0; u < BINS_PER_THREAD; ++u)
{
bins[u] = 0;
int bin = ThreadBin(u);
if (FULL_BINS || (bin >= 0 && bin < RADIX_DIGITS))
{
#pragma unroll
for (int j_warp = 0; j_warp < BLOCK_WARPS; ++j_warp)
{
int warp_offset = s.warp_offsets[j_warp][bin];
s.warp_offsets[j_warp][bin] = bins[u];
bins[u] += warp_offset;
}
}
}
}
__device__ __forceinline__
void ComputeOffsetsWarpDownsweep(int (&offsets)[BINS_PER_THREAD])
{
#pragma unroll
for (int u = 0; u < BINS_PER_THREAD; ++u)
{
int bin = ThreadBin(u);
if (FULL_BINS || (bin >= 0 && bin < RADIX_DIGITS))
{
int digit_offset = offsets[u];
#pragma unroll
for (int j_warp = 0; j_warp < BLOCK_WARPS; ++j_warp)
{
s.warp_offsets[j_warp][bin] += digit_offset;
}
}
}
}
__device__ __forceinline__
void ComputeRanksItem(
UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD],
Int2Type<WARP_MATCH_ATOMIC_OR>)
{
// compute key ranks
int lane_mask = 1 << lane;
int* warp_offsets = &s.warp_offsets[warp][0];
int* match_masks = &s.match_masks[warp][0];
#pragma unroll
for (int u = 0; u < KEYS_PER_THREAD; ++u)
{
int bin = Digit(keys[u]);
int* p_match_mask = &match_masks[bin];
atomicOr(p_match_mask, lane_mask);
WARP_SYNC(WARP_MASK);
int bin_mask = *p_match_mask;
int leader = (WARP_THREADS - 1) - __clz(bin_mask);
int warp_offset = 0;
int popc = __popc(bin_mask & LaneMaskLe());
if (lane == leader)
{
// atomic is a bit faster
warp_offset = atomicAdd(&warp_offsets[bin], popc);
}
warp_offset = SHFL_IDX_SYNC(warp_offset, leader, bin_mask);
if (lane == leader) *p_match_mask = 0;
WARP_SYNC(WARP_MASK);
ranks[u] = warp_offset + popc - 1;
}
}
__device__ __forceinline__
void ComputeRanksItem(
UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD],
Int2Type<WARP_MATCH_ANY>)
{
// compute key ranks
int* warp_offsets = &s.warp_offsets[warp][0];
#pragma unroll
for (int u = 0; u < KEYS_PER_THREAD; ++u)
{
int bin = Digit(keys[u]);
int bin_mask = MatchAny<RADIX_BITS>(bin);
int leader = (WARP_THREADS - 1) - __clz(bin_mask);
int warp_offset = 0;
int popc = __popc(bin_mask & LaneMaskLe());
if (lane == leader)
{
// atomic is a bit faster
warp_offset = atomicAdd(&warp_offsets[bin], popc);
}
warp_offset = SHFL_IDX_SYNC(warp_offset, leader, bin_mask);
ranks[u] = warp_offset + popc - 1;
}
}
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD],
int (&ranks)[KEYS_PER_THREAD],
int (&exclusive_digit_prefix)[BINS_PER_THREAD])
{
ComputeHistogramsWarp(keys);
CTA_SYNC();
int bins[BINS_PER_THREAD];
ComputeOffsetsWarpUpsweep(bins);
callback(bins);
BlockScan(s.prefix_tmp).ExclusiveSum(bins, exclusive_digit_prefix);
ComputeOffsetsWarpDownsweep(exclusive_digit_prefix);
CTA_SYNC();
ComputeRanksItem(keys, ranks, Int2Type<MATCH_ALGORITHM>());
}
__device__ __forceinline__ BlockRadixRankMatchInternal
(TempStorage& temp_storage, DigitExtractorT digit_extractor, CountsCallback callback)
: s(temp_storage), digit_extractor(digit_extractor),
callback(callback), warp(threadIdx.x / WARP_THREADS), lane(LaneId())
{}
};
__device__ __forceinline__ BlockRadixRankMatchEarlyCounts
(TempStorage& temp_storage) : temp_storage(temp_storage) {}
/**
* \brief Rank keys. For the lower \p RADIX_DIGITS threads, digit counts for each digit are provided for the corresponding thread.
*/
template <typename UnsignedBits, int KEYS_PER_THREAD, typename DigitExtractorT,
typename CountsCallback>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD],
int (&ranks)[KEYS_PER_THREAD],
DigitExtractorT digit_extractor,
int (&exclusive_digit_prefix)[BINS_PER_THREAD],
CountsCallback callback)
{
BlockRadixRankMatchInternal<UnsignedBits, KEYS_PER_THREAD, DigitExtractorT, CountsCallback>
internal(temp_storage, digit_extractor, callback);
internal.RankKeys(keys, ranks, exclusive_digit_prefix);
}
template <typename UnsignedBits, int KEYS_PER_THREAD, typename DigitExtractorT>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD],
int (&ranks)[KEYS_PER_THREAD],
DigitExtractorT digit_extractor,
int (&exclusive_digit_prefix)[BINS_PER_THREAD])
{
typedef BlockRadixRankEmptyCallback<BINS_PER_THREAD> CountsCallback;
BlockRadixRankMatchInternal<UnsignedBits, KEYS_PER_THREAD, DigitExtractorT, CountsCallback>
internal(temp_storage, digit_extractor, CountsCallback());
internal.RankKeys(keys, ranks, exclusive_digit_prefix);
}
template <typename UnsignedBits, int KEYS_PER_THREAD, typename DigitExtractorT>
__device__ __forceinline__ void RankKeys(
UnsignedBits (&keys)[KEYS_PER_THREAD],
int (&ranks)[KEYS_PER_THREAD],
DigitExtractorT digit_extractor)
{
int exclusive_digit_prefix[BINS_PER_THREAD];
RankKeys(keys, ranks, digit_extractor, exclusive_digit_prefix);
}
};
CUB_NAMESPACE_END
#include <iostream>
using std::cout;
using std::cerr;
using std::endl;
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>
namespace etics {
namespace scf {
    __constant__ Real RadCoeff[(NMAX+1)*(LMAX+1)]; /*!< Coefficients for the radial part of the basis functions (constant memory) */
    __constant__ Real AngCoeff[(LMAX+1)*(LMAX+2)/2]; /*!< Normalization coefficients for the angular (spherical-harmonic) terms */
__constant__ Complex A[(NMAX+1)*(LMAX+1)*(LMAX+2)/2];
__constant__ CacheStruct Cache;
Complex *PartialSum;
    Real RadCoeff_h[(NMAX+1)*(LMAX+1)]; /*!< Host copy of the radial coefficients */
    Real AngCoeff_h[(LMAX+1)*(LMAX+2)/2]; /*!< Host copy of the angular coefficients */
Complex A_h[(NMAX+1)*(LMAX+1)*(LMAX+2)/2];
CacheStruct Cache_h;
Complex *PartialSum_h;
int k3gs, k3bs, k4gs, k4bs;
}
}
void etics::scf::InitializeCache(int N) { // not sure why this is a separate function; its contents could just be in etics::scf::Init()
Cache_h.N = N;
cudaMalloc((void**)&Cache_h.xi, N * sizeof(Real));
cudaMalloc((void**)&Cache_h.Phi0l, N * sizeof(Real));
cudaMalloc((void**)&Cache_h.Wprev1, N * sizeof(Real));
cudaMalloc((void**)&Cache_h.Wprev2, N * sizeof(Real));
cudaMalloc((void**)&Cache_h.costheta, N * sizeof(Real));
cudaMalloc((void**)&Cache_h.sintheta_I, N * sizeof(Real));
cudaMalloc((void**)&Cache_h.Exponent, N * sizeof(Complex));
cudaMalloc((void**)&Cache_h.mass, N * sizeof(Real));
}
void etics::scf::UpdateN(int N) {
Cache_h.N = N;
cudaMemcpyToSymbol(Cache, &Cache_h, sizeof(CacheStruct));
}
__global__ void etics::scf::LoadParticlesToCache(Particle *P, int N) { // formerly "Kernel1"
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
vec3 Pos = P[i].pos;
Real r = sqrt(Pos.x*Pos.x + Pos.y*Pos.y + Pos.z*Pos.z);
Real xi = (r-1)/(r+1);
Real costheta = Pos.z/r;
Real sintheta_I = rsqrt(1-costheta*costheta);
Cache.xi[i] = xi;
Cache.Phi0l[i] = 0.5 * (1 - xi);
Cache.costheta[i] = costheta;
Cache.sintheta_I[i] = sintheta_I;
Real Normal_I = rsqrt(Pos.x*Pos.x + Pos.y*Pos.y);
Complex Exponent = make_Complex(Pos.x*Normal_I, -Pos.y*Normal_I);
Cache.Exponent[i] = Exponent;
Cache.mass[i] = P[i].m;
i += blockDim.x * gridDim.x;
}
}
__global__ void etics::scf::CalculatePhi0l(int l) { // formerly "Kernel2"
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < Cache.N) {
Real xi = Cache.xi[i];
Cache.Phi0l[i] *= 0.25*(1-xi*xi);
i += blockDim.x * gridDim.x;
}
}
__global__ void etics::scf::CalculateCoefficientsPartial(int n, int l, Complex *PartialSum) { // formerly "Kernel3"
extern __shared__ Complex ReductionCache[]; // size is determined in kernel launch
int tid = threadIdx.x;
for (int m = 0; m <= l; m++) ReductionCache[m*blockDim.x+tid] = make_Complex(0, 0);
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < Cache.N) {
Real xi = Cache.xi[i];
Real Wnl;
if (n == 0) Wnl = 1;
else if (n == 1) {Wnl = (4*l+3)*xi; Cache.Wprev2[i] = Wnl;}
else if (n == 2) {Wnl = -(2*l+1.5)+( 8*l*(l+2) +7.5)*xi*xi; Cache.Wprev1[i] = Wnl;}
else {
Real Wprev1 = Cache.Wprev1[i];
Wnl = (xi*(2*n+4*l+1)*Wprev1 - (n+4*l+1)*Cache.Wprev2[i])/(Real)n;
if (n < NMAX) { // Writing is expensive, avoid if possible.
Cache.Wprev2[i] = Wprev1;
Cache.Wprev1[i] = Wnl;
}
}
Real RadialPart = - Cache.mass[i] * SQRT_4_PI * Cache.Phi0l[i] * Wnl * RadCoeff[(LMAX+1)*n+l];
Real costheta = Cache.costheta[i];
Real Plm = Pl(l, costheta);
ReductionCache[tid] = Complex_add(ReductionCache[tid], make_Complex(RadialPart * Plm * AngCoeff[(l+1)*l/2],0));
if (l == 0) {i += blockDim.x * gridDim.x; continue;}
//////////////////////////////// ugly fix
if ((costheta < -0.999) || (costheta > +0.999)) {
i += blockDim.x * gridDim.x;
continue;
}
//////////////////////////////// ugly fix
Real Plm_prev1 = Plm;
Real sintheta_I = Cache.sintheta_I[i];
Plm = (costheta*Plm - Pl(l-1, costheta))*l*sintheta_I;
Complex Exponent = Cache.Exponent[i];
Real tmp0 = RadialPart * Plm * AngCoeff[(l+1)*l/2+1];
ReductionCache[blockDim.x+tid] = Complex_add(ReductionCache[blockDim.x+tid], make_Complex(tmp0 * Exponent.x, tmp0 * Exponent.y));
if (l == 1) {i += blockDim.x * gridDim.x; continue;}
Complex TorodialPart = Exponent;
for (int m = 2; m <= l; m++) { // make sure no redundancy at the end of the loop
Real Plm_prev2 = Plm_prev1;
Plm_prev1 = Plm;
Plm = - 2*(m-1)*costheta*sintheta_I*Plm_prev1 - (l+m-1)*(l-m+2)*Plm_prev2;
TorodialPart = Complex_mul(TorodialPart, Exponent);
tmp0 = RadialPart * Plm * AngCoeff[(l+1)*l/2+m];
ReductionCache[m*blockDim.x+tid] = Complex_add(ReductionCache[m*blockDim.x+tid], make_Complex(tmp0 * TorodialPart.x, tmp0 * TorodialPart.y));
}
i += blockDim.x * gridDim.x;
}
__syncthreads();
for (int m = 0; m <= l; m++) {
i = blockDim.x/2;
while (i != 0) {
if (tid < i)
ReductionCache[m*blockDim.x+tid] = Complex_add(ReductionCache[m*blockDim.x+tid], ReductionCache[m*blockDim.x+tid+i]);
__syncthreads();
i /= 2;
}
if (tid == 0)
PartialSum[blockIdx.x*(l+1) + m] = ReductionCache[m*blockDim.x];
}
}
void etics::scf::CalculateCoefficients(int n, int l, Complex *A_h) {
int BaseAddress = n*(LMAX+1)*(LMAX+2)/2 + l*(l+1)/2;
CalculateCoefficientsPartial<<<k3gs,k3bs,k3bs*sizeof(Complex)*(LMAX+1)>>>(n, l, PartialSum);
cudaMemcpy(PartialSum_h, PartialSum, k3gs*(l+1)*sizeof(Complex), cudaMemcpyDeviceToHost);
for (int m = 0; m <= l; m++)
for (int Block=0; Block<k3gs; Block++)
A_h[BaseAddress + m] = Complex_add(A_h[BaseAddress + m], PartialSum_h[Block*(l+1) + m]);
}
void etics::scf::CalculateCoefficients(Complex *A_h) {
memset(A_h, 0, (NMAX+1)*(LMAX+1)*(LMAX+2)/2 * sizeof(Complex));
for (int l = 0; l <= LMAX; l++) {
        if (l > 0) CalculatePhi0l<<<128,128>>>(l); // wouldn't it make sense to just put this after the n-loop finishes? Probably not, because then we would need to skip it on the last iteration
for (int n = 0; n <= NMAX; n++) {
CalculateCoefficients(n, l, A_h);
}
}
}
template<int Mode>
__device__ void etics::scf::CalculateGravityTemplate(int i, Complex *A, vec3 *F, Real *Potential) {
    // A is passed as a parameter because it can reside either on the host or on the device
    #warning !!! This cannot really be a host function because it needs the device cache and angular coefficients, which are on the device !!
// 0 = both force and potential, 1 = only force, 2 = only pot
#define A(n,l,m) A[n*(LMAX+1)*(LMAX+2)/2 + l*(l+1)/2 + m]
Real dPhiLeft;
Real dPhiLeftMul;
Real dPhiRight;
Real dPhiRightAdd;
Real dPhi;
Real RadialPart2;
Real PlmDerivTheta;
Real Pot = 0;
Real Fr = 0, Ftheta = 0, Fphi = 0;
Real xi = Cache.xi[i];
Real OneOverXiPlusOne = 1/(1+xi);
Real r_I = (1-xi)*OneOverXiPlusOne;
Real r = 1/r_I; // It's quite likely we can do everything without r.
Real costheta = Cache.costheta[i];
    Real sintheta_I = rsqrt(1-costheta*costheta); // faster than using the cache // You sure??? in K3 it's the opposite
Complex ExponentTmp[LMAX];
Complex Exponent = Complex_conj(Cache.Exponent[i]);
ExponentTmp[0] = Exponent;
for (int m = 1; m < LMAX; m++) ExponentTmp[m] = Complex_mul(ExponentTmp[m-1],Exponent);
if (Mode != 2) {
Real xi2 = xi*xi;
Real xi3 = xi2*xi;
dPhiLeft = -0.25*OneOverXiPlusOne;
dPhiLeftMul = 0.25*(1-xi2);
dPhiRight = xi3 - xi2 - xi + 1;
dPhiRightAdd = 2*(xi3 - 2*xi2 + xi);
}
Real Phi0l = 1/(1+r);
Real tmp1 = Phi0l*Phi0l*r;
for (int l = 0; l <= LMAX; l++) {
if (Mode != 2) {
if (l > 0) {
dPhiLeft *= dPhiLeftMul;
dPhiRight += dPhiRightAdd;
}
}
if (Mode != 2) dPhi = dPhiLeft * dPhiRight;
for (int n = 0; n <= NMAX; n++) {
Real Wnl, Wprev1, Wprev2;
if (n == 0) Wnl = 1;
else if (n == 1) {Wnl = (4*l+3)*xi; Wprev2 = Wnl;}
else if (n == 2) {Wnl = -(2*l+1.5)+( 8*l*(l+2) +7.5)*xi*xi; Wprev1 = Wnl;}
else {
Wnl = (xi*(2*n+4*l+1)*Wprev1 - (n+4*l+1)*Wprev2)/(Real)n;
Wprev2 = Wprev1;
Wprev1 = Wnl;
}
Real Wderiv = 0;
if (n == 1) {Wderiv = 4*l + 3;}
else if (n > 1) {
Wderiv = (-n*xi*Wnl + (n+4*l+2)*Wprev2)/(1-xi*xi);
            } // For an unknown reason it's faster to have this block separate from the previous one.
Real RadialPart = - SQRT_4_PI * Phi0l * Wnl;
if (Mode != 2) RadialPart2 = SQRT_4_PI * (dPhi*Wnl + Phi0l*Wderiv*2/pow(1+r,2));
Real Plm = Pl(l, costheta);
Real tmp2 = Complex_real(A(n,l,0)) * AngCoeff[(l+1)*l/2] * Plm;
if (Mode != 1) Pot += RadialPart * tmp2;
if (Mode != 2) Fr += RadialPart2 * tmp2;
if (l == 0) continue;
//////////////////////////////// ugly fix
if ((costheta < -0.999) || (costheta > +0.999)) {
continue;
}
//////////////////////////////// ugly fix
// The Block below is l>=1, m=0.
if (Mode != 2) {
PlmDerivTheta = (costheta*Plm - Pl(l-1, costheta))*l*sintheta_I; //TODO check if storing Pl(l-1) somewhere makes it faster
Ftheta += - PlmDerivTheta * AngCoeff[(l+1)*l/2] * Complex_real(A(n,l,0)) * RadialPart * r_I;
}
// The Block below is l>=1, m=1.
if (Mode == 2) PlmDerivTheta = (costheta*Plm - Pl(l-1, costheta))*l*sintheta_I; //TODO see above regarding storing Pl(l-1)
Real Plm_prev1 = Plm;
Plm = PlmDerivTheta; // PlmDerivTheta equals Plm for m=1.
if (Mode != 2) PlmDerivTheta = - Plm*costheta*sintheta_I - l*(l+1)*Plm_prev1;
tmp2 = 2 * AngCoeff[(l+1)*l/2+1];
Complex tmp3 = Complex_mul(ExponentTmp[0], A(n,l,1));
Complex tmp4 = make_Complex(tmp2 * tmp3.x, tmp2 * tmp3.y);
Complex tmp5 = make_Complex(Plm * tmp4.x, Plm * tmp4.y);
Complex tmp6 = make_Complex(RadialPart * tmp5.x, RadialPart * tmp5.y);
if (Mode != 1) Pot += Complex_real(tmp6);
if (Mode != 2) {
Fr += RadialPart2 * Complex_real(tmp5);
Fphi += Complex_imag(tmp6) * sintheta_I * r_I;
Ftheta += - RadialPart * PlmDerivTheta * Complex_real(tmp4) * r_I;
}
if (l == 1) continue;
for (int m = 2; m <= l; m++) {
Real Plm_prev2 = Plm_prev1;
Plm_prev1 = Plm;
Plm = - 2*(m-1)*costheta*sintheta_I*Plm_prev1 - (l+m-1)*(l-m+2)*Plm_prev2;
tmp2 = 2 * AngCoeff[(l+1)*l/2+m];
tmp3 = Complex_mul(ExponentTmp[m-1], A(n,l,m));
tmp4 = make_Complex(tmp2 * tmp3.x, tmp2 * tmp3.y);
tmp5 = make_Complex(Plm * tmp4.x, Plm * tmp4.y);
tmp6 = make_Complex(RadialPart * tmp5.x, RadialPart * tmp5.y);
if (Mode != 1) Pot += Complex_real(tmp6);
if (Mode != 2) {
PlmDerivTheta = - m*Plm*costheta*sintheta_I - (l+m)*(l-m+1)*Plm_prev1;
Fr += RadialPart2 * Complex_real(tmp5);
Fphi += m * Complex_imag(tmp6) * sintheta_I * r_I;
Ftheta += - RadialPart * PlmDerivTheta * Complex_real(tmp4) * r_I;
}
}
}
Phi0l *= tmp1;
}
if (Mode != 2) {
Real sintheta = 1/sintheta_I;
Real tanphi = Exponent.y/Exponent.x;
Real cosphi = ((Exponent.x >= 0)?(+1):(-1)) * rsqrt(1+tanphi*tanphi); // no simpler way to get sign bit?
Real sinphi = tanphi*cosphi;
*F = vec3(sintheta*cosphi*Fr + costheta*cosphi*Ftheta - sinphi*Fphi, sintheta*sinphi*Fr + costheta*sinphi*Ftheta + cosphi*Fphi, costheta*Fr - sintheta*Ftheta);
}
if (Mode != 1) *Potential = Pot;
#undef A
}
__global__ void etics::scf::CalculateGravityFromCoefficients(Real *Potential, vec3 *F) { // formerly "Kernel4"
#define A(n,l,m) A[n*(LMAX+1)*(LMAX+2)/2 + l*(l+1)/2 + m]
#ifdef A_ON_SHARED_MEMORY
__shared__ Complex Buffer[(NMAX+1)*(LMAX+1)*(LMAX+2)/2];
if (threadIdx.x < warpSize) {
for(int i = threadIdx.x; i < (NMAX+1)*(LMAX+1)*(LMAX+2)/2; i += warpSize) {
Buffer[i] = A[i];
}
}
__syncthreads();
#define A(n,l,m) Buffer[n*(LMAX+1)*(LMAX+2)/2 + l*(l+1)/2 + m]
#endif
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < Cache.N) {
CalculateGravityTemplate<0>(i, A, &F[i], &Potential[i]);
#warning if we have A_ON_SHARED_MEMORY the above won't work
i += blockDim.x * gridDim.x;
}
}
#undef A
void etics::scf::SendCoeffsToGPU(Complex *A_h) {
cudaMemcpyToSymbol(A, A_h, (NMAX+1)*(LMAX+1)*(LMAX+2)/2 * sizeof(Complex));
}
void etics::scf::CalculateGravity(Particle *P, int N, Real *Potential, vec3 *F) {
LoadParticlesToCache<<<128,128>>>(P, N);
CalculateCoefficients(A_h);
Complex ATotal[(NMAX+1)*(LMAX+1)*(LMAX+2)/2];
MPI_Allreduce(&A_h, &ATotal, (NMAX+1)*(LMAX+1)*(LMAX+2)/2*2, MPI_ETICS_REAL, MPI_SUM, MPI_COMM_WORLD);
std::copy ( ATotal, ATotal+(NMAX+1)*(LMAX+1)*(LMAX+2)/2, A_h);
    #warning we do not really need this copy; just calculate the coefficients in a local array, then sum into a global array (A_h or something) and copy it to the GPUs
// cudaMemcpyToSymbol(A, A_h, (NMAX+1)*(LMAX+1)*(LMAX+2)/2 * sizeof(Complex));
SendCoeffsToGPU(A_h);
CalculateGravityFromCoefficients<<<k4gs,k4bs>>>(Potential, F);
}
namespace etics {
namespace scf {
int blockSizeToDynamicSMemSize(int BlockSize) { // Should be a lambda function
return (LMAX+1)*sizeof(Complex)*BlockSize;
}
}
}
void etics::scf::Init(int N, int k3gs_new, int k3bs_new, int k4gs_new, int k4bs_new) {
if ((k3gs_new==0) || (k3bs_new==0)) {
cerr << "Warning: launch configuration for CalculateCoefficientsPartial(...) is unspecified; performance can be improved by optimizing it for this device." << endl;
int blockSize;
int minGridSize;
int gridSize;
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, CalculateCoefficientsPartial, blockSizeToDynamicSMemSize, 128);
cerr << "Warning: setting blockSizeLimit=128 for cudaOccupancyMaxPotentialBlockSizeVariableSMem." << endl;
gridSize = minGridSize;
cerr << "Using the following launch configuration: <<<" << gridSize << "," << blockSize << ">>>" << endl;
k3gs = gridSize;
k3bs = blockSize;
} else {
k3gs = k3gs_new;
k3bs = k3bs_new;
}
if ((k4gs_new==0) || (k4bs_new==0)) {
cerr << "Warning: launch configuration for CalculateGravityFromCoefficients is unspecified; performance can be improved by optimizing it for this device." << endl;
int blockSize;
int minGridSize;
int gridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, CalculateGravityFromCoefficients, 0, N);
gridSize = (N + blockSize - 1) / blockSize;
cerr << "Using the following launch configuration: <<<" << gridSize << "," << blockSize << ">>>" << endl;
k4gs = gridSize;
k4bs = blockSize;
} else {
k4gs = k4gs_new;
k4bs = k4bs_new;
}
RadialCoefficients(RadCoeff_h);
cudaMemcpyToSymbol(RadCoeff, RadCoeff_h, (NMAX+1)*(LMAX+1) * sizeof(Real));
AngularCoefficients(AngCoeff_h);
cudaMemcpyToSymbol(AngCoeff, AngCoeff_h, (LMAX+1)*(LMAX+2)/2 * sizeof(Real));
InitializeCache(N);
cudaMemcpyToSymbol(Cache, &Cache_h, sizeof(CacheStruct));
PartialSum_h = (Complex*)malloc(k3gs*(LMAX+1)*sizeof(Complex)); // why not use "new"?
cudaMalloc((void**)&PartialSum, k3gs*(LMAX+1)*sizeof(Complex));
}
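/*
 * Illustrative call sequence (a sketch only; allocating the device arrays P, Potential and F,
 * e.g. with cudaMalloc, is assumed to happen elsewhere):
 *
 *     etics::scf::Init(N, 0, 0, 0, 0);                  // zeros let Init choose launch configurations
 *     etics::scf::CalculateGravity(P, N, Potential, F); // fills Potential[i] and F[i] for every particle
 */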
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <cub/cub.cuh>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <algorithm>
using cudf::device_span;
namespace cudf {
namespace strings {
namespace detail {
namespace {
//
// This is the functor for the url_encode() method below.
// Specific requirements are documented in custrings issue #321.
// In summary it converts mostly non-ascii characters and control characters into UTF-8 hex
// characters prefixed with '%'. For example, the space character must be converted to characters
// '%20' where the '20' indicates the hex value for space in UTF-8. Likewise, multi-byte characters
// are converted to multiple hex characters. For example, the é character is converted to characters
// '%C3%A9' where 'C3A9' is the UTF-8 bytes xc3a9 for this character.
//
struct url_encoder_fn {
column_device_view const d_strings;
int32_t const* d_offsets{};
char* d_chars{};
  // utility to create 2-byte hex characters from a single binary byte
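  // e.g., byte_to_hex(0xC3, hex) produces {'C','3'} and byte_to_hex(0x09, hex) produces {'0','9'}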
__device__ void byte_to_hex(uint8_t byte, char* hex)
{
hex[0] = '0';
if (byte >= 16) {
uint8_t hibyte = byte / 16;
hex[0] = hibyte < 10 ? '0' + hibyte : 'A' + (hibyte - 10);
byte = byte - (hibyte * 16);
}
hex[1] = byte < 10 ? '0' + byte : 'A' + (byte - 10);
}
__device__ bool should_not_url_encode(char ch)
{
return (
(ch >= '0' && ch <= '9') || // these are the characters
(ch >= 'A' && ch <= 'Z') || // that are not to be url encoded
(ch >= 'a' &&
ch <= 'z') || // reference: docs.python.org/3/library/urllib.parse.html#urllib.parse.quote
(ch == '.') ||
(ch == '_') || (ch == '~') || (ch == '-'));
}
  // main part of the functor that performs the url-encoding
__device__ size_type operator()(size_type idx)
{
if (d_strings.is_null(idx)) return 0;
string_view d_str = d_strings.element<string_view>(idx);
//
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
size_type nbytes = 0;
char hex[2]; // two-byte hex max
for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) {
auto ch = *itr;
if (ch < 128) {
if (should_not_url_encode(static_cast<char>(ch))) {
nbytes++;
if (out_ptr) out_ptr = copy_and_increment(out_ptr, d_str.data() + itr.byte_offset(), 1);
} else // url-encode everything else
{
nbytes += 3;
if (out_ptr) {
out_ptr = copy_and_increment(out_ptr, "%", 1); // add the '%' prefix
byte_to_hex(static_cast<uint8_t>(ch), hex); // convert to 2 hex chars
out_ptr = copy_and_increment(out_ptr, hex, 2); // add them to the output
}
}
} else // these are to be utf-8 url-encoded
{
uint8_t char_bytes[4]; // holds utf-8 bytes for one character
size_type char_width = from_char_utf8(ch, reinterpret_cast<char*>(char_bytes));
nbytes += char_width * 3; // '%' plus 2 hex chars per byte (example: é is %C3%A9)
// process each byte in this current character
for (size_type chidx = 0; out_ptr && (chidx < char_width); ++chidx) {
out_ptr = copy_and_increment(out_ptr, "%", 1); // add '%' prefix
byte_to_hex(char_bytes[chidx], hex); // convert to 2 hex chars
out_ptr = copy_and_increment(out_ptr, hex, 2); // add them to the output
}
}
}
return nbytes;
}
};
} // namespace
//
std::unique_ptr<column> url_encode(
strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_column(data_type{type_id::STRING});
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy null mask
rmm::device_buffer null_mask = cudf::detail::copy_bitmask(strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), url_encoder_fn{d_strings});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_offsets = offsets_column->view().data<int32_t>();
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_column->view(), strings_count, stream);
// build chars column
auto chars_column = create_chars_child_column(bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
url_encoder_fn{d_strings, d_offsets, d_chars});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask));
}
} // namespace detail
// external API
std::unique_ptr<column> url_encode(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::url_encode(strings, rmm::cuda_stream_default, mr);
}
namespace detail {
namespace {
// utility to convert a hex char into a single byte
constexpr uint8_t hex_char_to_byte(char ch)
{
if (ch >= '0' && ch <= '9') return (ch - '0');
if (ch >= 'A' && ch <= 'F') return (ch - 'A' + 10); // in hex A=10,B=11,...,F=15
if (ch >= 'a' && ch <= 'f') return (ch - 'a' + 10); // same for lower case
return 0;
}
constexpr bool is_hex_digit(char ch)
{
return (ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f');
}
__forceinline__ __device__ bool is_escape_char(char const* const ptr)
{
return (ptr[0] == '%' && is_hex_digit(ptr[1]) && is_hex_digit(ptr[2]));
}
// helper function for converting an escaped sequence starting at `ptr` to a single byte
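// e.g., with ptr pointing at "%41" this returns (0x4 << 4) | 0x1 == 0x41, i.e. the byte for 'A'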
__forceinline__ __device__ char escaped_sequence_to_byte(char const* const ptr)
{
return (hex_char_to_byte(ptr[1]) << 4) | hex_char_to_byte(ptr[2]);
}
/**
* @brief Count the number of characters of each string after URL decoding.
*
* @tparam num_warps_per_threadblock Number of warps in a threadblock. This template argument must
* match the launch configuration, i.e. the kernel must be launched with
* `num_warps_per_threadblock * cudf::detail::warp_size` threads per threadblock.
* @tparam char_block_size Number of characters which will be loaded into the shared memory at a
* time.
*
* @param[in] in_strings Input string column.
 * @param[out] out_counts Number of characters in each decoded URL.
*/
template <int num_warps_per_threadblock, int char_block_size>
__global__ void url_decode_char_counter(column_device_view const in_strings,
offset_type* const out_counts)
{
constexpr int halo_size = 2;
__shared__ char temporary_buffer[num_warps_per_threadblock][char_block_size + halo_size];
__shared__ typename cub::WarpReduce<int8_t>::TempStorage cub_storage[num_warps_per_threadblock];
int const global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int const global_warp_id = global_thread_id / cudf::detail::warp_size;
int const local_warp_id = threadIdx.x / cudf::detail::warp_size;
int const warp_lane = threadIdx.x % cudf::detail::warp_size;
int const nwarps = gridDim.x * blockDim.x / cudf::detail::warp_size;
char* in_chars_shared = temporary_buffer[local_warp_id];
// Loop through strings, and assign each string to a warp.
for (size_type row_idx = global_warp_id; row_idx < in_strings.size(); row_idx += nwarps) {
if (in_strings.is_null(row_idx)) {
out_counts[row_idx] = 0;
continue;
}
auto const in_string = in_strings.element<string_view>(row_idx);
auto const in_chars = in_string.data();
auto const string_length = in_string.size_bytes();
int const nblocks = cudf::util::div_rounding_up_unsafe(string_length, char_block_size);
offset_type escape_char_count = 0;
for (int block_idx = 0; block_idx < nblocks; block_idx++) {
int const string_length_block =
std::min(char_block_size, string_length - char_block_size * block_idx);
// Each warp collectively loads input characters of the current block to the shared memory.
// When testing whether a location is the start of an escaped character, we need to access
// the current location as well as the next two locations. To avoid branches, two halo cells
// are added after the end of the block. If the cell is beyond the end of the string, 0s are
// filled in to make sure the last two characters of the string are not the start of an
// escaped sequence.
for (int char_idx = warp_lane; char_idx < string_length_block + halo_size;
char_idx += cudf::detail::warp_size) {
int const in_idx = block_idx * char_block_size + char_idx;
in_chars_shared[char_idx] = in_idx < string_length ? in_chars[in_idx] : 0;
}
__syncwarp();
// `char_idx_start` represents the start character index of the current warp.
for (int char_idx_start = 0; char_idx_start < string_length_block;
char_idx_start += cudf::detail::warp_size) {
int const char_idx = char_idx_start + warp_lane;
int8_t const is_ichar_escape_char =
(char_idx < string_length_block && is_escape_char(in_chars_shared + char_idx)) ? 1 : 0;
// Warp-wise reduction to calculate the number of escape characters.
// All threads in the warp participate in the reduction, even if `char_idx` is beyond
// `string_length_block`.
int8_t const total_escape_char =
cub::WarpReduce<int8_t>(cub_storage[local_warp_id]).Sum(is_ichar_escape_char);
if (warp_lane == 0) { escape_char_count += total_escape_char; }
__syncwarp();
}
}
// URL decoding replaces 3 bytes with 1 for each escape character.
if (warp_lane == 0) { out_counts[row_idx] = string_length - escape_char_count * 2; }
}
}
/**
* @brief Decode and copy from the input string column to the output char buffer.
*
* @tparam num_warps_per_threadblock Number of warps in a threadblock. This template argument must
* match the launch configuration, i.e. the kernel must be launched with
* `num_warps_per_threadblock * cudf::detail::warp_size` threads per threadblock.
* @tparam char_block_size Number of characters which will be loaded into the shared memory at a
* time.
*
* @param[in] in_strings Input string column.
* @param[out] out_chars Character buffer for the output string column.
* @param[in] out_offsets Offset value of each string associated with `out_chars`.
*/
template <int num_warps_per_threadblock, int char_block_size>
__global__ void url_decode_char_replacer(column_device_view const in_strings,
char* const out_chars,
offset_type const* const out_offsets)
{
constexpr int halo_size = 2;
__shared__ char temporary_buffer[num_warps_per_threadblock][char_block_size + halo_size * 2];
__shared__ typename cub::WarpScan<int8_t>::TempStorage cub_storage[num_warps_per_threadblock];
__shared__ int out_idx[num_warps_per_threadblock];
int const global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int const global_warp_id = global_thread_id / cudf::detail::warp_size;
int const local_warp_id = threadIdx.x / cudf::detail::warp_size;
int const warp_lane = threadIdx.x % cudf::detail::warp_size;
int const nwarps = gridDim.x * blockDim.x / cudf::detail::warp_size;
char* in_chars_shared = temporary_buffer[local_warp_id];
// Loop through strings, and assign each string to a warp
for (size_type row_idx = global_warp_id; row_idx < in_strings.size(); row_idx += nwarps) {
if (in_strings.is_null(row_idx)) continue;
auto const in_string = in_strings.element<string_view>(row_idx);
auto const in_chars = in_string.data();
auto const string_length = in_string.size_bytes();
auto out_chars_string = out_chars + out_offsets[row_idx];
int const nblocks = cudf::util::div_rounding_up_unsafe(string_length, char_block_size);
// Use the last thread of the warp to initialize `out_idx` to 0.
if (warp_lane == cudf::detail::warp_size - 1) { out_idx[local_warp_id] = 0; }
for (int block_idx = 0; block_idx < nblocks; block_idx++) {
int const string_length_block =
std::min(char_block_size, string_length - char_block_size * block_idx);
// Each warp collectively loads input characters of the current block to shared memory.
// Two halo cells before and after the block are added. The halo cells are used to test
// whether the current location as well as the previous two locations are escape characters,
// without branches.
for (int char_idx = warp_lane; char_idx < string_length_block + halo_size * 2;
char_idx += cudf::detail::warp_size) {
int const in_idx = block_idx * char_block_size + char_idx - halo_size;
in_chars_shared[char_idx] = in_idx >= 0 && in_idx < string_length ? in_chars[in_idx] : 0;
}
__syncwarp();
// `char_idx_start` represents the start character index of the current warp.
for (int char_idx_start = 0; char_idx_start < string_length_block;
char_idx_start += cudf::detail::warp_size) {
int const char_idx = char_idx_start + warp_lane;
// If the current character is part of an escape sequence starting at the previous two
// locations, the thread with the starting location should output the escaped character, and
// the current thread should not output a character.
int8_t const out_size =
(char_idx >= string_length_block || is_escape_char(in_chars_shared + char_idx) ||
is_escape_char(in_chars_shared + char_idx + 1))
? 0
: 1;
// Warp-wise prefix sum to establish output location of the current thread.
// All threads in the warp participate in the prefix sum, even if `char_idx` is beyond
// `string_length_block`.
int8_t out_offset;
cub::WarpScan<int8_t>(cub_storage[local_warp_id]).ExclusiveSum(out_size, out_offset);
if (out_size == 1) {
char const* const ch_ptr = in_chars_shared + char_idx + halo_size;
char const ch =
is_escape_char(ch_ptr)
?
// If the current location is the start of an escape sequence, load and decode.
escaped_sequence_to_byte(ch_ptr)
:
// If the current location is not the start of an escape sequence, load directly.
*ch_ptr;
out_chars_string[out_idx[local_warp_id] + out_offset] = ch;
}
if (warp_lane == cudf::detail::warp_size - 1) {
out_idx[local_warp_id] += (out_offset + out_size);
}
__syncwarp();
}
}
}
}
} // namespace
//
std::unique_ptr<column> url_decode(
strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_column(data_type{type_id::STRING});
constexpr int num_warps_per_threadblock = 4;
constexpr int threadblock_size = num_warps_per_threadblock * cudf::detail::warp_size;
constexpr int char_block_size = 256;
const int num_threadblocks =
std::min(65536, cudf::util::div_rounding_up_unsafe(strings_count, num_warps_per_threadblock));
auto offset_count = strings_count + 1;
auto const d_strings = column_device_view::create(strings.parent(), stream);
// build offsets column
auto offsets_column = make_numeric_column(
data_type{type_id::INT32}, offset_count, mask_state::UNALLOCATED, stream, mr);
// count number of bytes in each string after decoding and store it in offsets_column
auto offsets_view = offsets_column->view();
auto offsets_mutable_view = offsets_column->mutable_view();
url_decode_char_counter<num_warps_per_threadblock, char_block_size>
<<<num_threadblocks, threadblock_size, 0, stream.value()>>>(
*d_strings, offsets_mutable_view.begin<offset_type>());
// use scan to transform number of bytes into offsets
thrust::exclusive_scan(rmm::exec_policy(stream),
offsets_view.begin<offset_type>(),
offsets_view.end<offset_type>(),
offsets_mutable_view.begin<offset_type>());
// copy the total number of characters of all strings combined (last element of the offset column)
// to the host memory
auto out_chars_bytes =
cudf::detail::get_value<offset_type>(offsets_view, offset_count - 1, stream);
// create the chars column
auto chars_column = create_chars_child_column(out_chars_bytes, stream, mr);
auto d_out_chars = chars_column->mutable_view().data<char>();
// decode and copy the characters from the input column to the output column
url_decode_char_replacer<num_warps_per_threadblock, char_block_size>
<<<num_threadblocks, threadblock_size, 0, stream.value()>>>(
*d_strings, d_out_chars, offsets_column->view().begin<offset_type>());
// copy null mask
rmm::device_buffer null_mask = cudf::detail::copy_bitmask(strings.parent(), stream, mr);
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
std::move(null_mask));
}
} // namespace detail
// external API
std::unique_ptr<column> url_decode(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::url_decode(strings, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
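/*
 * Illustrative host-side usage (a sketch only; `input` is assumed to be an existing
 * cudf::strings_column_view and the current device memory resource is used):
 *
 *     auto encoded = cudf::strings::url_encode(input, rmm::mr::get_current_device_resource());
 *     auto decoded = cudf::strings::url_decode(
 *         cudf::strings_column_view(encoded->view()), rmm::mr::get_current_device_resource());
 */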
#if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 )
#define POT_NXT_F ( PATCH_SIZE+2*POT_GHOST_SIZE )
#define POT_PAD ( WARP_SIZE/2 - (POT_NXT_F*2%WARP_SIZE) )
#define POT_NTHREAD ( RHO_NXT*RHO_NXT*POT_BLOCK_SIZE_Z/2 )
#define POT_USELESS ( POT_GHOST_SIZE%2 )
/************************************************************
Many optimization options for SOR are defined in CUPOT.h
************************************************************/
// variables reside in constant memory
#include "CUDA_ConstMemory.h"
// parallel reduction routine
#define RED_NTHREAD POT_NTHREAD
#define RED_SUM
#ifdef SOR_USE_SHUFFLE
# include "../../GPU_Utility/CUUTI_BlockReduction_Shuffle.cu"
#else
# include "../../GPU_Utility/CUUTI_BlockReduction_WarpSync.cu"
#endif
// checks
#ifdef SOR_USE_PADDING
# if ( WARP_SIZE != 32 )
# error : ERROR : WARP_SIZE != 32 !!
# endif
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CUPOT_PoissonSolver_SOR_10to14cube
// Description : GPU Poisson solver using the SOR scheme
//
// Note : 1. Work for POT_GHOST_SIZE = 1, 2, 3 <--> POT_NXT_F = 10, 12, 14
//                   --> For compute capabilities >= 2.0 (which have 48 KB of shared memory), it also works
// with POT_GHOST_SIZE = 4, 5
// 2. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// 3. Each patch requires about 3.1*10^6 FLOPS (include the gravity solver)
// --> 133 GFLOPS is achieved in one C2050 GPU
// 4. Reference : Numerical Recipes, Chapter 20.5
// 5. Chester Cheng has implemented the SOR_USE_SHUFFLE and SOR_USE_PADDING optimizations, which
// greatly improve performance for the case POT_GHOST_SIZE == 5
// 6. Typically, the number of iterations required to reach round-off errors is 20 ~ 25 (single precision)
//
// Padding : Below shows how bank conflict is eliminated by padding.
//
// Example constants :
// POT_NXT_F = 18 // The number of floating point elements per row
// POT_PAD = 16 - (18 * 2 % 32) = 12 // number of floating point elements that needs to be added
// // within thread groups
//
// We now show how shared memory (s_FPot array) is accessed by a warp in residual evaluation.
//
// Before Padding:
// Thread number | Accessed shared memory bank
// 00 ~ 07 | | 01 | | 03 | | 05 | | 07 | | 09 | | 11 | | 13 | | 15 | | |
// 08 ~ 15 | | | 02 | | 04 | | 06 | | 08 | | 10 | | 12 | | 14 | | 16 | |
// 16 ~ 23 | | 05 | | 07 | | 09 | | 11 | | 13 | | 15 | | 17 | | 19 | | |
// 24 ~ 31 | | | 06 | | 08 | | 10 | | 12 | | 14 | | 16 | | 18 | | 20 | |
//
// After Padding:
// Thread number | Accessed shared memory bank
// 00 ~ 07 | | 01 | | 03 | | 05 | | 07 | | 09 | | 11 | | 13 | | 15 | | |
// 08 ~ 15 | | | 02 | | 04 | | 06 | | 08 | | 10 | | 12 | | 14 | | 16 | |
// ----------------- PAD 12 FLOATING POINTS HERE !!!!! ---------------------------------------
// 16 ~ 23 | | 17 | | 19 | | 21 | | 23 | | 25 | | 27 | | 29 | | 31 | | |
// 24 ~ 31 | | | 18 | | 20 | | 22 | | 24 | | 26 | | 28 | | 30 | | 00 | |
//
//
// Additional Notes for Padding:
//    1. When threads 08 ~ 15 access the elements below them (+y direction), we have to skip the padded
//       elements. The same applies when threads 16 ~ 23 access the elements above them (-y direction).
//    2. Every warp requires POT_PAD additional padded floating-point elements. Each xy plane is covered
//       by 4 warps, so each xy plane requires 4*POT_PAD padded elements (see the illustrative index
//       sketch right after this header comment).
//
//
// Parameter : g_Rho_Array : Global memory array to store the input density
// g_Pot_Array_In : Global memory array storing the input "coarse-grid" potential for
// interpolation
// g_Pot_Array_Out : Global memory array to store the output potential
// Min_Iter : Minimum # of iterations for SOR
// Max_Iter : Maximum # of iterations for SOR
// Omega_6 : Omega / 6
// Const : (Coefficient in front of the RHS in the Poisson eq.) / dh^2
// IntScheme : Interpolation scheme for potential
// --> currently supported schemes include
// INT_CQUAD : conservative quadratic interpolation
// INT_QUAD : quadratic interpolation
//---------------------------------------------------------------------------------------------------
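//-------------------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original solver) : the row-group padding rule described in the
// "Padding" notes above, written as a standalone host function so it can be checked by hand.
// For POT_NXT_F = 18 and POT_PAD = 12, thread rows y = 0,1 get no extra offset, rows y = 2..5 are shifted
// by 12 floats, rows y = 6..9 by 24 floats, and so on, which is what realigns consecutive warps onto
// different shared-memory banks (see pad_pot/pad_dz inside the kernel below for the real usage).
//-------------------------------------------------------------------------------------------------------
static inline int PaddedIndex_Sketch( const int x, const int y, const int z, const int NxtF, const int Pad )
{
   const int dy     = NxtF;
   const int pad_dz = NxtF*NxtF + Pad*4;                      // one xy plane plus 4 padded warps
   const int pad    = ( y < 2 ) ? 0 : Pad*( (y-2)/4 + 1 );    // per-row-group padding offset

   return pad + z*pad_dz + y*dy + x;
}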
__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme )
{
const uint bid = blockIdx.x;
const uint tid_x = threadIdx.x;
const uint tid_y = threadIdx.y;
const uint tid_z = threadIdx.z;
const uint bdim_x = blockDim.x;
const uint bdim_y = blockDim.y;
const uint bdim_z = blockDim.z;
const uint ID = __umul24( tid_z, __umul24(bdim_x,bdim_y) ) + __umul24( tid_y, bdim_x ) + tid_x;
const uint dx = 1;
const uint dy = POT_NXT_F;
const uint dz = POT_NXT_F*POT_NXT_F;
const uint DispEven = ( tid_y + tid_z ) & 1;
const uint DispOdd = DispEven^1;
const uint DispFlip = bdim_z & 1;
const uint RhoID0 = __umul24( tid_z, RHO_NXT*RHO_NXT ) + __umul24( tid_y, RHO_NXT )+ ( tid_x << 1 );
const uint dRhoID = __umul24( bdim_z, RHO_NXT*RHO_NXT );
# ifdef SOR_USE_PADDING
const uint dPotID = __umul24( bdim_z, POT_NXT_F*POT_NXT_F + POT_PAD*4 );
const uint warpID = ID % WARP_SIZE;
const uint pad_dy_0 = ( warpID >= 8 && warpID <= 15 ) ? dy + POT_PAD : dy; //
const uint pad_dy_1 = ( warpID >= 16 && warpID <= 23 ) ? dy + POT_PAD : dy; // please refer to the Padding notes above!
const uint pad_dz = dz + POT_PAD*4; //
const uint pad_pot = ( tid_y < 2 ) ? 0 : POT_PAD*((tid_y-2)/4 + 1);
# else
const uint dPotID = __umul24( bdim_z, POT_NXT_F*POT_NXT_F );
const uint pad_dy_0 = dy;
const uint pad_dy_1 = dy;
const uint pad_dz = dz;
const uint pad_pot = 0;
# endif
const uint PotID0 = pad_pot + __umul24( 1+tid_z, pad_dz ) + __umul24( 1+tid_y, dy ) + ( tid_x << 1 ) + 1;
uint ip, im, jp, jm, kp, km, t, s_index;
uint PotID, RhoID, DispPotID, DispRhoID, Disp;
real Residual, Residual_Total_Old, Residual_ThreadSum;
__shared__ real s_Residual_Total;
# ifdef SOR_USE_PADDING
__shared__ real s_FPot[ POT_NXT_F*POT_NXT_F*POT_NXT_F + POT_PAD*4*POT_NXT_F ];
# else
__shared__ real s_FPot[ POT_NXT_F*POT_NXT_F*POT_NXT_F ];
# endif
# ifdef SOR_CPOT_SHARED
__shared__ real s_CPot[ POT_NXT *POT_NXT *POT_NXT ];
# endif
# ifdef SOR_RHO_SHARED
__shared__ real s_Rho_Array[ RHO_NXT*RHO_NXT*RHO_NXT ];
# endif
// a1. load the fine-grid density into the shared memory
// -----------------------------------------------------------------------------------------------------------
# ifdef SOR_RHO_SHARED
t = ID;
do { s_Rho_Array[t] = g_Rho_Array[bid][t]; t += (POT_NTHREAD);} while ( t < RHO_NXT*RHO_NXT*RHO_NXT);
__syncthreads();
# else
const real *s_Rho_Array = g_Rho_Array[bid];
# endif
// a2. load the coarse-grid potential into the shared memory
// -----------------------------------------------------------------------------------------------------------
# ifdef SOR_CPOT_SHARED
t = ID;
do { s_CPot[t] = g_Pot_Array_In[bid][t]; t += POT_NTHREAD; } while ( t < POT_NXT*POT_NXT*POT_NXT );
__syncthreads();
# else
const real *s_CPot = g_Pot_Array_In[bid];
# endif
// b. evaluate the "fine-grid" potential by interpolation (as the initial guess and the B.C.)
// -----------------------------------------------------------------------------------------------------------
const int N_CSlice = POT_NTHREAD / ( (POT_NXT-2)*(POT_NXT-2) );
if ( ID < N_CSlice*(POT_NXT-2)*(POT_NXT-2) )
{
const real Const_8 = 1.0/8.0;
const real Const_64 = 1.0/64.0;
const real Const_512 = 1.0/512.0;
const int Cdx = 1;
const int Cdy = POT_NXT;
const int Cdz = POT_NXT*POT_NXT;
const int CIDx = 1 + ID % ( POT_NXT-2 );
const int CIDy = 1 + ( ID % ( (POT_NXT-2)*(POT_NXT-2) ) ) / ( POT_NXT-2 );
const int CIDz = 1 + ID / ( (POT_NXT-2)*(POT_NXT-2) );
int CID = __mul24( CIDz, Cdz ) + __mul24( CIDy, Cdy ) + __mul24( CIDx, Cdx );
const int Fdx = 1;
const int Fdy = POT_NXT_F;
const int FIDx = ( (CIDx-1)<<1 ) - POT_USELESS;
const int FIDy = ( (CIDy-1)<<1 ) - POT_USELESS;
int FIDz = ( (CIDz-1)<<1 ) - POT_USELESS;
# ifdef SOR_USE_PADDING
const int Fpad = ( FIDy < 3 ) ? 0 : POT_PAD*((FIDy-3)/4 + 1); // padding logic
const int Fdz = POT_NXT_F*POT_NXT_F + POT_PAD*4; // added padding
# else
const int Fpad = 0;
const int Fdz = POT_NXT_F*POT_NXT_F;
# endif
int FID = Fpad + __mul24( FIDz, Fdz ) + __mul24( FIDy, Fdy ) + __mul24( FIDx, Fdx );
real TempFPot1, TempFPot2, TempFPot3, TempFPot4, TempFPot5, TempFPot6, TempFPot7, TempFPot8;
real Slope_00, Slope_01, Slope_02, Slope_03, Slope_04, Slope_05, Slope_06, Slope_07;
real Slope_08, Slope_09, Slope_10, Slope_11, Slope_12;
int Idx, Idy, Idz, ii, jj, kk;
for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice)
{
switch ( IntScheme )
{
/*
case INT_CENTRAL :
{
Slope_00 = (real)0.125 * ( s_CPot[CID+Cdx] - s_CPot[CID-Cdx] );
Slope_01 = (real)0.125 * ( s_CPot[CID+Cdy] - s_CPot[CID-Cdy] );
Slope_02 = (real)0.125 * ( s_CPot[CID+Cdz] - s_CPot[CID-Cdz] );
TempFPot1 = s_CPot[CID] - Slope_00 - Slope_01 - Slope_02;
TempFPot2 = s_CPot[CID] + Slope_00 - Slope_01 - Slope_02;
TempFPot3 = s_CPot[CID] - Slope_00 + Slope_01 - Slope_02;
TempFPot4 = s_CPot[CID] + Slope_00 + Slope_01 - Slope_02;
TempFPot5 = s_CPot[CID] - Slope_00 - Slope_01 + Slope_02;
TempFPot6 = s_CPot[CID] + Slope_00 - Slope_01 + Slope_02;
TempFPot7 = s_CPot[CID] - Slope_00 + Slope_01 + Slope_02;
TempFPot8 = s_CPot[CID] + Slope_00 + Slope_01 + Slope_02;
}
break; // INT_CENTRAL
*/
case INT_CQUAD :
{
Slope_00 = Const_8 * ( s_CPot[CID+Cdx ] - s_CPot[CID-Cdx ] );
Slope_01 = Const_8 * ( s_CPot[CID +Cdy ] - s_CPot[CID -Cdy ] );
Slope_02 = Const_8 * ( s_CPot[CID +Cdz] - s_CPot[CID -Cdz] );
Slope_03 = Const_64 * ( s_CPot[CID+Cdx -Cdz] - s_CPot[CID-Cdx -Cdz] );
Slope_04 = Const_64 * ( s_CPot[CID +Cdy-Cdz] - s_CPot[CID -Cdy-Cdz] );
Slope_05 = Const_64 * ( s_CPot[CID+Cdx-Cdy ] - s_CPot[CID-Cdx-Cdy ] );
Slope_06 = Const_64 * ( s_CPot[CID+Cdx+Cdy ] - s_CPot[CID-Cdx+Cdy ] );
Slope_07 = Const_64 * ( s_CPot[CID+Cdx +Cdz] - s_CPot[CID-Cdx +Cdz] );
Slope_08 = Const_64 * ( s_CPot[CID +Cdy+Cdz] - s_CPot[CID -Cdy+Cdz] );
Slope_09 = Const_512 * ( s_CPot[CID+Cdx-Cdy-Cdz] - s_CPot[CID-Cdx-Cdy-Cdz] );
Slope_10 = Const_512 * ( s_CPot[CID+Cdx+Cdy-Cdz] - s_CPot[CID-Cdx+Cdy-Cdz] );
Slope_11 = Const_512 * ( s_CPot[CID+Cdx-Cdy+Cdz] - s_CPot[CID-Cdx-Cdy+Cdz] );
Slope_12 = Const_512 * ( s_CPot[CID+Cdx+Cdy+Cdz] - s_CPot[CID-Cdx+Cdy+Cdz] );
TempFPot1 = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot2 = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot3 = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot4 = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot5 = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
TempFPot6 = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot7 = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12 + s_CPot[CID];
TempFPot8 = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12 + s_CPot[CID];
}
break; // INT_CQUAD
case INT_QUAD :
{
TempFPot1 = TempFPot2 = TempFPot3 = TempFPot4 = (real)0.0;
TempFPot5 = TempFPot6 = TempFPot7 = TempFPot8 = (real)0.0;
for (int dk=-1; dk<=1; dk++) { Idz = dk+1; kk = __mul24( dk, Cdz );
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
TempFPot1 += s_CPot[CID+kk+jj+ii] * c_Mm[Idz] * c_Mm[Idy] * c_Mm[Idx];
TempFPot2 += s_CPot[CID+kk+jj+ii] * c_Mm[Idz] * c_Mm[Idy] * c_Mp[Idx];
TempFPot3 += s_CPot[CID+kk+jj+ii] * c_Mm[Idz] * c_Mp[Idy] * c_Mm[Idx];
TempFPot4 += s_CPot[CID+kk+jj+ii] * c_Mm[Idz] * c_Mp[Idy] * c_Mp[Idx];
TempFPot5 += s_CPot[CID+kk+jj+ii] * c_Mp[Idz] * c_Mm[Idy] * c_Mm[Idx];
TempFPot6 += s_CPot[CID+kk+jj+ii] * c_Mp[Idz] * c_Mm[Idy] * c_Mp[Idx];
TempFPot7 += s_CPot[CID+kk+jj+ii] * c_Mp[Idz] * c_Mp[Idy] * c_Mm[Idx];
TempFPot8 += s_CPot[CID+kk+jj+ii] * c_Mp[Idz] * c_Mp[Idy] * c_Mp[Idx];
}}}
}
break; // INT_QUAD
} // switch ( IntScheme )
// save data to the shared-memory array.
// Currently this part is highly divergent. However, since the interpolation takes much less time than the
// SOR iteration does, we have not yet tried to optimize this part
if ( FIDz >= 0 )
{
if ( FIDx >= 0 && FIDy >= 0 ) s_FPot[FID ] = TempFPot1;
if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_FPot[FID+Fdx ] = TempFPot2;
if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_FPot[FID +Fdy ] = TempFPot3;
if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_FPot[FID+Fdx+Fdy ] = TempFPot4;
}
if ( FIDz <= POT_NXT_F-2 )
{
if ( FIDx >= 0 && FIDy >= 0 ) s_FPot[FID +Fdz] = TempFPot5;
if ( FIDx <= POT_NXT_F-2 && FIDy >= 0 ) s_FPot[FID+Fdx +Fdz] = TempFPot6;
if ( FIDx >= 0 && FIDy <= POT_NXT_F-2 ) s_FPot[FID +Fdy+Fdz] = TempFPot7;
if ( FIDx <= POT_NXT_F-2 && FIDy <= POT_NXT_F-2 ) s_FPot[FID+Fdx+Fdy+Fdz] = TempFPot8;
}
CID += __mul24( N_CSlice, Cdz );
FID += __mul24( 2*N_CSlice, Fdz );
FIDz += 2*N_CSlice;
} // for (int z=CIDz; z<POT_NXT-1; z+=N_CSlice)
} // if ( ID < N_CSlice*(POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// c. use the SOR scheme to evaluate potential
// -----------------------------------------------------------------------------------------------------------
Residual_Total_Old = __FLT_MAX__;
for (uint Iter=0; Iter<Max_Iter; Iter++)
{
// (c1). evaluate residual, update potential
// ==============================================================================
Residual_ThreadSum = (real)0.0;
Disp = DispEven;
for (uint pass=0; pass<2; pass++) // pass = (0,1) <--> (even,odd) step
{
PotID = PotID0;
RhoID = RhoID0;
for (uint z=tid_z; z<RHO_NXT; z+=bdim_z)
{
DispPotID = PotID + Disp;
DispRhoID = RhoID + Disp;
ip = DispPotID + dx;
jp = DispPotID + pad_dy_0;
kp = DispPotID + pad_dz;
im = DispPotID - dx;
jm = DispPotID - pad_dy_1;
km = DispPotID - pad_dz;
// evaluate the residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*s_FPot[DispPotID] - Const*s_Rho_Array[DispRhoID] );
// update potential
s_FPot[DispPotID] += Omega_6*Residual;
// store the sum of the residuals evaluated by the same thread in a per-thread register
Residual_ThreadSum += FABS( Residual );
PotID += dPotID;
RhoID += dRhoID;
Disp = Disp^DispFlip;
      } // for (uint z=tid_z; z<RHO_NXT; z+=bdim_z)
Disp = DispOdd;
__syncthreads();
} // for (int pass=0; pass<2; pass++)
if ( Iter+1 >= Min_Iter && Iter % SOR_MOD_REDUCTION == 0 )
{
// (c2). perform parallel reduction to get the one-norm of residual
// ==============================================================================
# ifdef SOR_USE_SHUFFLE
Residual_ThreadSum = BlockReduction_Shuffle ( Residual_ThreadSum );
# else
Residual_ThreadSum = BlockReduction_WarpSync( Residual_ThreadSum );
# endif
// broadcast to all threads
if ( ID == 0 ) s_Residual_Total = Residual_ThreadSum;
__syncthreads();
// (c3). termination criterion
// ==============================================================================
if ( s_Residual_Total > Residual_Total_Old ) break;
Residual_Total_Old = s_Residual_Total;
} // if ( Iter+1 >= Min_Iter && Iter % SOR_MOD_REDUCTION == 0 )
__syncthreads();
} // for (uint Iter=0; Iter<Max_Iter; Iter++)
// d. store potential back to the global memory
// -----------------------------------------------------------------------------------------------------------
t = ID;
do
{
# ifdef SOR_USE_PADDING
# define GHOST_DIFF ( POT_GHOST_SIZE - GRA_GHOST_SIZE - 1 )
uint dy_global = t % (GRA_NXT*GRA_NXT) / GRA_NXT;
uint pad_global = ((dy_global + GHOST_DIFF) < 2) ? 0 : POT_PAD*((dy_global + GHOST_DIFF-2)/4 + 1);
# else
uint pad_global = 0;
# endif
s_index = __umul24( t/(GRA_NXT*GRA_NXT) + POT_GHOST_SIZE - GRA_GHOST_SIZE, pad_dz )
+ __umul24( t%(GRA_NXT*GRA_NXT)/GRA_NXT + POT_GHOST_SIZE - GRA_GHOST_SIZE, dy )
+ t%(GRA_NXT ) + POT_GHOST_SIZE - GRA_GHOST_SIZE
+ pad_global;
g_Pot_Array_Out[bid][t] = s_FPot[s_index];
t += POT_NTHREAD;
}
while ( t < GRA_NXT*GRA_NXT*GRA_NXT );
} // FUNCTION : CUPOT_PoissonSolver_SOR_10to14cube
#endif // #if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 )
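//-------------------------------------------------------------------------------------------------------
// Reference sketch (not from the original source) : a 1D CPU version of the even/odd ("red-black") SOR
// update with one-norm residual termination used by CUPOT_PoissonSolver_SOR_10to14cube above, for the
// model problem u'' = rho with grid spacing h. The 3D kernel uses Omega/6 because each cell has six
// neighbours; in 1D the analogous factor is Omega/2. The residual test is done every iteration here,
// whereas the kernel only reduces every SOR_MOD_REDUCTION iterations.
//-------------------------------------------------------------------------------------------------------
static void SOR_1D_Sketch( float *u, const float *rho, const int N, const float h,
                           const float Omega, const int MinIter, const int MaxIter )
{
   float ResTotalOld = 3.4e38f;                                // effectively FLT_MAX

   for (int Iter=0; Iter<MaxIter; Iter++)
   {
      float ResTotal = 0.0f;

//    (c1) even pass followed by odd pass over the interior points
      for (int pass=0; pass<2; pass++)
      for (int i=1+pass; i<N-1; i+=2)
      {
         const float Residual = u[i-1] + u[i+1] - 2.0f*u[i] - h*h*rho[i];
         u[i]     += 0.5f*Omega*Residual;
         ResTotal += ( Residual > 0.0f ) ? Residual : -Residual;    // (c2) one-norm of the residual
      }

//    (c3) terminate once the residual stops decreasing (round-off floor reached)
      if ( Iter+1 >= MinIter  &&  ResTotal > ResTotalOld )   break;
      ResTotalOld = ResTotal;
   }
}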
#include "FiniteDiff.h"
// TEST make sure boost isn't included in nvcc code
#if defined(BOOST_COMPILER)
int bla[-1];
#endif
namespace PyCA {
template<BackgroundStrategy bg, InterpT interp>
__global__ void ApplyH_kernel(float* d_o, const float* d_i,
const float* d_hx, const float* d_hy, const float* d_hz,
int sizeX, int sizeY, int sizeZ)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < sizeX && j < sizeY){
int id = j * sizeX + i;
for (int k=0; k< sizeZ; ++k, id+= sizeX*sizeY){
float x = d_hx[id];
float y = d_hy[id];
float z = d_hz[id];
d_o[id] = point_interp<interp, bg>
(d_i, x, y, z, sizeX, sizeY, sizeZ);
}
}
}
/*
* apply hField to an image
* defImage(x) = image(h(x))
*/
template<BackgroundStrategy bg, InterpT interp>
void ApplyH(float* d_o, const float* d_i,
const float* d_hx, const float* d_hy, const float* d_hz,
int sizeX, int sizeY, int sizeZ, StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sizeX, threads.x), iDivUp(sizeY, threads.y));
ApplyH_kernel<bg, interp>
<<< grids, threads, 0, stream >>>(d_o, d_i,
d_hx, d_hy, d_hz,
sizeX, sizeY, sizeZ);
}
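// CPU reference sketch (not part of PyCA): what ApplyH computes, defImage(x) = image(h(x)), written with
// nearest-neighbour lookup and index clamping instead of the templated point_interp / BackgroundStrategy
// machinery, so the deformation semantics are explicit.
static inline void ApplyH_cpu_sketch(float* out, const float* img,
                                     const float* hx, const float* hy, const float* hz,
                                     int sx, int sy, int sz)
{
    for (int k = 0; k < sz; ++k)
    for (int j = 0; j < sy; ++j)
    for (int i = 0; i < sx; ++i) {
        int id = (k*sy + j)*sx + i;
        // round the warped coordinate and clamp it inside the volume
        int x = (int)(hx[id] + 0.5f); x = x < 0 ? 0 : (x > sx-1 ? sx-1 : x);
        int y = (int)(hy[id] + 0.5f); y = y < 0 ? 0 : (y > sy-1 ? sy-1 : y);
        int z = (int)(hz[id] + 0.5f); z = z < 0 ? 0 : (z > sz-1 ? sz-1 : z);
        out[id] = img[(z*sy + y)*sx + x];      // defImage(x) = image(h(x))
    }
}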
/*
* apply uField to an image
* defImage(x) = image(x + delta * u(x))
*/
__device__ __constant__ float c_delta;
#define MK_FROM_U_TO_H() \
float x, y, z; \
if (fwd) { \
x = i + delta * iSpX * d_uX[id]; \
y = j + delta * iSpY * d_uY[id]; \
z = k + delta * iSpZ * d_uZ[id]; \
} else { \
x = i - delta * iSpX * d_uX[id]; \
y = j - delta * iSpY * d_uY[id]; \
z = k - delta * iSpZ * d_uZ[id]; \
    }
#define MK_CORE_APPLY_U() int id = j * sizeX + i; \
for (int k=0; k< sizeZ; ++k, id+= sizeX*sizeY){ \
MK_FROM_U_TO_H(); \
d_o[id] = point_interp<interp, bg> \
(d_i, x, y, z, sizeX, sizeY, sizeZ); \
}
template<bool fwd, BackgroundStrategy bg, InterpT interp>
__global__ void ApplyV_kernel(float* d_o, const float* d_i,
const float* d_uX, const float* d_uY, const float* d_uZ,
int sizeX, int sizeY, int sizeZ,
float iSpX, float iSpY, float iSpZ){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= sizeX || j >= sizeY)
return;
float delta = c_delta;
MK_CORE_APPLY_U();
}
template<bool fwd, BackgroundStrategy bg, InterpT interp>
__global__ void ApplyV_kernel(float* d_o, const float* d_i,
const float* d_uX, const float* d_uY, const float* d_uZ,
float delta,
int sizeX, int sizeY, int sizeZ,
float iSpX, float iSpY, float iSpZ){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i >= sizeX || j >= sizeY)
return;
MK_CORE_APPLY_U();
}
template<bool fwd, BackgroundStrategy bg, InterpT interp>
void ApplyV(float* d_o, const float* d_i,
const float* d_ux, const float* d_uy, const float* d_uz, const float& delta,
int sizeX, int sizeY, int sizeZ,
float spX, float spY, float spZ, StreamT stream, bool onDev)
{
MK_CHECK_IMAGE_BACKGROUND(bg);
dim3 threads(16,16);
dim3 grids(iDivUp(sizeX, threads.x), iDivUp(sizeY, threads.y));
if (!onDev) {
ApplyV_kernel<fwd, bg, interp>
<<<grids, threads,0,stream>>>
(d_o, d_i,
d_ux, d_uy, d_uz,
delta,
sizeX, sizeY, sizeZ,
1.f/spX, 1.f/spY, 1.f/spZ);
} else {
cudaMemcpyToSymbolAsync(c_delta,&delta,sizeof(float),
0,cudaMemcpyDeviceToDevice,stream);
ApplyV_kernel<fwd, bg, interp>
<<<grids, threads,0,stream>>>
(d_o, d_i,
d_ux, d_uy, d_uz,
sizeX, sizeY, sizeZ,
1.f/spX, 1.f/spY, 1.f/spZ);
}
}
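// Pattern sketch (illustrative, hypothetical names): moving a single device-resident scalar into
// __constant__ memory before a launch, as the onDev branch of ApplyV does with c_delta above.
// cudaMemcpyToSymbolAsync with cudaMemcpyDeviceToDevice copies from ordinary device memory into the
// constant symbol on the given stream; for a host-resident scalar one would pass cudaMemcpyHostToDevice.
__device__ __constant__ float c_scale_sketch;

__global__ void ScaleKernel_sketch(float* d_o, const float* d_i, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) d_o[id] = c_scale_sketch * d_i[id];
}

static void Scale_sketch(float* d_o, const float* d_i, const float* d_scale, int n, cudaStream_t stream)
{
    // copy the device-resident scalar into constant memory, then launch on the same stream
    cudaMemcpyToSymbolAsync(c_scale_sketch, d_scale, sizeof(float),
                            0, cudaMemcpyDeviceToDevice, stream);
    ScaleKernel_sketch<<<(n + 255)/256, 256, 0, stream>>>(d_o, d_i, n);
}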
__device__ __constant__ float3 c_trans;
template<BackgroundStrategy bg, InterpT interp>
__global__ void ComposeTranslation_kernel(float* d_o, const float* d_i,
const float tx, const float ty, const float tz,
int sizeX, int sizeY, int sizeZ){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < sizeX && j < sizeY){
int id = j * sizeX + i;
for (int k=0; k< sizeZ; ++k, id+= sizeX*sizeY){
float x = i + tx;
float y = j + ty;
float z = k + tz;
d_o[id] = point_interp<interp, bg>
(d_i, x, y, z, sizeX, sizeY, sizeZ);
}
}
}
template<BackgroundStrategy bg, InterpT interp>
__global__ void ComposeTranslation_const_kernel(float* d_o, const float* d_i,
int sizeX, int sizeY, int sizeZ){
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < sizeX && j < sizeY){
int id = j * sizeX + i;
for (int k=0; k< sizeZ; ++k, id+= sizeX*sizeY){
float x = i + c_trans.x;
float y = j + c_trans.y;
float z = k + c_trans.z;
d_o[id] = point_interp<interp, bg>
(d_i, x, y, z, sizeX, sizeY, sizeZ);
}
}
}
template<BackgroundStrategy bg, InterpT interp>
void ComposeTranslation(float* d_o, const float* d_i,
const Vec3Df& t, const Vec3Di& size, StreamT stream, bool onDev)
{
dim3 threads(16, 16);
dim3 grids(iDivUp(size.x, threads.x), iDivUp(size.y, threads.y));
if (onDev) {
cudaMemcpyToSymbolAsync(c_trans, &t.x,sizeof(float) * 3,
0, cudaMemcpyDeviceToDevice, stream);
ComposeTranslation_const_kernel<bg, interp>
<<<grids, threads, 0, stream>>>
(d_o, d_i, size.x, size.y, size.z);
} else {
ComposeTranslation_kernel<bg, interp>
<<<grids, threads, 0, stream>>>
(d_o, d_i, t.x, t.y, t.z, size.x, size.y, size.z);
}
}
void
Splat(float *d_o,
const float *d_hx,
const float *d_hy,
const float *d_hz,
const float *d_i,
Vec3Di sz,
StreamT stream)
{
size_t nVox = sz.prod();
int* i_do =(int*)d_o;
// Splat to fixed buffer
Splatting::splat3D(i_do, sz.x, sz.y, sz.z,
d_i, d_hx, d_hy, d_hz, nVox, stream);
Splatting::FixedToFloating_I(d_o, nVox, stream);
}
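// Sketch of the splat idea (not the PyCA implementation; the scale constant is an assumption): scattered
// atomic adds on floats are avoided by accumulating contributions as fixed-point integers and converting
// back to float afterwards, which is the role of Splatting::splat3D + FixedToFloating_I above.
#define FIXED_SCALE_SKETCH 1048576.0f   // 2^20, hypothetical fixed-point scale

__device__ inline void splatAccum_sketch(int* acc, int idx, float w)
{
    // accumulate a weighted contribution as a fixed-point integer
    atomicAdd(&acc[idx], (int)(w * FIXED_SCALE_SKETCH));
}

__global__ void fixedToFloat_sketch(float* out, const int* acc, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) out[id] = (float)acc[id] / FIXED_SCALE_SKETCH;
}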
void
SplatAndNormalize(float *d_o,
const float *d_hx,
const float *d_hy,
const float *d_hz,
const float *d_i,
const float *temp,
Vec3Di sz,
StreamT stream)
{
size_t nVox = sz.prod();
int* i_do = (int*)d_o;
int* i_dd = (int*)temp;
// Splat to fixed buffer with weighted distance
Splatting::splat3D(i_do, i_dd, sz.x, sz.y, sz.z,
d_i, d_hx, d_hy, d_hz, nVox, stream);
// convert back to floating point buffer with distance
Splatting::convertWeightedDistance_I(d_o, i_dd, nVox, stream);
}
template < class T, enum DimT dim, enum DiffT diffType,
enum BoundaryCondT bc,
bool accum, enum OpT op >
static
__global__
void
finiteDiff_kernel(T *out,
const T *in,
int sx, int sy, int sz,
float scale) // spacing of dim
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if(x < sx && y < sy){
int index = y*sx + x;
scale = 1.f/scale;
const int stridez = sx*sy;
for(int z = 0; z < sz; ++z){
float val = finiteDiff<T, dim, diffType, bc>(in,x,y,z,sx,sy,sz);
val *= scale; // take care of voxel scaling
if(op == OP_SQR){
val = val*val;
}
if(accum)
out[index] += val;
else
out[index] = val;
index += stridez;
}
}
}
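// Reference sketch (not part of PyCA): the per-voxel operation finiteDiff_kernel applies for
// dim=DIM_X, diffType=DIFF_FORWARD, accum=false, op=OP_VAL, shown with a clamp-style boundary where the
// off-grid neighbour is replaced by the edge value (so the difference is 0 at the +x face). The exact
// BC_CLAMP / BC_APPROX / BC_WRAP conventions are defined in FiniteDiff.h.
static inline float forwardDiffX_sketch(const float* in, int x, int y, int z,
                                        int sx, int sy, int /*sz*/, float spacing)
{
    const int id  = (z*sy + y)*sx + x;
    const int idp = (x < sx-1) ? id + 1 : id;   // clamp at the +x boundary
    return (in[idp] - in[id]) / spacing;
}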
#define ACCUM_TRUE 1
#define ACCUM_FALSE 0
#define SLICE_TRUE 1
#define SLICE_FALSE 0
template <enum DimT dim, enum DiffT diffType,
enum BoundaryCondT bc,
bool accum, OpT op>
static
inline
void
g_finiteDiff(float* d_o, const float* d_i,
int szX, int szY, int szZ,
float spX, float spY, float spZ,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(szX, threads.x), iDivUp(szY, threads.y));
bool slice = (szZ == 1);
float sp;
if(dim == DIM_X){
sp = spX;
}else if(dim == DIM_Y){
sp = spY;
}else if(dim == DIM_Z){
sp = spZ;
}else{
throw PyCAException(__FILE__, __LINE__, "unknown DimT");
}
if(slice && dim == DIM_Z){
GMemOpers<float>::SetMem(d_o,0.f,szX*szY*szZ,stream,false);
}else{
finiteDiff_kernel<float,dim,diffType,bc,accum,op>
<<<grids,threads,0,stream>>>
(d_o, d_i, szX, szY, szZ, sp);
}
}
template <enum BoundaryCondT bc,
bool accum, OpT op>
static
inline
void
g_finiteDiff(float* d_o, const float* d_i,
int szX, int szY, int szZ,
float spX, float spY, float spZ,
DimT dim, DiffT diffType,
StreamT stream)
{
if(diffType == DIFF_FORWARD){
if(dim == DIM_X){
g_finiteDiff<DIM_X, DIFF_FORWARD, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else if(dim == DIM_Y){
g_finiteDiff<DIM_Y, DIFF_FORWARD, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else if(dim == DIM_Z){
g_finiteDiff<DIM_Z, DIFF_FORWARD, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else{
throw PyCAException(__FILE__, __LINE__, "unknown DimT");
}
}else if(diffType == DIFF_BACKWARD){
if(dim == DIM_X){
g_finiteDiff<DIM_X, DIFF_BACKWARD, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else if(dim == DIM_Y){
g_finiteDiff<DIM_Y, DIFF_BACKWARD, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else if(dim == DIM_Z){
g_finiteDiff<DIM_Z, DIFF_BACKWARD, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else{
throw PyCAException(__FILE__, __LINE__, "unknown DimT");
}
}else if(diffType == DIFF_CENTRAL){
if(dim == DIM_X){
g_finiteDiff<DIM_X, DIFF_CENTRAL, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else if(dim == DIM_Y){
g_finiteDiff<DIM_Y, DIFF_CENTRAL, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else if(dim == DIM_Z){
g_finiteDiff<DIM_Z, DIFF_CENTRAL, bc, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
stream);
}else{
throw PyCAException(__FILE__, __LINE__, "unknown DimT");
}
}else{
throw PyCAException(__FILE__, __LINE__, "unknown DiffT");
}
}
template <bool accum, OpT op>
static
inline
void
g_finiteDiff(float* d_o, const float* d_i,
int szX, int szY, int szZ,
float spX, float spY, float spZ,
DimT dim, DiffT diffType,
enum BoundaryCondT bc,
StreamT stream)
{
if(bc == BC_APPROX){
g_finiteDiff<BC_APPROX, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
dim, diffType,
stream);
}else if(bc == BC_WRAP){
g_finiteDiff<BC_WRAP, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
dim, diffType,
stream);
}else if(bc == BC_CLAMP){
g_finiteDiff<BC_CLAMP, accum, op>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
dim, diffType,
stream);
}else{
throw PyCAException(__FILE__, __LINE__,
"unknown boundary condition (BoundaryCondT)");
}
}
void
g_finiteDiff(float* d_o, const float* d_i,
int szX, int szY, int szZ,
float spX, float spY, float spZ,
DimT dim, DiffT diffType,
enum BoundaryCondT bc,
bool accum, OpT op,
StreamT stream)
{
if(accum){
if(op == OP_VAL){
g_finiteDiff<ACCUM_TRUE, OP_VAL>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
dim, diffType,
bc,
stream);
}else if(op == OP_SQR){
g_finiteDiff<ACCUM_TRUE, OP_SQR>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
dim, diffType,
bc,
stream);
}else{
throw PyCAException(__FILE__, __LINE__,
"unknown OpT");
}
}else{
if(op == OP_VAL){
g_finiteDiff<ACCUM_FALSE, OP_VAL>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
dim, diffType,
bc,
stream);
}else if(op == OP_SQR){
g_finiteDiff<ACCUM_FALSE, OP_SQR>
(d_o, d_i,
szX, szY, szZ,
spX, spY, spZ,
dim, diffType,
bc,
stream);
}else{
throw PyCAException(__FILE__, __LINE__,
"unknown OpT");
}
}
}
#define BG_CLAMP BACKGROUND_STRATEGY_CLAMP
template<DimT dim, bool slice>
static
__global__
void
upwindDiff_kernel(float *rtn,
const float *array,
const float *speed,
int szX, int szY, int szZ,
float spX, float spY, float spZ)
{
const uint x = blockDim.x * blockIdx.x + threadIdx.x;
const uint y = blockDim.y * blockIdx.y + threadIdx.y;
float sp;
if(dim == DIM_X){
sp = spX;
}else if(dim == DIM_Y){
sp = spY;
}else if(dim == DIM_Z){
sp = spZ;
}
if(x < szX && y < szY){
const uint stridez = szX*szY;
uint index = y * szX + x;
for(uint z = 0; z < szZ; z++){
float v = array[index]; // val
// previous and next values
float vp, vn;
if(dim == DIM_X){
vp = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x-1,y,z);
vn = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x+1,y,z);
}else if(dim == DIM_Y){
vp = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x,y-1,z);
vn = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x,y+1,z);
}else if(dim == DIM_Z){
if(slice){
vp = v;
vn = v;
}else{
vp = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x,y,z-1);
vn = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x,y,z+1);
}
}
float spd = speed[index];
float dx = 0.f;
if ( spd < 0.0f){
// forward difference
dx = (vn - v)/sp;
}else{
dx = (v - vp)/sp;
}
rtn[index] = dx;
index += stridez;
}
}
}
void
UpwindDiff(float *d_o, const float *d_i,
const float *d_speed,
const Vec3Di& sz,
const Vec3Df& sp,
DimT dim,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
bool slice = (sz.z == 1);
if(slice){
if(dim == DIM_X){
upwindDiff_kernel<DIM_X, SLICE_TRUE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}else if(dim == DIM_Y){
upwindDiff_kernel<DIM_Y, SLICE_TRUE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}else if(dim == DIM_Z){
upwindDiff_kernel<DIM_Z, SLICE_TRUE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}else{
throw PyCAException(__FILE__, __LINE__, "unknown DimT");
}
}else{
if(dim == DIM_X){
upwindDiff_kernel<DIM_X, SLICE_FALSE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}else if(dim == DIM_Y){
upwindDiff_kernel<DIM_Y, SLICE_FALSE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}else if(dim == DIM_Z){
upwindDiff_kernel<DIM_Z, SLICE_FALSE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}else{
throw PyCAException(__FILE__, __LINE__, "unknown DimT");
}
}
}
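// The per-voxel choice made by upwindDiff_kernel, isolated in 1D for clarity (illustrative): the side of
// the one-sided difference is selected by the sign of the speed at that voxel.
static inline float upwindDiff1D_sketch(float vPrev, float v, float vNext,
                                        float spacing, float speed)
{
    // speed < 0: forward difference;  speed >= 0: backward difference
    return (speed < 0.0f) ? (vNext - v) / spacing
                          : (v - vPrev) / spacing;
}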
template<bool slice>
static
__global__
void
upwindGradMag_kernel(float *rtn,
const float *array,
const float *speed,
int szX, int szY, int szZ,
float spX, float spY, float spZ)
{
const uint x = blockDim.x * blockIdx.x + threadIdx.x;
const uint y = blockDim.y * blockIdx.y + threadIdx.y;
if(x < szX && y < szY){
const uint stridez = szX*szY;
uint index = y * szX + x;
for(uint z = 0; z < szZ; z++){
float v = array[index]; // val
float n = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x,y-1,z); // north
float s = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x, y+1, z); // south
float e = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x+1, y, z); // east
float w = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x-1, y, z); // west
float u = 0.f;
float d = 0.f;
if(!slice){
u = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x, y, z+1); // up
d = getSafeVal<float,BG_CLAMP>(array, szX,szY,szZ, x, y, z-1); // down
}
float grad_before_x = (x < szX-1 ? e - v : v - w)/spX;
float grad_after_x = (x > 0 ? v - w : e - v)/spX;
float grad_before_y = (y < szY-1 ? s - v : v - n)/spY;
float grad_after_y = (y > 0 ? v - n : s - v)/spY;
float grad_before_z = 0.f;
float grad_after_z = 0.f;
if(!slice){
grad_before_z = (index < stridez*(szZ-1) ? u - v : v - d)/spZ;
grad_after_z = (index >= stridez ? v - d : u - v)/spZ;
}
float spd = speed[index];
if ( spd < 0.0f)
{
grad_before_x = PYCAMIN(grad_before_x, 0.0f);
grad_after_x = PYCAMIN(-grad_after_x, 0.0f);
grad_before_y = PYCAMIN(grad_before_y, 0.0f);
grad_after_y = PYCAMIN(-grad_after_y, 0.0f);
if(!slice){
grad_before_z = PYCAMIN(grad_before_z, 0.0f);
grad_after_z = PYCAMIN(-grad_after_z, 0.0f);
}
}
else
{
grad_before_x = PYCAMAX(grad_before_x, 0.0f);
grad_after_x = PYCAMAX(-grad_after_x, 0.0f);
grad_before_y = PYCAMAX(grad_before_y, 0.0f);
grad_after_y = PYCAMAX(-grad_after_y, 0.0f);
if(!slice){
grad_before_z = PYCAMAX(grad_before_z, 0.0f);
grad_after_z = PYCAMAX(-grad_after_z, 0.0f);
}
}
float gradmag = 0.f;
if(!slice){
gradmag =
sqrt(grad_after_x*grad_after_x + grad_before_x*grad_before_x +
grad_after_y*grad_after_y + grad_before_y*grad_before_y +
grad_after_z*grad_after_z + grad_before_z*grad_before_z);
}else{
gradmag =
sqrt(grad_after_x*grad_after_x + grad_before_x*grad_before_x +
grad_after_y*grad_after_y + grad_before_y*grad_before_y);
}
rtn[index] = gradmag*spd;
index += stridez;
}
}
}
void
UpwindGradMag(float *d_o,
const float *d_i,
const float *d_speed,
const Vec3Di& sz,
const Vec3Df& sp,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
bool slice = (sz.z == 1);
if(slice){
upwindGradMag_kernel<SLICE_TRUE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}else{
upwindGradMag_kernel<SLICE_FALSE><<<grids,threads,0,stream>>>
(d_o, d_i, d_speed,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}
}
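// The per-axis combination used by upwindGradMag_kernel, reduced to one axis (illustrative sketch, ignoring
// the boundary swaps the kernel applies at the first/last voxel): both one-sided differences are formed,
// limited according to the sign of the speed, combined in quadrature, and scaled by the speed.
static inline float upwindGradMag1D_sketch(float vPrev, float v, float vNext,
                                           float spacing, float speed)
{
    float gradFwd = (vNext - v) / spacing;      // "before" term in the kernel
    float gradBwd = (v - vPrev) / spacing;      // "after"  term in the kernel

    if (speed < 0.0f) {
        gradFwd = PYCAMIN( gradFwd, 0.0f);
        gradBwd = PYCAMIN(-gradBwd, 0.0f);
    } else {
        gradFwd = PYCAMAX( gradFwd, 0.0f);
        gradBwd = PYCAMAX(-gradBwd, 0.0f);
    }
    return sqrtf(gradFwd*gradFwd + gradBwd*gradBwd) * speed;
}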
template <enum DiffT diffType, enum BoundaryCondT bc>
inline
void
g_gradient(float* d_ox, float *d_oy, float* d_oz,
const float* d_i,
int szX, int szY, int szZ,
float spX, float spY, float spZ, StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(szX, threads.x), iDivUp(szY, threads.y));
bool slice = (szZ == 1);
finiteDiff_kernel<float,DIM_X,diffType,bc,
ACCUM_FALSE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_ox, d_i, szX, szY, szZ, spX);
finiteDiff_kernel<float,DIM_Y,diffType,bc,
ACCUM_FALSE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_oy, d_i, szX, szY, szZ, spY);
if(slice){
GMemOpers<float>::SetMem(d_oz,0.f,szX*szY*szZ,stream,false);
}else{
finiteDiff_kernel<float,DIM_Z,diffType,bc,
ACCUM_FALSE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_oz, d_i, szX, szY, szZ, spZ);
}
}
// version of gradient taking boundary condition as a parameter
template <enum DiffT diffType>
inline
void
g_gradient(float* d_ox, float *d_oy, float* d_oz,
const float* d_i,
int szX, int szY, int szZ,
float spX, float spY, float spZ,
BoundaryCondT bc, StreamT stream)
{
if(bc == BC_APPROX){
g_gradient<diffType,BC_APPROX>
(d_ox,d_oy,d_oz,d_i,szX,szY,szZ,spX,spY,spZ,stream);
}else if(bc == BC_WRAP){
g_gradient<diffType,BC_WRAP>
(d_ox,d_oy,d_oz,d_i,szX,szY,szZ,spX,spY,spZ,stream);
}else if(bc == BC_CLAMP){
g_gradient<diffType,BC_CLAMP>
(d_ox,d_oy,d_oz,d_i,szX,szY,szZ,spX,spY,spZ,stream);
}else{
throw PyCAException(__FILE__, __LINE__, "unknown BoundaryCondT");
}
}
template < class T, enum DimT dim, enum DiffT diffType,
enum BoundaryCondT bc,
bool accum, enum OpT op >
static
__global__
void
finiteDiffMask_kernel(T *out,
const T *in, const T *mask,
int sx, int sy, int sz,
float scale) // spacing of dim
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if(x < sx && y < sy){
int index = y*sx + x;
scale = 1.f/scale;
const int stridez = sx*sy;
for(int z = 0; z < sz; ++z){
float val =
finiteDiffMask<T, dim, diffType, bc>
(in,mask,x,y,z,sx,sy,sz);
val *= scale; // take care of voxel scaling
if(op == OP_SQR){
val = val*val;
}
if(accum)
out[index] += val;
else
out[index] = val;
index += stridez;
}
}
}
template <enum DiffT diffType, enum BoundaryCondT bc>
inline
void
g_gradientMask(float* d_ox, float *d_oy, float* d_oz,
const float* d_i, const float* d_mask,
int szX, int szY, int szZ,
float spX, float spY, float spZ,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(szX, threads.x), iDivUp(szY, threads.y));
bool slice = (szZ == 1);
finiteDiffMask_kernel<float,DIM_X,diffType,bc,
ACCUM_FALSE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_ox, d_i, d_mask, szX, szY, szZ, spX);
finiteDiffMask_kernel<float,DIM_Y,diffType,bc,
ACCUM_FALSE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_oy, d_i, d_mask, szX, szY, szZ, spY);
if(slice){
GMemOpers<float>::SetMem(d_oz,0.f,szX*szY*szZ,stream,false);
}else{
finiteDiffMask_kernel<float,DIM_Z,diffType,bc,
ACCUM_FALSE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_oz, d_i, d_mask, szX, szY, szZ, spZ);
}
}
// version of gradient taking boundary condition as a parameter
template <enum DiffT diffType>
inline
void
g_gradientMask(float* d_ox, float *d_oy, float* d_oz,
const float* d_i, const float* d_mask,
int szX, int szY, int szZ,
float spX, float spY, float spZ,
BoundaryCondT bc, StreamT stream)
{
if(bc == BC_APPROX){
g_gradientMask<diffType,BC_APPROX>
(d_ox,d_oy,d_oz,d_i,d_mask,szX,szY,szZ,spX,spY,spZ,stream);
}else if(bc == BC_WRAP){
throw PyCAException(__FILE__, __LINE__, "BC_WRAP boundary condition unimplemented for masked gradient");
}else if(bc == BC_CLAMP){
g_gradientMask<diffType,BC_CLAMP>
(d_ox,d_oy,d_oz,d_i,d_mask,szX,szY,szZ,spX,spY,spZ,stream);
}else{
throw PyCAException(__FILE__, __LINE__, "unknown BoundaryCondT");
}
}
template<typename T, enum DiffT diffType, enum BoundaryCondT bc>
void g_gradientMag(float* d_o,
const T* d_i,
int sizeX, int sizeY, int sizeZ,
float spX, float spY, float spZ, StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sizeX, threads.x), iDivUp(sizeY, threads.y));
bool slice = (sizeZ == 1);
finiteDiff_kernel<T,DIM_X,diffType,bc,ACCUM_FALSE,OP_SQR>
<<<grids,threads,0,stream>>>
(d_o, d_i, sizeX, sizeY, sizeZ, spX);
finiteDiff_kernel<T,DIM_Y,diffType,bc,ACCUM_TRUE,OP_SQR>
<<<grids,threads,0,stream>>>
(d_o, d_i, sizeX, sizeY, sizeZ, spY);
if(!slice){
finiteDiff_kernel<T,DIM_Z,diffType,bc,ACCUM_TRUE,OP_SQR>
<<<grids,threads,0,stream>>>
(d_o, d_i, sizeX, sizeY, sizeZ, spZ);
}
GMemOpers<float>::Sqrt_I(d_o,sizeX*sizeY*sizeZ,stream);
}
template<typename T, enum DiffT diffType>
void g_gradientMag(float* d_o,
const T* d_i,
int sizeX, int sizeY, int sizeZ,
float spX, float spY, float spZ,
BoundaryCondT bc, StreamT stream)
{
if(bc == BC_APPROX){
PyCA::g_gradientMag<T, diffType, BC_APPROX>
(d_o, d_i, sizeX, sizeY, sizeZ, spX, spY, spZ, stream);
}else if(bc == BC_WRAP){
PyCA::g_gradientMag<T, diffType, BC_WRAP>
(d_o, d_i, sizeX, sizeY, sizeZ, spX, spY, spZ, stream);
}else if(bc == BC_CLAMP){
PyCA::g_gradientMag<T, diffType, BC_CLAMP>
(d_o, d_i, sizeX, sizeY, sizeZ, spX, spY, spZ, stream);
}else{
throw PyCAException(__FILE__, __LINE__, "Unknown BoundaryCondT");
}
}
template <enum DiffT diffType, enum BoundaryCondT bc>
inline
void
g_divergence(float* d_o,
const float* d_ix, const float* d_iy, const float* d_iz,
int sizeX, int sizeY, int sizeZ,
float spX, float spY, float spZ, StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sizeX, threads.x), iDivUp(sizeY, threads.y));
bool slice = (sizeZ == 1);
finiteDiff_kernel<float,DIM_X,diffType,bc,ACCUM_FALSE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_o, d_ix, sizeX, sizeY, sizeZ, spX);
finiteDiff_kernel<float,DIM_Y,diffType,bc,ACCUM_TRUE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_o, d_iy, sizeX, sizeY, sizeZ, spY);
if(!slice){
finiteDiff_kernel<float,DIM_Z,diffType,bc,ACCUM_TRUE,OP_VAL>
<<<grids,threads,0,stream>>>
(d_o, d_iz, sizeX, sizeY, sizeZ, spZ);
}
}
template <enum DiffT diffType>
inline
void
g_divergence(float* d_o,
const float* d_ix, const float* d_iy, const float* d_iz,
int sizeX, int sizeY, int sizeZ,
float spX, float spY, float spZ,
BoundaryCondT bc, StreamT stream)
{
if (bc == BC_APPROX) {
g_divergence<diffType,BC_APPROX>
(d_o,
d_ix, d_iy, d_iz,
sizeX, sizeY, sizeZ,
spX, spY, spZ, stream);
} else if (bc == BC_WRAP) {
g_divergence<diffType,BC_WRAP>
(d_o,
d_ix, d_iy, d_iz,
sizeX, sizeY, sizeZ,
spX, spY, spZ, stream);
} else if (bc == BC_CLAMP) {
g_divergence<diffType,BC_CLAMP>
(d_o,
d_ix, d_iy, d_iz,
sizeX, sizeY, sizeZ,
spX, spY, spZ, stream);
} else {
        throw PyCAException(__FILE__, __LINE__, "Unknown BoundaryCondT");
}
}
/**
* Compute the magnitude image
* d_o[i] = sqrt(d_i[i].x^2 + d_i[i].y^2 + d_i[i].z^2)
*/
template<bool square_root>
__global__ void Magnitude_kernel(float* d_o, const float* d_ix, const float* d_iy, const float* d_iz, size_t n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_ix[id] * d_ix[id] + d_iy[id] * d_iy[id] + d_iz[id] * d_iz[id];
d_o[id] = (square_root) ? sqrt(v) : v;
}
}
/**
* Compute the magnitude array
* d_o[i] = sqrt(d_i[i].x^2 + d_i[i].y^2 + d_i[i].z^2)
*/
void
Magnitude(float *d_o,
const float *d_ix,
const float *d_iy,
const float *d_iz,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Magnitude_kernel<true><<<grids, threads, 0, stream>>>
(d_o, d_ix, d_iy, d_iz, n);
}
/**
* Compute the squared magnitude array
* d_o[i] = d_i[i].x^2 + d_i[i].y^2 + d_i[i].z^2
*/
void
SqrMagnitude(float *d_o,
const float *d_ix,
const float *d_iy,
const float *d_iz,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Magnitude_kernel<false><<<grids, threads, 0, stream>>>
(d_o, d_ix, d_iy, d_iz, n);
}
/**
* Compute dot product array
* d_o[i] = d_i[i].x * d_i1[i].x + d_i[i].y * d_i1[i].y + d_i[i].z * d_i1[i].z
*/
__global__ void ComponentDotProd_kernel(float* d_o,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1_x, const float* d_i1_y, const float* d_i1_z, uint n)
{
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
d_o[id] = d_i_x[id]*d_i1_x[id] +
d_i_y[id]*d_i1_y[id] +
d_i_z[id]*d_i1_z[id];
}
}
void
ComponentDotProd(float *d_o,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1x,
const float *d_i1y,
const float *d_i1z,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
ComponentDotProd_kernel<<<grids, threads, 0, stream>>>
(d_o,
d_ix, d_iy, d_iz,
d_i1x, d_i1y, d_i1z,
n);
}
__global__ void Add_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1 , uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id];
d_o_x[id ]= d_i_x[id ] + v;
d_o_y[id ]= d_i_y[id ] + v;
d_o_z[id ]= d_i_z[id ] + v;
}
}
/** @brief d_o.x = d_i.x + d_i1, d_o.y = d_i.y + d_i1, d_o.z = d_i.z + d_i1 */
void
Add(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Add_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, n);
}
__global__ void Sub_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1 , uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id];
d_o_x[id ]= d_i_x[id ] - v;
d_o_y[id ]= d_i_y[id ] - v;
d_o_z[id ]= d_i_z[id ] - v;
}
}
/** @brief d_o.x = d_i.x - d_i1, d_o.y = d_i.y - d_i1, d_o.z = d_i.z - d_i1 */
void
Sub(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Sub_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, n);
}
__global__ void Mul_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1 , uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id];
d_o_x[id ]= d_i_x[id ] * v;
d_o_y[id ]= d_i_y[id ] * v;
d_o_z[id ]= d_i_z[id ] * v;
}
}
/** @brief d_o.x = d_i.x * d_i1, d_o.y = d_i.y * d_i1, d_o.z = d_i.z * d_i1 */
void
Mul(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Mul_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, n);
}
__global__ void Div_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1 , uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id];
d_o_x[id ]= d_i_x[id ] / v;
d_o_y[id ]= d_i_y[id ] / v;
d_o_z[id ]= d_i_z[id ] / v;
}
}
/** @brief d_o.x = d_i.x / d_i1, d_o.y = d_i.y / d_i1, d_o.z = d_i.z / d_i1 */
void
Div(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Div_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, n);
}
__global__ void Add_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i , uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i[id];
d_o_x[id] += v;
d_o_y[id] += v;
d_o_z[id] += v;
}
}
/** @brief d_o.x += d_i, d_o.y += d_i, d_o.z += d_i */
void
Add_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_i,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Add_I_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_i, n);
}
__global__ void Sub_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i , uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i[id];
d_o_x[id] -= v;
d_o_y[id] -= v;
d_o_z[id] -= v;
}
}
/** @brief d_o.x -= d_i, d_o.y -= d_i, d_o.z -= d_i */
void
Sub_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_i,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Sub_I_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_i, n);
}
/** @brief d_o.x *= d_i, d_o.y *= d_i, d_o.z *= d_i */
__global__ void Mul_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i[id];
d_o_x[id] *= v;
d_o_y[id] *= v;
d_o_z[id] *= v;
}
}
void
Mul_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_i,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Mul_I_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_i, n);
}
/** @brief d_o.x /= d_i, d_o.y /= d_i, d_o.z /= d_i */
__global__ void Div_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v= 1.f / d_i[id];
d_o_x[id] *= v;
d_o_y[id] *= v;
d_o_z[id] *= v;
}
}
void
Div_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_i,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Div_I_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz,
d_i, n);
}
__global__ void Add_Mul_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1_x, const float* d_i1_y, const float* d_i1_z,
const float* d_i2, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i2[id];
d_o_x[id] = d_i_x[id] + d_i1_x[id] * v;
d_o_y[id] = d_i_y[id] + d_i1_y[id] * v;
d_o_z[id] = d_i_z[id] + d_i1_z[id] * v;
}
}
/** @brief d_o = d_i + d_i1 * d_i2 */
void
Add_Mul(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1x,
const float *d_i1y,
const float *d_i1z,
const float *d_i2,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Add_Mul_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1x, d_i1y, d_i1z,
d_i2, n);
}
/** @brief d_o = d_o + d_i * d_i1 */
__global__ void Add_Mul_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id];
d_o_x[id] += d_i_x[id] * v;
d_o_y[id] += d_i_y[id] * v;
d_o_z[id] += d_i_z[id] * v;
}
}
void
Add_Mul_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Add_Mul_I_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, n);
}
__global__ void Sub_Mul_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1_x, const float* d_i1_y, const float* d_i1_z,
const float* d_i2, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i2[id];
d_o_x[id] = d_i_x[id] - d_i1_x[id] * v;
d_o_y[id] = d_i_y[id] - d_i1_y[id] * v;
d_o_z[id] = d_i_z[id] - d_i1_z[id] * v;
}
}
/** @brief d_o = d_i - d_i1 * d_i2 */
void
Sub_Mul(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1x,
const float *d_i1y,
const float *d_i1z,
const float *d_i2,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Sub_Mul_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1x, d_i1y, d_i1z,
d_i2, n);
}
/** @brief d_o = d_o - d_i * d_i1 */
__global__ void Sub_Mul_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id];
d_o_x[id] -= d_i_x[id] * v;
d_o_y[id] -= d_i_y[id] * v;
d_o_z[id] -= d_i_z[id] * v;
}
}
void
Sub_Mul_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
Sub_Mul_I_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, n);
}
__global__ void MulMulC_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1, float c, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id] * c;
d_o_x[id] = d_i_x[id] * v;
d_o_y[id] = d_i_y[id] * v;
d_o_z[id] = d_i_z[id] * v;
}
}
__global__ void MulMulC_const_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
float c = c_delta;
if (id < n){
float v = d_i1[id] * c;
d_o_x[id] = d_i_x[id] * v;
d_o_y[id] = d_i_y[id] * v;
d_o_z[id] = d_i_z[id] * v;
}
}
/** @brief d_o = d_i * d_i1 * c (d_o.x = d_i.x * d_i1 * c)*/
void
MulMulC(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
const float& c,
size_t n,
StreamT stream, bool onDev)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
if (onDev) {
cudaMemcpyToSymbolAsync(c_delta,&c, sizeof(float),
0,cudaMemcpyDeviceToDevice,stream);
MulMulC_const_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, n);
} else {
MulMulC_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_ix, d_iy, d_iz,
d_i1, c, n);
}
}
__global__ void MulMulC_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i, float c, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i[id] * c;
d_o_x[id] *= v;
d_o_y[id] *= v;
d_o_z[id] *= v;
}
}
__global__ void MulMulC_I_const_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float c = c_delta;
float v = d_i[id] * c;
d_o_x[id] *= v;
d_o_y[id] *= v;
d_o_z[id] *= v;
}
}
/** @brief d_o = d_o * d_i * c (d_o.x = d_o.x * d_i * c)*/
void
MulMulC_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_i,
const float& c,
size_t n,
StreamT stream, bool onDev)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
if (onDev) {
cudaMemcpyToSymbolAsync(c_delta,&c, sizeof(float),
0,cudaMemcpyDeviceToDevice,stream);
MulMulC_I_const_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_i, n);
} else {
MulMulC_I_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz,
d_i, c, n);
}
}
__global__ void Add_MulMulC_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1_x, const float* d_i1_y, const float* d_i1_z,
const float* d_i2, float c, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i2[id] * c;
d_o_x[id] = d_i_x[id] + d_i1_x[id] * v;
d_o_y[id] = d_i_y[id] + d_i1_y[id] * v;
d_o_z[id] = d_i_z[id] + d_i1_z[id] * v;
}
}
__global__ void Add_MulMulC_const_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1_x, const float* d_i1_y, const float* d_i1_z,
const float* d_i2, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float c = c_delta;
float v = d_i2[id] * c;
d_o_x[id] = d_i_x[id] + d_i1_x[id] * v;
d_o_y[id] = d_i_y[id] + d_i1_y[id] * v;
d_o_z[id] = d_i_z[id] + d_i1_z[id] * v;
}
}
/** @brief d_o = d_i + d_i1 * d_i2 * c */
void
Add_MulMulC(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1x,
const float *d_i1y,
const float *d_i1z,
const float *d_i2,
const float& c,
size_t n,
StreamT stream, bool onDev)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
if (onDev) {
cudaMemcpyToSymbolAsync(c_delta,&c,sizeof(float),
0,cudaMemcpyDeviceToDevice,stream);
Add_MulMulC_const_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, d_i1x, d_i1y, d_i1z,d_i2, n);
} else {
Add_MulMulC_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, d_i1x, d_i1y, d_i1z,d_i2, c, n);
}
}
/** @brief d_o = d_o + d_i * d_i1 * c */
__global__ void Add_MulMulC_I_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1, float c, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float v = d_i1[id] * c;
d_o_x[id] += d_i_x[id] * v;
d_o_y[id] += d_i_y[id] * v;
d_o_z[id] += d_i_z[id] * v;
}
}
__global__ void Add_MulMulC_I_const_kernel(float* d_o_x, float* d_o_y, float* d_o_z,
const float* d_i_x, const float* d_i_y, const float* d_i_z,
const float* d_i1, uint n){
uint blockId = get_blockID();
uint id = get_threadID(blockId);
if (id < n){
float c = c_delta;
float v = d_i1[id] * c;
d_o_x[id] += d_i_x[id] * v;
d_o_y[id] += d_i_y[id] * v;
d_o_z[id] += d_i_z[id] * v;
}
}
void
Add_MulMulC_I(float *d_ox,
float *d_oy,
float *d_oz,
const float *d_ix,
const float *d_iy,
const float *d_iz,
const float *d_i1,
const float& c,
size_t n,
StreamT stream,
bool onDev)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
if (onDev ) {
cudaMemcpyToSymbolAsync(c_delta,&c,sizeof(float),
0,cudaMemcpyDeviceToDevice,stream);
Add_MulMulC_I_const_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, d_i1, n);
} else {
Add_MulMulC_I_kernel<<<grids, threads, 0, stream>>>
(d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, d_i1, c, n);
}
}
__global__ void JacDetH2D_kernel(
float* d_detJ,
const float* d_Xgx, const float* d_Xgy,
const float* d_Ygx, const float* d_Ygy,
uint n)
{
uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
uint id = blockId * blockDim.x + threadIdx.x;
if (id < n){
d_detJ[id] = d_Xgx[id]*d_Ygy[id] - d_Xgy[id]*d_Ygx[id];
}
}
__global__ void JacDetH3D_kernel(
float* d_detJ,
const float* d_Xgx, const float* d_Xgy, const float* d_Xgz,
const float* d_Ygx, const float* d_Ygy, const float* d_Ygz,
const float* d_Zgx, const float* d_Zgy, const float* d_Zgz, uint n)
{
uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
uint id = blockId * blockDim.x + threadIdx.x;
if (id < n){
float a00 = d_Xgx[id], a01 = d_Xgy[id], a02 = d_Xgz[id];
float a10 = d_Ygx[id], a11 = d_Ygy[id], a12 = d_Ygz[id];
float a20 = d_Zgx[id], a21 = d_Zgy[id], a22 = d_Zgz[id];
d_detJ[id] = det(a00, a01, a02,
a10, a11, a12,
a20, a21, a22);
}
}
void
JacDetH(float *d_detJ,
const float *d_Xgx,
const float *d_Xgy,
const float *d_Xgz,
const float *d_Ygx,
const float *d_Ygy,
const float *d_Ygz,
const float *d_Zgx,
const float *d_Zgy,
const float *d_Zgz,
size_t n,
bool slice,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
if(slice){
JacDetH2D_kernel<<<grids, threads, 0, stream>>>
(d_detJ,
d_Xgx, d_Xgy,
d_Ygx, d_Ygy,
n);
}else{
JacDetH3D_kernel<<<grids, threads, 0, stream>>>
(d_detJ,
d_Xgx, d_Xgy, d_Xgz,
d_Ygx, d_Ygy, d_Ygz,
d_Zgx, d_Zgy, d_Zgz, n);
}
}
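// For reference (assumption: the det() helper used above is the standard 3x3 cofactor expansion defined
// elsewhere in PyCA), the Jacobian determinant expands to:
static inline float det3x3_sketch(float a00, float a01, float a02,
                                  float a10, float a11, float a12,
                                  float a20, float a21, float a22)
{
    return a00*(a11*a22 - a12*a21)
         - a01*(a10*a22 - a12*a20)
         + a02*(a10*a21 - a11*a20);
}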
template<bool fwd>
__global__ void JacDetV_kernel(
float* d_detJ,
const float* d_Xgx, const float* d_Xgy, const float* d_Xgz,
const float* d_Ygx, const float* d_Ygy, const float* d_Ygz,
const float* d_Zgx, const float* d_Zgy, const float* d_Zgz, uint n)
{
uint blockId = blockIdx.y * gridDim.x + blockIdx.x;
uint id = blockId * blockDim.x + threadIdx.x;
if (id < n){
float a00, a01, a02, a10, a11, a12, a20, a21, a22;
if (fwd) {
a00 = 1.f + d_Xgx[id], a01 = d_Xgy[id], a02 = d_Xgz[id];
a10 = d_Ygx[id], a11 = 1.f + d_Ygy[id], a12 = d_Ygz[id];
a20 = d_Zgx[id], a21 = d_Zgy[id], a22 = 1.f + d_Zgz[id];
}else {
a00 = 1.f - d_Xgx[id], a01 = d_Xgy[id], a02 = d_Xgz[id];
a10 = d_Ygx[id], a11 = 1.f - d_Ygy[id], a12 = d_Ygz[id];
a20 = d_Zgx[id], a21 = d_Zgy[id], a22 = 1.f - d_Zgz[id];
}
d_detJ[id] = det(a00, a01, a02,
a10, a11, a12,
a20, a21, a22);
}
}
template<bool fwd>
void
JacDetV(float *d_detJ,
const float *d_Xgx,
const float *d_Xgy,
const float *d_Xgz,
const float *d_Ygx,
const float *d_Ygy,
const float *d_Ygz,
const float *d_Zgx,
const float *d_Zgy,
const float *d_Zgz,
size_t n,
StreamT stream)
{
dim3 threads(256);
dim3 grids=make_grid(iDivUp(n, threads.x));
JacDetV_kernel<fwd><<<grids, threads, 0, stream>>>
(d_detJ,
d_Xgx, d_Xgy, d_Xgz,
d_Ygx, d_Ygy, d_Ygz,
d_Zgx, d_Zgy, d_Zgz, n);
}
template<DiffT diffType, BoundaryCondT bc, bool slice>
__global__ void JacDetHPointwise_kernel
(float* d_jdet,
const float* d_Hx, const float* d_Hy, const float *d_Hz,
int szX, int szY, int szZ,
float ispX, float ispY, float ispZ)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < szX && j < szY)
{
int id = j * szX + i;
for (int k=0; k< szZ; ++k, id+= szX*szY)
{
// from FiniteDiff.h
jacDetPoint<float,diffType,bc,slice>
(d_jdet[id],
d_Hx,d_Hy,d_Hz,
i,j,k,
szX,szY,szZ,
ispX,ispY,ispZ);
}
}
}
template<DiffT diffType, BoundaryCondT bc, bool slice>
void
g_JacDetHPointwise(float *d_jdet,
const float *d_hx,
const float *d_hy,
const float *d_hz,
Vec3Di sz,
Vec3Df sp,
StreamT stream)
{
Vec3Df isp(1.0/sp.x, 1.0/sp.y, 1.0/sp.z);
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
JacDetHPointwise_kernel<diffType, bc, slice>
<<<grids, threads, 0, stream>>>
(d_jdet,
d_hx, d_hy, d_hz,
sz.x, sz.y, sz.z,
isp.x, isp.y, isp.z);
}
template<DiffT diffType>
void
g_JacDetHPointwise(float *d_jdet,
const float *d_hx,
const float *d_hy,
const float *d_hz,
Vec3Di sz,
Vec3Df sp,
BoundaryCondT bc,
StreamT stream)
{
bool slice = (sz.z == 1);
if(bc == BC_APPROX){
if(slice){
PyCA::g_JacDetHPointwise<diffType, BC_APPROX, SLICE_TRUE>
(d_jdet,
d_hx, d_hy, d_hz,
sz, sp, stream);
}else{
PyCA::g_JacDetHPointwise<diffType, BC_APPROX, SLICE_FALSE>
(d_jdet,
d_hx, d_hy, d_hz,
sz, sp, stream);
}
}else if(bc == BC_WRAP){
if(slice){
PyCA::g_JacDetHPointwise<diffType, BC_WRAP, SLICE_TRUE>
(d_jdet,
d_hx, d_hy, d_hz,
sz, sp, stream);
}else{
PyCA::g_JacDetHPointwise<diffType, BC_WRAP, SLICE_FALSE>
(d_jdet,
d_hx, d_hy, d_hz,
sz, sp, stream);
}
}else if(bc == BC_CLAMP){
if(slice){
PyCA::g_JacDetHPointwise<diffType, BC_CLAMP, SLICE_TRUE>
(d_jdet,
d_hx, d_hy, d_hz,
sz, sp, stream);
}else{
PyCA::g_JacDetHPointwise<diffType, BC_CLAMP, SLICE_FALSE>
(d_jdet,
d_hx, d_hy, d_hz,
sz, sp, stream);
}
}else{
throw PyCAException(__FILE__, __LINE__, "Unknown BoundaryCondT");
}
}
void
JacDetH(float *d_jdet,
const float *d_hx,
const float *d_hy,
const float *d_hz,
Vec3Di sz,
Vec3Df sp,
DiffT diffType,
BoundaryCondT bc,
StreamT stream)
{
if(diffType == DIFF_FORWARD){
PyCA::g_JacDetHPointwise<DIFF_FORWARD>
(d_jdet, d_hx, d_hy, d_hz, sz, sp, bc, stream);
}else if(diffType == DIFF_BACKWARD){
PyCA::g_JacDetHPointwise<DIFF_BACKWARD>
(d_jdet, d_hx, d_hy, d_hz, sz, sp, bc, stream);
}else if(diffType == DIFF_CENTRAL){
PyCA::g_JacDetHPointwise<DIFF_CENTRAL>
(d_jdet, d_hx, d_hy, d_hz, sz, sp, bc, stream);
}else{
throw PyCAException(__FILE__, __LINE__, "Unknown DiffT");
}
}
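// Usage sketch (hypothetical buffers and sizes, assuming d_hx/d_hy/d_hz hold
// the deformation field components on the device):
//   Vec3Di sz(128, 128, 128);
//   Vec3Df sp(1.f, 1.f, 1.f);
//   JacDetH(d_jdet, d_hx, d_hy, d_hz, sz, sp, DIFF_CENTRAL, BC_CLAMP, stream);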
// template instantiation
#include "GImageFieldOperKernels_inst.cxx"
} // end namespace PyCA
namespace vilib {
#define BLOCKDIM_X 32
#define BLOCKDIM_Y 4
#define RESULT_STEPS 8
#define HALO_STEPS 1
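// Tiling scheme used by the row-convolution kernel below: each thread block
// covers BLOCKDIM_Y image rows and RESULT_STEPS * BLOCKDIM_X output columns,
// staging HALO_STEPS * BLOCKDIM_X extra columns on each side in shared memory
// so every output pixel can read its left/right neighbours without another
// trip to global memory.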
#define INSTANTIATE_1D_ROW(I, O) \
template __host__ void conv_filter_row_gpu<I,O>(const I * d_image_in, \
const int input_pitch, \
O * d_image_out, \
const int output_pitch, \
const int width_px, \
const int height_px, \
const conv_filter_type_t filter_type, \
const conv_filter_border_type_t border_type, \
const bool skip_first_and_last_row, \
const float scale, \
cudaStream_t stream)
template<typename I, typename O, int RADIUS, conv_filter_border_type BORDER>
__global__ void conv_filter_row_gpu_shm_kernel(O * __restrict__ output,
const int output_pitch,
const I * __restrict__ input,
const int input_pitch,
const int input_width,
const int output_height,
const filter1x3_t filter,
const float scale) {
__shared__ float s_Data[BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_X];
// Offset to the left halo edge
const int baseX = (blockIdx.x * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y;
if(baseY >= output_height) return;
input += baseY * input_pitch;
output += baseY * output_pitch + baseX;
// Load main data AND right halo
#pragma unroll
for (int i = HALO_STEPS, i_x = HALO_STEPS * BLOCKDIM_X + baseX; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; ++i, i_x+= BLOCKDIM_X) {
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO:
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[input_width - 1];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[(input_width<<1) - 1 - i_x];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[i_x - input_width];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[(input_width<<1) - 2 - i_x];
break;
}
}
// Load left halo
#pragma unroll
for (int i = 0, i_x = baseX; i < HALO_STEPS; ++i, i_x += BLOCKDIM_X) {
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[0];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[-i_x - 1];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[i_x + input_width];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[-i_x];
break;
}
}
// Compute and store results
__syncthreads();
#pragma unroll
for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++)
{
float sum = 0.0f;
#pragma unroll
for (int j = -RADIUS; j <= RADIUS; j++)
{
sum += filter.d[RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X + j];
}
sum *= scale;
// Saturate if non-float
if(sizeof(O) < sizeof(float)) {
sum = max(min(sum,255.0f),0.f);
}
if(input_width > i*BLOCKDIM_X + baseX) {
output[i * BLOCKDIM_X] = sum;
}
}
}
template <typename I, typename O>
__host__ void conv_filter_row_gpu(const I * d_image_in,
const int input_pitch,
O * d_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_row,
const float scale,
cudaStream_t stream) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
int height_px_out = height_px - (skip_first_and_last_row?2:0);
dim3 threads_per_block(BLOCKDIM_X, BLOCKDIM_Y);
dim3 blocks_per_grid((width_px + RESULT_STEPS * BLOCKDIM_X -1) / (RESULT_STEPS * BLOCKDIM_X),
(height_px_out + BLOCKDIM_Y -1) / BLOCKDIM_Y);
// Note: radii up to BLOCKDIM_X * HALO_STEPS are actually supported, but the filter itself
// is only defined for a radius of 1
decltype(&conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>) kernel;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REPLICATE>;
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT>;
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_WRAP>;
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT_101>;
break;
default:
assert(0);
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
}
kernel<<<blocks_per_grid,threads_per_block,0,stream>>>(
d_image_out + (skip_first_and_last_row?output_pitch:0),
output_pitch,
d_image_in + (skip_first_and_last_row?input_pitch:0),
input_pitch,
width_px,
height_px_out,
filter,
scale);
CUDA_KERNEL_CHECK();
}
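// Usage sketch (hypothetical device buffers; pitches are expressed in elements,
// matching the element-wise pointer arithmetic in the kernel above):
//   conv_filter_row_gpu<unsigned char, unsigned char>(
//       d_in, in_pitch, d_out, out_pitch, width, height,
//       filter_type, conv_filter_border_type::BORDER_REPLICATE,
//       /*skip_first_and_last_row=*/false, /*scale=*/1.0f, stream);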
__host__ void conv_filter_row_cpu(const unsigned char * h_image_in,
const int input_pitch,
unsigned char * h_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_row,
const float scale) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
const int x_min = 0 + (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
const int x_max = (width_px-1) - (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
const int y_min = 0 + (skip_first_and_last_row?1:0);
const int y_max = (height_px-1) - (skip_first_and_last_row?1:0);
for(int y=y_min;y<=y_max;++y) {
for(int x=x_min;x<=x_max;++x) {
float accu = 0.0f;
for(int f_x=-1;f_x<=1;++f_x) {
int i_x = x+f_x;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
// nothing to do
break;
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
// nothing to do
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
i_x = min(max(i_x,0),x_max);
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
if(i_x < 0) {
i_x = -1*i_x - 1;
} else if(i_x > x_max) {
i_x = x_max - (i_x-width_px);
}
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
if(i_x < 0) {
i_x += width_px;
} else if(i_x > x_max) {
i_x -= width_px;
}
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
if(i_x < 0) {
i_x *= -1;
} else if(i_x > x_max) {
i_x = 2*x_max - i_x;
}
break;
}
// Handling of BORDER_ZERO
accu += ((i_x < 0 || i_x >= width_px) ? 0.0f : h_image_in[y*input_pitch+i_x])*filter.d[f_x+1];
}
accu *= scale;
h_image_out[y*output_pitch + x] = static_cast<unsigned char>(min(max(accu,0.0f),255.0f));
}
}
}
// Explicit instantiations
INSTANTIATE_1D_ROW(unsigned char, unsigned char);
INSTANTIATE_1D_ROW(unsigned char, float);
INSTANTIATE_1D_ROW(float, unsigned char);
INSTANTIATE_1D_ROW(float, float);
} // namespace vilib
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "dropout.cuh"
#include "softmax.cuh"
#include "strided_batched_gemm.cuh"
namespace multihead_attn {
namespace self_bias_additive_mask {
namespace cublas_gemmex {
std::vector<torch::Tensor> fwd_cuda(bool use_time_mask, bool is_training,
int heads, torch::Tensor const &inputs,
torch::Tensor const &input_weights,
torch::Tensor const &output_weights,
torch::Tensor const &input_biases,
torch::Tensor const &output_biases,
const half *pad_mask, float dropout_prob) {
const int embed_dim = inputs.size(2);
const int sequences = inputs.size(1);
const int q_seq_len = inputs.size(0);
const int k_seq_len = q_seq_len;
const int batches = sequences * q_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_dim = 3 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim = attn_batches * 3 * head_dim;
const int batch_stride = 3 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta_zero = 0.0;
const float beta_one = 1.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
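// Shape bookkeeping (hypothetical numbers for illustration): with
// embed_dim = 1024 and heads = 16, head_dim = 64 and output_lin_dim = 3072;
// with 8 sequences, attn_batches = 128. The fused QKV projection below thus
// writes 3 * embed_dim = 3072 values per token.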
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Intermediate Results + Output (Note: dropout intermediates are generated
// by ATen library code)
auto act_options = inputs.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_results =
torch::empty({q_seq_len, sequences, output_lin_dim}, act_options);
torch::Tensor bmm1_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask =
torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results =
torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs, act_options);
// Input Linear Results Pointers to Q, K, and V of the interleaved activations
void *q_lin_results_ptr = static_cast<void *>(input_lin_results.data_ptr());
void *k_lin_results_ptr = static_cast<void *>(
static_cast<half *>(input_lin_results.data_ptr()) + head_dim);
void *v_lin_results_ptr = static_cast<void *>(
static_cast<half *>(input_lin_results.data_ptr()) + 2 * head_dim);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void *bmm1_results_ptr = static_cast<void *>(bmm1_results.data_ptr());
void *dropout_results_ptr = static_cast<void *>(dropout_results.data_ptr());
char a_layout_t{'t'};
char a_layout_n{'n'};
char b_layout_n{'n'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Input Linear Fwd
input_lin_results.copy_(input_biases);
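// The GEMM below uses beta = 1 to accumulate onto the bias-initialized buffer,
// fusing the bias add into the projection; in cuBLAS column-major terms it
// computes W^T * X with the weights stored as [embed_dim x output_lin_dim].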
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_T, CUBLAS_OP_N, output_lin_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(input_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(inputs.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(&beta_one), q_lin_results_ptr,
CUDA_R_16F, output_lin_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul1 of dot-product attention plus scaling by 1/sqrt(head size)
gemm_switch_fp32accum(
a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, scale,
static_cast<const half *>(k_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(q_lin_results_ptr), lead_dim, batch_stride,
beta_zero, static_cast<half *>(bmm1_results_ptr), k_seq_len,
k_seq_len * q_seq_len, attn_batches);
// Padded Softmax
bool softmax_success = false;
if (is_training) {
softmax_success =
dispatch_additive_masked_softmax_dropout<half, half, float>(
reinterpret_cast<half *>(dropout_results_ptr),
(is_training)
? reinterpret_cast<uint8_t *>(dropout_mask.data_ptr<uint8_t>())
: nullptr,
reinterpret_cast<const half *>(bmm1_results_ptr), pad_mask,
attn_batches * q_seq_len * q_seq_len, k_seq_len, k_seq_len,
attn_batches * q_seq_len, attn_batches * q_seq_len / sequences,
1.0f - dropout_prob, stream);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half *>(
dropout_results_ptr), // this actually holds the softmax results; the
// name is kept consistent for the next function
reinterpret_cast<const half *>(bmm1_results_ptr), pad_mask, k_seq_len,
k_seq_len, attn_batches * q_seq_len,
attn_batches * q_seq_len / sequences);
}
// Matmul2
gemm_switch_fp32accum(
a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, alpha,
static_cast<const half *>(v_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(dropout_results.data_ptr()), k_seq_len,
k_seq_len * q_seq_len, beta_zero,
static_cast<half *>(matmul2_results.data_ptr()), head_dim * attn_batches,
head_dim, attn_batches);
outputs.copy_(output_biases);
// Output Linear
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_T, CUBLAS_OP_N, embed_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(output_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(matmul2_results.data_ptr()),
CUDA_R_16F, embed_dim, static_cast<const void *>(&beta_one),
static_cast<void *>(outputs.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F,
// CUBLAS_GEMM_ALGO1_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {input_lin_results, bmm1_results, dropout_results,
dropout_mask, matmul2_results, outputs};
}
std::vector<torch::Tensor> bwd_cuda(
int heads, torch::Tensor const &output_grads,
torch::Tensor const &matmul2_results, torch::Tensor const &dropout_results,
torch::Tensor const &bmm1_results, torch::Tensor const &pad_mask,
torch::Tensor const &input_lin_results, torch::Tensor const &inputs,
torch::Tensor const &input_weights, torch::Tensor const &output_weights,
torch::Tensor const &dropout_mask, float dropout_prob) {
const int embed_dim = inputs.size(2);
const int sequences = inputs.size(1);
const int q_seq_len = inputs.size(0);
const int k_seq_len = q_seq_len;
const int batches = sequences * q_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_dim = 3 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim = attn_batches * 3 * head_dim;
const int batch_stride = 3 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: multiple streams could be used in backprop, but this first version
// sticks to a single stream
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_grads = torch::empty_like(inputs);
torch::Tensor input_weight_grads = torch::empty_like(input_weights);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_output_grads = torch::empty_like(input_lin_results);
auto q_lin_results_ptr = static_cast<half *>(input_lin_results.data_ptr());
auto k_lin_results_ptr =
static_cast<half *>(input_lin_results.data_ptr()) + head_dim;
auto v_lin_results_ptr =
static_cast<half *>(input_lin_results.data_ptr()) + 2 * head_dim;
auto q_lin_grads_ptr = static_cast<half *>(input_lin_output_grads.data_ptr());
auto k_lin_grads_ptr =
static_cast<half *>(input_lin_output_grads.data_ptr()) + head_dim;
auto v_lin_grads_ptr =
static_cast<half *>(input_lin_output_grads.data_ptr()) + 2 * head_dim;
char a_layout_n{'n'};
char a_layout_t{'t'};
char b_layout_n{'n'};
char b_layout_t{'t'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(output_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(output_grads.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(&beta),
static_cast<void *>(output_lin_grads.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, embed_dim, batches,
static_cast<const void *>(&alpha),
static_cast<const void *>(matmul2_results.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(output_grads.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(&beta),
static_cast<void *>(output_weight_grads.data_ptr()), CUDA_R_16F,
embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
auto output_bias_grads = output_grads.view({-1, embed_dim}).sum(0, false);
// MatMul2 Dgrad1
gemm_switch_fp32accum(
a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha,
static_cast<const half *>(v_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(output_lin_grads.data_ptr()),
head_dim * attn_batches, head_dim, beta,
static_cast<half *>(matmul2_grads.data_ptr()), k_seq_len,
k_seq_len * q_seq_len, attn_batches);
// Matmul2 Dgrad2
gemm_switch_fp32accum(a_layout_n, b_layout_t, head_dim, k_seq_len,
q_seq_len, alpha,
static_cast<const half *>(output_lin_grads.data_ptr()),
head_dim * attn_batches, head_dim,
static_cast<const half *>(dropout_results.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, v_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
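// The "recompute" variant is handed the pre-softmax bmm1_results and pad_mask,
// so the softmax output is presumably re-derived here rather than stored from
// the forward pass.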
dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>(
static_cast<half *>(matmul2_grads.data_ptr()),
static_cast<half *const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const *>(bmm1_results.data_ptr()),
reinterpret_cast<half const *>(pad_mask.data_ptr()),
static_cast<uint8_t const *>(dropout_mask.data_ptr()),
1.0 / (1.0 - dropout_prob), k_seq_len, k_seq_len,
attn_batches * q_seq_len / sequences, attn_batches * q_seq_len, stream);
// Matmul1 Dgrad1
gemm_switch_fp32accum(a_layout_n, b_layout_n, head_dim, q_seq_len,
k_seq_len, scale, k_lin_results_ptr, lead_dim,
batch_stride,
static_cast<half *>(matmul2_grads.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, q_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Matmul1 Dgrad2
gemm_switch_fp32accum(a_layout_n, b_layout_t, head_dim, k_seq_len,
q_seq_len, scale, q_lin_results_ptr, lead_dim,
batch_stride,
static_cast<half *>(matmul2_grads.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, k_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Input Linear Dgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches, output_lin_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(input_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(input_lin_output_grads.data_ptr()),
// static_cast<const void*>(q_lin_grads_ptr),
CUDA_R_16F, output_lin_dim, static_cast<const void *>(&beta),
static_cast<void *>(input_grads.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F,
// CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Wgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, output_lin_dim, batches,
static_cast<const void *>(&alpha),
static_cast<const void *>(inputs.data_ptr()), CUDA_R_16F, embed_dim,
static_cast<const void *>(q_lin_grads_ptr), CUDA_R_16F, output_lin_dim,
static_cast<const void *>(&beta),
static_cast<void *>(input_weight_grads.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
auto input_bias_grads =
input_lin_output_grads.view({-1, output_lin_dim}).sum(0, false);
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {input_grads, input_weight_grads, output_weight_grads,
input_bias_grads, output_bias_grads};
}
} // end namespace cublas_gemmex
} // namespace self_bias_additive_mask
} // end namespace multihead_attn
#include "cupoch/collision/collision.h"
#include "cupoch/geometry/intersection_test.h"
#include "cupoch/geometry/lineset.h"
#include "cupoch/geometry/occupancygrid.h"
#include "cupoch/geometry/voxelgrid.h"
namespace cupoch {
namespace collision {
namespace {
const int MAX_NUM_COLLISIONS = 10000;
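// Each query functor below stages BVH overlap candidates in a thread-local
// buffer (of size 1 or MAX_NUM_COLLISIONS, depending on the functor) filled by
// lbvh::query_device, and returns either a pair of colliding indices or
// (-1, -1); the (-1, -1) entries are later stripped by remove_negative.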
template <typename LBVHType>
struct intersect_voxel_voxel_functor {
intersect_voxel_voxel_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, Eigen::Vector3i>& x) const {
Eigen::Vector3i key = thrust::get<1>(x);
Eigen::Vector3f vl = key.cast<float>() * voxel_size_ + origin_ -
Eigen::Vector3f::Constant(margin_);
Eigen::Vector3f vu =
(key + Eigen::Vector3i::Constant(1)).cast<float>() *
voxel_size_ +
origin_ + Eigen::Vector3f::Constant(margin_);
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
unsigned int buffer[1];
const auto num_found =
lbvh::query_device(lbvh_, lbvh::overlaps(box), buffer, 1);
return (num_found > 0) ? Eigen::Vector2i(buffer[0], thrust::get<0>(x))
: Eigen::Vector2i(-1, -1);
}
};
template <typename LBVHType>
struct intersect_voxel_line_functor {
intersect_voxel_line_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, Eigen::Vector3f, Eigen::Vector3f>& x)
const {
Eigen::Vector3f p1 = thrust::get<1>(x);
Eigen::Vector3f p2 = thrust::get<2>(x);
const Eigen::Vector3f ms = Eigen::Vector3f::Constant(margin_);
Eigen::Vector3f vl = p1.array().min(p2.array()).matrix() - ms;
Eigen::Vector3f vu = p1.array().max(p2.array()).matrix() + ms;
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
unsigned int buffer[MAX_NUM_COLLISIONS];
const auto num_found =
lbvh::query_device(lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
const Eigen::Vector3f h3 = Eigen::Vector3f::Constant(0.5);
for (int j = 0; j < num_found; ++j) {
const Eigen::Vector3i& other = lbvh_.objects[buffer[j]];
Eigen::Vector3f center =
((other.cast<float>() + h3) * voxel_size_) + origin_;
int coll = geometry::intersection_test::LineSegmentAABB(
p1, p2, center - box_half_size_ - ms, center + box_half_size_ + ms);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
};
template <typename LBVHType>
struct intersect_occgrid_line_functor {
intersect_occgrid_line_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, Eigen::Vector3f, Eigen::Vector3f>& x)
const {
Eigen::Vector3f p1 = thrust::get<1>(x);
Eigen::Vector3f p2 = thrust::get<2>(x);
const Eigen::Vector3f ms = Eigen::Vector3f::Constant(margin_);
Eigen::Vector3f vl = p1.array().min(p2.array()).matrix() - ms;
Eigen::Vector3f vu = p1.array().max(p2.array()).matrix() + ms;
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
unsigned int buffer[MAX_NUM_COLLISIONS];
const auto num_found =
lbvh::query_device(lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
const Eigen::Vector3f h3 = Eigen::Vector3f::Constant(0.5);
for (int j = 0; j < num_found; ++j) {
const Eigen::Vector3ui16& other = lbvh_.objects[buffer[j]].grid_index_;
Eigen::Vector3f center =
((other.cast<float>() + h3) * voxel_size_) + origin_;
int coll = geometry::intersection_test::LineSegmentAABB(
p1, p2, center - box_half_size_ - ms, center + box_half_size_ + ms);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
};
template <typename LBVHType>
struct intersect_voxel_occgrid_functor {
intersect_voxel_occgrid_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, geometry::OccupancyVoxel>& x) const {
geometry::OccupancyVoxel voxel = thrust::get<1>(x);
Eigen::Vector3f vl = voxel.grid_index_.cast<float>() * voxel_size_ +
origin_ - Eigen::Vector3f::Constant(margin_);
Eigen::Vector3f vu =
(voxel.grid_index_ + Eigen::Vector3ui16::Constant(1))
.cast<float>() *
voxel_size_ +
origin_ + Eigen::Vector3f::Constant(margin_);
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
unsigned int buffer[1];
const auto num_found =
lbvh::query_device(lbvh_, lbvh::overlaps(box), buffer, 1);
return (num_found > 0) ? Eigen::Vector2i(buffer[0], thrust::get<0>(x))
: Eigen::Vector2i(-1, -1);
}
};
template <typename LBVHType>
struct intersect_voxel_primitive_functor {
intersect_voxel_primitive_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, PrimitivePack>& x) const {
PrimitivePack primitive = thrust::get<1>(x);
const Eigen::Vector3f h3 = Eigen::Vector3f::Constant(0.5);
unsigned int buffer[MAX_NUM_COLLISIONS];
switch (primitive.primitive_.type_) {
case Primitive::PrimitiveType::Box: {
const Box& obox = primitive.box_;
auto bbox = obox.GetAxisAlignedBoundingBox();
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper = make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
const auto num_found = lbvh::query_device(
lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const Eigen::Vector3i& other = lbvh_.objects[buffer[j]];
Eigen::Vector3f center =
((other.cast<float>() + h3) * voxel_size_) + origin_;
int coll = geometry::intersection_test::BoxBox(
obox.lengths_ * 0.5, obox.transform_.block<3, 3>(0, 0),
obox.transform_.block<3, 1>(0, 3), box_half_size_ + Eigen::Vector3f::Constant(margin_),
Eigen::Matrix3f::Identity(), center);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
case Primitive::PrimitiveType::Sphere: {
const Sphere& sphere = primitive.sphere_;
auto bbox = sphere.GetAxisAlignedBoundingBox();
lbvh::aabb<float> box;
box.lower = make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper = make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
const auto num_found = lbvh::query_device(
lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const Eigen::Vector3i& other = lbvh_.objects[buffer[j]];
Eigen::Vector3f center =
((other.cast<float>() + h3) * voxel_size_) + origin_;
int coll = geometry::intersection_test::SphereAABB(
sphere.transform_.block<3, 1>(0, 3), sphere.radius_ + margin_,
center - box_half_size_, center + box_half_size_);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
case Primitive::PrimitiveType::Capsule: {
const Capsule& capsule = primitive.capsule_;
auto bbox = capsule.GetAxisAlignedBoundingBox();
lbvh::aabb<float> box;
box.lower = make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper = make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
const auto num_found = lbvh::query_device(
lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const Eigen::Vector3i& other = lbvh_.objects[buffer[j]];
Eigen::Vector3f center =
((other.cast<float>() + h3) * voxel_size_) + origin_;
Eigen::Vector3f d =
capsule.transform_.block<3, 1>(0, 3) -
0.5 * capsule.height_ *
capsule.transform_.block<3, 1>(0, 2);
int coll = geometry::intersection_test::CapsuleAABB(
capsule.radius_ + margin_, d,
capsule.height_ * capsule.transform_.block<3, 1>(0, 2),
center - box_half_size_, center + box_half_size_);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
default: {
return Eigen::Vector2i(-1, -1);
}
}
}
};
template <typename LBVHType>
struct intersect_primitive_voxel_functor {
intersect_primitive_voxel_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, Eigen::Vector3i>& x) const {
Eigen::Vector3i key = thrust::get<1>(x);
const Eigen::Vector3f h3 = Eigen::Vector3f::Constant(0.5);
Eigen::Vector3f vl = key.cast<float>() * voxel_size_ + origin_ -
Eigen::Vector3f::Constant(margin_);
Eigen::Vector3f vu =
(key + Eigen::Vector3i::Constant(1)).cast<float>() *
voxel_size_ +
origin_ + Eigen::Vector3f::Constant(margin_);
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
unsigned int buffer[MAX_NUM_COLLISIONS];
const auto num_found =
lbvh::query_device(lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const PrimitivePack other = lbvh_.objects[buffer[j]];
int coll = 0;
switch (other.primitive_.type_) {
case Primitive::PrimitiveType::Box: {
const Box& obox = other.box_;
Eigen::Vector3f center =
((key.cast<float>() + h3) * voxel_size_) + origin_;
coll = geometry::intersection_test::BoxBox(
obox.lengths_ * 0.5, obox.transform_.block<3, 3>(0, 0),
obox.transform_.block<3, 1>(0, 3), box_half_size_ + Eigen::Vector3f::Constant(margin_),
Eigen::Matrix3f::Identity(), center);
break;
}
case Primitive::PrimitiveType::Sphere: {
const Sphere& sphere = other.sphere_;
Eigen::Vector3f center =
((key.cast<float>() + h3) * voxel_size_) + origin_;
coll = geometry::intersection_test::SphereAABB(
sphere.transform_.block<3, 1>(0, 3), sphere.radius_ + margin_,
center - box_half_size_, center + box_half_size_);
break;
}
case Primitive::PrimitiveType::Capsule: {
const Capsule& capsule = other.capsule_;
Eigen::Vector3f center =
((key.cast<float>() + h3) * voxel_size_) + origin_;
Eigen::Vector3f d =
capsule.transform_.block<3, 1>(0, 3) -
0.5 * capsule.height_ *
capsule.transform_.block<3, 1>(0, 2);
coll = geometry::intersection_test::CapsuleAABB(
capsule.radius_ + margin_, d,
capsule.height_ * capsule.transform_.block<3, 1>(0, 2),
center - box_half_size_, center + box_half_size_);
break;
}
default: {
break;
}
}
if (coll == 1) return Eigen::Vector2i(thrust::get<0>(x), buffer[j]);
}
return Eigen::Vector2i(-1, -1);
}
};
template <typename LBVHType>
struct intersect_occvoxel_primitive_functor {
intersect_occvoxel_primitive_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, PrimitivePack>& x) const {
PrimitivePack primitive = thrust::get<1>(x);
const Eigen::Vector3f h3 = Eigen::Vector3f::Constant(0.5);
unsigned int buffer[MAX_NUM_COLLISIONS];
switch (primitive.primitive_.type_) {
case Primitive::PrimitiveType::Box: {
const Box& obox = primitive.box_;
auto bbox = obox.GetAxisAlignedBoundingBox();
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper = make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
const auto num_found = lbvh::query_device(
lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const geometry::OccupancyVoxel& other =
lbvh_.objects[buffer[j]];
Eigen::Vector3f center =
((other.grid_index_.cast<float>() + h3) * voxel_size_) +
origin_;
int coll = geometry::intersection_test::BoxBox(
obox.lengths_ * 0.5, obox.transform_.block<3, 3>(0, 0),
obox.transform_.block<3, 1>(0, 3), box_half_size_ + Eigen::Vector3f::Constant(margin_),
Eigen::Matrix3f::Identity(), center);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
case Primitive::PrimitiveType::Sphere: {
const Sphere& sphere = primitive.sphere_;
auto bbox = sphere.GetAxisAlignedBoundingBox();
lbvh::aabb<float> box;
box.lower = make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper = make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
const auto num_found = lbvh::query_device(
lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const geometry::OccupancyVoxel& other =
lbvh_.objects[buffer[j]];
Eigen::Vector3f center =
((other.grid_index_.cast<float>() + h3) * voxel_size_) +
origin_;
int coll = geometry::intersection_test::SphereAABB(
sphere.transform_.block<3, 1>(0, 3), sphere.radius_ + margin_,
center - box_half_size_, center + box_half_size_);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
case Primitive::PrimitiveType::Capsule: {
const Capsule& capsule = primitive.capsule_;
auto bbox = capsule.GetAxisAlignedBoundingBox();
lbvh::aabb<float> box;
box.lower = make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper = make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
const auto num_found = lbvh::query_device(
lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const geometry::OccupancyVoxel& other =
lbvh_.objects[buffer[j]];
Eigen::Vector3f center =
((other.grid_index_.cast<float>() + h3) * voxel_size_) +
origin_;
Eigen::Vector3f d =
capsule.transform_.block<3, 1>(0, 3) -
0.5 * capsule.height_ *
capsule.transform_.block<3, 1>(0, 2);
int coll = geometry::intersection_test::CapsuleAABB(
capsule.radius_ + margin_, d,
capsule.height_ * capsule.transform_.block<3, 1>(0, 2),
center - box_half_size_, center + box_half_size_);
if (coll == 1) return Eigen::Vector2i(buffer[j], thrust::get<0>(x));
}
return Eigen::Vector2i(-1, -1);
}
default: {
return Eigen::Vector2i(-1, -1);
}
}
}
};
template <typename LBVHType>
struct intersect_primitive_occvoxel_functor {
intersect_primitive_occvoxel_functor(const LBVHType& lbvh,
float voxel_size,
const Eigen::Vector3f& origin,
float margin)
: lbvh_(lbvh),
voxel_size_(voxel_size),
box_half_size_(Eigen::Vector3f(
voxel_size / 2, voxel_size / 2, voxel_size / 2)),
origin_(origin),
margin_(margin){};
const LBVHType lbvh_;
const float voxel_size_;
const Eigen::Vector3f box_half_size_;
const Eigen::Vector3f origin_;
const float margin_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<size_t, geometry::OccupancyVoxel>& x) const {
geometry::OccupancyVoxel voxel = thrust::get<1>(x);
const Eigen::Vector3f h3 = Eigen::Vector3f::Constant(0.5);
Eigen::Vector3f vl = voxel.grid_index_.cast<float>() * voxel_size_ +
origin_ - Eigen::Vector3f::Constant(margin_);
Eigen::Vector3f vu =
(voxel.grid_index_ + Eigen::Vector3ui16::Constant(1))
.cast<float>() *
voxel_size_ +
origin_ + Eigen::Vector3f::Constant(margin_);
// make a query box.
lbvh::aabb<float> box;
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
unsigned int buffer[MAX_NUM_COLLISIONS];
const auto num_found =
lbvh::query_device(lbvh_, lbvh::overlaps(box), buffer, MAX_NUM_COLLISIONS);
if (num_found == 0) return Eigen::Vector2i(-1, -1);
for (int j = 0; j < num_found; ++j) {
const PrimitivePack other = lbvh_.objects[buffer[j]];
int coll = 0;
switch (other.primitive_.type_) {
case Primitive::PrimitiveType::Box: {
const Box& obox = other.box_;
Eigen::Vector3f center =
((voxel.grid_index_.cast<float>() + h3) * voxel_size_) +
origin_;
coll = geometry::intersection_test::BoxBox(
obox.lengths_ * 0.5, obox.transform_.block<3, 3>(0, 0),
obox.transform_.block<3, 1>(0, 3), box_half_size_ + Eigen::Vector3f::Constant(margin_),
Eigen::Matrix3f::Identity(), center);
break;
}
case Primitive::PrimitiveType::Sphere: {
const Sphere& sphere = other.sphere_;
Eigen::Vector3f center =
((voxel.grid_index_.cast<float>() + h3) * voxel_size_) +
origin_;
coll = geometry::intersection_test::SphereAABB(
sphere.transform_.block<3, 1>(0, 3), sphere.radius_ + margin_,
center - box_half_size_, center + box_half_size_);
break;
}
case Primitive::PrimitiveType::Capsule: {
const Capsule& capsule = other.capsule_;
Eigen::Vector3f center =
((voxel.grid_index_.cast<float>() + h3) * voxel_size_) +
origin_;
Eigen::Vector3f d =
capsule.transform_.block<3, 1>(0, 3) -
0.5 * capsule.height_ *
capsule.transform_.block<3, 1>(0, 2);
coll = geometry::intersection_test::CapsuleAABB(
capsule.radius_ + margin_, d,
capsule.height_ * capsule.transform_.block<3, 1>(0, 2),
center - box_half_size_, center + box_half_size_);
break;
}
default: {
break;
}
}
if (coll == 1) return Eigen::Vector2i(thrust::get<0>(x), buffer[j]);
}
return Eigen::Vector2i(-1, -1);
}
};
struct convert_index_functor1 {
convert_index_functor1(int resolution) : resolution_(resolution){};
const int resolution_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<Eigen::Vector2i, geometry::OccupancyVoxel>& x) {
return Eigen::Vector2i(
thrust::get<0>(x)[0],
IndexOf(thrust::get<1>(x).grid_index_.cast<int>(),
resolution_));
}
};
struct convert_index_functor2 {
convert_index_functor2(int resolution) : resolution_(resolution){};
const int resolution_;
__device__ Eigen::Vector2i operator()(
const thrust::tuple<Eigen::Vector2i, geometry::OccupancyVoxel>& x) {
return Eigen::Vector2i(
IndexOf(thrust::get<1>(x).grid_index_.cast<int>(), resolution_),
thrust::get<0>(x)[0]);
}
};
} // namespace
CollisionResult::CollisionResult()
: first_(CollisionResult::CollisionType::Unspecified),
second_(CollisionResult::CollisionType::Unspecified){};
CollisionResult::CollisionResult(CollisionResult::CollisionType first,
CollisionResult::CollisionType second)
: first_(first), second_(second){};
CollisionResult::CollisionResult(const CollisionResult& other)
: first_(other.first_),
second_(other.second_),
collision_index_pairs_(other.collision_index_pairs_){};
CollisionResult::~CollisionResult(){};
thrust::host_vector<Eigen::Vector2i> CollisionResult::GetCollisionIndexPairs()
const {
thrust::host_vector<Eigen::Vector2i> h_collision_index_pairs =
collision_index_pairs_;
return h_collision_index_pairs;
}
utility::device_vector<size_t> CollisionResult::GetFirstCollisionIndices()
const {
utility::device_vector<size_t> res(collision_index_pairs_.size());
thrust::transform(collision_index_pairs_.begin(),
collision_index_pairs_.end(), res.begin(),
element_get_functor<Eigen::Vector2i, 0>());
return res;
}
utility::device_vector<size_t> CollisionResult::GetSecondCollisionIndices()
const {
utility::device_vector<size_t> res(collision_index_pairs_.size());
thrust::transform(collision_index_pairs_.begin(),
collision_index_pairs_.end(), res.begin(),
element_get_functor<Eigen::Vector2i, 1>());
return res;
}
template <>
class ConstructorImpl<geometry::VoxelGrid> {
public:
struct aabb_getter {
aabb_getter(float voxel_size, const Eigen::Vector3f& origin)
: voxel_size_(voxel_size), origin_(origin){};
const float voxel_size_;
const Eigen::Vector3f origin_;
__device__ lbvh::aabb<float> operator()(
const Eigen::Vector3i& obj) const {
Eigen::Vector3f vl = obj.cast<float>() * voxel_size_ + origin_;
Eigen::Vector3f vu =
(obj + Eigen::Vector3i::Constant(1)).cast<float>() *
voxel_size_ +
origin_;
lbvh::aabb<float> box;
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
return box;
}
};
ConstructorImpl(const geometry::VoxelGrid& voxelgrid)
: bvh_(voxelgrid.voxels_keys_.begin(),
voxelgrid.voxels_keys_.end(),
aabb_getter(voxelgrid.voxel_size_, voxelgrid.origin_)){};
~ConstructorImpl(){};
lbvh::bvh<float, Eigen::Vector3i, aabb_getter> bvh_;
};
template <>
class ConstructorImpl<geometry::OccupancyGrid> {
public:
struct aabb_getter {
aabb_getter(float voxel_size, const Eigen::Vector3f& origin)
: voxel_size_(voxel_size), origin_(origin){};
const float voxel_size_;
const Eigen::Vector3f origin_;
__device__ lbvh::aabb<float> operator()(
const geometry::OccupancyVoxel& obj) const {
Eigen::Vector3f vl =
obj.grid_index_.cast<float>() * voxel_size_ + origin_;
Eigen::Vector3f vu =
(obj.grid_index_ + Eigen::Vector3ui16::Constant(1))
.cast<float>() *
voxel_size_ +
origin_;
lbvh::aabb<float> box;
box.upper = make_float4(vu[0], vu[1], vu[2], 0.0f);
box.lower = make_float4(vl[0], vl[1], vl[2], 0.0f);
return box;
}
};
ConstructorImpl(
const utility::device_vector<geometry::OccupancyVoxel>& values,
float voxel_size,
const Eigen::Vector3f& origin)
: bvh_(values.begin(), values.end(), aabb_getter(voxel_size, origin)){};
~ConstructorImpl(){};
lbvh::bvh<float, geometry::OccupancyVoxel, aabb_getter> bvh_;
};
template <>
class ConstructorImpl<PrimitiveArray> {
public:
struct aabb_getter {
aabb_getter(){};
__device__ lbvh::aabb<float> operator()(
const PrimitivePack& obj) const {
switch (obj.primitive_.type_) {
case Primitive::PrimitiveType::Box: {
const Box& obox = obj.box_;
auto bbox = obox.GetAxisAlignedBoundingBox();
// make a query box.
lbvh::aabb<float> box;
box.lower =
make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper =
make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
return box;
}
case Primitive::PrimitiveType::Sphere: {
const Sphere& sphere = obj.sphere_;
auto bbox = sphere.GetAxisAlignedBoundingBox();
lbvh::aabb<float> box;
box.lower =
make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper =
make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
return box;
}
case Primitive::PrimitiveType::Capsule: {
const Capsule& capsule = obj.capsule_;
auto bbox = capsule.GetAxisAlignedBoundingBox();
lbvh::aabb<float> box;
box.lower =
make_float4(bbox.min_bound_[0], bbox.min_bound_[1],
bbox.min_bound_[2], 0.0f);
box.upper =
make_float4(bbox.max_bound_[0], bbox.max_bound_[1],
bbox.max_bound_[2], 0.0f);
return box;
}
default: {
return lbvh::aabb<float>();
}
}
}
};
ConstructorImpl(const PrimitiveArray& primitives)
: bvh_(primitives.begin(), primitives.end(), aabb_getter()){};
~ConstructorImpl(){};
lbvh::bvh<float, PrimitivePack, aabb_getter> bvh_;
};
template <>
void Intersection<geometry::VoxelGrid>::Construct() {
if (target_.IsEmpty()) {
utility::LogWarning("[Intersection::Construct] target is empty.");
return;
}
impl_ = std::make_shared<ConstructorImpl<geometry::VoxelGrid>>(target_);
}
template <>
void Intersection<geometry::OccupancyGrid>::Construct() {
if (target_.IsEmpty()) {
utility::LogWarning("[Intersection::Construct] target is empty.");
return;
}
const Eigen::Vector3f occ_origin =
target_.origin_ -
0.5 * target_.voxel_size_ *
Eigen::Vector3f::Constant(target_.resolution_);
auto occupied_voxels = target_.ExtractOccupiedVoxels();
impl_ = std::make_shared<ConstructorImpl<geometry::OccupancyGrid>>(
*occupied_voxels, target_.voxel_size_, occ_origin);
}
template <>
void Intersection<PrimitiveArray>::Construct() {
if (target_.empty()) {
utility::LogWarning("[Intersection::Construct] target is empty.");
return;
}
impl_ = std::make_shared<ConstructorImpl<PrimitiveArray>>(target_);
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<geometry::VoxelGrid>::Compute<geometry::VoxelGrid>(
const geometry::VoxelGrid& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::VoxelGrid,
CollisionResult::CollisionType::VoxelGrid);
if (target_.IsEmpty() || query.IsEmpty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
intersect_voxel_voxel_functor<decltype(bvh_dev)> func(
bvh_dev, query.voxel_size_, query.origin_, margin);
out->collision_index_pairs_.resize(query.voxels_keys_.size());
thrust::transform(enumerate_begin(query.voxels_keys_),
enumerate_end(query.voxels_keys_),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<geometry::VoxelGrid>::Compute<geometry::LineSet<3>>(
const geometry::LineSet<3>& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::VoxelGrid,
CollisionResult::CollisionType::LineSet);
if (target_.IsEmpty() || query.IsEmpty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
out->collision_index_pairs_.resize(query.lines_.size());
intersect_voxel_line_functor<decltype(bvh_dev)> func(
bvh_dev, target_.voxel_size_, target_.origin_, margin);
thrust::transform(
make_tuple_iterator(
thrust::make_counting_iterator<size_t>(0),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.begin(),
element_get_functor<Eigen::Vector2i, 0>())),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.begin(),
element_get_functor<Eigen::Vector2i, 1>()))),
make_tuple_iterator(
thrust::make_counting_iterator(query.lines_.size()),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.end(),
element_get_functor<Eigen::Vector2i, 0>())),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.end(),
element_get_functor<Eigen::Vector2i, 1>()))),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<geometry::VoxelGrid>::Compute<geometry::OccupancyGrid>(
const geometry::OccupancyGrid& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::VoxelGrid,
CollisionResult::CollisionType::OccupancyGrid);
if (target_.IsEmpty() || query.IsEmpty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
intersect_voxel_occgrid_functor<decltype(bvh_dev)> func(
bvh_dev, query.voxel_size_, query.origin_, margin);
auto occ_voxels = query.ExtractOccupiedVoxels();
out->collision_index_pairs_.resize(occ_voxels->size());
thrust::transform(enumerate_begin(*occ_voxels), enumerate_end(*occ_voxels),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
convert_index_functor1 cfunc(query.resolution_);
thrust::transform(
make_tuple_iterator(
out->collision_index_pairs_.begin(),
thrust::make_permutation_iterator(
occ_voxels->begin(),
thrust::make_transform_iterator(
out->collision_index_pairs_.begin(),
element_get_functor<Eigen::Vector2i, 1>()))),
make_tuple_iterator(
out->collision_index_pairs_.end(),
thrust::make_permutation_iterator(
occ_voxels->begin(),
thrust::make_transform_iterator(
out->collision_index_pairs_.end(),
element_get_functor<Eigen::Vector2i, 1>()))),
out->collision_index_pairs_.begin(), cfunc);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<geometry::OccupancyGrid>::Compute<geometry::VoxelGrid>(
const geometry::VoxelGrid& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::OccupancyGrid,
CollisionResult::CollisionType::VoxelGrid);
if (target_.IsEmpty() || query.IsEmpty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
out->collision_index_pairs_.resize(query.voxels_keys_.size());
intersect_voxel_voxel_functor<decltype(bvh_dev)> func(
bvh_dev, query.voxel_size_, query.origin_, margin);
thrust::transform(enumerate_begin(query.voxels_keys_),
enumerate_end(query.voxels_keys_),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
convert_index_functor2 cfunc(target_.resolution_);
thrust::transform(
thrust::device,
make_tuple_iterator(
out->collision_index_pairs_.begin(),
thrust::make_permutation_iterator(
bvh_dev.objects,
thrust::make_transform_iterator(
out->collision_index_pairs_.begin(),
element_get_functor<Eigen::Vector2i, 0>()))),
make_tuple_iterator(
out->collision_index_pairs_.end(),
thrust::make_permutation_iterator(
bvh_dev.objects,
thrust::make_transform_iterator(
out->collision_index_pairs_.end(),
element_get_functor<Eigen::Vector2i, 0>()))),
out->collision_index_pairs_.begin(), cfunc);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<geometry::OccupancyGrid>::Compute<geometry::LineSet<3>>(
const geometry::LineSet<3>& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::OccupancyGrid,
CollisionResult::CollisionType::LineSet);
if (target_.IsEmpty() || query.IsEmpty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
out->collision_index_pairs_.resize(query.lines_.size());
intersect_occgrid_line_functor<decltype(bvh_dev)> func(
bvh_dev, target_.voxel_size_, target_.origin_, margin);
thrust::transform(
make_tuple_iterator(
thrust::make_counting_iterator<size_t>(0),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.begin(),
element_get_functor<Eigen::Vector2i, 0>())),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.begin(),
element_get_functor<Eigen::Vector2i, 1>()))),
make_tuple_iterator(
thrust::make_counting_iterator(query.lines_.size()),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.end(),
element_get_functor<Eigen::Vector2i, 0>())),
thrust::make_permutation_iterator(
query.points_.begin(),
thrust::make_transform_iterator(
query.lines_.end(),
element_get_functor<Eigen::Vector2i, 1>()))),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
convert_index_functor2 cfunc(target_.resolution_);
thrust::transform(
thrust::device,
make_tuple_iterator(
out->collision_index_pairs_.begin(),
thrust::make_permutation_iterator(
bvh_dev.objects,
thrust::make_transform_iterator(
out->collision_index_pairs_.begin(),
element_get_functor<Eigen::Vector2i, 0>()))),
make_tuple_iterator(
out->collision_index_pairs_.end(),
thrust::make_permutation_iterator(
bvh_dev.objects,
thrust::make_transform_iterator(
out->collision_index_pairs_.end(),
element_get_functor<Eigen::Vector2i, 0>()))),
out->collision_index_pairs_.begin(), cfunc);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<geometry::VoxelGrid>::Compute<PrimitiveArray>(
const PrimitiveArray& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::VoxelGrid,
CollisionResult::CollisionType::Primitives);
if (target_.IsEmpty() || query.empty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
out->collision_index_pairs_.resize(query.size());
intersect_voxel_primitive_functor<decltype(bvh_dev)> func(
bvh_dev, target_.voxel_size_, target_.origin_, margin);
thrust::transform(enumerate_begin(query), enumerate_end(query),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<PrimitiveArray>::Compute<geometry::VoxelGrid>(
const geometry::VoxelGrid& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::Primitives,
CollisionResult::CollisionType::VoxelGrid);
if (target_.empty() || query.IsEmpty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
intersect_primitive_voxel_functor<decltype(bvh_dev)> func(
bvh_dev, query.voxel_size_, query.origin_, margin);
out->collision_index_pairs_.resize(query.voxels_keys_.size());
thrust::transform(enumerate_begin(query.voxels_keys_),
enumerate_end(query.voxels_keys_),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<geometry::OccupancyGrid>::Compute<PrimitiveArray>(
const PrimitiveArray& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::OccupancyGrid,
CollisionResult::CollisionType::Primitives);
if (target_.IsEmpty() || query.empty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
out->collision_index_pairs_.resize(query.size());
auto occ_voxels = target_.ExtractOccupiedVoxels();
intersect_occvoxel_primitive_functor<decltype(bvh_dev)> func(
bvh_dev, target_.voxel_size_, target_.origin_, margin);
thrust::transform(enumerate_begin(query), enumerate_end(query),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
return out;
}
template <>
template <>
std::shared_ptr<CollisionResult>
Intersection<PrimitiveArray>::Compute<geometry::OccupancyGrid>(
const geometry::OccupancyGrid& query, float margin) const {
auto out = std::make_shared<CollisionResult>(
CollisionResult::CollisionType::Primitives,
CollisionResult::CollisionType::OccupancyGrid);
if (target_.empty() || query.IsEmpty()) {
utility::LogWarning(
"[Intersection::Compute] target or query is empty.");
return out;
}
const auto bvh_dev = impl_->bvh_.get_device_repr();
intersect_primitive_occvoxel_functor<decltype(bvh_dev)> func(
bvh_dev, query.voxel_size_, query.origin_, margin);
auto occ_voxels = query.ExtractOccupiedVoxels();
out->collision_index_pairs_.resize(occ_voxels->size());
thrust::transform(enumerate_begin(*occ_voxels), enumerate_end(*occ_voxels),
out->collision_index_pairs_.begin(), func);
remove_negative(utility::exec_policy(0)->on(0),
out->collision_index_pairs_);
convert_index_functor2 cfunc(query.resolution_);
thrust::transform(
thrust::device,
make_tuple_iterator(
out->collision_index_pairs_.begin(),
thrust::make_permutation_iterator(
occ_voxels->begin(),
thrust::make_transform_iterator(
out->collision_index_pairs_.begin(),
element_get_functor<Eigen::Vector2i, 1>()))),
make_tuple_iterator(
out->collision_index_pairs_.end(),
thrust::make_permutation_iterator(
occ_voxels->begin(),
thrust::make_transform_iterator(
out->collision_index_pairs_.end(),
element_get_functor<Eigen::Vector2i, 1>()))),
out->collision_index_pairs_.begin(), cfunc);
return out;
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::VoxelGrid& voxelgrid1,
const geometry::VoxelGrid& voxelgrid2,
float margin) {
Intersection<geometry::VoxelGrid> intsct(voxelgrid1);
return intsct.Compute<geometry::VoxelGrid>(voxelgrid2, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::VoxelGrid& voxelgrid,
const geometry::LineSet<3>& lineset,
float margin) {
Intersection<geometry::VoxelGrid> intsct(voxelgrid);
return intsct.Compute<geometry::LineSet<3>>(lineset, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::LineSet<3>& lineset,
const geometry::VoxelGrid& voxelgrid,
float margin) {
auto out = ComputeIntersection(voxelgrid, lineset, margin);
out->first_ = CollisionResult::CollisionType::LineSet;
out->second_ = CollisionResult::CollisionType::VoxelGrid;
swap_index(out->collision_index_pairs_);
return out;
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::VoxelGrid& voxelgrid,
const geometry::OccupancyGrid& occgrid,
float margin) {
Intersection<geometry::VoxelGrid> intsct(voxelgrid);
return intsct.Compute<geometry::OccupancyGrid>(occgrid, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::OccupancyGrid& occgrid,
const geometry::VoxelGrid& voxelgrid,
float margin) {
Intersection<geometry::OccupancyGrid> intsct(occgrid);
return intsct.Compute<geometry::VoxelGrid>(voxelgrid, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::LineSet<3>& lineset,
const geometry::OccupancyGrid& occgrid,
float margin) {
auto out = ComputeIntersection(occgrid, lineset, margin);
out->first_ = CollisionResult::CollisionType::LineSet;
out->second_ = CollisionResult::CollisionType::OccupancyGrid;
swap_index(out->collision_index_pairs_);
return out;
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::OccupancyGrid& occgrid,
const geometry::LineSet<3>& lineset,
float margin) {
Intersection<geometry::OccupancyGrid> intsct(occgrid);
return intsct.Compute<geometry::LineSet<3>>(lineset, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::VoxelGrid& voxelgrid,
const PrimitiveArray& primitives,
float margin) {
Intersection<geometry::VoxelGrid> intsct(voxelgrid);
return intsct.Compute<PrimitiveArray>(primitives, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const PrimitiveArray& primitives,
const geometry::VoxelGrid& voxelgrid,
float margin) {
Intersection<PrimitiveArray> intsct(primitives);
return intsct.Compute<geometry::VoxelGrid>(voxelgrid, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const geometry::OccupancyGrid& occgrid,
const PrimitiveArray& primitives,
float margin) {
Intersection<geometry::OccupancyGrid> intsct(occgrid);
return intsct.Compute<PrimitiveArray>(primitives, margin);
}
std::shared_ptr<CollisionResult> ComputeIntersection(
const PrimitiveArray& primitives,
const geometry::OccupancyGrid& occgrid,
float margin) {
Intersection<PrimitiveArray> intsct(primitives);
return intsct.Compute<geometry::OccupancyGrid>(occgrid, margin);
}
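// Illustrative usage sketch (added for clarity, not part of the original API):
// all of the free ComputeIntersection overloads above follow the same pattern,
// so a call site only has to pick the argument order that matches the desired
// (first_, second_) collision types. The helper name below is hypothetical and
// assumes collision_index_pairs_ is a standard device vector with empty().
namespace {
inline bool HasAnyCollision(const geometry::VoxelGrid& voxelgrid,
                            const PrimitiveArray& primitives,
                            float margin) {
    // Argument order selects (first_, second_) = (VoxelGrid, Primitives).
    auto result = ComputeIntersection(voxelgrid, primitives, margin);
    return !result->collision_index_pairs_.empty();
}
}  // namespace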
} // namespace collision
} // namespace cupoch
//#include <stdio.h>
using namespace kfusion::device;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
TsdfVolume::elem_type *beg = tsdf.beg(x, y);
TsdfVolume::elem_type *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for (TsdfVolume::elem_type* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf(0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block(32, 8);
dim3 grid(1, 1, 1);
grid.x = divUp(volume.dims.x, block.x);
grid.y = divUp(volume.dims.y, block.y);
clear_volume_kernel<<<grid, block>>>(volume);
cudaSafeCall(cudaGetLastError());
}
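// Worked example (added for illustration): with volume.dims = (512, 512, 512)
// and the 32x8 block above, the launch uses divUp(512, 32) x divUp(512, 8) =
// 16 x 64 blocks; each thread then walks an entire z-column of its (x, y)
// position via tsdf.zstep(), which is why the grid needs no z dimension.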
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; // transform from the volume coordinate frame to the camera frame
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for (int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// This is actually a workaround for Kepler: the texture fetch doesn't return 0.f
// for out-of-border coordinates, even in cudaAddressModeBorder mode.
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if (Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist)
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf(gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1);
int weight_new = min(weight_prev + 1, volume.max_weight);
//pack and write
gmem::StCs(pack_tsdf(tsdf_new, weight_new), vptr);
}
}
}
};
__global__ void integrate_kernel(const TsdfIntegrator integrator, TsdfVolume volume)
{
integrator(volume);
}
}
}
void kfusion::device::integrate(const Dists& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f / volume.trunc_dist;
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf());
(void) binder;
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
integrate_kernel<<<grid, block>>>(ti, volume);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaDeviceSynchronize());
}
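// Minimal host-side sketch (added for illustration, not part of kfusion) of the
// per-voxel update performed by TsdfIntegrator above: the stored value is a
// weight-capped running average, tsdf_new = (tsdf_prev*w + tsdf)/(w + 1) with
// w_new = min(w + 1, max_weight). The numbers below are arbitrary.
static inline void tsdf_running_average_example()
{
    float tsdf_prev = 0.8f;    // value currently packed in the voxel
    int weight_prev = 3;       // number of observations it already averages
    float tsdf = 0.4f;         // new truncated SDF sample in [-1, 1]
    const int max_weight = 64; // hypothetical cap; the real cap is volume.max_weight
    float tsdf_new = (tsdf_prev * weight_prev + tsdf) / (weight_prev + 1);            // = 0.7f
    int weight_new = (weight_prev + 1 < max_weight) ? weight_prev + 1 : max_weight;   // = 4
    (void)tsdf_new;
    (void)weight_new;
}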
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f / ray_dir.x, 1.f / ray_dir.y, 1.f / ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), tmin.z);
tfar = fminf(fminf(tmax.x, tmax.y), tmax.z);
}
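// Worked example (added for illustration): for a ray starting at (-1, 0.5, 0.5)
// travelling along +x against a box with box_max = (1, 1, 1):
//   invR = (1, +inf, +inf)                  (IEEE infinities handle axis-parallel rays)
//   tbot = (1, -inf, -inf), ttop = (2, +inf, +inf)
//   tmin = (1, -inf, -inf), tmax = (2, +inf, +inf)
//   tnear = 1, tfar = 2
// tnear < tfar, so the ray enters the box at t = 1 and leaves it at t = 2.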
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd(cf.x), __float2int_rd(cf.y), __float2int_rd(cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
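// Worked example (added for illustration): for p_voxels = (1.25, 2.5, 3.0) the
// base cell is g = (1, 2, 3) with fractional offsets a = 0.25, b = 0.5, c = 0.
// Only the four corners at g.z + 0 get non-zero weights:
//   (1-a)(1-b) = 0.375, (1-a)b = 0.375, a(1-b) = 0.125, ab = 0.125,
// and the eight trilinear weights always sum to 1.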
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn(p.x * voxel_size_inv.x);
int y = __float2int_rn(p.y * voxel_size_inv.y);
int z = __float2int_rn(p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized(aff.R * reproj(x, y, 1.f));
// We subtract the voxel size to minimize checks later.
// Note: the origin of the volume coordinate frame is placed
// at the center of voxel (0,0,0), not at the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized(aff.R * reproj(x, y, 1.f));
// We subtract the voxel size to minimize checks later.
// Note: the origin of the volume coordinate frame is placed
// at the center of voxel (0,0,0), not at the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized(n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj) :
volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj)
{
}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{
raycaster(depth, normals);
}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{
raycaster(points, normals);
}
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj, Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f / volume.voxel_size;
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<ushort> ) depth, normals);
cudaSafeCall(cudaGetLastError());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj, Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f / volume.voxel_size;
dim3 block(32, 8);
dim3 grid(divUp(points.cols(), block.x), divUp(points.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<Point> ) points, normals);
cudaSafeCall(cudaGetLastError());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind
{
exclusive, inclusive
};
template<ScanKind Kind, class T>
__kf_device__ T scan_warp(volatile T *ptr, const unsigned int idx = threadIdx.x)
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1)
ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2)
ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4)
ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8)
ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16)
ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
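// Host reference of the same Hillis-Steele recurrence (illustrative sketch,
// added for clarity; it is not called by the kernels). With every element equal
// to 1 the inclusive scan yields 1, 2, ..., 32, and the exclusive variant above
// simply shifts that result right by one lane, inserting 0 at lane 0.
static inline void scan_warp_host_reference(int ptr[32])
{
    for (int d = 1; d < 32; d <<= 1)
        for (int i = 31; i >= d; --i) // downward order mimics the lock-step warp update
            ptr[i] += ptr[i - d];     // after all steps ptr[i] holds the inclusive prefix sum
}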
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 6, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) :
volume(vol)
{
}
__kf_device__
float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__
void operator ()(PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
#if __CUDA_ARCH__ >= 120
if (__all_sync(0xFFFFFFFF, x >= volume.dims.x) || __all_sync(0xFFFFFFFF, y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.x = (V.x * fabs(Fn) + Vnx * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch(x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.y = (V.y * fabs(Fn) + Vny * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch(x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs(F) + fabs(Fn));
p.z = (V.z * fabs(Fn) + Vnz * fabs(F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
#if __CUDA_ARCH__ >= 200
// Count how many points the whole warp produced in this iteration (ballot over local_count)
int total_warp = __popc (__ballot_sync(0xFFFFFFFF, local_count > 0)) + __popc (__ballot_sync(0xFFFFFFFF, local_count > 1)) + __popc (__ballot_sync(0xFFFFFFFF, local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*) (storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd(&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc(&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min((int) output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output)
{
fs(output);
}
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) :
volume(vol)
{
voxel_size_inv.x = 1.f / volume.voxel_size.x;
voxel_size_inv.y = 1.f / volume.voxel_size.y;
voxel_size_inv.z = 1.f / volume.voxel_size.z;
}
__kf_device__
int3 getVoxel(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn(p.x * voxel_size_inv.x);
int y = __float2int_rn(p.y * voxel_size_inv.y);
int z = __float2int_rn(p.z * voxel_size_inv.z);
return make_int3(x, y, z);
}
__kf_device__
void operator ()(float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
float3 n = make_float3(qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel(point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized(aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel(const ExtractNormals en, float4* output)
{
en(output);
}
}
}
size_t kfusion::device::extractCloud(const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block(FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
extract_kernel<<<grid, block>>>(fs, output);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaDeviceSynchronize());
int size;
cudaSafeCall(cudaMemcpyFromSymbol(&size, output_count, sizeof(size)));
return (size_t) size;
}
void kfusion::device::extractNormals(const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block(256);
dim3 grid(divUp((int) points.size, block.x));
extract_normals_kernel<<<grid, block>>>(en, output);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaDeviceSynchronize());
}
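// Illustrative call-order sketch (added for clarity, not part of the original
// file): extractCloud runs first and reports how many points it wrote, and
// extractNormals is then fed the same (now shrunken) point buffer. All buffer
// names below are hypothetical.
//
//     size_t n = kfusion::device::extractCloud(volume, aff, cloud_buffer);
//     cloud_buffer.size = n;   // keep only the extracted points
//     kfusion::device::extractNormals(volume, cloud_buffer, aff, Rinv,
//                                     gradient_delta_factor, normals_out);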
#include "Threshold.h"
#include <iostream>
using namespace std;
#include "ErrorCode.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default dimensions of a thread block.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
// Has an output image, no high/low output values (low, high)
static __global__ void // kernel functions have no return value
_thresholdKer(
ImageCuda in, // input image
ImageCuda out, // output image
unsigned char minpixel, // minimum pixel value
unsigned char maxpixel // maximum pixel value
);
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
// No output image, no high/low output values (low, high)
static __global__ void // kernel functions have no return value
_thresholdKer(
ImageCuda inout, // input/output image
unsigned char minpixel, // minimum pixel value
unsigned char maxpixel // maximum pixel value
);
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
// Has an output image and high/low output values (low, high)
static __global__ void // kernel functions have no return value
_thresholdKer(
ImageCuda in, // input image
ImageCuda out, // output image
unsigned char minpixel, // minimum pixel value
unsigned char maxpixel, // maximum pixel value
unsigned char low, // low output value
unsigned char high // high output value
);
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
// No output image, but has high/low output values (low, high)
static __global__ void // kernel functions have no return value
_thresholdKer(
ImageCuda inout, // input/output image
unsigned char minpixel, // minimum pixel value
unsigned char maxpixel, // maximum pixel value
unsigned char low, // low output value
unsigned char high // high output value
);
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
static __global__ void _thresholdKer(
ImageCuda in, ImageCuda out, unsigned char minpixel,
unsigned char maxpixel)
{
// Compute the output pixel position handled by this thread. dstc and dstr are
// the x and y components of the pixel coordinate (c for column, r for row).
// To reduce the degree of parallelism, each thread processes 4 output pixels
// lying in the same column on 4 adjacent rows, so dstr is multiplied by 4.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// Check whether the first pixel is out of bounds. If so, do nothing: this
// saves computation and avoids crashes caused by out-of-range accesses.
if (dstc >= in.imgMeta.width || dstr >= in.imgMeta.height)
return;
// Compute the array indices of the first input and output pixels.
int dstidx = dstr * in.pitchBytes + dstc;
int outidx = dstr * out.pitchBytes + dstc;
// Threshold according to the pixel value.
if(in.imgMeta.imgData[dstidx] < minpixel || in.imgMeta.imgData[dstidx] >
maxpixel)
out.imgMeta.imgData[outidx] = 0;
else
out.imgMeta.imgData[outidx] = in.imgMeta.imgData[dstidx];
// Process the remaining three pixels.
for (int i = 0; i < 3; i++) {
// Each of these pixels is one row below the previous one while the x
// component stays the same, so only the y component needs a bounds check.
if (++dstr >= out.imgMeta.height)
return;
// Only the y component grows by 1, so the input and output indices just
// advance by one pitch; no multiplication is needed.
dstidx += in.pitchBytes;
outidx += out.pitchBytes;
// If the input pixel is inside the threshold range, copy it to the output;
// otherwise write 0.
if(in.imgMeta.imgData[dstidx] < minpixel ||
in.imgMeta.imgData[dstidx] > maxpixel)
out.imgMeta.imgData[outidx] = 0;
else
out.imgMeta.imgData[outidx] = in.imgMeta.imgData[dstidx];
}
}
// Host member method: threshold (thresholding)
// Thresholding with no high/low output values and a non-NULL output image.
__host__ int Threshold::threshold(Image *inimg, Image *outimg)
{
// Return an error immediately if the input image is NULL.
if (inimg == NULL)
return NULL_POINTER;
// If the output image is NULL, call the in-place version instead.
if (outimg == NULL)
return threshold(inimg);
int errcode; // local variable for error codes
// Copy the input image into device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Copy the output image into device memory; if that fails, allocate an
// output image on the device with the size of the input ROI.
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
errcode = ImageBasicOp::makeAtCurrentDevice(
outimg, inimg->roiX2 - inimg->roiX1,
inimg->roiY2 - inimg->roiY1);
if (errcode != NO_ERROR)
return errcode;
}
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Unify the two sub-images by clamping width and height to the smaller one.
if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
else
outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
else
outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
// Compute the block and grid dimensions for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the corresponding kernel.
_thresholdKer<<<gridsize, blocksize>>>(
insubimgCud, outsubimgCud, minPixelVal, maxPixelVal);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
return NO_ERROR;
}
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
static __global__ void _thresholdKer(
ImageCuda inout, unsigned char minpixel, unsigned char maxpixel)
{
// Compute the output pixel position handled by this thread.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// Bounds check; if out of range, return without doing anything.
if (dstc >= inout.imgMeta.width || dstr >= inout.imgMeta.height)
return;
int dstidx = dstr * inout.pitchBytes + dstc;
// Threshold the image in place: pixels outside the range become 0,
// pixels inside the range are left unchanged.
if(inout.imgMeta.imgData[dstidx] < minpixel ||
inout.imgMeta.imgData[dstidx] > maxpixel)
inout.imgMeta.imgData[dstidx] = 0;
// Process the remaining three pixels.
for (int i = 0; i < 3; i++) {
if (++dstr >= inout.imgMeta.height)
return;
// Only the y component grows by 1, so the index just advances by one
// pitch; no multiplication is needed.
dstidx += inout.pitchBytes;
// If the pixel is inside the threshold range it stays unchanged;
// otherwise it is set to 0.
if(inout.imgMeta.imgData[dstidx] < minpixel ||
inout.imgMeta.imgData[dstidx] > maxpixel)
inout.imgMeta.imgData[dstidx] = 0;
}
}
// Host member method: threshold (thresholding)
__host__ int Threshold::threshold(Image *inoutimg)
{
int errcode; // local variable for error codes
// Copy the input image into device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inoutimg);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image.
ImageCuda inoutimgCud;
errcode = ImageBasicOp::roiSubImage(inoutimg, &inoutimgCud);
if (errcode != NO_ERROR)
return errcode;
// Compute the block and grid dimensions for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (inoutimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (inoutimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the corresponding kernel.
_thresholdKer<<<gridsize, blocksize>>>(inoutimgCud, minPixelVal,
maxPixelVal);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
return NO_ERROR;
}
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
static __global__ void _thresholdKer(
ImageCuda in, ImageCuda out, unsigned char minpixel,
unsigned char maxpixel, unsigned char low, unsigned char high)
{
// Compute the output pixel position handled by this thread.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// Bounds check; if out of range, return without doing anything.
if (dstc >= in.imgMeta.width || dstr >= in.imgMeta.height)
return;
// Compute the array indices of the first input and output pixels.
int dstidx = dstr * in.pitchBytes + dstc;
int outidx = dstr * out.pitchBytes + dstc;
// Threshold the input image: pixels outside the range become low,
// pixels inside the range become high.
if(in.imgMeta.imgData[dstidx] < minpixel || in.imgMeta.imgData[dstidx] >
maxpixel)
out.imgMeta.imgData[outidx] = low;
else
out.imgMeta.imgData[outidx] = high;
// Process the remaining three pixels.
for (int i = 0; i < 3; i++) {
if (++dstr >= out.imgMeta.height)
return;
// Only the y component grows by 1, so the input and output indices just
// advance by one pitch; no multiplication is needed.
dstidx += in.pitchBytes;
outidx += out.pitchBytes;
// If the input pixel is inside the threshold range the output becomes
// high; otherwise it becomes low.
if(in.imgMeta.imgData[dstidx] < minpixel ||
in.imgMeta.imgData[dstidx] > maxpixel)
out.imgMeta.imgData[outidx] = low;
else
out.imgMeta.imgData[outidx] = high;
}
}
// Host member method: threshold (thresholding)
__host__ int Threshold::threshold(
Image *inimg, Image *outimg, unsigned char low, unsigned char high)
{
// Return an error immediately if the input image is NULL.
if (inimg == NULL)
return NULL_POINTER;
// If the output image is NULL, call the in-place version instead.
if (outimg == NULL)
return threshold(inimg, low, high);
int errcode; // local variable for error codes
// Copy the input image into device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Copy the output image into device memory; if that fails, allocate an
// output image on the device with the size of the input ROI.
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
errcode = ImageBasicOp::makeAtCurrentDevice(
outimg, inimg->roiX2 - inimg->roiX1,
inimg->roiY2 - inimg->roiY1);
if (errcode != NO_ERROR)
return errcode;
}
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Unify the two sub-images by clamping width and height to the smaller one.
if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
else
outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
else
outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
// Compute the block and grid dimensions for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the corresponding kernel.
_thresholdKer<<<gridsize, blocksize>>>(
insubimgCud, outsubimgCud, minPixelVal, maxPixelVal, low, high);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
return NO_ERROR;
}
// Kernel function: _thresholdKer (thresholding implemented with ImageCuda)
static __global__ void _thresholdKer(
ImageCuda inout, unsigned char minpixel, unsigned char maxpixel,
unsigned char low, unsigned char high)
{
// Compute the output pixel position handled by this thread.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// Bounds check; if out of range, return without doing anything.
if (dstc >= inout.imgMeta.width || dstr >= inout.imgMeta.height)
return;
// Compute the array index of the first pixel.
int dstidx = dstr * inout.pitchBytes + dstc;
// Threshold the image in place: pixels outside the range become low,
// pixels inside the range become high.
if(inout.imgMeta.imgData[dstidx] < minpixel ||
inout.imgMeta.imgData[dstidx] > maxpixel)
inout.imgMeta.imgData[dstidx] = low;
else
inout.imgMeta.imgData[dstidx] = high;
// Process the remaining three pixels.
for (int i = 0; i < 3; i++) {
if (++dstr >= inout.imgMeta.height)
return;
// Only the y component grows by 1, so the index just advances by one
// pitch; no multiplication is needed.
dstidx += inout.pitchBytes;
// Threshold in place: pixels outside the range become low, pixels inside
// the range become high.
if(inout.imgMeta.imgData[dstidx] < minpixel ||
inout.imgMeta.imgData[dstidx] > maxpixel)
inout.imgMeta.imgData[dstidx] = low;
else
inout.imgMeta.imgData[dstidx] = high;
}
}
// Host member method: threshold (thresholding)
__host__ int Threshold::threshold(Image *inoutimg, unsigned char low,
unsigned char high)
{
int errcode; // local variable for error codes
// Copy the input image into device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inoutimg);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image.
ImageCuda inoutimgCud;
errcode = ImageBasicOp::roiSubImage(inoutimg, &inoutimgCud);
if (errcode != NO_ERROR)
return errcode;
// Compute the block and grid dimensions for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (inoutimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (inoutimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the corresponding kernel.
_thresholdKer<<<gridsize, blocksize>>>(
inoutimgCud, minPixelVal, maxPixelVal, low, high);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
return NO_ERROR;
}
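// Illustrative usage sketch (added for clarity; the min/max setters are assumed
// from Threshold.h and may be named differently there):
//
//     Threshold th;
//     th.setMinPixelVal(50);                           // assumed setter for minPixelVal
//     th.setMaxPixelVal(200);                          // assumed setter for maxPixelVal
//     int err1 = th.threshold(inimg, outimg);          // keep pixels in [50, 200], zero the rest
//     int err2 = th.threshold(inimg, outimg, 0, 255);  // binarize: out-of-range -> 0, in-range -> 255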
#define CUDA_NUM_THREADS 256
#define CUDA_MAX_THREADS 256
// #define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#define EPS 1e-8
#define SAFE_DIV(a, b) ( (b==0)? ( (a)/(EPS) ): ( (a)/(b) ) )
#define CHECK_LEGALITY(x, min, max) ((x>=min && x<=max)? (true):(false) )
template <typename scalar_t>
__global__ void kernel_block_extractor_update_output(const int n,
const scalar_t* __restrict__ source,
const long4 source_size,
const long4 source_stride,
const scalar_t* __restrict__ flow_field,
const long4 flow_field_size,
const long4 flow_field_stride,
scalar_t* __restrict__ output,
const long4 output_size,
const long4 output_stride,
int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = DIM0(output_stride);
int dim_hw = DIM1(output_stride);
int dim_hs = DIM2(source_size);
int dim_ws = DIM3(source_size);
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int yf = y/kernel_size;
int xf = x/kernel_size;
int yf_offset = y%kernel_size - kernel_size/2;
int xf_offset = x%kernel_size - kernel_size/2;
scalar_t flow_y = DIM3_INDEX(flow_field, b, 1, yf, xf) + yf_offset;
scalar_t flow_x = DIM3_INDEX(flow_field, b, 0, yf, xf) + xf_offset;
scalar_t dy = flow_y + static_cast<scalar_t>(yf);
scalar_t dx = flow_x + static_cast<scalar_t>(xf);
int xL = max(min( int(floor(dx) ), dim_ws-1), 0);
int xR = max(min( int(floor(dx) + 1), dim_ws-1), 0);
int yT = max(min( int(floor(dy) ), dim_hs-1), 0);
int yB = max(min( int(floor(dy) + 1), dim_hs-1), 0);
scalar_t xL_P = 1 - (dx - floor(dx));
scalar_t xR_P = dx - floor(dx);
scalar_t yT_P = 1 - (dy - floor(dy));
scalar_t yB_P = dy - floor(dy);
scalar_t sample = 0.0f;
sample += (xL_P*yT_P * DIM3_INDEX(source, b, c, yT, xL));
sample += (xR_P*yT_P * DIM3_INDEX(source, b, c, yT, xR));
sample += (xL_P*yB_P * DIM3_INDEX(source, b, c, yB, xL));
sample += (xR_P*yB_P * DIM3_INDEX(source, b, c, yB, xR));
output[index] = sample;
}
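// Worked example (added for illustration): for a sampling position dx = 2.3,
// dy = 5.75 the four taps and weights computed above are
//   xL = 2, xR = 3, yT = 5, yB = 6
//   xL_P = 0.7, xR_P = 0.3, yT_P = 0.25, yB_P = 0.75
// so, indexing source as (row, column),
//   sample = 0.175*src(5,2) + 0.075*src(5,3) + 0.525*src(6,2) + 0.225*src(6,3)
// and the four bilinear weights sum to 1.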
template <typename scalar_t>
__global__ void kernel_block_extractor_backward(
const int n,
const scalar_t* __restrict__ source,
const long4 source_size,
const long4 source_stride,
const scalar_t* __restrict__ flow_field,
const long4 flow_field_size,
const long4 flow_field_stride,
const scalar_t* __restrict__ grad_output,
const long4 grad_output_size,
const long4 grad_output_stride,
scalar_t* __restrict__ grad_source,
const long4 grad_source_size,
const long4 grad_source_stride,
scalar_t* __restrict__ grad_flow_field,
const long4 grad_flow_field_size,
const long4 grad_flow_field_stride,
int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(grad_output_size);
int dim_c = DIM1(grad_output_size);
int dim_h = DIM2(grad_output_size);
int dim_w = DIM3(grad_output_size);
int dim_chw = DIM0(grad_output_stride);
int dim_hw = DIM1(grad_output_stride);
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int dim_hs = DIM2(source_size);
int dim_ws = DIM3(source_size);
int yf = y/kernel_size;
int xf = x/kernel_size;
int yf_offset = y%kernel_size - kernel_size/2;
int xf_offset = x%kernel_size - kernel_size/2;
scalar_t flow_y = DIM3_INDEX(flow_field, b, 1, yf, xf) + yf_offset;
scalar_t flow_x = DIM3_INDEX(flow_field, b, 0, yf, xf) + xf_offset;
scalar_t dy = flow_y + static_cast<scalar_t>(yf);
scalar_t dx = flow_x + static_cast<scalar_t>(xf);
int xL = max(min( int(floor(dx) ), dim_ws-1), 0);
int xR = max(min( int(floor(dx) + 1), dim_ws-1), 0);
int yT = max(min( int(floor(dy) ), dim_hs-1), 0);
int yB = max(min( int(floor(dy) + 1), dim_hs-1), 0);
scalar_t xL_P = 1 - (dx - floor(dx));
scalar_t xR_P = dx - floor(dx);
scalar_t yT_P = 1 - (dy - floor(dy));
scalar_t yB_P = dy - floor(dy);
scalar_t xL_yT = DIM3_INDEX(source, b, c, yT, xL);
scalar_t xR_yT = DIM3_INDEX(source, b, c, yT, xR);
scalar_t xL_yB = DIM3_INDEX(source, b, c, yB, xL);
scalar_t xR_yB = DIM3_INDEX(source, b, c, yB, xR);
scalar_t grad = DIM3_INDEX(grad_output, b, c, y, x);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yT, xL), grad*xL_P*yT_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yT, xR), grad*xR_P*yT_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yB, xL), grad*xL_P*yB_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yB, xR), grad*xR_P*yB_P);
scalar_t grady = grad*(-xL_P*xL_yT - xR_P*xR_yT + xL_P*xL_yB + xR_P*xR_yB);
scalar_t gradx = grad*(-yT_P*xL_yT - yB_P*xL_yB + yT_P*xR_yT + yB_P*xR_yB);
atomicAdd(&DIM3_INDEX(grad_flow_field, b, 1, yf, xf), grady);
atomicAdd(&DIM3_INDEX(grad_flow_field, b, 0, yf, xf), gradx);
}
void block_extractor_kernel_forward(
at::Tensor& source,
at::Tensor& flow_field,
at::Tensor& output,
int kernel_size) {
// clock_t start, end;
// start = clock();
int n = output.numel();
const long4 source_size = make_long4(source.size(0), source.size(1), source.size(2), source.size(3));
const long4 source_stride = make_long4(source.stride(0), source.stride(1), source.stride(2), source.stride(3));
const long4 flow_field_size = make_long4(flow_field.size(0), flow_field.size(1), flow_field.size(2), flow_field.size(3));
const long4 flow_field_stride = make_long4(flow_field.stride(0), flow_field.stride(1), flow_field.stride(2), flow_field.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
int Threads = CUDA_NUM_THREADS;
const dim3 threads(Threads);
const dim3 blocks((n + Threads - 1) / Threads);
AT_DISPATCH_FLOATING_TYPES(source.type(), "block_extractor_forward_kernel", ([&] {
kernel_block_extractor_update_output<scalar_t><<< blocks, threads, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
source.data<scalar_t>(),
source_size,
source_stride,
flow_field.data<scalar_t>(),
flow_field_size,
flow_field_stride,
output.data<scalar_t>(),
output_size,
output_stride,
kernel_size);
}));
// end = clock();
// printf("%d\n", end-start);
// TODO: ATen-equivalent check
// THCudaCheck(cudaGetLastError());
}
void block_extractor_kernel_backward(
at::Tensor& source,
at::Tensor& flow_field,
at::Tensor& grad_output,
at::Tensor& grad_source,
at::Tensor& grad_flow_field,
int kernel_size) {
int n = grad_output.numel();
const long4 source_size = make_long4(source.size(0), source.size(1), source.size(2), source.size(3));
const long4 source_stride = make_long4(source.stride(0), source.stride(1), source.stride(2), source.stride(3));
const long4 flow_field_size = make_long4(flow_field.size(0), flow_field.size(1), flow_field.size(2), flow_field.size(3));
const long4 flow_field_stride = make_long4(flow_field.stride(0), flow_field.stride(1), flow_field.stride(2), flow_field.stride(3));
const long4 grad_output_size = make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3));
const long4 grad_output_stride = make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3));
const long4 grad_source_size = make_long4(grad_source.size(0), grad_source.size(1), grad_source.size(2), grad_source.size(3));
const long4 grad_source_stride = make_long4(grad_source.stride(0), grad_source.stride(1), grad_source.stride(2), grad_source.stride(3));
const long4 grad_flow_field_size = make_long4(grad_flow_field.size(0), grad_flow_field.size(1), grad_flow_field.size(2), grad_flow_field.size(3));
const long4 grad_flow_field_stride = make_long4(grad_flow_field.stride(0), grad_flow_field.stride(1), grad_flow_field.stride(2), grad_flow_field.stride(3));
int Threads = CUDA_NUM_THREADS;
const dim3 threads(Threads);
const dim3 blocks((n + Threads - 1) / Threads);
AT_DISPATCH_FLOATING_TYPES(source.type(), "block_extractor_backward", ([&] {
kernel_block_extractor_backward<scalar_t><<< blocks, threads, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
source.data<scalar_t>(),
source_size,
source_stride,
flow_field.data<scalar_t>(),
flow_field_size,
flow_field_stride,
grad_output.data<scalar_t>(),
grad_output_size,
grad_output_stride,
grad_source.data<scalar_t>(),
grad_source_size,
grad_source_stride,
grad_flow_field.data<scalar_t>(),
grad_flow_field_size,
grad_flow_field_stride,
kernel_size);
}));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(cudaGetLastError());
}
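// Illustrative host-side sketch (added for clarity, not part of the original
// extension): how the forward launcher above is typically driven from C++/ATen.
// The shapes are an assumption: with kernel_size = k, the output is expected to
// have spatial size (H*k, W*k) for a flow field of spatial size (H, W).
//
//     at::Tensor source = at::randn({1, 3, 64, 64}, at::kCUDA);
//     at::Tensor flow   = at::zeros({1, 2, 16, 16}, at::kCUDA);
//     at::Tensor output = at::zeros({1, 3, 16 * 5, 16 * 5}, at::kCUDA);
//     block_extractor_kernel_forward(source, flow, output, /*kernel_size=*/5);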
// Self
#include "GDelData.h"
// Project
#include "Config.h"
#include "GDelKernels.h"
#include "thrust/extrema.h"
// Externs
extern int ThreadsPerBlock;
extern int BlocksPerGrid;
/////////////////////////////////////////////////////////////////// PointData //
struct GetMortonNumber
{
// Note: No performance benefit by changing by-reference to by-value here
// Note: No benefit by making this __forceinline__
__device__ int operator () ( const Point3& point ) const
{
const int Guard = 0xFFFFFC00; // 22 1-bits, 10 0-bits
const int Gap16 = 0x030000FF; // Creates 16-bit gap between value bits
const int Gap08 = 0x0300F00F; // ... and so on ...
const int Gap04 = 0x030C30C3; // ...
const int Gap02 = 0x09249249; // ...
int coord[3] = { ( int ) point._p[0], ( int ) point._p[1], ( int ) point._p[2] };
// Iterate coordinates of point
for ( int vi = 0; vi < 3; ++vi )
{
// Read
int v = coord[ vi ];
CudaAssert( ( 0 == ( v & Guard ) ) && "Coordinate value is negative OR occupies more than 10 bits!" );
// Create 2-bit gaps between the 10 value bits
// Ex: 1001001001001001001001001001
v = ( v | ( v << 16 ) ) & Gap16;
v = ( v | ( v << 8 ) ) & Gap08;
v = ( v | ( v << 4 ) ) & Gap04;
v = ( v | ( v << 2 ) ) & Gap02;
// Write back
coord[ vi ] = v;
}
// Interleave bits of x-y-z coordinates
const int mortonNum = ( coord[ 0 ] | ( coord[ 1 ] << 1 ) | ( coord[ 2 ] << 2 ) );
return mortonNum;
}
};
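// Host-side reference of the same bit interleaving (illustrative sketch, added
// for clarity; it is not used by the code below). Each 10-bit coordinate is
// spread so consecutive value bits land 3 positions apart, then the three
// spread coordinates are OR-ed together. For example it maps (1,0,0) -> 1,
// (0,1,0) -> 2, (0,0,1) -> 4 and (3,3,3) -> 63.
inline int hostMortonNumber( int x, int y, int z )
{
    int coord[3] = { x, y, z };
    for ( int vi = 0; vi < 3; ++vi )
    {
        int v = coord[ vi ];                    // must be non-negative and fit in 10 bits
        v = ( v | ( v << 16 ) ) & 0x030000FF;   // same masks as GetMortonNumber above
        v = ( v | ( v << 8 ) )  & 0x0300F00F;
        v = ( v | ( v << 4 ) )  & 0x030C30C3;
        v = ( v | ( v << 2 ) )  & 0x09249249;
        coord[ vi ] = v;
    }
    return coord[ 0 ] | ( coord[ 1 ] << 1 ) | ( coord[ 2 ] << 2 );
}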
void PointData::init( Point3HVec& pointHVec, Point3HVec& scaledHVec )
{
_pointVec = new Point3DVec( pointHVec );
_scaledVec = scaledHVec.empty() ? NULL : ( new Point3DVec( scaledHVec ) );
_bitsPerIndex = ( int ) ceil( log( ( double ) _pointVec->size() ) / log( 2.0 ) ); // Number of bits to store point index
if ( getConfig()._doSorting )
{
////
// Sort points by Morton order
////
// Space for Morton number of each point
IntDVec orderVec( _pointVec->size() );
// Pick vector with points inside scaled cube
const Point3DVec* morPointVec = ( NULL == _scaledVec ) ? _pointVec : _scaledVec;
// Generate Morton number of each point
thrust::transform( morPointVec->begin(), morPointVec->end(), orderVec.begin(), GetMortonNumber() );
// Use Morton number to sort points on device
if ( NULL == _scaledVec )
{
thrust::sort_by_key( orderVec.begin(), orderVec.end(), _pointVec->begin() );
}
else
{
thrust::sort_by_key( orderVec.begin(), orderVec.end(),
thrust::make_zip_iterator( thrust::make_tuple( _pointVec->begin(), _scaledVec->begin() ) ) );
}
// Copy back sorted points to host
pointHVec = *_pointVec;
if ( _scaledVec )
{
scaledHVec = *_scaledVec;
}
}
return;
}
void PointData::deinit()
{
safeDeleteDevConPtr( &_pointVec );
safeDeleteDevConPtr( &_scaledVec );
_bitsPerIndex = -1;
return;
}
KerPointData PointData::toKernel()
{
KerPointData pData;
pData._pointArr = toKernelPtr( _pointVec );
pData._num = ( int ) _pointVec->size();
return pData;
}
//////////////////////////////////////////////////////////////// TriangleData //
void TriangleData::init()
{
for ( int i = 0; i < 2; ++i )
{
_triVec[i] = new TriDVec();
_triOppVec[i] = new TriOppDVec();
_triStarVec[i] = new IntDVec();
_triStatusVec[i] = new TriStatusDVec();
}
return;
}
void TriangleData::deinit()
{
for ( int i = 0; i < 2; ++i )
{
safeDeleteDevConPtr( &_triVec[i] );
safeDeleteDevConPtr( &_triOppVec[i] );
safeDeleteDevConPtr( &_triStarVec[i] );
safeDeleteDevConPtr( &_triStatusVec[i] );
}
return;
}
void TriangleData::resize( int newSize, int arrId, const TriangleStatus& triStatus )
{
_triVec[ arrId ]->resize( newSize );
_triOppVec[ arrId ]->resize( newSize );
_triStarVec[ arrId ]->resize( newSize );
_triStatusVec[ arrId ]->resize( newSize, triStatus );
return;
}
int TriangleData::size( int vecId ) const
{
return _triVec[ vecId ]->size();
}
int TriangleData::totalSize() const
{
return size( 0 ) + size( 1 );
}
//////////////////////////////////////////////////////////////////// StarData //
void StarData::init( int pointNum )
{
// Preallocate these per-star vectors
_starTriMap[0] = new IntDVec( pointNum );
_starTriMap[1] = new IntDVec( pointNum );
_pointNumVec = new IntDVec( pointNum );
_maxSizeVec = new IntDVec( pointNum );
_insCountVec = new IntDVec( pointNum );
_triData.init();
return;
}
void StarData::deInit()
{
_triData.deinit();
safeDeleteDevConPtr( &_starTriMap[0] );
safeDeleteDevConPtr( &_starTriMap[1] );
safeDeleteDevConPtr( &_pointNumVec );
safeDeleteDevConPtr( &_maxSizeVec );
safeDeleteDevConPtr( &_insCountVec );
return;
}
KerStarData StarData::toKernel()
{
KerStarData sData;
for ( int i = 0; i < 2; ++i )
{
sData._triNum[i] = ( int ) _triData.size( i );
sData._triArr[i] = toKernelPtr( _triData._triVec[i] );
sData._triOppArr[i] = toKernelPtr( _triData._triOppVec[i] );
sData._triStarArr[i] = toKernelPtr( _triData._triStarVec[i] );
sData._triStatusArr[i] = toKernelPtr( _triData._triStatusVec[i] );
sData._starTriMap[i] = toKernelPtr( _starTriMap[i] );
}
sData._starNum = _starNum;
sData._totalTriNum = sData._triNum[0] + sData._triNum[1];
sData._pointNumArr = toKernelPtr( _pointNumVec );
sData._maxSizeArr = toKernelPtr( _maxSizeVec );
sData._insCountArr = toKernelPtr( _insCountVec );
return sData;
}
// We move all triangle arrays EXCEPT triStar, which should already be updated!
template< typename T >
__global__ void kerMoveTriangleArray
(
int oldTriNum,
KerIntArray oldNewMap,
KerArray< T > oldArr,
KerArray< T > newArr
)
{
// Iterate through triangles
for ( int oldTriIdx = getCurThreadIdx(); oldTriIdx < oldTriNum; oldTriIdx += getThreadNum() )
{
// Skip free triangles
const int newTriIdx = oldNewMap._arr[ oldTriIdx ];
if ( -1 == newTriIdx )
{
continue;
}
// Copy old to new
newArr._arr[ newTriIdx ] = oldArr._arr[ oldTriIdx ];
}
return;
}
// Expands input vector to hold old data
template< typename T >
void StarData::expandData( int oldSize, int newSize, IntDVec& oldNewMap, DeviceContainer< T >& inVec )
{
DeviceContainer< T > tmpVec( newSize );
if ( oldSize > 0 )
{
kerMoveTriangleArray<<< BlocksPerGrid, ThreadsPerBlock >>>(
oldSize,
toKernelArray( oldNewMap ),
toKernelArray( inVec ),
toKernelArray( tmpVec ) );
CudaCheckError();
}
inVec.swap( tmpVec );
return;
}
void StarData::expandTriangles( int newSize, IntDVec& newTriMap )
{
const int oldSize = ( int ) _triData.size( 1 ); // Grab old size before it is replaced
////
// Create old-to-new triangle index map
// *and* also update triStar and triStatus
////
IntDVec newStarVec( newSize );
TriStatusDVec newStatusVec( newSize, Free );
IntDVec oldNewMap( oldSize, -1 );
if ( oldSize > 0 )
{
kerMakeOldToNewTriMap<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernel(),
oldSize,
toKernelArray( newTriMap ),
toKernelArray( oldNewMap ),
toKernelArray( newStarVec ),
toKernelArray( newStatusVec ) );
CudaCheckError();
}
_starTriMap[1]->swap( newTriMap );
_triData._triStarVec[1]->swapAndFree( newStarVec );
_triData._triStatusVec[1]->swapAndFree( newStatusVec );
// Move rest of triangle arrays
expandData( oldSize, newSize, oldNewMap, *_triData._triVec[1] );
expandData( oldSize, newSize, oldNewMap, *_triData._triOppVec[1] );
return;
}
///////////////////////////////////////////////////////////////// MissingData //
void MissingData::init()
{
_memberVec = new IntDVec();
_leaderVec = new IntDVec();
return;
}
void MissingData::deInit()
{
safeDeleteDevConPtr( &_memberVec );
safeDeleteDevConPtr( &_leaderVec );
return;
}
KerMissingData MissingData::toKernel()
{
KerMissingData mData;
mData._memberArr = toKernelPtr( _memberVec );
mData._leaderArr = toKernelPtr( _leaderVec );
mData._num = _memberVec->size();
return mData;
}
/////////////////////////////////////////////////////////////// InsertionData //
void InsertionData::init()
{
_vertVec = new IntDVec();
_vertStarVec = new IntDVec();
_starVertMap = new IntDVec();
_shelveStarVec = new IntDVec();
_shelveVertVec = new IntDVec();
return;
}
void InsertionData::deInit()
{
safeDeleteDevConPtr( &_vertVec );
safeDeleteDevConPtr( &_vertStarVec );
safeDeleteDevConPtr( &_starVertMap );
safeDeleteDevConPtr( &_shelveStarVec );
safeDeleteDevConPtr( &_shelveVertVec );
return;
}
KerInsertData InsertionData::toKernel()
{
KerInsertData iData;
iData._vertArr = toKernelPtr( _vertVec );
iData._vertStarArr = toKernelPtr( _vertStarVec );
iData._starVertMap = toKernelPtr( _starVertMap );
iData._vertNum = _vertVec->size();
iData._starNum = _starVertMap->size();
return iData;
}
///////////////////////////////////////////////////////////////// HistoryData //
void HistoryData::init()
{
for ( int i = 0; i < 2; ++i )
{
_vertVec[ i ] = new IntDVec();
_vertStarVec[ i ] = new IntDVec();
_starVertMap[ i ] = new IntDVec();
}
return;
}
void HistoryData::deInit()
{
for ( int i = 0; i < 2; ++i )
{
safeDeleteDevConPtr( &_vertVec[ i ] );
safeDeleteDevConPtr( &_starVertMap[ i ] );
safeDeleteDevConPtr( &_vertStarVec[ i ] );
}
return;
}
KerHistoryData HistoryData::toKernel()
{
KerHistoryData hData;
for ( int i = 0; i < 2; ++i )
{
hData._vertArr[i] = toKernelPtr( _vertVec[i] );
hData._vertStarArr[i] = toKernelPtr( _vertStarVec[i] );
hData._starVertMap[i] = toKernelPtr( _starVertMap[i] );
hData._vertNum[i] = _vertVec[i]->size();
}
return hData;
}
///////////////////////////////////////////////////////////////// BeneathData //
void BeneathData::init( int pointNum )
{
_beneathTriPosVec = new TriPositionDVec( pointNum );
_exactTriPosVec = new TriPositionDVec( ExactTriangleMax );
_flagVec = new IntDVec( FlagNum, 0 );
return;
}
void BeneathData::deInit()
{
safeDeleteDevConPtr( &_beneathTriPosVec );
safeDeleteDevConPtr( &_exactTriPosVec );
safeDeleteDevConPtr( &_flagVec );
return;
}
KerBeneathData BeneathData::toKernel()
{
KerBeneathData bData;
bData._beneathTriPosArr = toKernelPtr( _beneathTriPosVec );
bData._exactTriPosArr = toKernelPtr( _exactTriPosVec );
bData._flagArr = toKernelPtr( _flagVec );
return bData;
}
////////////////////////////////////////////////////////////////// ActiveData //
void ActiveData::init()
{
_starVec = new IntDVec();
_starTriMap = new IntDVec();
return;
}
void ActiveData::deInit()
{
safeDeleteDevConPtr( &_starVec );
safeDeleteDevConPtr( &_starTriMap );
return;
}
KerActiveData ActiveData::toKernel()
{
KerActiveData aData;
aData._starArr = toKernelPtr( _starVec );
aData._starTriMap = toKernelPtr( _starTriMap );
return aData;
}
/////////////////////////////////////////////////////////////////// TetraData //
void TetraData::init()
{
_vec = new TetraDVec();
return;
}
void TetraData::deInit()
{
safeDeleteDevConPtr( &_vec );
return;
}
KerTetraData TetraData::toKernel()
{
KerTetraData tData;
tData._arr = toKernelPtr( _vec );
tData._num = _vec->size();
return tData;
}
////////////////////////////////////////////////////////////////////////////////
#include <helpers/DebugHelper.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/types.h>
#include "../indexreduce.h"
#include "../legacy_ops.h"
using namespace simdOps;
template <typename X, typename Z>
static SD_KERNEL void simpleIndexReduceGeneric(const int op, void const *dx, sd::LongType const *xShapeInfo, int xRank,
void *extraParams, void *result, sd::LongType const *zShapeInfo,
int zRank, int *dimension, int dimensionLength, int postProcessOrNot,
int *allocationBuffer, void *reductionBuffer,
sd::LongType const *tadOnlyShapeInfo, sd::LongType const *tadOffsets) {
functions::indexreduce::IndexReduce<X, Z>::transform(op, dx, xShapeInfo, extraParams, result, zShapeInfo, dimension,
dimensionLength, postProcessOrNot, allocationBuffer,
reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
namespace functions {
namespace indexreduce {
template <typename X, typename Z>
SD_HOST void IndexReduce<X, Z>::executeIndexReduceScalar(
dim3 launchDims, cudaStream_t *stream, const int opNum, void const *dx, sd::LongType const *xShapeInfo, int xRank,
void *extraParams, void *result, sd::LongType const *zShapeInfo, int zRank, int *dimension, int dimensionLength,
int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, sd::LongType const *tadOnlyShapeInfo,
sd::LongType const *tadOffsets) {
simpleIndexReduceGeneric<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum, dx, xShapeInfo, xRank, extraParams, result, zShapeInfo, 0, nullptr, 0, 1, allocationBuffer,
reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
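// Note (descriptive, not original): the scalar variant above reuses the same
// generic kernel but passes zRank = 0, a null dimension array and
// dimensionLength = 0; the expectation, per the device code below, is that
// zShapeInfo describes a single-element output, so transform() takes its
// result-scalar branch and reduces the whole input to one index.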
template <typename X, typename Z>
SD_HOST void IndexReduce<X, Z>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum,
void const *dx, sd::LongType const *xShapeInfo, int xRank,
void *extraParams, void *result, sd::LongType const *zShapeInfo,
int zRank, int *dimension, int dimensionLength, int postProcessOrNot,
int *allocationBuffer, void *reductionBuffer,
sd::LongType const *tadOnlyShapeInfo,
sd::LongType const *tadOffsets) {
simpleIndexReduceGeneric<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum, dx, xShapeInfo, xRank, extraParams, result, zShapeInfo, zRank, dimension, dimensionLength, 1,
allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template <typename T>
struct SharedIndexValue {
// Ensure that we won't compile any un-specialized types
SD_DEVICE T *getPointer() {
extern SD_DEVICE void error(void);
error();
return 0;
}
};
// Following are the specializations for the following types.
// int, sd::Unsigned, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template <>
struct SharedIndexValue<float> {
SD_DEVICE IndexValue<float> *getPointer() {
extern __shared__ IndexValue<float> s_int2[];
return s_int2;
}
};
// Following are the specializations for the following types.
// int, sd::Unsigned, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template <>
struct SharedIndexValue<double> {
SD_DEVICE IndexValue<double> *getPointer() {
extern __shared__ IndexValue<double> s_int6[];
return s_int6;
}
};
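// A minimal sketch of such a user-defined specialization (the type
// "ExampleUserType" is hypothetical, used for illustration only):
//
//   struct ExampleUserType { float value; };
//
//   template <>
//   struct SharedIndexValue<ExampleUserType> {
//     SD_DEVICE IndexValue<ExampleUserType> *getPointer() {
//       extern __shared__ IndexValue<ExampleUserType> s_example[];
//       return s_example;
//     }
//   };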
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void IndexReduce<X, Z>::aggregatePartials(IndexValue<X> *sPartials, sd::LongType tid,
sd::LongType numElements, void *vextraParams) {
// start the shared-memory reduction at the largest power of 2 that is <= the
// block size; if the block size is not a power of 2, first fold the partials
// in the remainder range down into the lower slots.
auto extraParams = static_cast<X *>(vextraParams);
sd::LongType floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
IndexValue<X> prev = sPartials[tid - floorPow2];
IndexValue<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = OpType::update(prev, curr, extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
IndexValue<X> curr = sPartials[tid];
IndexValue<X> next = sPartials[tid + activeThreads];
sPartials[tid] = OpType::update(curr, next, extraParams);
}
__syncthreads();
}
}
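// Worked example of the folding above (a sketch, not part of the library): for
// blockDim.x == 96, clearing the lowest set bit (n &= n - 1) reduces 96 -> 64,
// the largest power of two <= 96; threads 64..95 first fold their partials into
// slots 0..31, and the loop then halves the active range 32 -> 16 -> ... -> 1.
//
//   static inline int floorPow2Of(int n) {  // host-side illustration only
//     while (n & (n - 1)) n &= n - 1;       // clear the lowest set bit
//     return n;                             // floorPow2Of(96) == 64
//   }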
template <typename X, typename Y>
SD_DEVICE void IndexReduce<X, Y>::transform(const int opNum, void const *x, sd::LongType const *xShapeInfo,
void *extraParams, void *result, sd::LongType const *zShapeInfo,
int *dimension, int dimensionLength, int postProcessOrNot,
int *allocationBuffer, void *reductionBuffer,
sd::LongType const *tadShapeInfo, sd::LongType const *tadOffset) {
DISPATCH_BY_OPNUM_TT(transform,
PARAMS(x, xShapeInfo, extraParams, result, zShapeInfo, dimension, dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadShapeInfo, tadOffset),
INDEX_REDUCE_OPS);
}
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void IndexReduce<X, Z>::transform(void const *vdx, sd::LongType const *xShapeInfo, void *vextraParams,
void *vz, sd::LongType const *zShapeInfo, int *dimension,
int dimensionLength, int postProcessOrNot, int *allocationBuffer,
void *vreductionBuffer, sd::LongType const *tadOnlyShapeInfo,
sd::LongType const *tadOffsets) {
/**
* GPU information for the problem
*/
auto dx = reinterpret_cast<X const *>(vdx);
auto z = reinterpret_cast<Z *>(vz);
auto extraParams = static_cast<X *>(vextraParams);
auto reductionBuffer = static_cast<X *>(vreductionBuffer);
auto order = shape::order(xShapeInfo);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile bool resultScalar;
// shared memory space for storing intermediate results
__shared__ IndexValue<X> sPartials[SD_CUDA_BLOCK_SIZE];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
// length for the tad
__shared__ volatile sd::LongType xLength;
__shared__ volatile sd::LongType zLen;
// only compute the tad indexes once
IndexValue<X> reduction = OpType::startingIndexValue(dx);
if (threadIdx.x == 0) {
if (zShapeInfo != nullptr)
zLen = shape::length(zShapeInfo);
else
zLen = 1;
if (zLen == 1)
resultScalar = true;
else
resultScalar = false;
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (sd::ArrayOptions::arrayType(xShapeInfo) == sd::ArrayType::EMPTY) {
if (sd::ArrayOptions::arrayType(zShapeInfo) == sd::ArrayType::EMPTY) return;
for (sd::Unsigned i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x)
z[i] = (Z)reduction.index;
return;
}
if (!resultScalar) {
__shared__ sd::LongType tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
if (dimensionLength > 1 || tadEWS < 1) {
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
IndexValue<X> comp{dx[xOffset], i};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[r] = (Z)sPartials[threadIdx.x].index;
}
__syncthreads();
}
} else {
for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
sd::LongType tadOffsetForBlock = tadOffsets[i];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
IndexValue<X> comp{dx[tadOffsetForBlock + x * tadEWS], x};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[i] = (Z)sPartials[threadIdx.x].index; // postProcess(sPartials[0],tadLength ,extraParams);
}
__syncthreads();
}
}
} else {
auto n = shape::length(xShapeInfo);
auto xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if (xElementWiseStride >= 1 && order == 'c') {
for (sd::LongType i = tid; i < n; i += (blockDim.x * gridDim.x)) {
IndexValue<X> indexVal = {dx[i * xElementWiseStride], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
} else {
for (sd::LongType i = tid; i < n; i += blockDim.x * gridDim.x) {
auto offset = shape::getIndexOffset(i, xShapeInfo);
IndexValue<X> indexVal = {dx[offset], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, (int)n), extraParams);
__syncthreads();
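// Cross-block finalization (descriptive comment, not original): when more than
// one block ran, each block publishes its partial IndexValue to reductionBuffer,
// a ticket counter (atomicInc on tc[16384]) identifies the last block to arrive,
// and that block re-reduces all per-block partials and writes the final index
// to z[0].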
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *)reductionBuffer;
tid = threadIdx.x;
if (threadIdx.x == 0) {
auto pBuffer = reinterpret_cast<IndexValue<X> *>(reductionBuffer);
pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index};
}
__threadfence();
__syncthreads();
if (tid == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
IndexValue<X> *pBuffer = (IndexValue<X> *)reductionBuffer;
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (sd::LongType i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (tid == 0) {
z[0] = (Z)sPartials[0].index;
}
}
} else {
if (tid == 0) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
tc[16384] = 0;
z[0] = (Z)sPartials[0].index;
}
}
}
}
BUILD_DOUBLE_TEMPLATE(template class IndexReduce, , SD_COMMON_TYPES, SD_INDEXING_TYPES);
} // namespace indexreduce
} // namespace functions
|
the_stack
|
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/console.h>
#include <nvbio/basic/vector.h>
#include <nvbio/basic/packedstream.h>
#include <nvbio/strings/string_set.h>
#include <nvbio/strings/seeds.h>
#include <nvbio/basic/shared_pointer.h>
#include <nvbio/io/sequence/sequence.h>
#include <nvbio/qgram/qgram.h>
#include <nvbio/qgram/qgroup.h>
#include <nvbio/qgram/filter.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
namespace nvbio {
// return the size of a given range
struct range_size
{
typedef uint2 argument_type;
typedef uint32 result_type;
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const uint2 range) const { return range.y - range.x; }
};
// return 1 for non-empty ranges, 0 otherwise
struct valid_range
{
typedef uint2 argument_type;
typedef uint32 result_type;
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const uint2 range) const { return range.y - range.x > 0 ? 1u : 0u; }
};
// query stats
//
struct Stats
{
Stats() :
build_time(0),
unsorted_time(0),
sorted_time(0),
filter_time(0),
merge_time(0),
queries(0),
matches(0),
occurrences(0),
merged(0) {}
float build_time;
float unsorted_time;
float sorted_time;
float filter_time;
float merge_time;
uint64 queries;
uint64 matches;
uint64 occurrences;
uint64 merged;
};
// build a set of q-grams from a given string, together with their sorted counterpart
//
template <typename genome_string, typename qgram_vector_type, typename index_vector_type>
void build_qgrams(
const uint32 Q,
const uint32 genome_len,
const uint32 genome_offset,
const genome_string genome,
const uint32 n_queries,
qgram_vector_type& qgrams,
qgram_vector_type& sorted_qgrams,
index_vector_type& sorted_indices)
{
// build the q-grams
qgrams.resize( n_queries );
generate_qgrams( Q, 2u, genome_len, genome, n_queries, thrust::make_counting_iterator<uint32>(genome_offset), qgrams.begin() );
// sort the q-grams
sorted_qgrams = qgrams;
sorted_indices.resize( n_queries );
thrust::copy(
thrust::make_counting_iterator<uint32>(genome_offset),
thrust::make_counting_iterator<uint32>(genome_offset) + n_queries,
sorted_indices.begin() );
thrust::sort_by_key( sorted_qgrams.begin(), sorted_qgrams.end(), sorted_indices.begin() );
}
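// Usage note (descriptive, not original): for a window of n_queries genome
// positions starting at genome_offset, the function above yields one q-gram per
// position; sorted_qgrams/sorted_indices hold the same q-grams ordered by
// value, with sorted_indices carrying each q-gram's original genome coordinate,
// which is what the q-gram filter below consumes.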
// build a q-gram index from a string
//
template <typename string_type>
void test_qgram_index_build(
const uint32 Q,
const uint32 string_len,
const string_type string,
QGramIndexDevice& qgram_index)
{
log_verbose(stderr, " building q-gram index... started\n");
Timer timer;
timer.start();
// build the q-gram index
qgram_index.build(
Q, // q-gram size
2u, // implicitly convert N to A
string_len,
string,
12u );
cudaDeviceSynchronize();
timer.stop();
const float time = timer.seconds();
log_verbose(stderr, " building q-gram index... done\n");
log_verbose(stderr, " indexed q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_qgrams ));
log_verbose(stderr, " unique q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_unique_qgrams ));
log_verbose(stderr, " throughput : %5.1f M q-grams/s\n", 1.0e-6f * float( string_len ) / time);
log_verbose(stderr, " memory usage : %5.1f MB\n", float( qgram_index.used_device_memory() ) / float(1024*1024) );
log_verbose(stderr, " querying q-gram index... started\n");
}
// build a q-gram set-index from a string-set
//
template <typename string_set_type>
void test_qgram_set_index_build(
const uint32 Q,
const string_set_type string_set,
QGramSetIndexDevice& qgram_index)
{
log_verbose(stderr, " building q-gram set-index... started\n");
Timer timer;
timer.start();
// build the q-gram set index
qgram_index.build(
Q, // q-gram size
2u, // implicitly convert N to A
string_set,
uniform_seeds_functor<>( Q, 10u ),
12u );
cudaDeviceSynchronize();
timer.stop();
const float time = timer.seconds();
log_verbose(stderr, " building q-gram set-index... done\n");
log_verbose(stderr, " indexed q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_qgrams ));
log_verbose(stderr, " unique q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_unique_qgrams ));
log_verbose(stderr, " throughput : %5.1f M q-grams/s\n", 1.0e-6f * float( qgram_index.n_qgrams ) / time);
log_verbose(stderr, " memory usage : %5.1f MB\n", float( qgram_index.used_device_memory() ) / float(1024*1024) );
}
// build a q-group index from a string
//
template <typename string_type>
void test_qgroup_index_build(
const uint32 Q,
const uint32 string_len,
const string_type string,
QGroupIndexDevice& qgram_index)
{
log_verbose(stderr, " building q-group index... started\n");
Timer timer;
timer.start();
// build the q-group index
qgram_index.build(
Q, // q-group size
2u, // implicitly convert N to A
string_len,
string );
cudaDeviceSynchronize();
timer.stop();
const float time = timer.seconds();
log_verbose(stderr, " building q-group index... done\n");
log_verbose(stderr, " indexed q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_qgrams ));
log_verbose(stderr, " unique q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_unique_qgrams ));
log_verbose(stderr, " throughput : %5.1f M q-grams/s\n", 1.0e-6f * float( string_len ) / time);
log_verbose(stderr, " memory usage : %5.1f MB\n", float( qgram_index.used_device_memory() ) / float(1024*1024) );
log_verbose(stderr, " querying q-group index... started\n");
}
// test a generic q-gram index query, both using plain queries and with a q-gram filter
//
template <typename qgram_index_type, typename genome_string>
void test_qgram_index_query(
qgram_index_type& qgram_index,
const uint32 n_queries,
const uint32 genome_len,
const uint32 genome_offset,
const genome_string genome,
Stats& stats)
{
const uint32 Q = qgram_index.Q;
typedef typename qgram_index_type::system_tag system_tag;
// prepare some vectors to store the query qgrams
nvbio::vector<system_tag,uint64> qgrams( n_queries );
nvbio::vector<system_tag,uint64> sorted_qgrams( n_queries );
nvbio::vector<system_tag,uint32> sorted_indices( n_queries );
build_qgrams(
Q,
genome_len,
genome_offset,
genome,
n_queries,
qgrams,
sorted_qgrams,
sorted_indices );
// prepare a vector to store the query results
nvbio::vector<system_tag,uint2> ranges( n_queries );
log_verbose(stderr, " querying q-gram index... started\n");
Timer timer;
timer.start();
// search the query q-grams in the index
thrust::transform(
qgrams.begin(),
qgrams.begin() + n_queries,
ranges.begin(),
nvbio::plain_view( qgram_index ) );
cudaDeviceSynchronize();
timer.stop();
const float unsorted_time = timer.seconds();
timer.start();
// and now repeat the same operation with the sorted q-grams
thrust::transform(
sorted_qgrams.begin(),
sorted_qgrams.begin() + n_queries,
ranges.begin(),
nvbio::plain_view( qgram_index ) );
cudaDeviceSynchronize();
timer.stop();
const float sorted_time = timer.seconds();
const uint32 n_occurrences = thrust::reduce(
thrust::make_transform_iterator( ranges.begin(), range_size() ),
thrust::make_transform_iterator( ranges.begin(), range_size() ) + n_queries );
const uint32 n_matches = thrust::reduce(
thrust::make_transform_iterator( ranges.begin(), valid_range() ),
thrust::make_transform_iterator( ranges.begin(), valid_range() ) + n_queries );
stats.queries += n_queries;
stats.unsorted_time += unsorted_time;
stats.sorted_time += sorted_time;
stats.matches += n_matches;
stats.occurrences += n_occurrences;
log_verbose(stderr, " querying q-gram index... done\n");
log_verbose(stderr, " unsorted throughput : %.2f B q-grams/s\n", (1.0e-9f * float( stats.queries )) / stats.unsorted_time);
log_verbose(stderr, " sorted throughput : %.2f B q-grams/s\n", (1.0e-9f * float( stats.queries )) / stats.sorted_time);
log_verbose(stderr, " matches : %.2f M\n", 1.0e-6f * float( stats.matches ) );
log_verbose(stderr, " occurrences : %.3f B\n", 1.0e-9f * float( stats.occurrences ) );
log_verbose(stderr, " q-gram filter... started\n");
//
// search the sorted query q-grams with a q-gram filter
//
const uint32 batch_size = 16*1024*1024;
typedef QGramFilter<system_tag,qgram_index_type,const uint64*,const uint32*> qgram_filter_type;
typedef typename qgram_filter_type::hit_type hit_type;
typedef typename qgram_filter_type::diagonal_type diagonal_type;
// prepare storage for the output hits
nvbio::vector<system_tag,hit_type> hits( batch_size );
nvbio::vector<system_tag,diagonal_type> merged_hits( batch_size );
nvbio::vector<system_tag,uint16> merged_counts( batch_size );
qgram_filter_type qgram_filter;
timer.start();
// first step: rank the query q-grams
const uint32 n_hits = qgram_filter.rank(
qgram_index,
n_queries,
nvbio::raw_pointer( sorted_qgrams ),
nvbio::raw_pointer( sorted_indices ) );
if (n_hits != n_occurrences)
{
log_error(stderr, " mismatching number of hits: expected %u, got %u\n", n_occurrences, n_hits);
exit(1);
}
// loop through large batches of hits and locate them
for (uint32 hits_begin = 0; hits_begin < n_hits; hits_begin += batch_size)
{
const uint32 hits_end = nvbio::min( hits_begin + batch_size, n_hits );
qgram_filter.locate(
hits_begin,
hits_end,
hits.begin() );
}
cudaDeviceSynchronize();
timer.stop();
const float filter_time = timer.seconds();
stats.filter_time += filter_time;
timer.start();
// loop through large batches of hits and locate & merge them
for (uint32 hits_begin = 0; hits_begin < n_hits; hits_begin += batch_size)
{
const uint32 hits_end = nvbio::min( hits_begin + batch_size, n_hits );
qgram_filter.locate(
hits_begin,
hits_end,
hits.begin() );
const uint32 n_merged = qgram_filter.merge(
16u,
hits_end - hits_begin,
hits.begin(),
merged_hits.begin(),
merged_counts.begin() );
stats.merged += n_merged;
}
cudaDeviceSynchronize();
timer.stop();
const float merge_time = timer.seconds();
stats.merge_time += merge_time;
log_verbose(stderr, " q-gram filter... done\n");
log_verbose(stderr, " filter throughput : %.2f M q-grams/s\n", (1.0e-6f * float( stats.queries )) / stats.filter_time);
log_verbose(stderr, " merge throughput : %.2f M q-grams/s\n", (1.0e-6f * float( stats.queries )) / stats.merge_time);
log_verbose(stderr, " merged occurrences : %.3f B (%.1f %%)\n", 1.0e-9f * float( stats.merged ), 100.0f * float(stats.merged)/float(stats.occurrences));
}
enum QGramTest
{
ALL = 0xFFFFFFFFu,
QGRAM_INDEX = 1u,
QGRAM_SET_INDEX = 2u,
QGROUP_INDEX = 4u,
};
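// Worked example of the "-tests" option parsed below (illustration only):
// passing "-tests qgram:qgroup" sets TEST_MASK = QGRAM_INDEX | QGROUP_INDEX
// = 0x5, so only the q-gram index and q-group index tests are run.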
// main test entry point
//
int qgram_test(int argc, char* argv[])
{
uint32 TEST_MASK = 0xFFFFFFFFu;
uint32 n_qgrams = 10000000;
uint32 n_queries = 10000000;
uint32 queries_batch = 10000000;
bool device_test = true;
bool host_test = true;
const char* reads = "./data/SRR493095_1.fastq.gz";
const char* index = "./data/human.NCBI36/Homo_sapiens.NCBI36.53.dna.toplevel.fa";
for (int i = 0; i < argc; ++i)
{
if (strcmp( argv[i], "-qgrams" ) == 0)
n_qgrams = uint32( atoi( argv[++i] ) )*1000u;
else if (strcmp( argv[i], "-queries" ) == 0)
n_queries = uint32( atoi( argv[++i] ) )*1000u;
else if (strcmp( argv[i], "-batch" ) == 0)
queries_batch = uint32( atoi( argv[++i] ) )*1000u;
else if (strcmp( argv[i], "-reads" ) == 0)
reads = argv[++i];
else if (strcmp( argv[i], "-index" ) == 0)
index = argv[++i];
else if (strcmp( argv[i], "-no-device" ) == 0)
device_test = false;
else if (strcmp( argv[i], "-no-host" ) == 0)
host_test = false;
else if (strcmp( argv[i], "-tests" ) == 0)
{
const std::string tests_string( argv[++i] );
char temp[256];
const char* begin = tests_string.c_str();
const char* end = begin;
TEST_MASK = 0u;
while (1)
{
while (*end != ':' && *end != '\0')
{
temp[end - begin] = *end;
end++;
}
temp[end - begin] = '\0';
if (strcmp( temp, "qgram" ) == 0)
TEST_MASK |= QGRAM_INDEX;
else if (strcmp( temp, "qgram-set" ) == 0)
TEST_MASK |= QGRAM_SET_INDEX;
else if (strcmp( temp, "qgroup" ) == 0)
TEST_MASK |= QGROUP_INDEX;
if (*end == '\0')
break;
++end; begin = end;
}
}
}
#if defined(_OPENMP)
// Now set the number of threads
omp_set_num_threads( omp_get_num_procs() );
#endif
log_info(stderr, "q-gram test... started\n");
const io::QualityEncoding qencoding = io::Phred33;
log_info(stderr, " loading reads... started\n");
SharedPointer<io::SequenceDataStream> read_data_file(
io::open_sequence_file(
reads,
qencoding,
uint32(-1),
uint32(-1) ) );
if (read_data_file == NULL || read_data_file->is_ok() == false)
{
log_error(stderr, " failed opening file \"%s\"\n", reads);
return 1u;
}
const uint32 batch_size = uint32(-1);
const uint32 batch_bps = n_qgrams;
// load a batch of reads
io::SequenceDataHost h_read_data;
if (io::next( DNA_N, &h_read_data, read_data_file.get(), batch_size, batch_bps ) == 0)
{
log_error(stderr, " unable to read input sequences\n");
return 1;
}
// build its device version
const io::SequenceDataDevice d_read_data( h_read_data );
const io::SequenceDataAccess<DNA_N> d_read_access( d_read_data );
log_info(stderr, " loading reads... done\n");
// fetch the actual string
typedef io::SequenceDataAccess<DNA_N> read_access_type;
typedef read_access_type::sequence_stream_type string_type;
typedef read_access_type::sequence_string_set_type string_set_type;
const uint32 n_strings = d_read_access.size();
const uint32 string_len = d_read_access.bps();
const string_type string = d_read_access.sequence_stream();
const string_set_type string_set = d_read_access.sequence_string_set();
log_info(stderr, " strings: %u\n", n_strings);
log_info(stderr, " symbols: %.3f M\n", 1.0e-6f * float(string_len));
io::SequenceDataHost ref;
if (!io::load_sequence_file( DNA, &ref, index ))
{
log_error(stderr, " failed loading index \"%s\"\n", index);
return 1u;
}
// build its device version
const io::SequenceDataDevice ref_cuda( ref );
typedef io::SequenceDataAccess<DNA> genome_access_type;
typedef genome_access_type::sequence_stream_type genome_type;
const uint32 genome_len = ref.bps();
const genome_access_type h_genome_access( ref );
const genome_type h_genome( h_genome_access.sequence_stream() );
const genome_access_type d_genome_access( ref_cuda );
const genome_type d_genome( d_genome_access.sequence_stream() );
// clamp the total number of queries
n_queries = nvbio::min( n_queries, genome_len );
// test q-gram index
if (TEST_MASK & QGRAM_INDEX)
{
log_visible(stderr, " testing q-gram index (device)... started\n");
QGramIndexDevice qgram_index;
test_qgram_index_build(
20u,
string_len,
string,
qgram_index );
if (device_test)
{
Stats stats;
for (uint32 genome_begin = 0; genome_begin < n_queries; genome_begin += queries_batch)
{
const uint32 genome_end = nvbio::min( genome_begin + queries_batch, n_queries );
test_qgram_index_query(
qgram_index,
genome_end - genome_begin,
genome_len,
genome_begin,
d_genome,
stats );
}
log_visible(stderr, " testing q-gram index (device)... done\n");
const float genome_ratio = float(genome_len)/float(stats.queries);
log_info(stderr, " sorted throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " sorted throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.filter_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.filter_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.merge_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.merge_time * genome_ratio) );
}
if (host_test)
{
log_visible(stderr, " testing q-gram index (host)... started\n");
QGramIndexHost h_qgram_index;
h_qgram_index = qgram_index;
Stats stats;
for (uint32 genome_begin = 0; genome_begin < n_queries; genome_begin += queries_batch)
{
const uint32 genome_end = nvbio::min( genome_begin + queries_batch, n_queries );
test_qgram_index_query(
h_qgram_index,
genome_end - genome_begin,
genome_len,
genome_begin,
h_genome,
stats );
}
log_visible(stderr, " testing q-gram index (host)... done\n");
const float genome_ratio = float(genome_len)/float(stats.queries);
log_info(stderr, " sorted throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " sorted throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.filter_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.filter_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.merge_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.merge_time * genome_ratio) );
}
}
// test q-gram set-index
if (TEST_MASK & QGRAM_SET_INDEX)
{
log_visible(stderr, " testing q-gram set-index (device)... started\n");
QGramSetIndexDevice qgram_index;
test_qgram_set_index_build(
22u,
string_set,
qgram_index );
if (device_test)
{
Stats stats;
for (uint32 genome_begin = 0; genome_begin < n_queries; genome_begin += queries_batch)
{
const uint32 genome_end = nvbio::min( genome_begin + queries_batch, n_queries );
test_qgram_index_query(
qgram_index,
genome_end - genome_begin,
genome_len,
genome_begin,
d_genome,
stats );
}
log_visible(stderr, " testing q-gram set-index (device)... done\n");
const float genome_ratio = float(genome_len)/float(stats.queries);
log_info(stderr, " sorted throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " sorted throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.filter_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.filter_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.merge_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.merge_time * genome_ratio) );
}
if (host_test)
{
log_visible(stderr, " testing q-gram set-index (host)... started\n");
QGramSetIndexHost h_qgram_index;
h_qgram_index = qgram_index;
Stats stats;
for (uint32 genome_begin = 0; genome_begin < n_queries; genome_begin += queries_batch)
{
const uint32 genome_end = nvbio::min( genome_begin + queries_batch, n_queries );
test_qgram_index_query(
h_qgram_index,
genome_end - genome_begin,
genome_len,
genome_begin,
h_genome,
stats );
}
log_visible(stderr, " testing q-gram set-index (host)... done\n");
const float genome_ratio = float(genome_len)/float(stats.queries);
log_info(stderr, " sorted throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " sorted throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.filter_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.filter_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.merge_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.merge_time * genome_ratio) );
}
}
// test q-group index
if (TEST_MASK & QGROUP_INDEX)
{
log_visible(stderr, " testing q-group index (device)... started\n");
QGroupIndexDevice qgram_index;
test_qgroup_index_build(
16u,
string_len,
string,
qgram_index );
if (device_test)
{
Stats stats;
for (uint32 genome_begin = 0; genome_begin < n_queries; genome_begin += queries_batch)
{
const uint32 genome_end = nvbio::min( genome_begin + queries_batch, n_queries );
test_qgram_index_query(
qgram_index,
genome_end - genome_begin,
genome_len,
genome_begin,
d_genome,
stats );
}
log_visible(stderr, " testing q-group index (device)... done\n");
const float genome_ratio = float(genome_len)/float(stats.queries);
log_info(stderr, " sorted throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " sorted throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.sorted_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.filter_time * genome_ratio) );
log_info(stderr, " filter throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.filter_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f K reads/s\n", 1.0e-3f * float(n_strings) / (stats.merge_time * genome_ratio) );
log_info(stderr, " merge throughput: %7.2f M bases/s\n", 1.0e-6f * float(string_len) / (stats.merge_time * genome_ratio) );
}
}
log_info(stderr, "q-gram test... done\n" );
return 0;
}
} // namespace nvbio
|
the_stack
|
using namespace std;
void process_error(int severity, string err); // this should probably live in a utils header file
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
unsigned int hash_seed;
struct float_avg
{
__host__ float_type operator()(const float_type &lhs, const int_type &rhs) const {
return lhs/rhs;
}
};
struct float_avg1
{
__host__ float_type operator()(const int_type &lhs, const int_type &rhs) const {
return ((float_type)lhs)/rhs;
}
};
struct div100
{
__host__ int_type operator()(const int_type &lhs, const int_type &rhs) const {
return (lhs*100)/rhs;
}
};
thrust::host_vector<unsigned long long int> h_merge;
using namespace std;
using namespace thrust::placeholders;
void create_c(CudaSet* c, CudaSet* b)
{
c->not_compressed = 1;
c->segCount = 1;
c->columnNames = b->columnNames;
h_merge.clear();
c->cols = b->cols;
c->type = b->type;
c->decimal = b->decimal;
c->decimal_zeroes = b->decimal_zeroes;
c->grp_type = b->grp_type;
c->ts_cols = b->ts_cols;
for(unsigned int i=0; i < b->columnNames.size(); i++) {
if (b->type[b->columnNames[i]] == 0) {
c->h_columns_int[b->columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
c->d_columns_int[b->columnNames[i]] = thrust::device_vector<int_type>();
if(b->string_map.find(b->columnNames[i]) != b->string_map.end()) {
c->string_map[b->columnNames[i]] = b->string_map[b->columnNames[i]];
};
}
else
if (b->type[b->columnNames[i]] == 1) {
c->h_columns_float[b->columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
c->d_columns_float[b->columnNames[i]] = thrust::device_vector<float_type>();
}
else {
c->h_columns_char[b->columnNames[i]] = nullptr;
c->d_columns_char[b->columnNames[i]] = nullptr;
c->char_size[b->columnNames[i]] = b->char_size[b->columnNames[i]];
};
};
}
void add(CudaSet* c, CudaSet* b, queue<string> op_v3, map<string,string> aliases,
vector<thrust::device_vector<int_type> >& distinct_tmp, vector<thrust::device_vector<int_type> >& distinct_val,
vector<thrust::device_vector<int_type> >& distinct_hash, CudaSet* a)
{
if (c->columnNames.empty()) {
// create d_columns and h_columns
create_c(c,b);
}
size_t cycle_sz = op_v3.size();
vector<string> opv;
for(unsigned int z = 0; z < cycle_sz; z++) {
if(std::find(b->columnNames.begin(), b->columnNames.end(), aliases[op_v3.front()]) == b->columnNames.end()) {
//cout << "Syntax error: alias " << op_v3.front() << endl;
//exit(0);
opv.push_back(op_v3.front());
}
else
opv.push_back(aliases[op_v3.front()]);
op_v3.pop();
};
// create hashes of groupby columns
unsigned long long int* hashes = new unsigned long long int[b->mRecCount];
unsigned long long int* sum = new unsigned long long int[cycle_sz*b->mRecCount];
for(unsigned int z = 0; z < cycle_sz; z++) {
// b->CopyColumnToHost(opv[z]);
if(b->type[opv[z]] != 1) { //int or string
for(int i = 0; i < b->mRecCount; i++) {
//memcpy(&sum[i*cycle_sz + z], &b->h_columns_int[opv[z]][i], 8);
sum[i*cycle_sz + z] = b->h_columns_int[opv[z]][i];
//cout << "CPY to " << i*cycle_sz + z << " " << opv[z] << " " << b->h_columns_int[opv[z]][i] << endl;
//cout << "SET " << sum[i*cycle_sz + z] << endl;
};
}
else { //float
for(int i = 0; i < b->mRecCount; i++) {
memcpy(&sum[i*cycle_sz + z], &b->h_columns_float[opv[z]][i], 8);
};
};
};
for(int i = 0; i < b->mRecCount; i++) {
hashes[i] = MurmurHash64A(&sum[i*cycle_sz], 8*cycle_sz, hash_seed);
//cout << "hash " << hashes[i] << " " << i*cycle_sz << " " << sum[i*cycle_sz] << " " << sum[i*cycle_sz + 1] << endl;
};
delete [] sum;
thrust::device_vector<unsigned long long int> d_hashes(b->mRecCount);
thrust::device_vector<unsigned int> v(b->mRecCount);
thrust::sequence(v.begin(), v.end(), 0, 1);
thrust::copy(hashes, hashes+b->mRecCount, d_hashes.begin());
// sort the results by hash
thrust::sort_by_key(d_hashes.begin(), d_hashes.end(), v.begin());
void* d_tmp;
CUDA_SAFE_CALL(cudaMalloc((void **) &d_tmp, b->mRecCount*int_size));
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
if(b->type[b->columnNames[i]] == 0 || b->type[b->columnNames[i]] == 2) {
thrust::device_ptr<int_type> d_tmp_int((int_type*)d_tmp);
thrust::gather(v.begin(), v.end(), b->d_columns_int[b->columnNames[i]].begin(), d_tmp_int);
thrust::copy(d_tmp_int, d_tmp_int + b->mRecCount, b->h_columns_int[b->columnNames[i]].begin());
}
else
if(b->type[b->columnNames[i]] == 1) {
thrust::device_ptr<float_type> d_tmp_float((float_type*)d_tmp);
thrust::gather(v.begin(), v.end(), b->d_columns_float[b->columnNames[i]].begin(), d_tmp_float);
thrust::copy(d_tmp_float, d_tmp_float + b->mRecCount, b->h_columns_float[b->columnNames[i]].begin());
}
};
cudaFree(d_tmp);
thrust::host_vector<unsigned long long int> hh = d_hashes;
char* tmp = new char[max_char(b)*(c->mRecCount + b->mRecCount)];
c->resize(b->mRecCount);
// let's merge every column
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
if(b->type[b->columnNames[i]] != 1) {
thrust::merge_by_key(h_merge.begin(), h_merge.end(),
hh.begin(), hh.end(),
c->h_columns_int[c->columnNames[i]].begin(), b->h_columns_int[b->columnNames[i]].begin(),
thrust::make_discard_iterator(), (int_type*)tmp);
memcpy(thrust::raw_pointer_cast(c->h_columns_int[c->columnNames[i]].data()), (int_type*)tmp, (h_merge.size() + b->mRecCount)*int_size);
}
else {
thrust::merge_by_key(h_merge.begin(), h_merge.end(),
hh.begin(), hh.end(),
c->h_columns_float[c->columnNames[i]].begin(), b->h_columns_float[b->columnNames[i]].begin(),
thrust::make_discard_iterator(), (float_type*)tmp);
memcpy(thrust::raw_pointer_cast(c->h_columns_float[c->columnNames[i]].data()), (float_type*)tmp, (h_merge.size() + b->mRecCount)*float_size);
}
};
//merge the keys
thrust::merge(h_merge.begin(), h_merge.end(),
hh.begin(), hh.end(), (unsigned long long int*)tmp);
size_t cpy_sz = h_merge.size() + b->mRecCount;
h_merge.resize(h_merge.size() + b->mRecCount);
thrust::copy((unsigned long long int*)tmp, (unsigned long long int*)tmp + cpy_sz, h_merge.begin());
delete [] tmp;
delete [] hashes;
//cout << endl << "end b and c " << b->mRecCount << " " << c->mRecCount << endl;
//for(int i = 0; i < h_merge.size();i++)
//cout << "H " << h_merge[i] << endl;
/* bool dis_exists = 0;
for(unsigned int j=0; j < c->mColumnCount; j++) {
if (c->grp_type[j] == 6)
dis_exists = 1;
};
if (dis_exists) {
bool grp_scanned = 0;
thrust::device_ptr<bool> d_di(a->grp);
thrust::device_ptr<unsigned int> d_dii = thrust::device_malloc<unsigned int>(a->mRecCount);
thrust::identity<bool> op;
thrust::transform(d_di, d_di+a->mRecCount, d_dii, op);
thrust::device_ptr<int_type> tmp = thrust::device_malloc<int_type>(a->mRecCount);
unsigned int dist_count = 0;
for(unsigned int j=0; j < c->mColumnCount; j++) {
if (c->grp_type[j] == 6) {
if(!grp_scanned) {
d_dii[a->mRecCount-1] = 0;
thrust::inclusive_scan(d_dii, d_dii + a->mRecCount, d_dii);
thrust::gather(d_dii, d_dii + a->mRecCount, hashes.begin(), tmp); // now hashes are in tmp
grp_scanned = 1;
};
unsigned int offset = distinct_val[dist_count].size();
distinct_val[dist_count].resize(distinct_val[dist_count].size() + a->mRecCount);
distinct_hash[dist_count].resize(distinct_hash[dist_count].size() + a->mRecCount);
thrust::copy(distinct_tmp[dist_count].begin(), distinct_tmp[dist_count].begin() + a->mRecCount, distinct_val[dist_count].begin() + offset);
thrust::copy(tmp, tmp + a->mRecCount, distinct_hash[dist_count].begin() + offset);
thrust::stable_sort_by_key(distinct_val[dist_count].begin(), distinct_val[dist_count].end(), distinct_hash[dist_count].begin());
thrust::stable_sort_by_key(distinct_hash[dist_count].begin(), distinct_hash[dist_count].end(), distinct_val[dist_count].begin());
ZipIterator new_last = thrust::unique(thrust::make_zip_iterator(thrust::make_tuple(distinct_hash[dist_count].begin(), distinct_val[dist_count].begin())),
thrust::make_zip_iterator(thrust::make_tuple(distinct_hash[dist_count].end(), distinct_val[dist_count].end())));
IteratorTuple t = new_last.get_iterator_tuple();
distinct_val[dist_count].resize(thrust::get<0>(t) - distinct_hash[dist_count].begin());
distinct_hash[dist_count].resize(thrust::get<0>(t) - distinct_hash[dist_count].begin());
dist_count++;
};
};
thrust::device_free(tmp);
thrust::device_free(d_dii);
};
*/
}
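// Worked illustration of the group-key hashing in add() above (a sketch, not
// original code): with two group-by columns (cycle_sz == 2), row i packs its
// two 8-byte values contiguously at sum[i*2] and sum[i*2 + 1], and
// MurmurHash64A over those 16 bytes yields one 64-bit key per row, so rows
// with equal group-by values get the same key and become adjacent after the
// sort_by_key and merge steps.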
void count_simple(CudaSet* c)
{
int_type count;
for(unsigned int i = 0; i < c->columnNames.size(); i++) {
if(c->grp_type[c->columnNames[i]] == 0) { // COUNT
count = thrust::reduce(c->h_columns_int[c->columnNames[i]].begin(), c->h_columns_int[c->columnNames[i]].begin() + c->mRecCount);
c->h_columns_int[c->columnNames[i]][0] = count;
};
};
if (c->mRecCount != 0) {
for(unsigned int k = 0; k < c->columnNames.size(); k++) {
if(c->grp_type[c->columnNames[k]] == 1) { // AVG
if(c->type[c->columnNames[k]] == 0 || c->type[c->columnNames[k]] == 2) {
int_type sum = thrust::reduce(c->h_columns_int[c->columnNames[k]].begin(), c->h_columns_int[c->columnNames[k]].begin() + c->mRecCount);
c->h_columns_int[c->columnNames[k]][0] = sum/count;
}
if(c->type[c->columnNames[k]] == 1) {
float_type sum = thrust::reduce(c->h_columns_float[c->columnNames[k]].begin(), c->h_columns_float[c->columnNames[k]].begin() + c->mRecCount);
c->h_columns_float[c->columnNames[k]][0] = sum/count;
};
}
else
if(c->grp_type[c->columnNames[k]] == 2) { // SUM
if(c->type[c->columnNames[k]] == 0 || c->type[c->columnNames[k]] == 2) {
int_type sum = thrust::reduce(c->h_columns_int[c->columnNames[k]].begin(), c->h_columns_int[c->columnNames[k]].begin() + c->mRecCount);
c->h_columns_int[c->columnNames[k]][0] = sum;
}
if(c->type[c->columnNames[k]] == 1) {
float_type sum = thrust::reduce(c->h_columns_float[c->columnNames[k]].begin(), c->h_columns_float[c->columnNames[k]].begin() + c->mRecCount);
c->h_columns_float[c->columnNames[k]][0] = sum;
};
}
};
}
c->mRecCount = 1;
};
void count_avg(CudaSet* c, vector<thrust::device_vector<int_type> >& distinct_hash)
{
string countstr;
thrust::equal_to<unsigned long long int> binary_pred;
thrust::maximum<unsigned long long int> binary_op_max;
thrust::minimum<unsigned long long int> binary_op_min;
for(unsigned int i = 0; i < c->columnNames.size(); i++) {
if(c->grp_type[c->columnNames[i]] == 0) { // COUNT
countstr = c->columnNames[i];
break;
};
};
thrust::host_vector<bool> grp;
size_t res_count;
if(h_merge.size()) {
grp.resize(h_merge.size());
thrust::adjacent_difference(h_merge.begin(), h_merge.end(), grp.begin());
res_count = h_merge.size() - thrust::count(grp.begin(), grp.end(), 0);
};
if (c->mRecCount != 0) {
//unsigned int dis_count = 0;
if (h_merge.size()) {
int_type* tmp = new int_type[res_count];
for(unsigned int k = 0; k < c->columnNames.size(); k++) {
if(c->grp_type[c->columnNames[k]] <= 2) { //sum || avg || count
if (c->type[c->columnNames[k]] == 0) { // int
// check for overflow
// convert to double, reduce, check if larger than max 64 bit int
float_type* tmp1 = new float_type[c->mRecCount];
float_type* tmp_res = new float_type[res_count];
for(int z = 0; z < c->mRecCount ; z++)
tmp1[z] = (float_type)(c->h_columns_int[c->columnNames[k]][z]);
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), tmp1,
thrust::make_discard_iterator(), tmp_res);
double max_overflow = 0;
for(int z = 0; z < res_count; z++) {
if (tmp_res[z] > 9223372036854775807.0) {
if(tmp_res[z] - 9223372036854775807.0 > max_overflow)
max_overflow = tmp_res[z];
};
};
if(max_overflow) {
unsigned pw = ceil(log10(max_overflow/9223372036854775807.0));
thrust::transform(c->h_columns_int[c->columnNames[k]].begin(), c->h_columns_int[c->columnNames[k]].end(), thrust::make_constant_iterator((int_type)pow(10, pw)), c->h_columns_int[c->columnNames[k]].begin(), thrust::divides<int_type>());
c->decimal_zeroes[c->columnNames[k]] = c->decimal_zeroes[c->columnNames[k]] - pw;
};
delete [] tmp1;
delete [] tmp_res;
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_int[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), tmp);
c->h_columns_int[c->columnNames[k]].resize(res_count);
thrust::copy(tmp, tmp + res_count, c->h_columns_int[c->columnNames[k]].begin());
}
else
if (c->type[c->columnNames[k]] == 1 ) { // float
float_type* tmp1 = new float_type[res_count];
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_float[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), tmp1);
c->h_columns_float[c->columnNames[k]].resize(res_count);
thrust::copy(tmp1, tmp1 + res_count, c->h_columns_float[c->columnNames[k]].begin());
delete [] tmp1;
};
}
if(c->grp_type[c->columnNames[k]] == 4) { //min
if (c->type[c->columnNames[k]] == 0 ) { // int
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_int[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), tmp, binary_pred, binary_op_min);
c->h_columns_int[c->columnNames[k]].resize(res_count);
thrust::copy(tmp, tmp + res_count, c->h_columns_int[c->columnNames[k]].begin());
}
else
if (c->type[c->columnNames[k]] == 1 ) { // float
c->h_columns_float[c->columnNames[k]].resize(res_count);
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_float[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), c->h_columns_float[c->columnNames[k]].begin(), binary_pred, binary_op_min);
};
}
if(c->grp_type[c->columnNames[k]] == 5) { //max
if (c->type[c->columnNames[k]] == 0 ) { // int
int_type* tmp = new int_type[res_count];
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_int[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), tmp, binary_pred, binary_op_max);
c->h_columns_int[c->columnNames[k]].resize(res_count);
thrust::copy(tmp, tmp + res_count, c->h_columns_int[c->columnNames[k]].begin());
delete [] tmp;
}
else
if (c->type[c->columnNames[k]] == 1 ) { // float
c->h_columns_float[c->columnNames[k]].resize(res_count);
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_float[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), c->h_columns_float[c->columnNames[k]].begin(), binary_pred, binary_op_max);
};
}
else
if(c->grp_type[c->columnNames[k]] == 3) { //no group function
if (c->type[c->columnNames[k]] == 0 || c->type[c->columnNames[k]] == 2) { // int
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_int[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), tmp, binary_pred, binary_op_max);
c->h_columns_int[c->columnNames[k]].resize(res_count);
thrust::copy(tmp, tmp + res_count, c->h_columns_int[c->columnNames[k]].begin());
}
else
if (c->type[c->columnNames[k]] == 1 ) { // float
c->h_columns_float[c->columnNames[k]].resize(res_count);
thrust::reduce_by_key(h_merge.begin(), h_merge.end(), c->h_columns_float[c->columnNames[k]].begin(),
thrust::make_discard_iterator(), c->h_columns_float[c->columnNames[k]].begin(), binary_pred, binary_op_max);
}
};
};
c->mRecCount = res_count;
delete [] tmp;
};
for(unsigned int k = 0; k < c->columnNames.size(); k++) {
if(c->grp_type[c->columnNames[k]] == 1) { // AVG
if (c->type[c->columnNames[k]] == 0 ) { // int
if(c->decimal_zeroes[c->columnNames[k]] <= 2) {
thrust::transform(c->h_columns_int[c->columnNames[k]].begin(), c->h_columns_int[c->columnNames[k]].begin() + c->mRecCount,
c->h_columns_int[countstr].begin(), c->h_columns_int[c->columnNames[k]].begin(), div100());
c->decimal_zeroes[c->columnNames[k]] = c->decimal_zeroes[c->columnNames[k]] + 2;
}
else {
thrust::transform(c->h_columns_int[c->columnNames[k]].begin(), c->h_columns_int[c->columnNames[k]].begin() + c->mRecCount,
c->h_columns_int[countstr].begin(), c->h_columns_int[c->columnNames[k]].begin(), thrust::divides<int_type>());
};
c->grp_type[c->columnNames[k]] = 3;
}
else { // float
thrust::transform(c->h_columns_float[c->columnNames[k]].begin(), c->h_columns_float[c->columnNames[k]].begin() + c->mRecCount,
c->h_columns_int[countstr].begin(), c->h_columns_float[c->columnNames[k]].begin(), float_avg());
};
}
else
if(c->grp_type[c->columnNames[k]] == 6) {
/* unsigned int res_count = 0;
thrust::host_vector<int_type> h_hash = distinct_hash[dis_count];
int_type curr_val = h_hash[0];
unsigned int cycle_sz = h_hash.size();
for(unsigned int i = 0; i < cycle_sz; i++) {
if (h_hash[i] == curr_val) {
res_count++;
if(i == cycle_sz-1) {
c->h_columns_int[c->columnNames[k]][mymap[h_hash[i]]] = res_count;
};
}
else {
unsigned int idx = mymap[h_hash[i-1]];
c->h_columns_int[c->columnNames[k]][idx] = res_count;
curr_val = h_hash[i];
res_count = 1;
};
};
dis_count++;*/
}
else
if(c->grp_type[c->columnNames[k]] == 2) {
};
};
};
c->segCount = 1;
c->maxRecs = c->mRecCount;
};
|
the_stack
|
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
#endif
#include "chrono_sensor/optix/shaders/device_utils.h"
static __device__ __inline__ float NormalDist(const float& NdH, const float& roughness) {
float rough_sqr = roughness * roughness;
float den_2 = NdH * NdH * (rough_sqr - 1.f) + 1.f;
float denominator = den_2 * den_2;
return rough_sqr / denominator;
}
// algorithm reference: https://www.gdcvault.com/play/1024478/PBR-Diffuse-Lighting-for-GGX
static __device__ __inline__ float HammonSmith(float NdV, float NdL, const float& roughness) {
NdV = abs(NdV);
NdL = abs(NdL);
float denominator = lerp(2.f * NdV * NdL, NdL + NdV, roughness);
return 0.5f / denominator;
}
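// How the two terms above combine (a note, not original code): the specular
// lobe used below is the Cook-Torrance form f_spec = F * D * G, where
// NormalDist() returns the GGX D with its 1/pi factor omitted and HammonSmith()
// returns the visibility term with the 1/(4 * NdV * NdL) denominator already
// folded in, so the shader can multiply F * D * G by the NdL-weighted incoming
// light directly.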
// triangle mesh query information
__device__ __inline__ void GetTriangleData(float3& normal,
unsigned int& mat_id,
float2& uv,
float3& tangent,
const unsigned int& mesh_id) {
const int tri_id = optixGetPrimitiveIndex();
const float2 bary_coord = optixGetTriangleBarycentrics();
const MeshParameters& mesh_params = params.mesh_pool[mesh_id];
const uint4& vertex_idx = mesh_params.vertex_index_buffer[tri_id];
const float3& v1 = make_float3(mesh_params.vertex_buffer[vertex_idx.x]);
const float3& v2 = make_float3(mesh_params.vertex_buffer[vertex_idx.y]);
const float3& v3 = make_float3(mesh_params.vertex_buffer[vertex_idx.z]);
// calculate normals either from the normal buffer or from the vertex positions
if (mesh_params.normal_index_buffer &&
mesh_params.normal_buffer) { // use vertex normals if normal index buffer exists
const uint4& normal_idx = mesh_params.normal_index_buffer[tri_id];
normal = normalize(make_float3(mesh_params.normal_buffer[normal_idx.y]) * bary_coord.x +
make_float3(mesh_params.normal_buffer[normal_idx.z]) * bary_coord.y +
make_float3(mesh_params.normal_buffer[normal_idx.x]) * (1.0f - bary_coord.x - bary_coord.y));
} else { // else use face normals calculated from vertices
normal = normalize(Cross(v2 - v1, v3 - v1));
}
// calculate texcoords if they exist
if (mesh_params.uv_index_buffer && mesh_params.uv_buffer) { // use per-vertex texture coordinates if a uv index buffer exists
const uint4& uv_idx = mesh_params.uv_index_buffer[tri_id];
const float2& uv1 = mesh_params.uv_buffer[uv_idx.x];
const float2& uv2 = mesh_params.uv_buffer[uv_idx.y];
const float2& uv3 = mesh_params.uv_buffer[uv_idx.z];
uv = uv2 * bary_coord.x + uv3 * bary_coord.y + uv1 * (1.0f - bary_coord.x - bary_coord.y);
float3 e1 = v2 - v1;
float3 e2 = v3 - v1;
float2 delta_uv1 = uv2 - uv1;
float2 delta_uv2 = uv3 - uv1;
float f = 1.f / (delta_uv1.x * delta_uv2.y - delta_uv2.x * delta_uv1.y);
tangent.x = f * (delta_uv2.y * e1.x - delta_uv1.y * e2.x);
tangent.y = f * (delta_uv2.y * e1.y - delta_uv1.y * e2.y);
tangent.z = f * (delta_uv2.y * e1.z - delta_uv1.y * e2.z);
tangent = normalize(tangent);
} else {
uv = make_float2(0.f);
tangent = make_float3(0.f);
}
// get material index
if (mesh_params.mat_index_buffer) { // use the per-triangle material index buffer if it exists
mat_id += mesh_params.mat_index_buffer[tri_id]; // the material index gives an offset id
}
}
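// Tangent derivation used above (a note, not original code): with edges
// e1 = v2 - v1, e2 = v3 - v1 and UV deltas duv1 = uv2 - uv1, duv2 = uv3 - uv1,
// the tangent T and bitangent B satisfy
//   [ e1 ]   [ duv1.x  duv1.y ] [ T ]
//   [ e2 ] = [ duv2.x  duv2.y ] [ B ]
// so T = f * (duv2.y * e1 - duv1.y * e2) with
// f = 1 / (duv1.x * duv2.y - duv2.x * duv1.y), which is exactly what the
// tangent.x/y/z lines compute before normalization.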
static __device__ __inline__ void CameraShader(PerRayData_camera* prd_camera,
const MaterialParameters& mat,
const float3& world_normal,
const float2& uv,
const float3& tangent,
const float& ray_dist,
const float3& ray_orig,
const float3& ray_dir) {
// Colors in MTL files are already in linear color space
float3 subsurface_albedo = mat.Kd;
float transparency = mat.transparency;
float3 specular = mat.Ks;
int use_specular_workflow = mat.use_specular_workflow;
float3 hit_point = ray_orig + ray_dir * ray_dist;
float NdV = Dot(world_normal, -ray_dir);
if (mat.kd_tex) {
const float4 tex = tex2D<float4>(mat.kd_tex, uv.x, uv.y);
// convert the sRGB texture into linear color space; all color textures need this transform
subsurface_albedo = Pow(make_float3(tex.x, tex.y, tex.z), 2.2);
if (tex.w < 1e-6)
transparency = 0.f; // to handle transparent card textures such as tree leaves
}
if (mat.ks_tex) {
const float4 tex = tex2D<float4>(mat.ks_tex, uv.x, uv.y);
specular = make_float3(tex.x, tex.y, tex.z);
}
if (mat.opacity_tex) {
transparency = tex2D<float>(mat.opacity_tex, uv.x, uv.y);
}
// if this is perfectly transparent, we ignore it and trace the next ray (handles things like tree leaf cards)
if (transparency < 1e-6) {
float3 refract_importance = prd_camera->contrib_to_pixel;
if (fmaxf(refract_importance) > params.importance_cutoff && prd_camera->depth + 1 < params.max_depth) {
PerRayData_camera prd_refraction = default_camera_prd();
prd_refraction.contrib_to_pixel = refract_importance;
prd_refraction.rng = prd_camera->rng;
prd_refraction.depth = prd_camera->depth + 1;
unsigned int opt1, opt2;
pointer_as_ints(&prd_refraction, opt1, opt2);
unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE;
optixTrace(params.root, hit_point, ray_dir, params.scene_epsilon, 1e16f, optixGetRayTime(),
OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype);
prd_camera->color = prd_refraction.color;
// For GI, harmless without GI
prd_camera->albedo = prd_refraction.albedo;
prd_camera->normal = prd_refraction.normal;
}
return;
}
//=================
// Refracted color
//=================
float3 refracted_color = make_float3(0);
if (transparency < 0.99f) {
float3 refract_importance = prd_camera->contrib_to_pixel * (1 - transparency);
if (fmaxf(refract_importance) > params.importance_cutoff && prd_camera->depth + 1 < params.max_depth) {
PerRayData_camera prd_refraction = default_camera_prd();
prd_refraction.contrib_to_pixel = refract_importance;
prd_refraction.rng = prd_camera->rng;
prd_refraction.depth = prd_camera->depth + 1;
unsigned int opt1, opt2;
pointer_as_ints(&prd_refraction, opt1, opt2);
// make_camera_data(make_float3(0), refract_importance, prd_camera.rnd, prd_camera.depth + 1);
// float3 refract_dir = refract(optixGetWorldRayDirection(), world_normal, 1.f, 1.f);
float3 refract_dir = ray_dir; // pure transparency without refraction
unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE;
optixTrace(params.root, hit_point, refract_dir, params.scene_epsilon, 1e16f, optixGetRayTime(),
OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype);
refracted_color = prd_refraction.color; // TODO: verify whether this contribution should be added here
}
}
// query roughness and metallic values
float roughness = mat.roughness;
if (mat.roughness_tex) {
roughness = tex2D<float>(mat.roughness_tex, uv.x, uv.y);
}
float metallic = mat.metallic;
if (mat.metallic_tex) {
metallic = tex2D<float>(mat.metallic_tex, uv.x, uv.y);
}
//=================
// Surface reflection toward light sources
//=================
float3 reflected_color = make_float3(0.0f);
// iterate through the lights
for (int i = 0; i < params.num_lights; i++) {
PointLight l = params.lights[i];
float dist_to_light = Length(l.pos - hit_point);
if (dist_to_light < 2 * l.max_range) {
float3 dir_to_light = normalize(l.pos - hit_point);
float NdL = Dot(world_normal, dir_to_light);
// if we think we can see the light, let's see if we are correct
if (NdL > 0.0f) {
// check shadows
PerRayData_shadow prd_shadow = default_shadow_prd();
prd_shadow.depth = prd_camera->depth + 1;
prd_shadow.ramaining_dist = dist_to_light;
unsigned int opt1;
unsigned int opt2;
pointer_as_ints(&prd_shadow, opt1, opt2);
unsigned int raytype = (unsigned int)SHADOW_RAY_TYPE;
optixTrace(params.root, hit_point, dir_to_light, params.scene_epsilon, dist_to_light, optixGetRayTime(),
OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype);
float3 light_attenuation = prd_shadow.attenuation;
float point_light_falloff =
(l.max_range * l.max_range / (dist_to_light * dist_to_light + l.max_range * l.max_range));
float3 incoming_light_ray = l.color * light_attenuation * point_light_falloff * NdL;
if (fmaxf(incoming_light_ray) > 0.0f) {
float3 halfway = normalize(dir_to_light - ray_dir);
float NdV = Dot(world_normal, -ray_dir);
float NdH = Dot(world_normal, halfway);
float VdH = Dot(-ray_dir, halfway);
float3 F = make_float3(0.0f);
float3 subsurface_albedo_updated = subsurface_albedo;
// === dielectric workflow
if (use_specular_workflow) {
float3 F0 = specular * 0.08f;
F = fresnel_schlick(VdH, 5.f, F0,
make_float3(1.f) /*make_float3(fresnel_max) it is usually 1*/);
} else {
float3 default_dielectrics_F0 = make_float3(0.04f);
F = metallic * subsurface_albedo + (1 - metallic) * default_dielectrics_F0;
subsurface_albedo_updated =
(1 - metallic) * subsurface_albedo; // since metals do not do subsurface reflection
}
// Diffuse portion of reflection
reflected_color += (make_float3(1.f) - F) * subsurface_albedo_updated * incoming_light_ray;
float D = NormalDist(NdH, roughness); // 1/pi omitted
float G = HammonSmith(NdV, NdL, roughness); // 4 * NdV * NdL omitted
float3 f_ct = F * D * G;
reflected_color += f_ct * incoming_light_ray;
}
}
}
}
// Correct Light reflected color contribution
reflected_color = reflected_color * prd_camera->contrib_to_pixel * mat.transparency;
//=================
// Ambient light
//=================
// ambient light model: partly a view-aligned "flashlight" term (NdV), partly a hemispherical term based on the surface normal's upward direction
float3 ambient_light = params.ambient_light_color *
(make_float3(NdV) + make_float3(Dot(world_normal, make_float3(0, 0, 1)) * .5f + .5f)) *
subsurface_albedo * prd_camera->contrib_to_pixel * mat.transparency;
//=================
    // If the surface is very smooth, trace the reflected direction
// Do this reflection regardless of GI on or off.
//=================
bool mirror_reflection = false;
float3 mirror_reflection_color = make_float3(0.0);
float3 next_dir = reflect(ray_dir, world_normal);
next_dir = normalize(next_dir);
float NdL = Dot(world_normal, next_dir);
float3 halfway = normalize(next_dir - ray_dir);
float NdH = Dot(world_normal, halfway);
float VdH = Dot(-ray_dir, halfway); // Same as LdH
float3 F = make_float3(0.0f);
float3 subsurface_albedo_updated = subsurface_albedo;
// === dielectric workflow
if (use_specular_workflow) {
float3 F0 = specular * 0.08f;
F = fresnel_schlick(VdH, 5.f, F0, make_float3(1.f) /*make_float3(fresnel_max) it is usually 1*/);
} else {
float3 default_dielectrics_F0 = make_float3(0.04f);
F = metallic * subsurface_albedo + (1 - metallic) * default_dielectrics_F0;
subsurface_albedo_updated = (1 - metallic) * subsurface_albedo; // since metals do not do subsurface reflection
}
float D = NormalDist(NdH, roughness); // 1/pi omitted
float G = HammonSmith(NdV, NdL, roughness); // 4 * NdV * NdL omitted
float3 f_ct = F * D * G;
    // Note: only the specular term appears here (energy preservation).
    // Since the direction is deterministic, the PDF is 1 (rather than 1/pi for cosine sampling).
float3 next_contrib_to_pixel = f_ct * NdL;
next_contrib_to_pixel = clamp(next_contrib_to_pixel / (4 * CUDART_PI_F), make_float3(0), make_float3(1));
    // If the camera uses GI, it will trace two rays, so each ray's contribution should be halved
if (prd_camera->use_gi) {
next_contrib_to_pixel = next_contrib_to_pixel * 0.5f;
}
// corrected for transparency
next_contrib_to_pixel = next_contrib_to_pixel * mat.transparency * prd_camera->contrib_to_pixel;
if (luminance(next_contrib_to_pixel) > params.importance_cutoff && prd_camera->depth + 1 < params.max_depth) {
mirror_reflection = true;
PerRayData_camera prd_reflection = default_camera_prd();
prd_reflection.contrib_to_pixel = next_contrib_to_pixel;
prd_reflection.rng = prd_camera->rng;
prd_reflection.depth = prd_camera->depth + 1;
prd_reflection.use_gi = prd_camera->use_gi;
unsigned int opt1, opt2;
pointer_as_ints(&prd_reflection, opt1, opt2);
unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE;
optixTrace(params.root, hit_point, next_dir, params.scene_epsilon, 1e16f, optixGetRayTime(),
OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype);
        // mirror correction accounts for oversampling this direction.
        // The following line is a heuristic: perfect reflection for smooth metallic objects,
        // no reflection for rough non-metallic objects
float mirror_correction = (1.f - roughness) * (1.f - roughness) * metallic * metallic;
mirror_reflection_color = prd_reflection.color * mirror_correction;
}
//=================
// Global illumination ray.
//=================
float3 gi_reflection_color = make_float3(0);
if (prd_camera->use_gi) {
// sample hemisphere for next ray when using global illumination
float z1 = curand_uniform(&prd_camera->rng);
float z2 = curand_uniform(&prd_camera->rng);
next_dir = sample_hemisphere_dir(z1, z2, world_normal);
NdL = Dot(world_normal, next_dir);
halfway = normalize(next_dir - ray_dir);
NdH = Dot(world_normal, halfway);
VdH = Dot(-ray_dir, halfway); // Same as LdH
F = make_float3(0.0f);
subsurface_albedo_updated = subsurface_albedo;
// === dielectric workflow
if (use_specular_workflow) {
float3 F0 = specular * 0.08f;
F = fresnel_schlick(VdH, 5.f, F0, make_float3(1.f) /*make_float3(fresnel_max) it is usually 1*/);
} else {
float3 default_dielectrics_F0 = make_float3(0.04f);
F = metallic * subsurface_albedo + (1 - metallic) * default_dielectrics_F0;
subsurface_albedo_updated =
(1 - metallic) * subsurface_albedo; // since metals do not do subsurface reflection
}
D = NormalDist(NdH, roughness); // 1/pi omitted
G = HammonSmith(NdV, NdL, roughness); // 4 * NdV * NdL omitted
f_ct = F * D * G;
// Specular part
next_contrib_to_pixel = f_ct * NdL;
        // If mirror_reflection is set, two rays are traced, so each ray's contribution should be halved
if (mirror_reflection) {
next_contrib_to_pixel = next_contrib_to_pixel * 0.5f;
}
// Diffuse part
F = clamp(F, make_float3(0), make_float3(1));
next_contrib_to_pixel += (make_float3(1.f) - F) * subsurface_albedo_updated * NdL;
// Corrected for current transparency
next_contrib_to_pixel = next_contrib_to_pixel * mat.transparency * prd_camera->contrib_to_pixel;
if (luminance(next_contrib_to_pixel) > params.importance_cutoff && prd_camera->depth + 1 < params.max_depth) {
PerRayData_camera prd_reflection = default_camera_prd();
prd_reflection.contrib_to_pixel = next_contrib_to_pixel;
prd_reflection.rng = prd_camera->rng;
prd_reflection.depth = prd_camera->depth + 1;
prd_reflection.use_gi = prd_camera->use_gi;
unsigned int opt1, opt2;
pointer_as_ints(&prd_reflection, opt1, opt2);
unsigned int raytype = (unsigned int)CAMERA_RAY_TYPE;
optixTrace(params.root, hit_point, next_dir, params.scene_epsilon, 1e16f, optixGetRayTime(),
OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype);
gi_reflection_color = prd_reflection.color; // accumulate indirect lighting color
}
}
//=================
// Combine all tracing light together
//=================
reflected_color = reflected_color + mirror_reflection_color;
prd_camera->color = reflected_color + refracted_color;
prd_camera->color += prd_camera->use_gi ? gi_reflection_color : ambient_light;
if (prd_camera->depth == 2) {
prd_camera->albedo = subsurface_albedo;
prd_camera->normal = world_normal;
}
}
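// ---------------------------------------------------------------------------
// Illustrative sketch (assumption, not part of the original sources): one way the
// fresnel_schlick(cos_theta, exponent, F0, Fmax) helper called above could be
// implemented. The shader passes (VdH, 5.f, F0, make_float3(1.f)), which matches
// the classic Schlick approximation; the project's real helper may differ.
static __device__ __inline__ float3 fresnel_schlick_sketch(const float cos_theta,
                                                           const float exponent,
                                                           const float3& F0,
                                                           const float3& Fmax) {
    // F(theta) = F0 + (Fmax - F0) * (1 - cos(theta))^exponent
    const float t = powf(fmaxf(1.f - cos_theta, 0.f), exponent);
    return F0 + (Fmax - F0) * t;
}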
static __device__ __inline__ void LidarShader(PerRayData_lidar* prd_lidar,
const MaterialParameters& mat,
const float3& world_normal,
const float2& uv,
const float3& tangent,
const float& ray_dist,
const float3& ray_orig,
const float3& ray_dir) {
prd_lidar->range = ray_dist;
prd_lidar->intensity = mat.lidar_intensity * abs(Dot(world_normal, -ray_dir));
}
static __device__ __inline__ void RadarShader(PerRayData_radar* prd_radar,
const MaterialParameters& mat,
const float3& world_normal,
const float2& uv,
const float3& tangent,
const float& ray_dist,
const float3& ray_orig,
const float3& ray_dir,
const float3& translational_velocity,
const float3& angular_velocity,
const float& objectId) {
prd_radar->range = ray_dist;
prd_radar->rcs = mat.radar_backscatter * abs(Dot(world_normal, -ray_dir));
float3 hit_point = ray_orig + ray_dir * ray_dist;
float3 origin = optixTransformPointFromObjectToWorldSpace(make_float3(0, 0, 0));
float3 r = hit_point - origin;
prd_radar->velocity = translational_velocity + Cross(angular_velocity, r);
prd_radar->objectId = objectId;
}
static __device__ __inline__ void ShadowShader(PerRayData_shadow* prd,
const MaterialParameters& mat,
const float3& world_normal,
const float2& uv,
const float3& tangent,
const float& ray_dist,
const float3& ray_orig,
const float3& ray_dir) {
float transparency = mat.transparency;
if (mat.kd_tex) {
const float4 tex = tex2D<float4>(mat.kd_tex, uv.x, uv.y);
if (tex.w < 1e-6)
transparency = 0.f; // to handle transparent card textures such as tree leaves
}
if (mat.opacity_tex) {
transparency = tex2D<float>(mat.opacity_tex, uv.x, uv.y);
}
float3 hit_point = ray_orig + ray_dir * ray_dist;
    float atten = 1.f - transparency;  // TODO: refine the attenuation derived from the material transparency
    // accumulate the occlusion; continue tracing only while the remaining attenuation is significant
    prd->attenuation = prd->attenuation * atten;
if (fmaxf(prd->attenuation) > params.importance_cutoff && prd->depth + 1 < params.max_depth) {
PerRayData_shadow prd_shadow = default_shadow_prd();
prd_shadow.attenuation = prd->attenuation;
prd_shadow.depth = prd->depth + 1;
prd_shadow.ramaining_dist = prd->ramaining_dist - ray_dist;
unsigned int opt1, opt2;
pointer_as_ints(&prd_shadow, opt1, opt2);
float3 hit_point = ray_orig + ray_dist * ray_dir;
unsigned int raytype = (unsigned int)SHADOW_RAY_TYPE;
optixTrace(params.root, hit_point, ray_dir, params.scene_epsilon, prd_shadow.ramaining_dist, optixGetRayTime(),
OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, 0, 1, 0, opt1, opt2, raytype);
prd->attenuation = prd_shadow.attenuation;
}
}
static __device__ __inline__ void SemanticShader(PerRayData_semantic* prd,
const MaterialParameters& mat,
const float3& world_normal,
const float2& uv,
const float3& tangent,
const float& ray_dist,
const float3& ray_orig,
const float3& ray_dir) {
prd->class_id = mat.class_id;
prd->instance_id = mat.instance_id;
}
extern "C" __global__ void __closesthit__material_shader() {
// determine parameters that are shared across all ray types
const MaterialRecordParameters* mat_params = (MaterialRecordParameters*)optixGetSbtDataPointer();
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = normalize(optixGetWorldRayDirection()); // this may be modified by the scaling transform
const float ray_dist = optixGetRayTmax();
float3 object_normal;
float2 uv;
float3 tangent;
unsigned int material_id = mat_params->material_pool_id;
// check if we hit a triangle
if (optixIsTriangleHit()) {
GetTriangleData(object_normal, material_id, uv, tangent, mat_params->mesh_pool_id);
} else {
object_normal = make_float3(int_as_float(optixGetAttribute_0()), int_as_float(optixGetAttribute_1()),
int_as_float(optixGetAttribute_2()));
uv = make_float2(int_as_float(optixGetAttribute_3()), int_as_float(optixGetAttribute_4()));
tangent = make_float3(int_as_float(optixGetAttribute_5()), int_as_float(optixGetAttribute_6()),
int_as_float(optixGetAttribute_7()));
}
const MaterialParameters& mat = params.material_pool[material_id];
if (mat.kn_tex) {
float3 bitangent = normalize(Cross(object_normal, tangent));
const float4 tex = tex2D<float4>(mat.kn_tex, uv.x, uv.y);
float3 normal_delta = make_float3(tex.x, tex.y, tex.z) * 2.f - make_float3(1.f);
object_normal =
normalize(normal_delta.x * tangent + normal_delta.y * bitangent + normal_delta.z * object_normal);
}
float3 world_normal = normalize(optixTransformNormalFromObjectToWorldSpace(object_normal));
// from here on out, things are specific to the ray type
RayType raytype = (RayType)optixGetPayload_2();
switch (raytype) {
case CAMERA_RAY_TYPE:
CameraShader(getCameraPRD(), mat, world_normal, uv, tangent, ray_dist, ray_orig, ray_dir);
break;
case LIDAR_RAY_TYPE:
LidarShader(getLidarPRD(), mat, world_normal, uv, tangent, ray_dist, ray_orig, ray_dir);
break;
case RADAR_RAY_TYPE:
RadarShader(getRadarPRD(), mat, world_normal, uv, tangent, ray_dist, ray_orig, ray_dir,
mat_params->translational_velocity, mat_params->angular_velocity, mat_params->objectId);
break;
case SHADOW_RAY_TYPE:
ShadowShader(getShadowPRD(), mat, world_normal, uv, tangent, ray_dist, ray_orig, ray_dir);
break;
case SEGMENTATION_RAY_TYPE:
SemanticShader(getSemanticPRD(), mat, world_normal, uv, tangent, ray_dist, ray_orig, ray_dir);
break;
}
}
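// ---------------------------------------------------------------------------
// Illustrative sketch (assumption, not part of the original sources): the
// getCameraPRD()/getShadowPRD()-style accessors used in the switch above are
// presumed to rebuild the per-ray-data pointer from the two payload registers
// written by pointer_as_ints(). The high/low word ordering is an assumption.
static __device__ __inline__ void* unpack_prd_pointer_sketch() {
    const unsigned int u0 = optixGetPayload_0();  // high 32 bits (assumed)
    const unsigned int u1 = optixGetPayload_1();  // low 32 bits (assumed)
    const unsigned long long ptr = (static_cast<unsigned long long>(u0) << 32) | u1;
    return reinterpret_cast<void*>(ptr);
}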
|
the_stack
|
/* ---------------------------------------------------------------------------*
 * @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include <cugraph/legacy/graph.hpp>
#include <cugraph/utilities/error.hpp>
#include <rmm/device_vector.hpp>
#include <utilities/graph_utils.cuh>
namespace cugraph {
namespace detail {
// Volume of neighbors (*weight_s)
// TODO: Identical kernel to jaccard_row_sum!!
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void overlap_row_sum(
vertex_t n, edge_t const* csrPtr, vertex_t const* csrInd, weight_t const* v, weight_t* work)
{
vertex_t row;
edge_t start, end, length;
weight_t sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
// compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0) work[row] = sum;
} else {
work[row] = static_cast<weight_t>(length);
}
}
}
// Volume of intersections (*weight_i) and accumulated volume of neighbors (*weight_s)
// TODO: Identical kernel to jaccard_row_sum!!
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void overlap_is(vertex_t n,
edge_t const* csrPtr,
vertex_t const* csrInd,
weight_t const* v,
weight_t* work,
weight_t* weight_i,
weight_t* weight_s)
{
edge_t i, j, Ni, Nj;
vertex_t row, col;
vertex_t ref, cur, ref_col, cur_col, match;
weight_t ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
      // find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[j] = min(work[row], work[col]);
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[j], ref_val); }
}
}
}
}
// Volume of intersections (*weight_i) and accumulated volume of neighbors (*weight_s)
// Using list of node pairs
// NOTE: NOT the same as jaccard
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void overlap_is_pairs(edge_t num_pairs,
edge_t const* csrPtr,
vertex_t const* csrInd,
vertex_t const* first_pair,
vertex_t const* second_pair,
weight_t const* v,
weight_t* work,
weight_t* weight_i,
weight_t* weight_s)
{
edge_t i, idx, Ni, Nj, match;
vertex_t row, col, ref, cur, ref_col, cur_col;
weight_t ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
    // find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[idx] = min(work[row], work[col]);
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[idx], ref_val); }
}
}
}
// Overlap weights (*weight)
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void overlap_jw(edge_t e,
edge_t const* csrPtr,
vertex_t const* csrInd,
weight_t* weight_i,
weight_t* weight_s,
weight_t* weight_j)
{
edge_t j;
weight_t Wi, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x; j < e; j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Wu = weight_s[j];
weight_j[j] = (Wi / Wu);
}
}
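// Reference sketch (assumption, for clarity only): the overlap coefficient the
// kernels above compute for an unweighted CSR graph, written as plain host code.
// weight_i corresponds to the intersection size, weight_s to min(|N(row)|, |N(col)|),
// and weight_j to their ratio.
template <typename vertex_t, typename edge_t>
double overlap_coefficient_reference(edge_t const* csrPtr, vertex_t const* csrInd,
                                     vertex_t row, vertex_t col)
{
  double isect = 0.0;
  for (edge_t i = csrPtr[row]; i < csrPtr[row + 1]; ++i) {
    for (edge_t k = csrPtr[col]; k < csrPtr[col + 1]; ++k) {
      if (csrInd[i] == csrInd[k]) {
        isect += 1.0;
        break;
      }
    }
  }
  double ni = static_cast<double>(csrPtr[row + 1] - csrPtr[row]);
  double nj = static_cast<double>(csrPtr[col + 1] - csrPtr[col]);
  return isect / (ni < nj ? ni : nj);
}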
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int overlap(vertex_t n,
edge_t e,
edge_t const* csrPtr,
vertex_t const* csrInd,
weight_t const* weight_in,
weight_t* work,
weight_t* weight_i,
weight_t* weight_s,
weight_t* weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
overlap_row_sum<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work);
cudaDeviceSynchronize();
fill(e, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
overlap_is<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
overlap_jw<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(e, csrPtr, csrInd, weight_i, weight_s, weight_j);
return 0;
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int overlap_pairs(vertex_t n,
edge_t num_pairs,
edge_t const* csrPtr,
vertex_t const* csrInd,
vertex_t const* first_pair,
vertex_t const* second_pair,
weight_t const* weight_in,
weight_t* work,
weight_t* weight_i,
weight_t* weight_s,
weight_t* weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
overlap_row_sum<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work);
cudaDeviceSynchronize();
fill(num_pairs, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
overlap_is_pairs<weighted, vertex_t, edge_t, weight_t><<<nblocks, nthreads>>>(
num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
overlap_jw<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(num_pairs, csrPtr, csrInd, weight_i, weight_s, weight_j);
return 0;
}
} // namespace detail
template <typename VT, typename ET, typename WT>
void overlap(legacy::GraphCSRView<VT, ET, WT> const& graph, WT const* weights, WT* result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL");
rmm::device_vector<WT> weight_i(graph.number_of_edges);
rmm::device_vector<WT> weight_s(graph.number_of_edges);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::overlap<false, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::overlap<true, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template <typename VT, typename ET, typename WT>
void overlap_list(legacy::GraphCSRView<VT, ET, WT> const& graph,
WT const* weights,
ET num_pairs,
VT const* first,
VT const* second,
WT* result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL");
CUGRAPH_EXPECTS(first != nullptr, "Invalid input argument: first column is NULL");
CUGRAPH_EXPECTS(second != nullptr, "Invalid input argument: second column is NULL");
rmm::device_vector<WT> weight_i(num_pairs);
rmm::device_vector<WT> weight_s(num_pairs);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::overlap_pairs<false, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::overlap_pairs<true, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template void overlap<int32_t, int32_t, float>(legacy::GraphCSRView<int32_t, int32_t, float> const&,
float const*,
float*);
template void overlap<int32_t, int32_t, double>(
legacy::GraphCSRView<int32_t, int32_t, double> const&, double const*, double*);
template void overlap<int64_t, int64_t, float>(legacy::GraphCSRView<int64_t, int64_t, float> const&,
float const*,
float*);
template void overlap<int64_t, int64_t, double>(
legacy::GraphCSRView<int64_t, int64_t, double> const&, double const*, double*);
template void overlap_list<int32_t, int32_t, float>(
legacy::GraphCSRView<int32_t, int32_t, float> const&,
float const*,
int32_t,
int32_t const*,
int32_t const*,
float*);
template void overlap_list<int32_t, int32_t, double>(
legacy::GraphCSRView<int32_t, int32_t, double> const&,
double const*,
int32_t,
int32_t const*,
int32_t const*,
double*);
template void overlap_list<int64_t, int64_t, float>(
legacy::GraphCSRView<int64_t, int64_t, float> const&,
float const*,
int64_t,
int64_t const*,
int64_t const*,
float*);
template void overlap_list<int64_t, int64_t, double>(
legacy::GraphCSRView<int64_t, int64_t, double> const&,
double const*,
int64_t,
int64_t const*,
int64_t const*,
double*);
} // namespace cugraph
|
the_stack
|
#include <cstdio>
#include <iostream>
#include <fstream>
#include "cuda_kernel_utils.h"
#define POINT_BLOCK_SIZE 128
#define POINT_BLOCK_PACK_SIZE 256
////////////////////////////////////////////////////////////////////////////////// GPU
__constant__ int cellOffsets[27][3];
/**
* Method to count the neighboring points for each point.
* @param pNumPoints Number of points.
* @param pNumCells Number of cells of the grid.
 * @param pAABBMinPoint Minimum point of the grid (3 components).
 * @param pAABBMaxPoint Maximum point of the grid (3 components).
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pPoints2 List of points from where to find neighbors.
 * @param pCellIndexs Indices of the grid cells.
* @param pOutNeigbors Output parameter with the number of neighbors of each point.
* @param pOutNumNeigbors Output parameter with the total number of neighbors.
*/
__global__ void countNeighbors(
const bool pScaleInv,
const int pNumPoints,
const int pNumCells,
const float pRadius,
const float* __restrict__ pAABBMinPoint,
const float* __restrict__ pAABBMaxPoint,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const float* __restrict__ pPoints2,
const int* __restrict__ pCellIndexs,
int* __restrict__ pOutNeigbors,
int* __restrict__ pOutNumNeigbors)
{
__shared__ int blockTotalNeighbors;
if(threadIdx.x == 0){
blockTotalNeighbors = 0;
}
__syncthreads();
int currentIndex = threadIdx.x + blockIdx.x * blockDim.x;
if(currentIndex < pNumPoints){
int currBatchId = pBatchIds[currentIndex];
int pointIndex = currentIndex * 3;
float maxAabbSize = max(max(
pAABBMaxPoint[currBatchId*3] - pAABBMinPoint[currBatchId*3],
pAABBMaxPoint[currBatchId*3+1] - pAABBMinPoint[currBatchId*3+1]),
pAABBMaxPoint[currBatchId*3+2] - pAABBMinPoint[currBatchId*3+2]);
float cellSize = maxAabbSize/(float)pNumCells;
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float centralCoords[3] = {pPoints[pointIndex], pPoints[pointIndex+1], pPoints[pointIndex+2]};
int xCell = max(min((int)floor((centralCoords[0] - pAABBMinPoint[currBatchId*3])/cellSize), pNumCells -1), 0);
int yCell = max(min((int)floor((centralCoords[1] - pAABBMinPoint[currBatchId*3+1])/cellSize), pNumCells -1), 0);
int zCell = max(min((int)floor((centralCoords[2] - pAABBMinPoint[currBatchId*3+2])/cellSize), pNumCells -1), 0);
int neighborIter = 0;
for(int i = 0; i < 27; ++i)
{
int currCellIndex[3] = {xCell+cellOffsets[i][0], yCell+cellOffsets[i][1], zCell+cellOffsets[i][2]};
if(currCellIndex[0] >= 0 && currCellIndex[0] < pNumCells &&
currCellIndex[1] >= 0 && currCellIndex[1] < pNumCells &&
currCellIndex[2] >= 0 && currCellIndex[2] < pNumCells)
{
int cellIndexFlat = currBatchId*pNumCells*pNumCells*pNumCells + currCellIndex[0]*pNumCells*pNumCells + currCellIndex[1]*pNumCells + currCellIndex[2];
int initIndex = pCellIndexs[cellIndexFlat*2];
int endIndex = pCellIndexs[cellIndexFlat*2 + 1];
for(int j = initIndex; j < endIndex; ++j)
{
int currPointIndex = j * 3;
float currentCoords[3] = {pPoints2[currPointIndex], pPoints2[currPointIndex+1], pPoints2[currPointIndex+2]};
float diffVector[3] = {currentCoords[0] - centralCoords[0], currentCoords[1] - centralCoords[1], currentCoords[2] - centralCoords[2]};
float pointDist = sqrt(diffVector[0]*diffVector[0] + diffVector[1]*diffVector[1] + diffVector[2]*diffVector[2]);
if(pointDist < scaledRadius){
neighborIter++;
}
}
}
}
pOutNeigbors[currentIndex] = neighborIter;
atomicAdd(&blockTotalNeighbors, neighborIter);
}
__syncthreads();
if(threadIdx.x == 0){
atomicAdd(&pOutNumNeigbors[0], blockTotalNeighbors);
}
}
/**
* Method to compute the offsets in the neighboring list.
* @param pNumOffsets Number of offsets.
 * @param pNumOffsets2 Number of offsets in the second-level buffer.
* @param pOutNeighborsOffsets List with the offsets of each block.
* @param pOutNeighborsOffsets2 List with the offsets of each block of blocks.
*/
__global__ void computeOffsets(
const bool pStep1,
const int pNumOffsets,
const int pNumOffsets2,
int* __restrict__ pOutNeighborsOffsets,
int* __restrict__ pOutNeighborsOffsets2)
{
__shared__ int groupOffsets[POINT_BLOCK_PACK_SIZE];
//Get the local and global counter.
int currCounter = threadIdx.x;
int currGlobalCounter = threadIdx.x + blockIdx.x * blockDim.x;
//Update the shared memory.
if(currGlobalCounter < pNumOffsets)
groupOffsets[currCounter] = pOutNeighborsOffsets[currGlobalCounter];
else
groupOffsets[currCounter] = 0;
//SIMD scan.
for(int i = 1; i <= POINT_BLOCK_PACK_SIZE/2; i*=2)
{
__syncthreads();
//Get the values of the pass.
int currIndex = currCounter + i;
int value1 = 0;
int value2 = 0;
if(currIndex < POINT_BLOCK_PACK_SIZE){
value1 = groupOffsets[currCounter];
value2 = groupOffsets[currIndex];
}
__syncthreads();
//Update with the new value.
if(currIndex < POINT_BLOCK_PACK_SIZE)
groupOffsets[currIndex] = value1 + value2;
}
__syncthreads();
//Save the counter into global memory.
if(currGlobalCounter < pNumOffsets){
if(currCounter > 0)
pOutNeighborsOffsets[currGlobalCounter] = groupOffsets[currCounter-1];
else
pOutNeighborsOffsets[currGlobalCounter] = 0;
}
if(pStep1){
//Update the offsets buffer.
if(currCounter == (POINT_BLOCK_PACK_SIZE-1) && blockIdx.x < pNumOffsets2)
pOutNeighborsOffsets2[blockIdx.x] = groupOffsets[POINT_BLOCK_PACK_SIZE-1];
}else{
//Update the second level offset buffer.
if(currCounter > blockIdx.x && currCounter < pNumOffsets2){
atomicAdd(&pOutNeighborsOffsets2[currCounter], groupOffsets[POINT_BLOCK_PACK_SIZE-1]);
}
}
}
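/**
 * Reference sketch (assumption, for clarity only): computeOffsets above performs a
 * block-wise exclusive prefix sum over the per-point neighbor counts; this is the
 * equivalent sequential host code.
 */
inline void exclusivePrefixSumReference(const int* pCounts, int* pOutOffsets, const int pNum)
{
    int running = 0;
    for(int i = 0; i < pNum; ++i){
        pOutOffsets[i] = running;   // start index of point i in the packed neighbor list
        running += pCounts[i];      // running total; per-block totals feed the second-level buffer
    }
}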
/**
* Method to find the neighboring points for each point.
* @param pNumPoints Number of points.
* @param pNumCells Number of cells of the grid.
 * @param pAABBMinPoint Minimum point of the grid (3 components).
 * @param pAABBMaxPoint Maximum point of the grid (3 components).
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pPoints2 List of points from where to find neighbors.
 * @param pCellIndexs Indices of the grid cells.
 * @param pStartIndexsOffset List with the first-level offsets to the start indices.
 * @param pStartIndexsOffset2 List with the second-level offsets to the start indices.
* @param pStartIndexs Input/Output parameter with the list of the starting indices in the neighboring list.
 * @param pOutNeigbors Output parameter with the list of neighbors of each point.
*/
__global__ void findNeighbors(
const bool pScaleInv,
const int pNumPoints,
const int pNumCells,
const int pNumNeighbors,
const float pRadius,
const float* __restrict__ pAABBMinPoint,
const float* __restrict__ pAABBMaxPoint,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const float* __restrict__ pPoints2,
const int* __restrict__ pCellIndexs,
const int* __restrict__ pStartIndexsOffset,
const int* __restrict__ pStartIndexsOffset2,
int* __restrict__ pStartIndexs,
int* __restrict__ pOutNeigbors)
{
int currentIndex = threadIdx.x + blockIdx.x * blockDim.x;
if(currentIndex < pNumPoints){
int currBatchId = pBatchIds[currentIndex];
int pointIndex = currentIndex * 3;
int offsetIndex = currentIndex/POINT_BLOCK_PACK_SIZE;
int globalOffsetIndex = offsetIndex/POINT_BLOCK_PACK_SIZE;
int neighborIndex = pStartIndexs[currentIndex]+pStartIndexsOffset[offsetIndex]+pStartIndexsOffset2[globalOffsetIndex];
pStartIndexs[currentIndex] = neighborIndex;
float maxAabbSize = max(max(
pAABBMaxPoint[currBatchId*3] - pAABBMinPoint[currBatchId*3],
pAABBMaxPoint[currBatchId*3+1] - pAABBMinPoint[currBatchId*3+1]),
pAABBMaxPoint[currBatchId*3+2] - pAABBMinPoint[currBatchId*3+2]);
float cellSize = maxAabbSize/(float)pNumCells;
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float centralCoords[3] = {pPoints[pointIndex], pPoints[pointIndex+1], pPoints[pointIndex+2]};
int xCell = max(min((int)floor((centralCoords[0] - pAABBMinPoint[currBatchId*3])/cellSize), pNumCells -1), 0);
int yCell = max(min((int)floor((centralCoords[1] - pAABBMinPoint[currBatchId*3+1])/cellSize), pNumCells -1), 0);
int zCell = max(min((int)floor((centralCoords[2] - pAABBMinPoint[currBatchId*3+2])/cellSize), pNumCells -1), 0);
int neighborIter = 0;
for(int i = 0; i < 27; ++i)
{
int currCellIndex[3] = {xCell+cellOffsets[i][0], yCell+cellOffsets[i][1], zCell+cellOffsets[i][2]};
if(currCellIndex[0] >= 0 && currCellIndex[0] < pNumCells &&
currCellIndex[1] >= 0 && currCellIndex[1] < pNumCells &&
currCellIndex[2] >= 0 && currCellIndex[2] < pNumCells)
{
int cellIndexFlat = currBatchId*pNumCells*pNumCells*pNumCells + currCellIndex[0]*pNumCells*pNumCells + currCellIndex[1]*pNumCells + currCellIndex[2];
int initIndex = pCellIndexs[cellIndexFlat*2];
int endIndex = pCellIndexs[cellIndexFlat*2 + 1];
for(int j = initIndex; j < endIndex; ++j)
{
int currPointIndex = j * 3;
float currentCoords[3] = {pPoints2[currPointIndex], pPoints2[currPointIndex+1], pPoints2[currPointIndex+2]};
float diffVector[3] = {currentCoords[0] - centralCoords[0], currentCoords[1] - centralCoords[1], currentCoords[2] - centralCoords[2]};
float pointDist = sqrt(diffVector[0]*diffVector[0] + diffVector[1]*diffVector[1] + diffVector[2]*diffVector[2]);
if(pointDist < scaledRadius){
pOutNeigbors[neighborIndex*2 + neighborIter] = j;
pOutNeigbors[neighborIndex*2 + neighborIter + 1] = currentIndex;
neighborIter+=2;
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////// CPU
unsigned int countNeighborsCPU(
const bool pScaleInv,
const int pNumPoints,
const int pNumCells,
const float pRadius,
const float* pInPts,
const int* pInBatchIds,
const float* pInPts2,
const int* pCellIndexs,
const float* pAABBMin,
const float* pAABBMax,
int* pStartIndex)
{
//Init device symbols.
int cellOffsetsCPU[27][3] = {
{1, 1, 1},{0, 1, 1},{-1, 1, 1},
{1, 0, 1},{0, 0, 1},{-1, 0, 1},
{1, -1, 1},{0, -1, 1},{-1, -1, 1},
{1, 1, 0},{0, 1, 0},{-1, 1, 0},
{1, 0, 0},{0, 0, 0},{-1, 0, 0},
{1, -1, 0},{0, -1, 0},{-1, -1, 0},
{1, 1, -1},{0, 1, -1},{-1, 1, -1},
{1, 0, -1},{0, 0, -1},{-1, 0, -1},
{1, -1, -1},{0, -1, -1},{-1, -1, -1}};
cudaMemcpyToSymbol(cellOffsets, cellOffsetsCPU, 27*3*sizeof(int));
int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE;
numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0;
//Find the neighbors for each point.
int* totalNeighbors;
gpuErrchk(cudaMalloc(&totalNeighbors, sizeof(int)));
cudaMemset(totalNeighbors, 0, sizeof(int));
countNeighbors<<<numBlocksPoints, POINT_BLOCK_SIZE>>>(pScaleInv, pNumPoints, pNumCells,
pRadius, pAABBMin, pAABBMax, pInPts, pInBatchIds, pInPts2, pCellIndexs, pStartIndex, totalNeighbors);
gpuErrchk(cudaPeekAtLastError());
int totalNeighborsCPU = 0;
cudaMemcpy(&totalNeighborsCPU, totalNeighbors, sizeof(int), cudaMemcpyDeviceToHost);
gpuErrchk(cudaFree(totalNeighbors));
#ifdef PRINT_CONV_INFO
printf("Forward Num points: %d | Neighbors: %d\n", pNumPoints, totalNeighborsCPU);
#endif
return totalNeighborsCPU;
}
void computeAuxiliarBuffersSize(
const int pNumPoints,
int* PBufferSize1,
int* PBufferSize2)
{
(*PBufferSize1) = pNumPoints/POINT_BLOCK_PACK_SIZE;
(*PBufferSize1) += (pNumPoints%POINT_BLOCK_PACK_SIZE != 0)?1:0;
(*PBufferSize2) = (*PBufferSize1)/POINT_BLOCK_PACK_SIZE;
(*PBufferSize2) += ((*PBufferSize1)%POINT_BLOCK_PACK_SIZE != 0)?1:0;
}
void packNeighborsCPU(
const bool pScaleInv,
const int pNumPoints,
const int pNumNeighbors,
const int pNumCells,
const float pRadius,
const float* pInPts,
const int* pInBatchIds,
const float* pInPts2,
const int* pCellIndexs,
const float* pAABBMin,
const float* pAABBMax,
int* pAuxBuffOffsets,
int* pAuxBuffOffsets2,
int* pStartIndexs,
int* pPackedIndexs)
{
    //Pack the indices of the neighbors.
int numBlocksPointsPack = pNumPoints/POINT_BLOCK_PACK_SIZE;
numBlocksPointsPack += (pNumPoints%POINT_BLOCK_PACK_SIZE != 0)?1:0;
int numBlocksPointsPack2 = numBlocksPointsPack/POINT_BLOCK_PACK_SIZE;
numBlocksPointsPack2 += (numBlocksPointsPack%POINT_BLOCK_PACK_SIZE != 0)?1:0;
gpuErrchk(cudaMemset(pAuxBuffOffsets, 0, sizeof(int)*numBlocksPointsPack));
gpuErrchk(cudaMemset(pAuxBuffOffsets2, 0, sizeof(int)*numBlocksPointsPack2));
computeOffsets<<<numBlocksPointsPack, POINT_BLOCK_PACK_SIZE>>>(true, pNumPoints, numBlocksPointsPack, pStartIndexs, pAuxBuffOffsets);
gpuErrchk(cudaPeekAtLastError());
computeOffsets<<<numBlocksPointsPack2, POINT_BLOCK_PACK_SIZE>>>(false, numBlocksPointsPack, numBlocksPointsPack2, pAuxBuffOffsets, pAuxBuffOffsets2);
gpuErrchk(cudaPeekAtLastError());
int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE;
numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0;
findNeighbors<<<numBlocksPoints,POINT_BLOCK_SIZE>>>(pScaleInv, pNumPoints, pNumCells, pNumNeighbors, pRadius, pAABBMin, pAABBMax,
pInPts, pInBatchIds, pInPts2, pCellIndexs, pAuxBuffOffsets, pAuxBuffOffsets2, pStartIndexs, pPackedIndexs);
gpuErrchk(cudaPeekAtLastError());
}
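/**
 * Usage sketch (assumption, not part of the original file): how the entry points above
 * are intended to be chained. All device buffers are hypothetical, caller-provided
 * pointers; pAuxOffsets/pAuxOffsets2 are sized with computeAuxiliarBuffersSize and
 * pPackedIndexs must hold 2 * totalNeighbors ints (neighbor index, query index pairs).
 */
void findNeighborsSketch(
    const bool pScaleInv,
    const int pNumPoints,
    const int pNumCells,
    const float pRadius,
    const float* pInPts,
    const int* pInBatchIds,
    const float* pInPts2,
    const int* pCellIndexs,
    const float* pAABBMin,
    const float* pAABBMax,
    int* pStartIndexs,
    int* pAuxOffsets,
    int* pAuxOffsets2,
    int* pPackedIndexs)
{
    //Count the neighbors of each point and get the total count.
    unsigned int totalNeighbors = countNeighborsCPU(pScaleInv, pNumPoints, pNumCells, pRadius,
        pInPts, pInBatchIds, pInPts2, pCellIndexs, pAABBMin, pAABBMax, pStartIndexs);
    //Scan the counts and write the packed neighbor list.
    packNeighborsCPU(pScaleInv, pNumPoints, (int)totalNeighbors, pNumCells, pRadius,
        pInPts, pInBatchIds, pInPts2, pCellIndexs, pAABBMin, pAABBMax,
        pAuxOffsets, pAuxOffsets2, pStartIndexs, pPackedIndexs);
}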
|
the_stack
|
using namespace cv::cuda;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
//// Non Local Means Denoising
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm2(const float& v) { return v*v; }
__device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; }
__device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; }
__device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; }
template<typename T, typename B>
__global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
const int i = blockDim.y * blockIdx.y + threadIdx.y;
const int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j >= dst.cols || i >= dst.rows)
return;
int bsize = search_radius + block_radius;
int search_window = 2 * search_radius + 1;
float minus_search_window2_inv = -1.f/(search_window * search_window);
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0.f;
if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows)
{
for(float y = -search_radius; y <= search_radius; ++y)
for(float x = -search_radius; x <= search_radius; ++x)
{
float dist2 = 0;
for(float ty = -block_radius; ty <= block_radius; ++ty)
for(float tx = -block_radius; tx <= block_radius; ++tx)
{
value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx));
value_type av = saturate_cast<value_type>(src(i + ty, j + tx));
dist2 += norm2(av - bv);
}
float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv);
/*if (i == 255 && j == 255)
printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/
sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x));
sum2 += w;
}
}
else
{
for(float y = -search_radius; y <= search_radius; ++y)
for(float x = -search_radius; x <= search_radius; ++x)
{
float dist2 = 0;
for(float ty = -block_radius; ty <= block_radius; ++ty)
for(float tx = -block_radius; tx <= block_radius; ++tx)
{
value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src));
value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src));
dist2 += norm2(av - bv);
}
float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv);
sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src));
sum2 += w;
}
}
dst(i, j) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
int block_window = 2 * block_radius + 1;
float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn);
float noise_mult = minus_h2_inv/(block_window * block_window);
cudaSafeCall( cudaFuncSetCacheConfig (nlm_kernel<T, B<T> >, cudaFuncCachePreferL1) );
nlm_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template<typename T>
void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream)
{
typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream);
static func_t funcs[] =
{
nlm_caller<T, BrdConstant>,
nlm_caller<T, BrdReplicate>,
nlm_caller<T, BrdReflect>,
nlm_caller<T, BrdWrap>,
nlm_caller<T, BrdReflect101>
};
funcs[borderMode](src, dst, search_radius, block_radius, h, stream);
}
template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
}
}}}
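//////////////////////////////////////////////////////////////////////////////////
//// Reference sketch (assumption, not part of OpenCV): the weight computed by
//// nlm_kernel above, written out on the host for clarity. noise_mult and
//// minus_search_window2_inv are the same precomputed constants as in nlm_caller:
//// noise_mult = -1 / (h*h*cn * block_window^2), minus_search_window2_inv = -1 / search_window^2.
static inline float nlm_weight_reference(float patch_dist2, float dx, float dy,
                                         float noise_mult, float minus_search_window2_inv)
{
    // w = exp(-patch_dist2 / (h^2*cn*bw^2) - (dx^2 + dy^2) / sw^2)
    return expf(patch_dist2 * noise_mult + (dx * dx + dy * dy) * minus_search_window2_inv);
}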
//////////////////////////////////////////////////////////////////////////////////
//// Non Local Means Denoising (fast approximate version)
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
template <int cn> struct Unroll;
template <> struct Unroll<1>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&> tie(float& val1, float& val2)
{
return thrust::tie(val1, val2);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op);
}
};
template <> struct Unroll<2>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&> tie(float& val1, float2& val2)
{
return thrust::tie(val1, val2.x, val2.y);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op);
}
};
template <> struct Unroll<3>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&> tie(float& val1, float3& val2)
{
return thrust::tie(val1, val2.x, val2.y, val2.z);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op, op);
}
};
template <> struct Unroll<4>
{
template <int BLOCK_SIZE>
static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem)
{
return cv::cuda::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE, smem + 4 * BLOCK_SIZE);
}
static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&, float&> tie(float& val1, float4& val2)
{
return thrust::tie(val1, val2.x, val2.y, val2.z, val2.w);
}
static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float>, plus<float> > op()
{
plus<float> op;
return thrust::make_tuple(op, op, op, op, op);
}
};
__device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); }
__device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); }
__device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); }
template <class T> struct FastNonLocalMeans
{
enum
{
CTA_SIZE = 128,
TILE_COLS = 128,
TILE_ROWS = 32,
STRIDE = CTA_SIZE
};
struct plus
{
__device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; }
};
int search_radius;
int block_radius;
int search_window;
int block_window;
float minus_h2_inv;
FastNonLocalMeans(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2),
search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {}
PtrStep<T> src;
mutable PtrStepi buffer;
__device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
dist_sums[index] = 0;
for(int tx = 0; tx < block_window; ++tx)
col_sums(tx, index) = 0;
int y = index / search_window;
int x = index - y * search_window;
int ay = i;
int ax = j;
int by = i + y - search_radius;
int bx = j + x - search_radius;
#if 1
for (int tx = -block_radius; tx <= block_radius; ++tx)
{
int col_sum = 0;
for (int ty = -block_radius; ty <= block_radius; ++ty)
{
int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx));
dist_sums[index] += dist;
col_sum += dist;
}
col_sums(tx + block_radius, index) = col_sum;
}
#else
for (int ty = -block_radius; ty <= block_radius; ++ty)
for (int tx = -block_radius; tx <= block_radius; ++tx)
{
int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx));
dist_sums[index] += dist;
col_sums(tx + block_radius, index) += dist;
}
#endif
up_col_sums(j, index) = col_sums(block_window - 1, index);
}
}
__device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
int ay = i;
int ax = j + block_radius;
int by = i + y - search_radius;
int bx = j + x - search_radius + block_radius;
int col_sum = 0;
for (int ty = -block_radius; ty <= block_radius; ++ty)
col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx));
dist_sums[index] += col_sum - col_sums(first, index);
col_sums(first, index) = col_sum;
up_col_sums(j, index) = col_sum;
}
}
__device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
{
int ay = i;
int ax = j + block_radius;
T a_up = src(ay - block_radius - 1, ax);
T a_down = src(ay + block_radius, ax);
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
int by = i + y - search_radius;
int bx = j + x - search_radius + block_radius;
T b_up = src(by - block_radius - 1, bx);
T b_down = src(by + block_radius, bx);
int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up);
dist_sums[index] += col_sum - col_sums(first, index);
col_sums(first, index) = col_sum;
up_col_sums(j, index) = col_sum;
}
}
__device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, T& dst) const
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type;
float weights_sum = 0;
sum_type sum = VecTraits<sum_type>::all(0);
float bw2_inv = 1.f/(block_window * block_window);
int sx = j - search_radius;
int sy = i - search_radius;
for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
{
int y = index / search_window;
int x = index - y * search_window;
float avg_dist = dist_sums[index] * bw2_inv;
float weight = __expf(avg_dist * minus_h2_inv);
weights_sum += weight;
sum = sum + weight * saturate_cast<sum_type>(src(sy + y, sx + x));
}
__shared__ float cta_buffer[CTA_SIZE * (VecTraits<T>::cn + 1)];
reduce<CTA_SIZE>(Unroll<VecTraits<T>::cn>::template smem_tuple<CTA_SIZE>(cta_buffer),
Unroll<VecTraits<T>::cn>::tie(weights_sum, sum),
threadIdx.x,
Unroll<VecTraits<T>::cn>::op());
if (threadIdx.x == 0)
dst = saturate_cast<T>(sum / weights_sum);
}
__device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const
{
int tbx = blockIdx.x * TILE_COLS;
int tby = blockIdx.y * TILE_ROWS;
int tex = ::min(tbx + TILE_COLS, dst.cols);
int tey = ::min(tby + TILE_ROWS, dst.rows);
PtrStepi col_sums;
col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window;
col_sums.step = buffer.step;
PtrStepi up_col_sums;
up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window;
up_col_sums.step = buffer.step;
extern __shared__ int dist_sums[]; //search_window * search_window
int first = 0;
for (int i = tby; i < tey; ++i)
for (int j = tbx; j < tex; ++j)
{
__syncthreads();
if (j == tbx)
{
initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums);
first = 0;
}
else
{
if (i == tby)
shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums);
else
shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums);
first = (first + 1) % block_window;
}
__syncthreads();
convolve_window(i, j, dist_sums, dst(i, j));
}
}
};
template<typename T>
__global__ void fast_nlm_kernel(const FastNonLocalMeans<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); }
void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows)
{
typedef FastNonLocalMeans<uchar> FNLM;
dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS));
buffer_cols = search_window * search_window * grid.y;
buffer_rows = src.cols + block_window * grid.x;
}
template<typename T>
void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer,
int search_window, int block_window, float h, cudaStream_t stream)
{
typedef FastNonLocalMeans<T> FNLM;
FNLM fnlm(search_window, block_window, h);
fnlm.src = (PtrStepSz<T>)src;
fnlm.buffer = buffer;
dim3 block(FNLM::CTA_SIZE, 1);
dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS));
int smem = search_window * search_window * sizeof(int);
fast_nlm_kernel<<<grid, block, smem>>>(fnlm, (PtrStepSz<T>)dst);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t);
template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t);
template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t);
__global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < lab.cols && y < lab.rows)
{
uchar3 p = lab(y, x);
ab(y,x) = make_uchar2(p.y, p.z);
l(y,x) = p.x;
}
}
void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, cudaStream_t stream)
{
dim3 b(32, 8);
dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y));
fnlm_split_kernel<<<g, b>>>(lab, l, ab);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < lab.cols && y < lab.rows)
{
uchar2 p = ab(y, x);
lab(y, x) = make_uchar3(l(y, x), p.x, p.y);
}
}
void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, cudaStream_t stream)
{
dim3 b(32, 8);
dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y));
fnlm_merge_kernel<<<g, b>>>(l, ab, lab);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
}}}
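//////////////////////////////////////////////////////////////////////////////////
//// Usage sketch (assumption, not part of OpenCV): how a caller could size and
//// allocate the intermediate buffer for nlm_fast_gpu() using nln_fast_get_buffer_size()
//// above. The real wrapper lives in the cudaimgproc module and may differ.
////
////     int buf_cols = 0, buf_rows = 0;
////     cv::cuda::device::imgproc::nln_fast_get_buffer_size(src, search_window, block_window, buf_cols, buf_rows);
////     cv::cuda::GpuMat buffer(buf_rows, buf_cols, CV_32S);
////     cv::cuda::device::imgproc::nlm_fast_gpu<uchar3>(src, dst, buffer, search_window, block_window, h, stream);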
|
the_stack
|
void THNN_(SpatialDepthwiseConvolution_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, input, output, weight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4);
// We assume that the input and weight Tensors are shaped properly by
// the caller, so we verify that here to some extent
// Weight Tensor is shape (output_channels, 1, kH, kW)
THAssert(weight->size(1) == 1);
// Input Tensor is shape (N, input_channels, H, W)
// We verify that the # of output_channels is a multiple of input_channels
THAssert(weight->size(0) % input->size(1) == 0);
// Bias has same # of channels as output
if (bias) {
THAssert(THTensor_sizeLegacyNoScalars(bias, 0) == weight->size(0));
}
input = THCTensor_(newContiguous)(state, input);
weight = THCTensor_(newContiguous)(state, weight);
bias = bias ? THCTensor_(newContiguous)(state, bias) : bias;
// Following the behavior of other THCUNN functions, we shape the output
// Tensor ourselves
int batchSize = input->size(0);
int height = input->size(2);
int width = input->size(3);
int outputHeight = (height + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
int outputWidth = (width + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int outputChannels = weight->size(0);
THCTensor_(resize4d)(state, output, batchSize, outputChannels, outputHeight, outputWidth);
// Create THCDeviceTensor
// Kernel currently relies upon all the Tensors to be contiguous, but we made
// them contiguous above
THCDeviceTensor<scalar_t, 4> dInput = toDeviceTensor<scalar_t, 4>(state, input);
THCDeviceTensor<scalar_t, 4> dWeight = toDeviceTensor<scalar_t, 4>(state, weight);
THCDeviceTensor<scalar_t, 4> dOutput = toDeviceTensor<scalar_t, 4>(state, output);
THCDeviceTensor<scalar_t, 1> dBias;
if (bias) {
dBias = toDeviceTensor<scalar_t, 1>(state, bias);
}
int inputChannels = input->size(1);
int depthwiseMultiplier = outputChannels / inputChannels;
// One thread per output value
int64_t n = THCTensor_(nElement)(state, output);
int blocks = GET_BLOCKS(n);
dim3 grid(blocks);
dim3 block(CUDA_NUM_THREADS);
if (kW == 3 && kH == 3) {
spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 3><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (kW == 1 && kH == 1) {
spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 1><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateOutput<scalar_t, accreal, unsigned int, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
if (bias) THCTensor_(free)(state, bias);
}
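// Reference sketch (assumption, not part of the original file): the output spatial
// size formula used above, factored into a host helper for clarity.
static inline int convOutputSizeReference(int inputSize, int kernelSize,
                                          int stride, int pad, int dilation)
{
  // effective kernel extent is dilation*(k-1)+1; standard floor convolution arithmetic
  return (inputSize + 2 * pad - (dilation * (kernelSize - 1) + 1)) / stride + 1;
}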
void THNN_(SpatialDepthwiseConvolution_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, gradOutput, gradInput, weight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4);
THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4);
// Minimal shape checking, as above
// Same # of elements in batch
THAssert(input->size(0) == gradOutput->size(0));
// Same # of filters as outputChannels
THAssert(weight->size(0) == gradOutput->size(1));
weight = THCTensor_(newContiguous)(state, weight);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
// Resize GradInput
THCTensor_(resizeAs)(state, gradInput, input);
int inputChannels = input->size(1);
int height = input->size(2);
int width = input->size(3);
int outputChannels = gradOutput->size(1);
int outputHeight = gradOutput->size(2);
int outputWidth = gradOutput->size(3);
int depthwiseMultiplier = outputChannels / inputChannels;
THCDeviceTensor<scalar_t, 4> dGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput);
THCDeviceTensor<scalar_t, 4> dGradInput = toDeviceTensor<scalar_t, 4>(state, gradInput);
THCDeviceTensor<scalar_t, 4> dWeight = toDeviceTensor<scalar_t, 4>(state, weight);
// Kernel currently relies upon all the Tensors to be contiguous
THAssert(dGradOutput.isContiguous());
THAssert(dGradInput.isContiguous());
THAssert(dWeight.isContiguous());
// One thread per gradInput value
int64_t n = THCTensor_(nElement)(state, gradInput);
int blocks = GET_BLOCKS(n);
dim3 grid(blocks);
dim3 block(CUDA_NUM_THREADS);
if (kW == 3 && kH == 3)
if (dW == 1 && dH == 1){
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 1><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 2><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 3, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
else if (kW == 1 && kH == 1)
if (dW == 1 && dH == 1){
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 1><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 2><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 1, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
} else {
if (dW == 1 && dH == 1){
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 1><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 2><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateGradInput<scalar_t, accreal, unsigned int, 0, 0><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, weight);
THCTensor_(free)(state, gradOutput);
}
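// A minimal, standalone sketch (illustration only, not part of THCUNN) of the static-dispatch
// pattern used above: the kernel is templated on the filter size so its inner loop can be
// unrolled at compile time, and the host switches on the runtime value to pick a
// specialization, with <0> acting as the generic fallback. All names here are placeholders.
template <int KSize>
__global__ void demoStaticDispatchKernel(const float* in, float* out, int n, int kSize) {
  const int k = (KSize > 0) ? KSize : kSize;      // KSize == 0 means "use the runtime value"
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float acc = 0.f;
  for (int j = 0; j < k; ++j)                     // fully unrolled when KSize is a constant
    acc += in[(i + j) % n];
  out[i] = acc;
}
static void demoStaticDispatchLaunch(const float* in, float* out, int n, int kSize, cudaStream_t stream) {
  dim3 block(256);
  dim3 grid((n + 255) / 256);
  if (kSize == 3)      demoStaticDispatchKernel<3><<<grid, block, 0, stream>>>(in, out, n, kSize);
  else if (kSize == 1) demoStaticDispatchKernel<1><<<grid, block, 0, stream>>>(in, out, n, kSize);
  else                 demoStaticDispatchKernel<0><<<grid, block, 0, stream>>>(in, out, n, kSize);
}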
void THNN_(SpatialDepthwiseConvolution_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradWeight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4);
THAssert(!gradWeight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradWeight) == 4);
// Minimal shape checking as above
// Same # of elements in batch
THAssert(input->size(0) == gradOutput->size(0));
// Same # of filters as outputChannels
THAssert(gradWeight->size(0) == gradOutput->size(1));
int batchSize = input->size(0);
int inputChannels = input->size(1);
int height = input->size(2);
int width = input->size(3);
int outputChannels = gradOutput->size(1);
int outputHeight = gradOutput->size(2);
int outputWidth = gradOutput->size(3);
int depthwiseMultiplier = outputChannels / inputChannels;
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCDeviceTensor<scalar_t, 4> dGradOutput = toDeviceTensor<scalar_t, 4>(state, gradOutput);
THCDeviceTensor<scalar_t, 4> dInput = toDeviceTensor<scalar_t, 4>(state, input);
THCDeviceTensor<scalar_t, 4> dGradWeight = toDeviceTensor<scalar_t, 4>(state, gradWeight);
// Kernel currently relies upon all the Tensors to be contiguous
THAssert(dGradOutput.isContiguous());
THAssert(dInput.isContiguous());
THAssert(dGradWeight.isContiguous());
// We parallelize so that each block computes a single value in gradWeight
int blocks = outputChannels * kH * kW;
// Make sure we have enough threads to perform the reduction, and use this number
// to create the shared memory size for the reduction
dim3 grid(blocks);
dim3 block(getGradParamsNumThreads(batchSize));
int smem = block.x * sizeof(accreal);
spatialDepthwiseConvolutionAccGradParameters<scalar_t, accreal, unsigned int><<<grid, block, smem, c10::cuda::getCurrentCUDAStream()>>>(
dGradOutput, dInput, dGradWeight, batchSize, inputChannels, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradOutput);
}
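// A minimal, standalone sketch (illustration only) of the reduction scheme implied by the launch
// above: one block per output value, blockDim.x threads accumulate partial sums, and
// blockDim.x * sizeof(accumulator) bytes of dynamic shared memory back a tree reduction.
// Assumes blockDim.x is a power of two; all names are placeholders.
__global__ void demoBlockReduceSum(const float* in, float* out, int elemsPerOutput) {
  extern __shared__ float smem[];
  float sum = 0.f;
  // each thread accumulates a strided slice of this block's input segment
  for (int i = threadIdx.x; i < elemsPerOutput; i += blockDim.x)
    sum += in[blockIdx.x * elemsPerOutput + i];
  smem[threadIdx.x] = sum;
  __syncthreads();
  // power-of-two tree reduction in shared memory
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) smem[threadIdx.x] += smem[threadIdx.x + s];
    __syncthreads();
  }
  if (threadIdx.x == 0) out[blockIdx.x] = smem[0];
}
// Launch shape mirroring the call above:
//   demoBlockReduceSum<<<numOutputs, threads, threads * sizeof(float), stream>>>(in, out, elems);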
#endif
#include <cuda.h>
#include <torch/torch.h>
#include <torch/extension.h>
namespace {
__device__
float calc_single_iou(const float4 b1, const float4 b2) {
// box corners: (l, t) = (x, y), (r, b) = (z, w)
float l = max(b1.x, b2.x);
float t = max(b1.y, b2.y);
float r = min(b1.z, b2.z);
float b = min(b1.w, b2.w);
float first = (r - l);
first = (first < 0) ? 0 : first;
float second = (b - t);
second = (second < 0) ? 0 : second;
float intersection = first * second;
float area1 = (b1.w - b1.y) * (b1.z - b1.x);
float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
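// Host-side reference of the same IoU formula, kept as a hedged sanity-check sketch
// (calc_single_iou itself is __device__-only). For boxes (0,0,2,2) and (1,1,3,3) the
// intersection is 1*1 = 1 and the union is 4 + 4 - 1 = 7, so the IoU is 1/7 ~= 0.143.
inline float calc_single_iou_host(const float4 b1, const float4 b2) {
  float l = fmaxf(b1.x, b2.x);
  float t = fmaxf(b1.y, b2.y);
  float r = fminf(b1.z, b2.z);
  float b = fminf(b1.w, b2.w);
  float inter = fmaxf(r - l, 0.f) * fmaxf(b - t, 0.f);
  float area1 = (b1.w - b1.y) * (b1.z - b1.x);
  float area2 = (b2.w - b2.y) * (b2.z - b2.x);
  return inter / (area1 + area2 - inter);
}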
// Choose whether or not to delete a box
// return 1 to delete, 0 to keep
__device__
uint8_t masked_iou(const float4 box1,
const float4 box2,
const uint8_t box2_deleted,
const float criteria) {
// if box2 isn't already deleted, calculate IoU
if (box2_deleted == 1) return 1;
float iou = calc_single_iou(box1, box2);
// if iou < criteria, keep otherwise delete
return (iou < criteria) ? 0 : 1;
}
// Based on what has been deleted, get the first non-deleted index
// and the count of non-deleted values
__device__
void get_current_num_idx(const uint8_t *deleted,
const int num_to_consider,
int *first_non_deleted,
int *remaining) {
// Naive serial scan: every thread redundantly walks the whole deleted mask.
// TODO: replace with a proper parallel reduction (a hedged sketch follows this function).
int first = INT_MAX;
int count = 0;
for (int i = 0; i < num_to_consider; ++i) {
// if element is deleted, ignore
if (deleted[i] == 0) {
first = (i < first) ? i : first;
count++;
}
}
*first_non_deleted = first;
*remaining = count;
}
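// Hedged sketch of the parallel alternative referenced by the TODO above: each thread scans a
// strided slice of the deleted mask, then a shared-memory tree reduction merges the per-thread
// (min index, count) pairs and broadcasts the result to every thread, so the signature stays
// drop-in compatible. Illustration only (not called by nms_kernel); assumes blockDim.x is a
// power of two and <= 256 (the host launch below uses 64 threads).
__device__
void get_current_num_idx_parallel(const uint8_t *deleted,
                                  const int num_to_consider,
                                  int *first_non_deleted,
                                  int *remaining) {
  __shared__ int s_first[256];
  __shared__ int s_count[256];
  int first = INT_MAX;
  int count = 0;
  for (int i = threadIdx.x; i < num_to_consider; i += blockDim.x) {
    if (deleted[i] == 0) {
      first = (i < first) ? i : first;
      count++;
    }
  }
  s_first[threadIdx.x] = first;
  s_count[threadIdx.x] = count;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) {
      s_first[threadIdx.x] = min(s_first[threadIdx.x], s_first[threadIdx.x + s]);
      s_count[threadIdx.x] += s_count[threadIdx.x + s];
    }
    __syncthreads();
  }
  *first_non_deleted = s_first[0];
  *remaining = s_count[0];
  // guard against a later call overwriting shmem before every thread has read the result
  __syncthreads();
}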
__global__
void nms_kernel(const int N,
const int num_classes,
const int *score_offsets,
const float *scores,
const long *score_idx,
const float4 *bboxes,
const float criteria, // IoU threshold
const int max_num, // maximum number of candidate boxes to use
uint8_t *deleted, // assume initialised to false for all values
long *num_candidates_out, // number of outputs for this class
float *score_out, // output scores
float4 *bboxes_out, // output bboxes
long *labels_out) { // output labels
// launch one block per class for now
// Ignore class 0 (background) by starting at 1
const int cls = blockIdx.x + 1;
// offsets into scores and their indices
const int offset_start = score_offsets[cls];
const int offset_end = score_offsets[cls+1];
const int num_scores = offset_end - offset_start;
// alias into local scores, indices and deleted buffers
const float *local_scores = &scores[offset_start];
const long *local_indices = &score_idx[offset_start];
uint8_t *local_deleted = &deleted[offset_start];
// aliases into output buffers
float *local_score_out = &score_out[offset_start];
float4 *local_bbox_out = &bboxes_out[offset_start];
long *local_labels_out = &labels_out[offset_start];
// Nothing to do here - early exit
if (num_scores == 0) {
if (threadIdx.x == 0) {
num_candidates_out[cls] = 0;
}
return;
}
// how many scores we care about
int num_to_consider = min(num_scores, max_num);
int current_num = num_to_consider;
// always start by looking at the first (highest) score
int first_score_idx = 0;
// store _global_ bbox candidate indices in shmem
// (fixed capacity of 200 entries assumes max_num <= 200)
__shared__ int local_candidates[200];
// also store _local_ indices for scores
__shared__ int local_score_indices[200];
// only thread 0 tracks how many candidates there are - need
// to distribute that via shmem
__shared__ int shared_num_candidates;
// index into shmem buffer for storing candidates
int current_candidate_idx = 0;
// initialise all shmem values to sentinels for sanity
for (int i = threadIdx.x; i < 200; i += blockDim.x) {
local_candidates[i] = -1;
local_score_indices[i] = -1;
}
// Shouldn't be necessary, make sure that no entries are
// coming in deleted from poor initialisation.
for (int i = threadIdx.x; i < num_scores; i += blockDim.x) {
local_deleted[i] = 0;
}
__syncthreads();
// While there's more scores/boxes to process
while (current_num > 0) {
// get the candidate index & bbox
// first_score_idx is _local_ into the aliased index-storing buffer
// candidate_idx is _global_ into the bbox buffer
const long candidate_idx = local_indices[first_score_idx];
const float4 candidate_bbox = bboxes[candidate_idx];
// Now we've looked at this candidate, remove it from consideration
local_deleted[first_score_idx] = 1;
// calculate the IoUs of candidate vs. remaining boxes & manipulate delete array
// standard block-stride loop over boxes
for (int i = threadIdx.x; i < num_to_consider; i += blockDim.x) {
// Know we've already looked at all entries before the candidate, so we can ignore them
// TODO: handle this loop more efficiently w.r.t. skipped entries
if (i > first_score_idx) {
long test_idx = local_indices[i];
float4 test_bbox = bboxes[test_idx];
// Note if we need to delete this box
local_deleted[i] = masked_iou(candidate_bbox, test_bbox, local_deleted[i], criteria);
}
}
// make sure all IoU / deletion calcs are done
// NOTE: shouldn't be necessary, candidate writing isn't dependent on the results
// of IoU calcs, and sync point _after_ that writing should cover.
// __syncthreads();
// write the candidate idx into shmem and increment storage pointer
if (threadIdx.x == 0) {
// idx into global bbox array
local_candidates[current_candidate_idx] = candidate_idx;
// idx into local scores
local_score_indices[current_candidate_idx] = first_score_idx;
// increment storage location
current_candidate_idx++;
}
__syncthreads();
// Now, get the number of remaining boxes and the first non-deleted idx
get_current_num_idx(local_deleted, num_to_consider, &first_score_idx, &current_num);
__syncthreads();
}
// Note: Only thread 0 has the correct number of candidates (as that's the thread
// that actually handles candidate tracking). Need to bcast the correct value to
// everyone for multi-threaded output writing, so do that here via shmem.
if (threadIdx.x == 0) {
shared_num_candidates = current_candidate_idx;
}
__syncthreads();
// at this point we should have all candidate indices for this class
// use them to write out scores, bboxes and labels
for (int i = threadIdx.x; i < shared_num_candidates; i += blockDim.x) {
local_score_out[i] = local_scores[local_score_indices[i]];
local_bbox_out[i] = bboxes[local_candidates[i]]; // bboxes[local_indices[i]];
local_labels_out[i] = cls;
}
// write the final number of candidates from this class to a buffer
if (threadIdx.x == 0) {
num_candidates_out[cls] = current_candidate_idx;
}
}
__global__
void squash_outputs(const int N, // number of sets of outputs
const long *num_candidates, // number of candidates per entry
const int *output_offsets, // offsets into outputs
const float *output_scores,
const float4 *output_boxes,
const long* output_labels,
const long* squashed_offsets,
float *squashed_scores,
float4 *squashed_boxes,
long *squashed_labels) {
// block per output
const int cls = blockIdx.x + 1;
const int num_to_write = num_candidates[cls];
const long read_offset = output_offsets[cls];
const long write_offset = squashed_offsets[cls];
for (int i = threadIdx.x; i < num_to_write; i += blockDim.x) {
// Read
auto score = output_scores[read_offset + i];
auto bbox = output_boxes[read_offset + i];
auto label = output_labels[read_offset + i];
// Write
squashed_scores[write_offset + i] = score;
squashed_boxes[write_offset + i] = bbox;
squashed_labels[write_offset + i] = label;
}
}
}; // anonymous namespace
std::vector<at::Tensor> nms(const int N, // number of images
const int num_classes,
const at::Tensor score_offsets,
const at::Tensor sorted_scores,
const at::Tensor sorted_scores_idx,
const at::Tensor bboxes,
const float criteria,
const int max_num) {
// Run all classes in different blocks, ignore background class 0
const int num_blocks = num_classes - 1;
const int total_scores = score_offsets[score_offsets.numel()-1].item<int>();
// track which elements have been deleted in each iteration
at::Tensor deleted = torch::zeros({total_scores}, torch::CUDA(at::kByte));
// track how many outputs we have for each class
at::Tensor num_candidates_out = torch::zeros({num_classes}, torch::CUDA(at::kLong));
// outputs
at::Tensor score_out = torch::empty({total_scores}, torch::CUDA(at::kFloat));
at::Tensor label_out = torch::empty({total_scores}, torch::CUDA(at::kLong));
at::Tensor bbox_out = torch::empty({total_scores, 4}, torch::CUDA(at::kFloat));
// Run the kernel
const int THREADS_PER_BLOCK = 64;
auto stream = at::cuda::getCurrentCUDAStream().stream();
nms_kernel<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>(N,
num_classes,
score_offsets.data<int>(),
sorted_scores.data<float>(),
sorted_scores_idx.data<long>(),
(float4*)bboxes.data<float>(),
criteria,
max_num,
deleted.data<uint8_t>(),
num_candidates_out.data<long>(),
score_out.data<float>(),
(float4*)bbox_out.data<float>(),
label_out.data<long>());
THCudaCheck(cudaGetLastError());
// Now need to squash the output so it's contiguous.
// get prefix sum of num_candidates_out
// Note: Still need lengths
auto output_offsets = num_candidates_out.cumsum(0);
auto total_outputs = output_offsets[output_offsets.numel()-1].item<long>();
output_offsets = output_offsets - num_candidates_out;
// allocate final outputs
at::Tensor squashed_scores = torch::empty({total_outputs}, torch::CUDA(at::kFloat));
at::Tensor squashed_bboxes = torch::empty({total_outputs, 4}, torch::CUDA(at::kFloat));
at::Tensor squashed_labels = torch::empty({total_outputs}, torch::CUDA(at::kLong));
// Copy non-squashed outputs -> squashed.
squash_outputs<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>(N,
num_candidates_out.data<long>(),
score_offsets.data<int>(),
score_out.data<float>(),
(float4*)bbox_out.data<float>(),
label_out.data<long>(),
output_offsets.contiguous().data<long>(),
squashed_scores.data<float>(),
(float4*)squashed_bboxes.data<float>(),
squashed_labels.data<long>());
THCudaCheck(cudaGetLastError());
return {squashed_bboxes, squashed_scores, squashed_labels};
}
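// Hedged sketch (not part of the original source) of how nms() could be exposed to Python as a
// torch C++ extension; torch/extension.h is already included above. The module name is whatever
// TORCH_EXTENSION_NAME expands to at build time, e.g. when built via torch.utils.cpp_extension.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("nms", &nms, "Per-class greedy NMS (CUDA); returns (bboxes, scores, labels)");
}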
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudev.hpp"
using namespace cv::cudev;
void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMat& mask, double scale, Stream& stream, int);
namespace
{
template <typename SrcType, typename ScalarType, typename DstType> struct MulScalarOp : unary_function<SrcType, DstType>
{
ScalarType val;
__device__ __forceinline__ DstType operator ()(SrcType a) const
{
return saturate_cast<DstType>(saturate_cast<ScalarType>(a) * val);
}
};
template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy
{
};
template <> struct TransformPolicy<double> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename SrcType, typename ScalarDepth, typename DstType>
void mulScalarImpl(const GpuMat& src, cv::Scalar value, GpuMat& dst, Stream& stream)
{
typedef typename MakeVec<ScalarDepth, VecTraits<SrcType>::cn>::type ScalarType;
cv::Scalar_<ScalarDepth> value_ = value;
MulScalarOp<SrcType, ScalarType, DstType> op;
op.val = VecTraits<ScalarType>::make(value_.val);
gridTransformUnary_< TransformPolicy<ScalarDepth> >(globPtr<SrcType>(src), globPtr<DstType>(dst), op, stream);
}
}
void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMat&, double scale, Stream& stream, int)
{
typedef void (*func_t)(const GpuMat& src, cv::Scalar val, GpuMat& dst, Stream& stream);
static const func_t funcs[7][7][4] =
{
{
{mulScalarImpl<uchar, float, uchar>, mulScalarImpl<uchar2, float, uchar2>, mulScalarImpl<uchar3, float, uchar3>, mulScalarImpl<uchar4, float, uchar4>},
{mulScalarImpl<uchar, float, schar>, mulScalarImpl<uchar2, float, char2>, mulScalarImpl<uchar3, float, char3>, mulScalarImpl<uchar4, float, char4>},
{mulScalarImpl<uchar, float, ushort>, mulScalarImpl<uchar2, float, ushort2>, mulScalarImpl<uchar3, float, ushort3>, mulScalarImpl<uchar4, float, ushort4>},
{mulScalarImpl<uchar, float, short>, mulScalarImpl<uchar2, float, short2>, mulScalarImpl<uchar3, float, short3>, mulScalarImpl<uchar4, float, short4>},
{mulScalarImpl<uchar, float, int>, mulScalarImpl<uchar2, float, int2>, mulScalarImpl<uchar3, float, int3>, mulScalarImpl<uchar4, float, int4>},
{mulScalarImpl<uchar, float, float>, mulScalarImpl<uchar2, float, float2>, mulScalarImpl<uchar3, float, float3>, mulScalarImpl<uchar4, float, float4>},
{mulScalarImpl<uchar, double, double>, mulScalarImpl<uchar2, double, double2>, mulScalarImpl<uchar3, double, double3>, mulScalarImpl<uchar4, double, double4>}
},
{
{mulScalarImpl<schar, float, uchar>, mulScalarImpl<char2, float, uchar2>, mulScalarImpl<char3, float, uchar3>, mulScalarImpl<char4, float, uchar4>},
{mulScalarImpl<schar, float, schar>, mulScalarImpl<char2, float, char2>, mulScalarImpl<char3, float, char3>, mulScalarImpl<char4, float, char4>},
{mulScalarImpl<schar, float, ushort>, mulScalarImpl<char2, float, ushort2>, mulScalarImpl<char3, float, ushort3>, mulScalarImpl<char4, float, ushort4>},
{mulScalarImpl<schar, float, short>, mulScalarImpl<char2, float, short2>, mulScalarImpl<char3, float, short3>, mulScalarImpl<char4, float, short4>},
{mulScalarImpl<schar, float, int>, mulScalarImpl<char2, float, int2>, mulScalarImpl<char3, float, int3>, mulScalarImpl<char4, float, int4>},
{mulScalarImpl<schar, float, float>, mulScalarImpl<char2, float, float2>, mulScalarImpl<char3, float, float3>, mulScalarImpl<char4, float, float4>},
{mulScalarImpl<schar, double, double>, mulScalarImpl<char2, double, double2>, mulScalarImpl<char3, double, double3>, mulScalarImpl<char4, double, double4>}
},
{
{0 /*mulScalarImpl<ushort, float, uchar>*/, 0 /*mulScalarImpl<ushort2, float, uchar2>*/, 0 /*mulScalarImpl<ushort3, float, uchar3>*/, 0 /*mulScalarImpl<ushort4, float, uchar4>*/},
{0 /*mulScalarImpl<ushort, float, schar>*/, 0 /*mulScalarImpl<ushort2, float, char2>*/, 0 /*mulScalarImpl<ushort3, float, char3>*/, 0 /*mulScalarImpl<ushort4, float, char4>*/},
{mulScalarImpl<ushort, float, ushort>, mulScalarImpl<ushort2, float, ushort2>, mulScalarImpl<ushort3, float, ushort3>, mulScalarImpl<ushort4, float, ushort4>},
{mulScalarImpl<ushort, float, short>, mulScalarImpl<ushort2, float, short2>, mulScalarImpl<ushort3, float, short3>, mulScalarImpl<ushort4, float, short4>},
{mulScalarImpl<ushort, float, int>, mulScalarImpl<ushort2, float, int2>, mulScalarImpl<ushort3, float, int3>, mulScalarImpl<ushort4, float, int4>},
{mulScalarImpl<ushort, float, float>, mulScalarImpl<ushort2, float, float2>, mulScalarImpl<ushort3, float, float3>, mulScalarImpl<ushort4, float, float4>},
{mulScalarImpl<ushort, double, double>, mulScalarImpl<ushort2, double, double2>, mulScalarImpl<ushort3, double, double3>, mulScalarImpl<ushort4, double, double4>}
},
{
{0 /*mulScalarImpl<short, float, uchar>*/, 0 /*mulScalarImpl<short2, float, uchar2>*/, 0 /*mulScalarImpl<short3, float, uchar3>*/, 0 /*mulScalarImpl<short4, float, uchar4>*/},
{0 /*mulScalarImpl<short, float, schar>*/, 0 /*mulScalarImpl<short2, float, char2>*/, 0 /*mulScalarImpl<short3, float, char3>*/, 0 /*mulScalarImpl<short4, float, char4>*/},
{mulScalarImpl<short, float, ushort>, mulScalarImpl<short2, float, ushort2>, mulScalarImpl<short3, float, ushort3>, mulScalarImpl<short4, float, ushort4>},
{mulScalarImpl<short, float, short>, mulScalarImpl<short2, float, short2>, mulScalarImpl<short3, float, short3>, mulScalarImpl<short4, float, short4>},
{mulScalarImpl<short, float, int>, mulScalarImpl<short2, float, int2>, mulScalarImpl<short3, float, int3>, mulScalarImpl<short4, float, int4>},
{mulScalarImpl<short, float, float>, mulScalarImpl<short2, float, float2>, mulScalarImpl<short3, float, float3>, mulScalarImpl<short4, float, float4>},
{mulScalarImpl<short, double, double>, mulScalarImpl<short2, double, double2>, mulScalarImpl<short3, double, double3>, mulScalarImpl<short4, double, double4>}
},
{
{0 /*mulScalarImpl<int, float, uchar>*/, 0 /*mulScalarImpl<int2, float, uchar2>*/, 0 /*mulScalarImpl<int3, float, uchar3>*/, 0 /*mulScalarImpl<int4, float, uchar4>*/},
{0 /*mulScalarImpl<int, float, schar>*/, 0 /*mulScalarImpl<int2, float, char2>*/, 0 /*mulScalarImpl<int3, float, char3>*/, 0 /*mulScalarImpl<int4, float, char4>*/},
{0 /*mulScalarImpl<int, float, ushort>*/, 0 /*mulScalarImpl<int2, float, ushort2>*/, 0 /*mulScalarImpl<int3, float, ushort3>*/, 0 /*mulScalarImpl<int4, float, ushort4>*/},
{0 /*mulScalarImpl<int, float, short>*/, 0 /*mulScalarImpl<int2, float, short2>*/, 0 /*mulScalarImpl<int3, float, short3>*/, 0 /*mulScalarImpl<int4, float, short4>*/},
{mulScalarImpl<int, float, int>, mulScalarImpl<int2, float, int2>, mulScalarImpl<int3, float, int3>, mulScalarImpl<int4, float, int4>},
{mulScalarImpl<int, float, float>, mulScalarImpl<int2, float, float2>, mulScalarImpl<int3, float, float3>, mulScalarImpl<int4, float, float4>},
{mulScalarImpl<int, double, double>, mulScalarImpl<int2, double, double2>, mulScalarImpl<int3, double, double3>, mulScalarImpl<int4, double, double4>}
},
{
{0 /*mulScalarImpl<float, float, uchar>*/, 0 /*mulScalarImpl<float2, float, uchar2>*/, 0 /*mulScalarImpl<float3, float, uchar3>*/, 0 /*mulScalarImpl<float4, float, uchar4>*/},
{0 /*mulScalarImpl<float, float, schar>*/, 0 /*mulScalarImpl<float2, float, char2>*/, 0 /*mulScalarImpl<float3, float, char3>*/, 0 /*mulScalarImpl<float4, float, char4>*/},
{0 /*mulScalarImpl<float, float, ushort>*/, 0 /*mulScalarImpl<float2, float, ushort2>*/, 0 /*mulScalarImpl<float3, float, ushort3>*/, 0 /*mulScalarImpl<float4, float, ushort4>*/},
{0 /*mulScalarImpl<float, float, short>*/, 0 /*mulScalarImpl<float2, float, short2>*/, 0 /*mulScalarImpl<float3, float, short3>*/, 0 /*mulScalarImpl<float4, float, short4>*/},
{0 /*mulScalarImpl<float, float, int>*/, 0 /*mulScalarImpl<float2, float, int2>*/, 0 /*mulScalarImpl<float3, float, int3>*/, 0 /*mulScalarImpl<float4, float, int4>*/},
{mulScalarImpl<float, float, float>, mulScalarImpl<float2, float, float2>, mulScalarImpl<float3, float, float3>, mulScalarImpl<float4, float, float4>},
{mulScalarImpl<float, double, double>, mulScalarImpl<float2, double, double2>, mulScalarImpl<float3, double, double3>, mulScalarImpl<float4, double, double4>}
},
{
{0 /*mulScalarImpl<double, double, uchar>*/, 0 /*mulScalarImpl<double2, double, uchar2>*/, 0 /*mulScalarImpl<double3, double, uchar3>*/, 0 /*mulScalarImpl<double4, double, uchar4>*/},
{0 /*mulScalarImpl<double, double, schar>*/, 0 /*mulScalarImpl<double2, double, char2>*/, 0 /*mulScalarImpl<double3, double, char3>*/, 0 /*mulScalarImpl<double4, double, char4>*/},
{0 /*mulScalarImpl<double, double, ushort>*/, 0 /*mulScalarImpl<double2, double, ushort2>*/, 0 /*mulScalarImpl<double3, double, ushort3>*/, 0 /*mulScalarImpl<double4, double, ushort4>*/},
{0 /*mulScalarImpl<double, double, short>*/, 0 /*mulScalarImpl<double2, double, short2>*/, 0 /*mulScalarImpl<double3, double, short3>*/, 0 /*mulScalarImpl<double4, double, short4>*/},
{0 /*mulScalarImpl<double, double, int>*/, 0 /*mulScalarImpl<double2, double, int2>*/, 0 /*mulScalarImpl<double3, double, int3>*/, 0 /*mulScalarImpl<double4, double, int4>*/},
{0 /*mulScalarImpl<double, double, float>*/, 0 /*mulScalarImpl<double2, double, float2>*/, 0 /*mulScalarImpl<double3, double, float3>*/, 0 /*mulScalarImpl<double4, double, float4>*/},
{mulScalarImpl<double, double, double>, mulScalarImpl<double2, double, double2>, mulScalarImpl<double3, double, double3>, mulScalarImpl<double4, double, double4>}
}
};
const int sdepth = src.depth();
const int ddepth = dst.depth();
const int cn = src.channels();
CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F && cn <= 4 );
val[0] *= scale;
val[1] *= scale;
val[2] *= scale;
val[3] *= scale;
const func_t func = funcs[sdepth][ddepth][cn - 1];
if (!func)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, val, dst, stream);
}
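// Hedged usage sketch, illustration only: user code reaches the dispatch table above through
// cv::cuda::multiply() from the cudaarithm module when the second operand is a cv::Scalar.
// The include and helper below are not used by OpenCV itself.
#include <opencv2/cudaarithm.hpp>
static void mulScalarUsageExample(const cv::cuda::GpuMat& d_src, cv::cuda::GpuMat& d_dst,
                                  cv::cuda::Stream& stream)
{
    // Multiplies every pixel by 0.5; keeps the source depth (dtype = -1).
    cv::cuda::multiply(d_src, cv::Scalar::all(0.5), d_dst, /*scale=*/1.0, /*dtype=*/-1, stream);
}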
#endif
#include "cudakernel/memory/channel_shuffle.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include "cudakernel/common/common.h"
template <typename T>
__global__ void ppl_cukernel_channel_shuffle(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int32_t channels_per_group,
GArray<DivModFast> input_strides_fast,
const T* input,
T* output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems_pad)
return;
int64_t output_offset = 0;
int n_idx, c_idx, hw_idx, remain = index;
input_strides_fast[0].divmod(remain, n_idx, remain);
output_offset += (index - remain);
input_strides_fast[1].divmod(remain, c_idx, remain);
hw_idx = remain;
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx;
output[output_offset] = index >= num_elems ? 0 : input[index];
}
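// Hedged host-side reference (not used by the CUDA path) of the index mapping the kernel above
// applies to NCHW data: channel c in a tensor with group * channels_per_group channels moves to
// out_c = (c % channels_per_group) * group + c / channels_per_group, i.e. the classic ShuffleNet
// "reshape to (group, channels_per_group), transpose, flatten" permutation.
template <typename T>
void channel_shuffle_ndarray_reference(const T* input, T* output,
                                       int batch, int channels, int hw, int group)
{
    const int channels_per_group = channels / group;
    for (int n = 0; n < batch; ++n) {
        for (int c = 0; c < channels; ++c) {
            const int out_c = (c % channels_per_group) * group + c / channels_per_group;
            for (int i = 0; i < hw; ++i) {
                output[(n * channels + out_c) * hw + i] = input[(n * channels + c) * hw + i];
            }
        }
    }
}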
__global__ void ppl_cukernel_channel_shuffle_int8(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int32_t channels_per_group,
GArray<DivModFast> input_strides_fast,
const int8_t* input,
int8_t* output,
float in_scale,
float out_scale)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems_pad)
return;
int64_t output_offset = 0;
int n_idx, c_idx, hw_idx, remain = index;
input_strides_fast[0].divmod(remain, n_idx, remain);
output_offset += (index - remain);
input_strides_fast[1].divmod(remain, c_idx, remain);
hw_idx = remain;
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx;
int res = round(input[index] * in_scale / out_scale);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output[output_offset] = index >= num_elems ? 0 : res;
}
template <typename T>
__global__ void ppl_cukernel_channel_shuffle_nhwc(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int channels_per_group,
int pad_channels,
DivModFast channels_fast,
const T *input,
T *output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems_pad)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int nhw_idx, c_idx, remain = index;
channels_fast.divmod(remain, nhw_idx, c_idx);
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
input_offset += nhw_idx * pad_channels + c_idx;
output_offset += nhw_idx * pad_channels + out_c_idx;
output[output_offset] = index >= num_elems ? 0 : input[input_offset];
}
__global__ void ppl_cukernel_channel_shuffle_nhwc_int8(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int channels_per_group,
int pad_channels,
DivModFast channels_fast,
const int8_t *input,
int8_t *output,
float in_scale,
float out_scale)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems_pad)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int nhw_idx, c_idx, remain = index;
channels_fast.divmod(remain, nhw_idx, c_idx);
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
input_offset += nhw_idx * pad_channels + c_idx;
output_offset += nhw_idx * pad_channels + out_c_idx;
int res = round(input[input_offset] * in_scale / out_scale);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output[output_offset] = index >= num_elems ? 0 : res;
}
ppl::common::RetCode PPLCUDAChannelShuffleForwardImp(
cudaStream_t stream,
int group,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output,
float in_scale,
float out_scale)
{
// num_dims must be equal to 4
int num_dims = output_shape->GetDimCount();
int64_t num_elems = output_shape->GetElementsExcludingPadding();
// padded element count: the kernels zero-fill indices in [num_elems, num_elems_pad)
int64_t num_elems_pad = output_shape->GetElementsIncludingPadding();
// for ndarray layout
int num_input_strides_dims = num_dims - 2;
GArray<DivModFast> input_strides_fast(num_input_strides_dims);
int elems_hw = input_shape->GetDim(2) * input_shape->GetDim(3);
input_strides_fast[1] = DivModFast(elems_hw);
int elems_chw = input_shape->GetDim(1) * elems_hw;
input_strides_fast[0] = DivModFast(elems_chw);
// for nhwc layout
int pad_channels = input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1);
DivModFast channels_fast(input_shape->GetDim(1));
int block_size = 256;
int grid_size = (num_elems_pad + block_size - 1) / block_size;
int channels_per_group = input_shape->GetDim(1) / group;
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8 || \
output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC16){ \
ppl_cukernel_channel_shuffle_nhwc<<<grid_size, block_size, 0, stream>>>( \
num_elems, num_elems_pad, group, channels_per_group, pad_channels, channels_fast, \
(const TYPE *)input, (TYPE *)output); \
} else { \
ppl_cukernel_channel_shuffle<<<grid_size, block_size, 0, stream>>>( \
num_elems, num_elems_pad, group, channels_per_group, input_strides_fast, (const TYPE *)input, (TYPE *)output); \
} \
return ppl::common::RC_SUCCESS; \
}
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
case sizeof(int8_t): {
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8 ||
output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC16){
ppl_cukernel_channel_shuffle_nhwc_int8<<<grid_size, block_size, 0, stream>>>(
num_elems, num_elems_pad, group, channels_per_group, pad_channels, channels_fast,
(const int8_t *)input, (int8_t *)output, in_scale, out_scale);
} else {
ppl_cukernel_channel_shuffle_int8<<<grid_size, block_size, 0, stream>>>(
num_elems, num_elems_pad, group, channels_per_group, input_strides_fast, (int8_t *)input, (int8_t *)output, in_scale, out_scale);
}
return ppl::common::RC_SUCCESS;
}
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
template <typename T>
__global__ void ppl_cukernel_fuse_channel_shuffle(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int32_t channels_per_group,
GArray<DivModFast> input_strides_fast,
const T* input1,
const T* input2,
T* output1,
T* output2)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= 2 * num_elems_pad)
return;
int64_t output_offset = 0;
int n_idx, c_idx, hw_idx, remain = index;
int hw = input_strides_fast[1].d_;
input_strides_fast[0].divmod(remain, n_idx, remain); // index / chw
output_offset += (index - remain);
input_strides_fast[1].divmod(remain, c_idx, remain); // index / hw
hw_idx = remain;
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx;
int out_div_hw = output_offset / hw;
int in_div_hw = index / hw;
if(out_div_hw % 2) {
if(in_div_hw % 2) {
output2[(out_div_hw - 1) / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : input2[(in_div_hw - 1) / 2 * hw + hw_idx];
} else {
output2[(out_div_hw - 1) / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : input1[in_div_hw / 2 * hw + hw_idx];
}
} else {
if(in_div_hw % 2) {
output1[out_div_hw / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : input2[(in_div_hw - 1) / 2 * hw + hw_idx];
} else {
output1[out_div_hw / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : input1[in_div_hw / 2 * hw + hw_idx];
}
}
}
__global__ void ppl_cukernel_fuse_channel_shuffle_int8(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int32_t channels_per_group,
GArray<DivModFast> input_strides_fast,
const int8_t* input1,
const int8_t* input2,
int8_t* output1,
int8_t* output2,
float in_scale0,
float in_scale1,
float out_scale0,
float out_scale1)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= 2 * num_elems_pad)
return;
int64_t output_offset = 0;
int n_idx, c_idx, hw_idx, remain = index;
int hw = input_strides_fast[1].d_;
input_strides_fast[0].divmod(remain, n_idx, remain); // index / chw
output_offset += (index - remain);
input_strides_fast[1].divmod(remain, c_idx, remain); // index / hw
hw_idx = remain;
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx;
int out_div_hw = output_offset / hw;
int in_div_hw = index / hw;
if(out_div_hw % 2) {
if(in_div_hw % 2) {
int res = round(input2[(in_div_hw - 1) / 2 * hw + hw_idx] * in_scale1 / out_scale1);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output2[(out_div_hw - 1) / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : res;
} else {
int res = round(input1[in_div_hw / 2 * hw + hw_idx] * in_scale0 / out_scale1);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output2[(out_div_hw - 1) / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : res;
}
} else {
if(in_div_hw % 2) {
int res = round(input2[(in_div_hw - 1) / 2 * hw + hw_idx] * in_scale1 / out_scale0);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output1[out_div_hw / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : res;
} else {
int res = round(input1[in_div_hw / 2 * hw + hw_idx] * in_scale0 / out_scale0);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output1[out_div_hw / 2 * hw + hw_idx] = index >= 2 * num_elems ? 0 : res;
}
}
}
template <typename T>
__global__ void ppl_cukernel_fuse_channel_shuffle_nhwc(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int channels_per_group,
int pad_channels,
DivModFast channels_fast,
const T *input1,
const T *input2,
T *output1,
T *output2,
int elems_nhw,
int elems_c)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= 2 * num_elems_pad)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int nhw_idx, c_idx, remain = index;
channels_fast.divmod(remain, nhw_idx, c_idx);
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
input_offset += nhw_idx * 2 * elems_c + c_idx;
output_offset += nhw_idx * 2 * elems_c + out_c_idx;
if(output_offset % (2 * elems_c) >= elems_c) {
if(input_offset % (2 * elems_c) >= elems_c) {
output2[nhw_idx * pad_channels + out_c_idx - elems_c] = index >= 2 * num_elems ? 0 : input2[nhw_idx * pad_channels + c_idx - elems_c];
} else {
output2[nhw_idx * pad_channels + out_c_idx - elems_c] = index >= 2 * num_elems ? 0 : input1[nhw_idx * pad_channels + c_idx];
}
} else {
if(input_offset % (2 * elems_c) >= elems_c) {
output1[nhw_idx * pad_channels + out_c_idx] = index >= 2 * num_elems ? 0 : input2[nhw_idx * pad_channels + c_idx - elems_c];
} else {
output1[nhw_idx * pad_channels + out_c_idx] = index >= 2 * num_elems ? 0 : input1[nhw_idx * pad_channels + c_idx];
}
}
}
__global__ void ppl_cukernel_fuse_channel_shuffle_nhwc_int8(
int64_t num_elems,
int64_t num_elems_pad,
int32_t group,
int channels_per_group,
int pad_channels,
DivModFast channels_fast,
const int8_t *input1,
const int8_t *input2,
int8_t *output1,
int8_t *output2,
int elems_nhw,
int elems_c,
float in_scale0,
float in_scale1,
float out_scale0,
float out_scale1)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= 2 * num_elems_pad)
return;
int64_t input_offset = 0;
int64_t output_offset = 0;
int nhw_idx, c_idx, remain = index;
channels_fast.divmod(remain, nhw_idx, c_idx);
int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group;
input_offset += nhw_idx * 2 * elems_c + c_idx;
output_offset += nhw_idx * 2 * elems_c + out_c_idx;
if(output_offset % (2 * elems_c) >= elems_c) {
if(input_offset % (2 * elems_c) >= elems_c) {
int res = round(input2[nhw_idx * pad_channels + c_idx - elems_c] * in_scale1 / out_scale1);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output2[nhw_idx * pad_channels + out_c_idx - elems_c] = index >= 2 * num_elems ? 0 : res;
} else {
int res = round(input1[nhw_idx * pad_channels + c_idx] * in_scale0 / out_scale1);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output2[nhw_idx * pad_channels + out_c_idx - elems_c] = index >= 2 * num_elems ? 0 : res;
}
} else {
if(input_offset % (2 * elems_c) >= elems_c) {
int res = round(input2[nhw_idx * pad_channels + c_idx - elems_c] * in_scale1 / out_scale0);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output1[nhw_idx * pad_channels + out_c_idx] = index >= 2 * num_elems ? 0 : res;
} else {
int res = round(input1[nhw_idx * pad_channels + c_idx] * in_scale0 / out_scale0);
if(res > 127) res = 127;
else if(res < -128) res = -128;
output1[nhw_idx * pad_channels + out_c_idx] = index >= 2 * num_elems ? 0 : res;
}
}
}
ppl::common::RetCode PPLCUDAFuseChannelShuffleForwardImp(
cudaStream_t stream,
int group,
const ppl::nn::TensorShape* input_shape,
const void* input1,
const void* input2,
const ppl::nn::TensorShape* output_shape,
void* output1,
void* output2,
float in_scale0,
float in_scale1,
float out_scale0,
float out_scale1)
{
// num_dims must be equal to 4
int num_dims = output_shape->GetDimCount();
int64_t num_elems = output_shape->GetElementsExcludingPadding();
// padded element count: the kernels zero-fill indices in [num_elems, num_elems_pad)
int64_t num_elems_pad = output_shape->GetElementsIncludingPadding();
// for ndarray layout
int num_input_strides_dims = num_dims - 2;
GArray<DivModFast> input_strides_fast(num_input_strides_dims);
int elems_hw = input_shape->GetDim(2) * input_shape->GetDim(3);
input_strides_fast[1] = DivModFast(elems_hw);
int elems_chw = 2 * input_shape->GetDim(1) * elems_hw;
input_strides_fast[0] = DivModFast(elems_chw);
// for nhwc layout
int pad_channels = input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1);
DivModFast channels_fast(2 * input_shape->GetDim(1));
int elems_nhw = elems_hw * input_shape->GetDim(0);
int elems_c = input_shape->GetDim(1);
int block_size = 256;
int grid_size = (2 * num_elems_pad + block_size - 1) / block_size;
int channels_per_group = (2 * input_shape->GetDim(1)) / group;
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): { \
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8 || \
output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC16) { \
ppl_cukernel_fuse_channel_shuffle_nhwc<<<grid_size, block_size, 0, stream>>>( \
num_elems, num_elems_pad, group, channels_per_group, pad_channels, channels_fast, \
(const TYPE *)input1, (const TYPE *) input2, (TYPE *)output1, (TYPE *)output2, elems_nhw, elems_c); \
} else { \
ppl_cukernel_fuse_channel_shuffle<<<grid_size, block_size, 0, stream>>>( \
num_elems, num_elems_pad, group, channels_per_group, input_strides_fast, (const TYPE *)input1, (const TYPE *)input2,\
(TYPE *)output1, (TYPE *)output2); \
} \
return ppl::common::RC_SUCCESS; \
}
switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) {
case sizeof(int8_t): {
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8 ||
output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC16){
ppl_cukernel_fuse_channel_shuffle_nhwc_int8<<<grid_size, block_size, 0, stream>>>(
num_elems, num_elems_pad, group, channels_per_group, pad_channels, channels_fast,
(const int8_t *)input1, (const int8_t *) input2, (int8_t *)output1, (int8_t *)output2, elems_nhw, elems_c,
in_scale0, in_scale1, out_scale0, out_scale1);
} else {
ppl_cukernel_fuse_channel_shuffle_int8<<<grid_size, block_size, 0, stream>>>(
num_elems, num_elems_pad, group, channels_per_group, input_strides_fast, (const int8_t *)input1, (const int8_t *)input2,
(int8_t *)output1, (int8_t *)output2, in_scale0, in_scale1, out_scale0, out_scale1);
}
return ppl::common::RC_SUCCESS;
}
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
namespace amgx
{
namespace
{
template <typename TConfig>
static void error_vector_too_small(const Vector<TConfig> &v, int required_size)
{
std::stringstream ss;
ss << "Vector size too small: not enough space for halo elements." << std::endl;
ss << "Vector: {tag = " << v.tag << ", " << "size = " << v.size() << "}" << std::endl;
ss << "Required size: " << required_size << std::endl;
FatalError(ss.str(), AMGX_ERR_INTERNAL);
}
}
//required by do_add_from_halo()
//{
template<typename TConfig, typename Tb>
void AddFromHalo1Functor<TConfig, Tb>::operator()(CommsMPIHostBufferStream<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int num_rings = get_num_rings();
int neighbors = comm.get_neighbors();
int bsize = b.get_block_size();
int num_cols = b.get_num_cols();
cudaStream_t &stream = get_stream();
int tag = get_tag();
assert( tag == m.manager->global_id() );
get_send_size() = (m.manager->halo_offset(neighbors * num_rings) - m.manager->halo_offset(0)) * bsize;
if (get_send_size() != 0)
{
cudaMemcpyAsync(&(b.explicit_host_buffer[0]), b.buffer->raw(), get_send_size()*sizeof(typename Tb::value_type), cudaMemcpyDefault, stream);
cudaStreamSynchronize(stream);
}
MPI_Comm mpi_comm = comm.get_mpi_comm();
int offset = 0;
for (int i = 0; i < neighbors; i++)
{
// Count total size to send to my neighbor
int size = 0;
for (int j = 0; j < num_rings; j++)
{
size += (m.manager->halo_offset(j * neighbors + i + 1) - m.manager->halo_offset(j * neighbors + i)) * bsize;
}
if (size != 0)
MPI_Isend(&(b.explicit_host_buffer[offset]),
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
tag,
mpi_comm,
&b.requests[i]);
else
{
MPI_Isend(&(b.host_buffer[0]), size * sizeof(typename Tb::value_type), MPI_BYTE, m.manager->neighbors[i], tag, mpi_comm, &b.requests[i]);
}
offset += size;
}
#endif
}
template<typename TConfig, typename Tb>
void AddFromHalo1Functor<TConfig, Tb>::operator()(CommsMPIDirect<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int bsize = b.get_block_size();
int neighbors = m.manager->num_neighbors();
int num_rings = get_num_rings();
int num_cols = b.get_num_cols();
cudaStream_t &stream = get_stream();
int tag = get_tag();
assert( tag == m.manager->global_id() );
get_send_size() = (m.manager->halo_offset(neighbors * num_rings) - m.manager->halo_offset(0)) * bsize;
if (get_send_size() != 0)
{
cudaStreamSynchronize(stream);
}
MPI_Comm mpi_comm = comm.get_mpi_comm();
int offset = 0;
for (int i = 0; i < neighbors; i++)
{
// Count total size to send to my neighbor
int size = 0;
for (int j = 0; j < num_rings; j++)
{
size += (m.manager->halo_offset(j * neighbors + i + 1) - m.manager->halo_offset(j * neighbors + i)) * bsize;
}
MPI_Isend(b.buffer->raw() + offset,
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
tag,
mpi_comm,
&b.requests[i]);
offset += size;
}
#endif
}
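// Hedged, self-contained sketch (not AMGX code) of the two send paths implemented above: the
// host-buffer path stages device data into a host buffer before MPI_Isend, while the CUDA-aware
// ("direct") path hands the device pointer straight to MPI. All names below are illustrative.
#ifdef AMGX_WITH_MPI
template <typename V>
static void post_halo_send_sketch(const V* d_buf, V* h_staging, int count,
                                  int dest_rank, int tag, MPI_Comm comm,
                                  cudaStream_t stream, bool cuda_aware,
                                  MPI_Request* req)
{
    if (cuda_aware)
    {
        // GPU-direct: MPI reads device memory itself (requires a CUDA-aware MPI build).
        MPI_Isend((void*)d_buf, count * sizeof(V), MPI_BYTE, dest_rank, tag, comm, req);
    }
    else
    {
        // Staged: copy to the host buffer first, then send from host memory.
        cudaMemcpyAsync(h_staging, d_buf, count * sizeof(V), cudaMemcpyDeviceToHost, stream);
        cudaStreamSynchronize(stream);
        MPI_Isend(h_staging, count * sizeof(V), MPI_BYTE, dest_rank, tag, comm, req);
    }
}
#endif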
//step 2:
//
template<typename TConfig, typename Tb>
void AddFromHalo2Functor<TConfig, Tb>::operator()(CommsMPIHostBufferStream<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int num_rings = get_num_rings();
int neighbors = comm.get_neighbors();
int bsize = b.get_block_size();
int num_cols = b.get_num_cols();
int send_size = get_send_size();
int offset = 0;
MPI_Comm mpi_comm = comm.get_mpi_comm();
for (int i = 0; i < neighbors; i++)
{
// Count total size to receive from one neighbor
int size = m.manager->getB2Lrings()[i][num_rings] * bsize;
if (size != 0)
MPI_Irecv(&(b.explicit_host_buffer[send_size + offset]),
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
m.manager->neighbors[i],
mpi_comm,
&b.requests[neighbors + i]);
else
{
MPI_Irecv(&(b.host_buffer[0]), size * sizeof(typename Tb::value_type), MPI_BYTE, m.manager->neighbors[i], m.manager->neighbors[i], mpi_comm, &b.requests[neighbors + i]);
}
offset += size;
}
get_offset() = offset;
#endif
}
template<typename TConfig, typename Tb>
void AddFromHalo2Functor<TConfig, Tb>::operator()(CommsMPIDirect<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int num_rings = get_num_rings();
int neighbors = m.manager->num_neighbors();
int bsize = b.get_block_size();
int num_cols = b.get_num_cols();
int send_size = get_send_size();
int offset = 0;
MPI_Comm mpi_comm = comm.get_mpi_comm();
for (int i = 0; i < neighbors; i++)
{
int size = m.manager->getB2Lrings()[i][num_rings] * bsize;
MPI_Irecv(b.buffer->raw() + send_size + offset,
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
m.manager->neighbors[i],
mpi_comm,
&b.requests[neighbors + i]);
offset += size;
}
get_offset() = offset;
#endif
}
//step 3:
//
template<typename TConfig, typename Tb>
void AddFromHalo3Functor<TConfig, Tb>::operator()(CommsMPIHostBufferStream<TConfig> &comm)
{
Tb &b = get_b();
b.in_transfer = IDLE;
#ifdef AMGX_WITH_MPI
const Matrix<TConfig> &m = get_m();
int recv_size = get_recv_size();
int send_size = get_send_size();
cudaStream_t &stream = get_stream();
int neighbors = comm.get_neighbors();
int bsize = b.get_block_size();
int num_rings = get_num_rings();
typedef typename Tb::value_type vtyp;
// Copy into b.buffer, single copy
if (recv_size != 0)
{
cudaMemcpyAsync(b.buffer->raw() + send_size, &(b.explicit_host_buffer[send_size]), recv_size * sizeof(typename Tb::value_type), cudaMemcpyDefault, stream);
}
int offset = 0;
bool linear_buffers_changed = false;
for (int i = 0 ; i < neighbors; i++)
{
if (b.linear_buffers[i] != b.buffer->raw() + send_size + offset)
{
linear_buffers_changed = true;
b.linear_buffers[i] = b.buffer->raw() + send_size + offset;
}
offset += m.manager->getB2Lrings()[i][num_rings] * bsize;
}
// Copy host to device:
//
if (linear_buffers_changed)
{
b.linear_buffers_ptrs.resize(neighbors);
cudaMemcpyAsync(thrust::raw_pointer_cast(&b.linear_buffers_ptrs[0]), &(b.linear_buffers[0]), neighbors * sizeof(vtyp *), cudaMemcpyDefault, stream);
}
// If we are on a stream synchronise the copies
if(stream != 0)
{
cudaStreamSynchronize(stream);
}
#endif
}
template<typename TConfig, typename Tb>
void AddFromHalo3Functor<TConfig, Tb>::operator()(CommsMPIDirect<TConfig> &comm)
{
Tb &b = get_b();
b.in_transfer = IDLE;
#ifdef AMGX_WITH_MPI
int recv_size = get_recv_size();
int send_size = get_send_size();
cudaStream_t &stream = get_stream();
const Matrix<TConfig> &m = get_m();
int neighbors = comm.get_neighbors();
int bsize = b.get_block_size();
int num_rings = get_num_rings();
typedef typename Tb::value_type vtyp;
if (recv_size != 0)
{
cudaStreamSynchronize(stream);
}
int offset = 0;
bool linear_buffers_changed = false;
for (int i = 0 ; i < neighbors; i++)
{
if (b.linear_buffers[i] != b.buffer->raw() + send_size + offset)
{
linear_buffers_changed = true;
b.linear_buffers[i] = b.buffer->raw() + send_size + offset;
}
offset += m.manager->getB2Lrings()[i][num_rings] * bsize;
}
// Copy device to device:
//
if (linear_buffers_changed)
{
b.linear_buffers_ptrs.resize(neighbors);
cudaMemcpyAsync(thrust::raw_pointer_cast(&b.linear_buffers_ptrs[0]),
& (b.linear_buffers[0]),
neighbors * sizeof(vtyp *),
cudaMemcpyDefault, stream);
cudaStreamSynchronize(stream);
}
#endif
}
//} end do_add_from_halo()
//required by do_send_receive_wait()
//{
template<typename TConfig, typename Tb>
void SendRecvWait1Functor<TConfig, Tb>::operator()(CommsMPIHostBufferStream<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int neighbors = comm.get_neighbors();
int bsize = b.get_block_size();
int num_cols = b.get_num_cols();
cudaStream_t &stream = get_stream();
int tag = get_tag();
const int ring1 = 1;
get_send_size() = b.buffer_size;
if (get_send_size() != 0)
{
cudaMemcpyAsync(&(b.explicit_host_buffer[0]), b.buffer->raw(), get_send_size()*sizeof(typename Tb::value_type), cudaMemcpyDefault, stream);
cudaStreamSynchronize(stream);
}
MPI_Comm mpi_comm = comm.get_mpi_comm();
int offset = 0;
for (int i = 0; i < neighbors; i++)
{
int size = m.manager->getB2Lrings()[i][ring1] * bsize;
if (size != 0)
MPI_Isend(&(b.explicit_host_buffer[offset]),
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
tag,
mpi_comm,
&b.requests[i]);
else
{
b.requests[i] = MPI_REQUEST_NULL;
}
offset += size;
}
#endif
}
template<typename TConfig, typename Tb>
void SendRecvWait1Functor<TConfig, Tb>::operator()(CommsMPIDirect<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int neighbors = m.manager->num_neighbors();
int bsize = b.get_block_size();
cudaStream_t &stream = get_stream();
int tag = get_tag();
const int ring1 = 1;
get_send_size() = b.buffer_size;
if (get_send_size() != 0)
{
cudaStreamSynchronize(stream);
}
MPI_Comm mpi_comm = comm.get_mpi_comm();
int offset = 0;
for (int i = 0; i < neighbors; i++)
{
int size = m.manager->getB2Lrings()[i][ring1] * bsize;
if (size != 0)
MPI_Isend(b.buffer->raw() + offset,
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
tag,
mpi_comm,
&b.requests[i]);
else
{
b.requests[i] = MPI_REQUEST_NULL;
}
offset += size;
}
#endif
}
template<typename TConfig, typename Tb>
void SendRecvWait2Functor<TConfig, Tb>::operator()(CommsMPIHostBufferStream<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int neighbors = comm.get_neighbors();
int bsize = b.get_block_size();
int tag = get_tag();
MPI_Comm mpi_comm = comm.get_mpi_comm();
int offset = 0;
for (int i = 0; i < neighbors; i++)
{
int size = (m.manager->halo_offset(i + 1) - m.manager->halo_offset(i)) * bsize;
if (size != 0)
MPI_Irecv(&(b.explicit_host_buffer[b.buffer_size + offset]),
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
tag,
mpi_comm,
&b.requests[neighbors + i]);
else
{
b.requests[neighbors + i] = MPI_REQUEST_NULL;
}
offset += size;
int required_size = m.manager->halo_offset(i) * bsize + size;
if (required_size > b.size())
{
error_vector_too_small(b, required_size);
}
}
#endif
}
template<typename TConfig, typename Tb>
void SendRecvWait2Functor<TConfig, Tb>::operator()(CommsMPIDirect<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int neighbors = m.manager->num_neighbors();
int bsize = b.get_block_size();
int tag = get_tag();
int send_size = m.manager->halo_offset(0) * bsize;
MPI_Comm mpi_comm = comm.get_mpi_comm();
int offset = 0;
for (int i = 0; i < neighbors; i++)
{
int size = (m.manager->halo_offset(i + 1) - m.manager->halo_offset(i)) * bsize;
if (size != 0)
MPI_Irecv(b.raw() + send_size + offset,
size * sizeof(typename Tb::value_type),
MPI_BYTE,
m.manager->neighbors[i],
tag,
mpi_comm,
&b.requests[neighbors + i]);
else
{
b.requests[neighbors + i] = MPI_REQUEST_NULL;
}
offset += size;
int required_size = m.manager->halo_offset(i) * bsize + size;
if (required_size > b.size())
{
error_vector_too_small(b, required_size);
}
}
#endif
}
template<typename TConfig, typename Tb>
void SendRecvWait3Functor<TConfig, Tb>::operator()(CommsMPIHostBufferStream<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int neighbors = comm.get_neighbors();
int bsize = b.get_block_size();
cudaStream_t &stream = get_stream();
int size = (m.manager->halo_offset(neighbors) - m.manager->halo_offset(0)) * bsize;
if (size != 0)
{
cudaMemcpyAsync(b.raw() + m.manager->halo_offset(0)*bsize, &(b.explicit_host_buffer[b.buffer_size]), size * sizeof(typename Tb::value_type), cudaMemcpyDefault, stream);
cudaStreamSynchronize(stream);
}
b.dirtybit = 0;
b.in_transfer = IDLE;
#endif
}
template<typename TConfig, typename Tb>
void SendRecvWait3Functor<TConfig, Tb>::operator()(CommsMPIDirect<TConfig> &comm)
{
#ifdef AMGX_WITH_MPI
Tb &b = get_b();
const Matrix<TConfig> &m = get_m();
int neighbors = m.manager->num_neighbors();
int bsize = b.get_block_size();
cudaStream_t &stream = get_stream();
int size = (m.manager->halo_offset(neighbors) - m.manager->halo_offset(0)) * bsize;
if (size != 0)
{
cudaStreamSynchronize(stream);
}
b.dirtybit = 0;
b.in_transfer = IDLE;
#endif
}
//} end do_send_receive_wait()
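// The wait on the requests posted above happens elsewhere in the comms layer. A minimal hedged
// sketch of completing them (one send plus one receive request per neighbor, laid out as
// requests[0..neighbors-1] and requests[neighbors..2*neighbors-1] as in the functors above):
#ifdef AMGX_WITH_MPI
static inline void wait_all_halo_requests_sketch(MPI_Request *requests, int num_neighbors)
{
    MPI_Waitall(2 * num_neighbors, requests, MPI_STATUSES_IGNORE);
}
#endif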
//###################################### Explicit Instantiations: #############################
// (must be in same translation unit, no effect if in another translation unit...)
#include <distributed/comms_visitors4_eti.h>
} // namespace amgx
#include "core/providers/cuda/cu_inc/common.cuh"
#include "gist_impl.h"
#include "gist.h"
#include <cuda_runtime.h>
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _GistBinarizeEncoderKernel(
const T* input_data,
bool* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = (input_data[id] > (T)0);
}
template <typename T>
__global__ void _GistBinarizeDecoderKernel(
const bool* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = (input_data[id] ? (T)1 : (T)0);
}
template <typename T>
__global__ void _GistPack1EncoderKernel(
const T* input_data,
uint8_t* output_data,
const size_t factor,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); // id of Y (compressed tensor)
uint8_t out = 0x0;
uint8_t bit_out = 0x0;
size_t begin = id * factor;
size_t end = id * factor + factor;
for(size_t idx = begin; idx < end; idx++){
bool bit = (input_data[idx] > (T)0);
int nidxshift = idx % factor;
bit_out = bit ? (0x80 >> nidxshift) : 0;
out |= bit_out;
}
output_data[id] = out;
}
template <typename T>
__global__ void _GistPack1DecoderKernel(
const uint8_t* input_data,
T* output_data,
const size_t factor,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); // id of Y (uncompressed tensor)
int nidx = id / factor;
int nidxshift = id % factor;
uint8_t mask = 0x80 >> nidxshift;
uint8_t in = input_data[nidx] & mask;
output_data[id] = (in > 0) ? (T)1 : (T)0;
}
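// Hedged host-side reference (not used by the CUDA path) of the Pack1 layout above: `factor`
// booleans (factor <= 8) are packed MSB-first into one byte, and a set bit decodes back to 1,
// a clear bit to 0.
inline uint8_t pack1_reference_encode(const bool* bits, size_t factor) {
  uint8_t out = 0;
  for (size_t j = 0; j < factor; ++j)
    out |= bits[j] ? (uint8_t)(0x80 >> j) : (uint8_t)0;
  return out;
}
inline bool pack1_reference_decode(uint8_t byte, size_t j) {
  return (byte & (0x80 >> j)) != 0;  // bit j, counted from the most significant bit
}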
template <typename T>
__global__ void _GistPack8EncoderKernel(
const T* input_data,
uint8_t* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T X = input_data[id];
if (X == (T)0) {
output_data[id] = (uint8_t)(0);
return;
}
uint32_t i = (uint32_t)__float_as_uint(X);
uint32_t e_mask = 0x7f800000;
uint32_t m_residual_mask = 0x00080000;
uint32_t m_mask = 0x007fffff;
uint32_t m_size = 23;
uint32_t e_size = 8;
uint32_t pack_e_size = 5;
uint32_t pack_m_size = 2;
uint8_t bias = 127;
switch(sizeof(T)){
case 4:
m_size = 23;
e_size = 8;
e_mask = 0x7f800000;
m_mask = 0x007fffff;
m_residual_mask = 0x00080000;
bias = 127;
break;
case 2:
m_size = 10;
e_size = 5;
e_mask = 0x0f800000;
m_mask = 0x000003ff;
m_residual_mask = 0x00000007;
bias = 15;
break;
}
uint32_t pack_e_shift = e_size - pack_e_size;
uint32_t pack_m_shift = m_size - pack_m_size;
uint32_t s = i >> (m_size + e_size);
uint32_t e = i & e_mask;
e >>= (m_size);
e -= bias;
uint32_t m = i & m_mask;
uint32_t pack_e = e >> pack_e_shift;
uint32_t pack_m = m >> pack_m_shift;
uint32_t m_residual = m & m_residual_mask;
if(m_residual > 0){ // round up
if(pack_m == 0x3){
pack_e +=1; // increase exponent
pack_m = 0;
}
else{
pack_m +=1; // increase mantissa
}
}
if (pack_e >= 0x1f) { //NaN values
pack_e = 0;
}
output_data[id] = (s << (pack_e_size + pack_m_size)) | (pack_e << pack_m_size) | pack_m;
}
template <typename T>
__global__ void _GistPack8DecoderKernel(
const uint8_t* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
uint8_t i = input_data[id];
if (i == 0) {
output_data[id] = (T)0;
return;
}
uint32_t pack_e_size = 5;
uint32_t pack_m_size = 2;
uint32_t pack_e_mask = 0x0000007c;
uint32_t pack_m_mask = 0x00000003;
uint32_t m_size = 23;
uint32_t e_size = 8;
uint32_t bias = 127;
switch(sizeof(T)){
case 4:
m_size = 23;
e_size = 8;
bias = 127;
break;
case 2:
m_size = 10;
e_size = 5;
bias = 15;
break;
}
uint32_t pack_e_shift = e_size - pack_e_size;
uint32_t s = i >> (pack_e_size+ pack_m_size);
uint32_t pack_e = i & pack_e_mask;
pack_e >>= pack_m_size;
uint32_t pack_m = i & pack_m_mask;
uint32_t unpack_e = pack_e << (pack_e_shift + m_size);
unpack_e += bias;
uint32_t unpack_m = pack_m << (m_size -pack_m_size);
uint32_t unpack = (s << (m_size+e_size)) | unpack_e | unpack_m;
output_data[id] = (T)__uint_as_float((unsigned int)unpack);
}
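// Hedged host-side sketch of the float32 bit fields the Pack8 kernels above manipulate:
// 1 sign bit, 8 exponent bits (bias 127) and 23 mantissa bits. Illustration of the decomposition
// only; it does not reproduce the 1-5-2 rounding logic. Assumes memcpy is available (<cstring>).
inline void split_float_bits(float x, uint32_t* sign, uint32_t* exponent, uint32_t* mantissa) {
  uint32_t bits;
  memcpy(&bits, &x, sizeof(bits));   // type-pun through memcpy
  *sign = bits >> 31;
  *exponent = (bits >> 23) & 0xff;   // biased exponent; subtract 127 for the true exponent
  *mantissa = bits & 0x007fffff;     // fraction without the implicit leading 1
}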
template <typename T>
__global__ void _GistPack16EncoderKernel(
const T* input_data,
half* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T X = input_data[id];
output_data[id] = __float2half(X);
}
template <typename T>
__global__ void _GistPack16DecoderKernel(
const half* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
half X = input_data[id];
output_data[id] = (T)__half2float(X);
}
template <typename T>
__global__ void _GistPackMsfp15EncoderKernel(
const T* input_data,
uint8_t* output_data,
const CUDA_LONG num_threads,
const CUDA_LONG pre_axis_size,
const CUDA_LONG axis_size,
const CUDA_LONG num_tiles,
const CUDA_LONG tile_size) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, num_threads);
// Quantization parameters
const int bits = 7;
// mantissa bits, remove sign
const int m_bits = bits - 1;
// float32 parameters
const uint32_t s_mask = 0x80000000;
const int s_shift = 31;
const int pack_s_shift = 6;
const uint32_t e_mask = 0x7f800000;
const int e_shift = 23;
const int pack_e_shift = 7;
const uint32_t m_mask = 0x007fffff;
const int tile_i = id % num_tiles;
const int pre_axis_i = id / num_tiles;
// Loop over bounding box to find shared exponent
uint32_t shared_exp = 0;
for (size_t i = 0; i < tile_size; i++) {
// Get input
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
T X = input_data[in_i];
uint32_t X_i = (uint32_t)__float_as_uint(X);
// Get exponent
uint32_t exp = (X_i & e_mask) >> e_shift;
// Shared exponent is max of exponents
if (exp > shared_exp) {
shared_exp = exp;
}
}
// If inf/nan is found, zero out values
if (shared_exp >= 0xff) {
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
output_data[in_i] = 0;
}
return;
}
// Copy of shared exponent for packing
uint32_t pack_shared_exp = shared_exp;
// Loop over bounding box to quantize
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
T X = input_data[in_i];
uint32_t X_i = (uint32_t)__float_as_uint(X);
// Get biased exponent
uint32_t exp = (X_i & e_mask) >> e_shift;
uint32_t sign;
uint32_t mantissa;
if (exp == 0) {
// Flush denorm to 0
sign = 0;
mantissa = 0;
} else {
// Decode float
sign = X_i & s_mask;
mantissa = X_i & m_mask;
// Difference in exponents
uint32_t exp_diff = shared_exp - exp;
// Implied 1
mantissa = mantissa + (1 << 23);
// Adjust for shared exponent
mantissa = mantissa >> exp_diff;
// Shift down to target bit width + 1
mantissa = mantissa >> (24 - m_bits - 1);
// Rounding (with overflow check)
if (mantissa != ((1 << (m_bits + 1)) - 1)) {
mantissa += 1;
}
// Shift away last bit
mantissa = mantissa >> 1;
}
// Store {exponent bit, mantissa} in output
uint8_t exp_bit = (pack_shared_exp % 2) << pack_e_shift;
pack_shared_exp = pack_shared_exp >> 1;
output_data[in_i] = (uint8_t) (exp_bit | (sign >> (s_shift - pack_s_shift)) | mantissa);
}
}
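// Format summary (as implemented above): every element of a tile is encoded into one byte as
// (shared-exponent bit << 7) | (sign bit << 6) | (6-bit mantissa). The tile's 8-bit shared
// exponent is distributed across the top bits of its first 8 elements (element i carries bit i),
// and each stored mantissa keeps its leading 1, shifted down by that element's distance from the
// shared exponent, so relative magnitudes survive the shared-exponent quantization.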
template <typename T>
__global__ void _GistPackMsfp15DecoderKernel(
const uint8_t* input_data,
T* output_data,
const CUDA_LONG num_threads,
const CUDA_LONG pre_axis_size,
const CUDA_LONG axis_size,
const CUDA_LONG num_tiles,
const CUDA_LONG tile_size) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, num_threads);
// Quantization parameters
const int bits = 7;
// mantissa bits, remove sign
const int mbits = bits - 1;
const int s_shift = 31;
const int pack_s_shift = 6;
const uint8_t pack_s_mask = 0x40;
const int e_shift = 23;
const int pack_e_shift = 7;
const uint8_t pack_m_mask = 0x3f;
const int tile_i = id % num_tiles;
const int pre_axis_i = id / num_tiles;
// Extract exponent
uint32_t shared_exp = 0;
for (int i = 7; i >= 0; i--) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
shared_exp = shared_exp << 1;
shared_exp += (input_data[in_i] >> pack_e_shift);
}
// De-quantize values
for (size_t i = 0; i < tile_size; i++) {
size_t in_i = pre_axis_i * axis_size +
tile_i * tile_size +
i;
uint8_t X = input_data[in_i];
// Get sign bit
uint32_t sign = X & pack_s_mask;
// Get mantissa
uint32_t mantissa = (uint32_t) (X & pack_m_mask);
if (mantissa == 0) {
output_data[in_i] = 0.0;
} else {
// Find leading 1
uint8_t leading_bit_pos = floorf(log2f(mantissa));
// Difference from shared exponent of this value
int exp_diff = 5 - leading_bit_pos;
// Adjust exponent
uint32_t exp = shared_exp - exp_diff;
// Shift back to restore mantissa
mantissa = mantissa << (24 - mbits + exp_diff);
// Remove implied 1
mantissa = mantissa & ((1 << 23) - 1);
// Reconstruct float number
uint32_t output = (sign << (s_shift - pack_s_shift)) | (exp << e_shift) | mantissa;
output_data[in_i] = (float)__uint_as_float(output);
}
}
}
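// Decoding notes: the shared exponent is reassembled from bit 7 of the tile's first 8 bytes
// (read in reverse so element i contributes bit i). Because the encoder stored each mantissa
// with its leading 1 intact, the position of that leading bit (recovered via log2f) gives how
// far the value's own exponent sits below the shared exponent (exp_diff = 5 - leading_bit_pos
// for a 6-bit mantissa); the mantissa is then shifted back up, the implied 1 is stripped, and
// the IEEE-754 bits are reassembled and reinterpreted with __uint_as_float.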
template <typename T>
void GistBinarizeEncoderImpl(
cudaStream_t stream,
const T* input_data,
bool* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistBinarizeEncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistBinarizeDecoderImpl(
cudaStream_t stream,
const bool* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistBinarizeDecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack1EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
cudaMemset(output_data, 0, N);
_GistPack1EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, GIST_PACK1_FACTOR, (CUDA_LONG)N);
}
template <typename T>
void GistPack1DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack1DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, GIST_PACK1_FACTOR, (CUDA_LONG)N);
}
template <typename T>
void GistPack8EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack8EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack8DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack8DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack16EncoderImpl(
cudaStream_t stream,
const T* input_data,
half* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack16EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPack16DecoderImpl(
cudaStream_t stream,
const half* input_data,
T* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
_GistPack16DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input_data, output_data, (CUDA_LONG)N);
}
template <typename T>
void GistPackMsfp15EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size) {
assert(axis_size % tile_size == 0);
const int num_tiles = static_cast<int>(axis_size / tile_size);
const int threads = static_cast<int>(pre_axis_size * num_tiles);
int blocksPerGrid = (int)(ceil(static_cast<float>(threads) / GridDim::maxThreadsPerBlock));
_GistPackMsfp15EncoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
output_data,
(CUDA_LONG)threads,
(CUDA_LONG)pre_axis_size,
(CUDA_LONG)axis_size,
(CUDA_LONG)num_tiles,
(CUDA_LONG)tile_size
);
}
template <typename T>
void GistPackMsfp15DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size) {
assert(axis_size % tile_size == 0);
const int num_tiles = static_cast<int>(axis_size / tile_size);
const int threads = static_cast<int>(pre_axis_size * num_tiles);
int blocksPerGrid = (int)(ceil(static_cast<float>(threads) / GridDim::maxThreadsPerBlock));
_GistPackMsfp15DecoderKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
output_data,
(CUDA_LONG)threads,
(CUDA_LONG)pre_axis_size,
(CUDA_LONG)axis_size,
(CUDA_LONG)num_tiles,
(CUDA_LONG)tile_size
);
}
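// Illustrative usage sketch (not part of the original source): assuming the surrounding
// onnxruntime CUDA build context (CUDA_LONG, GridDim) plus a valid device and stream, a Pack8
// round trip might be driven like this:
//
//   const size_t N = 1024;
//   float*   d_in  = nullptr;   // device input, N floats
//   uint8_t* d_enc = nullptr;   // device scratch, one packed byte per element
//   float*   d_out = nullptr;   // device output, N floats
//   cudaMalloc(&d_in,  N * sizeof(float));
//   cudaMalloc(&d_enc, N * sizeof(uint8_t));
//   cudaMalloc(&d_out, N * sizeof(float));
//   cudaStream_t stream;
//   cudaStreamCreate(&stream);
//   GistPack8EncoderImpl<float>(stream, d_in, d_enc, N);   // fp32 -> 1-5-2 packed bytes
//   GistPack8DecoderImpl<float>(stream, d_enc, d_out, N);  // packed bytes -> fp32
//   cudaStreamSynchronize(stream);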
#define SPECIALIZED_IMPL_BIN_ENC(T) \
template void GistBinarizeEncoderImpl<T>(cudaStream_t stream, const T* input_data, bool* output_data, const size_t N);
#define SPECIALIZED_IMPL_BIN_DEC(T) \
template void GistBinarizeDecoderImpl<T>(cudaStream_t stream, const bool* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK1_ENC(T) \
template void GistPack1EncoderImpl<T>(cudaStream_t stream, const T* input_data, uint8_t* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK1_DEC(T) \
template void GistPack1DecoderImpl<T>(cudaStream_t stream, const uint8_t* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK8_ENC(T) \
template void GistPack8EncoderImpl<T>(cudaStream_t stream, const T* input_data, uint8_t* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK8_DEC(T) \
template void GistPack8DecoderImpl<T>(cudaStream_t stream, const uint8_t* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK16_ENC(T) \
template void GistPack16EncoderImpl<T>(cudaStream_t stream, const T* input_data, half* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACK16_DEC(T) \
template void GistPack16DecoderImpl<T>(cudaStream_t stream, const half* input_data, T* output_data, const size_t N);
#define SPECIALIZED_IMPL_PACKMSFP15_ENC(T) \
template void GistPackMsfp15EncoderImpl<T>(cudaStream_t stream, const T* input_data, uint8_t* output_data, const size_t pre_axis_size, const size_t axis_size, const size_t tile_size);
#define SPECIALIZED_IMPL_PACKMSFP15_DEC(T) \
template void GistPackMsfp15DecoderImpl<T>(cudaStream_t stream, const uint8_t* input_data, T* output_data, const size_t pre_axis_size, const size_t axis_size, const size_t tile_size);
SPECIALIZED_IMPL_BIN_ENC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_BIN_ENC(half)
#endif
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_IMPL_BIN_ENC(double)
#endif
SPECIALIZED_IMPL_BIN_DEC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_BIN_DEC(half)
#endif
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
SPECIALIZED_IMPL_BIN_DEC(double)
#endif
SPECIALIZED_IMPL_PACK1_ENC(bool)
SPECIALIZED_IMPL_PACK1_ENC(float)
SPECIALIZED_IMPL_PACK1_DEC(bool)
SPECIALIZED_IMPL_PACK1_DEC(float)
SPECIALIZED_IMPL_PACK8_ENC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_PACK8_ENC(half)
#endif
SPECIALIZED_IMPL_PACK8_DEC(float)
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
SPECIALIZED_IMPL_PACK8_DEC(half)
#endif
SPECIALIZED_IMPL_PACK16_ENC(float)
SPECIALIZED_IMPL_PACK16_DEC(float)
SPECIALIZED_IMPL_PACKMSFP15_ENC(float)
SPECIALIZED_IMPL_PACKMSFP15_DEC(float)
} // namespace cuda
} // namespace onnxruntime
#include "util/type_name.hpp"
#include "util/miscellany.cuh"
#include <kat/on_device/streams/printfing_ostream.cuh>
#include <kat/on_device/collaboration/block.cuh>
#include <kat/on_device/time.cuh>
#include <doctest.h>
#include <cuda/api_wrappers.hpp>
constexpr const auto num_grid_blocks { 2 };
constexpr const auto block_size { 2 };
constexpr const std::size_t stringstream_buffer_size { 50 };
KAT_DEV kat::stringstream& operator<<(kat::stringstream& os, const util::constexpr_string& arg)
{
return os << strf::range<const char*>(arg.begin(), arg.end());
}
namespace kernels {
__global__ void stream_different_types_to_stringstream()
{
char buff[40] = "char buffer original contents";
kat::stringstream ss(stringstream_buffer_size);
ss << "A string literal.\n";
ss << "A single character - the letter a: " << 'a' << "\n";
ss << "An array of of characters on the stack: \"" << buff << "\"\n";
ss << "Positive signed int literal: " << 123 << '\n';
ss << "Negative signed int literal: " << -456 << '\n';
ss << "A float-type value (1/3): " << ( ((float) 1) / 3 ) << '\n';
ss << "A double-type value (1/3): " << ( ((float) 1) / 3.0 ) << '\n';
// This is not supported:
// ss << "A non-latin, non-ASCII character: " << (char32_t) 'ת' << '\n';
printf("The stringstream contents:\n%s", ss.c_str());
}
template <typename T>
__global__ void stream_to_stringstream_templated()
{
kat::stringstream ss(stringstream_buffer_size);
ss << "A default-initialized value of type T (" << util::type_name<T>() << "): " << T{} << '\n';
printf("The stringstream contents:\n%s", ss.c_str());
}
__global__ void use_formatting_functions()
{
kat::stringstream ss(stringstream_buffer_size);
auto width = 8;
ss << "No actual formatting, just wrapping in strf::fmt():\n";
ss << strf::fmt(123) << '\n';
ss << "Hexadecimal:\n";
ss << strf::hex(123) << '\n';
ss << "Octal:\n";
ss << strf::oct(123) << '\n';
ss << "Binary:\n";
ss << strf::bin(123) << '\n';
ss << "Set fill character, without setting width:\n";
ss << strf::fmt(123).fill('0') << '\n';
ss << "Set fill character, set width of " << width << ", right alignment; then a space and more text:\n";
ss << (strf::fmt(123).fill('0') > width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", left alignment; then a space more text:\n";
ss << (strf::fmt(123).fill('0') < width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", center alignment; then a space more text:\n";
ss << (strf::fmt(123).fill('0') ^ width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", internal fill using hex; then a space more text:\n";
ss << (strf::fmt(123).fill('0').hex() % width) << " and more text\n";
// TODO: More strf formatting functions
ss << "strf::right(0,2,'0') gives " << strf::right(0, 2, '0');
printf("The stringstream contents: \"%s\"\n\n", ss.c_str());
}
__global__ void use_stringstream()
{
kat::stringstream ss{stringstream_buffer_size}; // longer than some, but not all, of the strings we have here
ss << "A string literal";
printf("Position in the stream: %u. stream contents (enclosed in double-quotes): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
printf("Seeking to the beginning of the stream.\n");
ss.seekp(0);
printf("Position in the stream: %u. stream contents (enclosed in double-quotes, should be empty): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
}
__global__ void use_zero_initialized_stringstream()
{
kat::stringstream ss{0};
ss << "A string literal";
printf("Position in the stream: %u. stream contents (enclosed in double-quotes): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
printf("Seeking to the beginning of the stream.\n");
ss.seekp(0);
printf("Position in the stream: %u. stream contents (enclosed in double-quotes, should be empty): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
}
__global__ void use_printfing_ostream()
{
kat::printfing_ostream cout;
cout << "String literal 1 with newline - to be printed on call of .flush() method\n";
cout.flush();
kat::collaborative::block::barrier();
if (kat::linear_grid::grid_info::thread::index_in_block() == 0) {
printf("All threads in block %d have flushed cout.\n", blockIdx.x);
}
cout << "String literal 2 with newline - to be printed on use of flush manipulator\n";
cout << kat::flush;
kat::collaborative::block::barrier();
if (kat::linear_grid::grid_info::thread::index_in_block() == 0) {
printf("All threads in block %d have streamed the flush manipulator to their cout.\n", blockIdx.x);
}
cout << "String literal 3 with newline - to be printed on destruction\n";
}
__global__ void printfing_ostream_settings()
{
kat::printfing_ostream cout;
namespace gi = kat::linear_grid::grid_info;
cout << "Before any setting\n";
cout.flush();
// TODO: What if the text is big enough to cause recycling? That shouldn't matter, but we should try it.
cout.append_newline_on_flush();
cout << "SHOULD see \\n between threads' printouts of this sentence. ";
cout.flush();
cout.no_newline_on_flush();
cout << "SHOULD NOT see \\n between threads' printouts of this sentence. ";
cout.flush();
if (kat::linear_grid::grid_info::thread::is_first_in_grid()) {
cout << '\n';
cout.flush();
}
// This will just add a newline after the long paragraph of many threads' non-newline-terminated strings.
auto block_and_thread_gen = [](kat::stringstream& ss) {
ss << "Block " << gi::block::index() << ", Thread " << gi::thread::index_in_block() << ": ";
};
cout.set_prefix_generator(block_and_thread_gen);
cout << "A prefix with the thread and block number SHOULD appear before this sentence.\n";
cout.flush();
cout.no_prefix();
cout << "A prefix with the thread and block number SHOULD NOT appear before this sentence.\n";
cout.flush();
// resolution and identification
}
__global__ void stream_manipulators_into_printfing_ostream()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
cout << "Before any setting\n" << flush;
// TODO: What if the text is big enough to cause recycling? That shouldn't matter, but we should try it.
cout << kat::manipulators::newline_on_flush
<< "SHOULD see \\n between threads' printouts of this sentence. " << flush
<< kat::manipulators::no_newline_on_flush
<< "SHOULD NOT see \\n between threads' printouts of this sentence. " << flush;
if (kat::linear_grid::grid_info::thread::is_first_in_grid()) {
// This will just add a newline after the long paragraph of many threads' non-newline-terminated strings.
cout << kat::manipulators::endl;
}
auto block_and_thread_gen = [](kat::stringstream& ss) {
ss << "Block " << gi::block::index() << ", Thread " << gi::thread::index_in_block() << ": ";
};
cout << kat::manipulators::prefix(block_and_thread_gen)
<< "A prefix with the thread and block number SHOULD appear before this sentence.\n" << flush
<< kat::manipulators::no_prefix
<< "A prefix with the thread and block number SHOULD NOT appear before this sentence.\n" << flush;
// resolution and self-identification
// cout << strf::join_right(15,'*')("joined right") << '\n' << flush;
}
__device__ const char* to_string(kat::printfing_ostream::resolution res)
{
switch(res) {
case kat::printfing_ostream::resolution::thread : return "thread";
case kat::printfing_ostream::resolution::warp : return "warp";
case kat::printfing_ostream::resolution::block : return "block";
case kat::printfing_ostream::resolution::grid : return "grid";
}
return nullptr;
}
__global__ void print_at_different_resolutions()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::grid);
cout << "Printing at grid resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::warp);
cout << "Printing at warp resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::thread);
cout << "Printing at thread resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
}
__device__ void sipo_for_resolution(kat::printfing_ostream& os, kat::printfing_ostream::resolution res)
{
os
<< kat::manipulators::resolution(res)
<< kat::linear_grid::manipulators::identify
<< "Printing to a self-identifying ostream with resolution "
<< to_string(os.printing_resolution())
<< kat::endl;
}
__global__ void self_identifying_printfing_ostream()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
sipo_for_resolution(cout, kat::printfing_ostream::resolution::grid);
kat::sleep<kat::sleep_resolution::clock_cycles>(1e8);
sipo_for_resolution(cout, kat::printfing_ostream::resolution::block);
__syncthreads();
sipo_for_resolution(cout, kat::printfing_ostream::resolution::warp);
__syncthreads();
sipo_for_resolution(cout, kat::printfing_ostream::resolution::thread);
__syncthreads();
}
} // namespace kernels
TEST_SUITE("printing") {
TEST_CASE("use_stringstream")// INTEGER_TYPES)
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE("use_zero_initialized_stringstream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_zero_initialized_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE("stream_different_types_to_stringstream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_different_types_to_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE_TEMPLATE("stream_to_stringstream_templated", T, long long int, short)
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_to_stringstream_templated<T>, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("use_formatting_functions")
{
auto device { cuda::device::current::get() };
device.reset();
cuda::launch(kernels::use_formatting_functions, single_thread_launch_config());
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("use_printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("printfing_ostream_settings")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::printfing_ostream_settings, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("stream manipulators into printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_manipulators_into_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("print_at_different_resolutions")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::print_at_different_resolutions, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("self-identifying printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::self_identifying_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
} // TEST_SUITE("printing")
/******************************************************************************
* Abstract tile-processing functionality for partitioning downsweep scan
* kernels
******************************************************************************/
#pragma once
#include <b40c/util/cuda_properties.cuh>
#include <b40c/util/basic_utils.cuh>
#include <b40c/util/io/modified_load.cuh>
#include <b40c/util/io/modified_store.cuh>
#include <b40c/util/io/load_tile.cuh>
#include <b40c/util/io/scatter_tile.cuh>
#include <b40c/util/reduction/serial_reduce.cuh>
#include <b40c/util/scan/serial_scan.cuh>
#include <b40c/util/scan/warp_scan.cuh>
#include <b40c/util/device_intrinsics.cuh>
namespace b40c {
namespace partition {
namespace downsweep {
/**
* Tile
*
* Abstract class
*/
template <
typename KernelPolicy,
typename DerivedTile>
struct Tile
{
//---------------------------------------------------------------------
// Typedefs and Constants
//---------------------------------------------------------------------
typedef typename KernelPolicy::KeyType KeyType;
typedef typename KernelPolicy::ValueType ValueType;
typedef typename KernelPolicy::SizeT SizeT;
typedef DerivedTile Dispatch;
enum {
LOAD_VEC_SIZE = KernelPolicy::LOAD_VEC_SIZE,
LOADS_PER_CYCLE = KernelPolicy::LOADS_PER_CYCLE,
CYCLES_PER_TILE = KernelPolicy::CYCLES_PER_TILE,
TILE_ELEMENTS_PER_THREAD = KernelPolicy::TILE_ELEMENTS_PER_THREAD,
SCAN_LANES_PER_CYCLE = KernelPolicy::SCAN_LANES_PER_CYCLE,
INVALID_BIN = -1,
};
//---------------------------------------------------------------------
// Members
//---------------------------------------------------------------------
// The keys (and values) this thread will read this cycle
KeyType keys[CYCLES_PER_TILE][LOADS_PER_CYCLE][LOAD_VEC_SIZE];
ValueType values[TILE_ELEMENTS_PER_THREAD];
int local_ranks[CYCLES_PER_TILE][LOADS_PER_CYCLE][LOAD_VEC_SIZE]; // The local rank of each key
int key_bins[CYCLES_PER_TILE][LOADS_PER_CYCLE][LOAD_VEC_SIZE]; // The bin for each key
SizeT scatter_offsets[CYCLES_PER_TILE][LOADS_PER_CYCLE][LOAD_VEC_SIZE]; // The global rank of each key
int counter_offsets[LOADS_PER_CYCLE][LOAD_VEC_SIZE]; // The (byte) counter offset for each key
// Counts of my bin in each load in each cycle, valid in threads [0,BINS)
int bin_counts[CYCLES_PER_TILE][LOADS_PER_CYCLE];
//---------------------------------------------------------------------
// Abstract Interface
//---------------------------------------------------------------------
/**
* Returns whether or not the key is valid.
*
* To be overloaded.
*/
template <typename Cta>
__device__ __forceinline__ SizeT ValidElements(Cta *cta, const SizeT &guarded_elements)
{
return guarded_elements;
}
/**
* Returns the bin into which the specified key is to be placed.
*
* To be overloaded
*/
template <typename Cta>
__device__ __forceinline__ int DecodeBin(KeyType key, Cta *cta);
/**
* Returns whether or not the key is valid.
*
* To be overloaded.
*/
template <int CYCLE, int LOAD, int VEC>
__device__ __forceinline__ bool IsValid();
/**
* Loads keys into the tile
*
* Can be overloaded.
*/
template <typename Cta>
__device__ __forceinline__ void LoadKeys(
Cta *cta,
SizeT cta_offset,
const SizeT &guarded_elements)
{
util::io::LoadTile<
KernelPolicy::LOG_LOADS_PER_TILE,
KernelPolicy::LOG_LOAD_VEC_SIZE,
KernelPolicy::THREADS,
KernelPolicy::READ_MODIFIER,
KernelPolicy::CHECK_ALIGNMENT>::LoadValid(
(KeyType (*)[KernelPolicy::LOAD_VEC_SIZE]) keys,
cta->d_in_keys,
cta_offset,
guarded_elements);
}
/**
* Scatter keys from the tile
*
* Can be overloaded.
*/
template <typename Cta>
__device__ __forceinline__ void ScatterKeys(
Cta *cta,
const SizeT &guarded_elements)
{
// Scatter keys to global bin partitions
util::io::ScatterTile<
KernelPolicy::LOG_TILE_ELEMENTS_PER_THREAD,
0,
KernelPolicy::THREADS,
KernelPolicy::WRITE_MODIFIER>::Scatter(
cta->d_out_keys,
(KeyType (*)[1]) keys,
(SizeT (*)[1]) scatter_offsets,
guarded_elements);
}
/**
* Loads values into the tile
*
* Can be overloaded.
*/
template <typename Cta>
__device__ __forceinline__ void LoadValues(
Cta *cta,
SizeT cta_offset,
const SizeT &guarded_elements)
{
// Read values
util::io::LoadTile<
KernelPolicy::LOG_LOADS_PER_TILE,
KernelPolicy::LOG_LOAD_VEC_SIZE,
KernelPolicy::THREADS,
KernelPolicy::READ_MODIFIER,
KernelPolicy::CHECK_ALIGNMENT>::LoadValid(
(ValueType (*)[KernelPolicy::LOAD_VEC_SIZE]) values,
cta->d_in_values,
cta_offset,
guarded_elements);
}
/**
* Scatter values from the tile
*
* Can be overloaded.
*/
template <typename Cta>
__device__ __forceinline__ void ScatterValues(
Cta *cta,
const SizeT &guarded_elements)
{
// Scatter values to global bin partitions
util::io::ScatterTile<
KernelPolicy::LOG_TILE_ELEMENTS_PER_THREAD,
0,
KernelPolicy::THREADS,
KernelPolicy::WRITE_MODIFIER>::Scatter(
cta->d_out_values,
(ValueType (*)[1]) values,
(SizeT (*)[1]) scatter_offsets,
guarded_elements);
}
//---------------------------------------------------------------------
// Helper Structures
//---------------------------------------------------------------------
/**
* Computes the number of previously-binned keys owned by the calling thread
* that have been marked for the specified bin.
*/
struct SameBinCount
{
// Inspect previous vec-element
template <int CYCLE, int LOAD, int VEC>
struct Iterate
{
static __device__ __forceinline__ int Invoke(Tile *tile, int current_bin)
{
return (current_bin == tile->key_bins[CYCLE][LOAD][VEC - 1]) +
Iterate<CYCLE, LOAD, VEC - 1>::Invoke(tile, current_bin);
}
};
// Terminate (0th vec-element has no previous elements)
template <int CYCLE, int LOAD>
struct Iterate<CYCLE, LOAD, 0>
{
static __device__ __forceinline__ int Invoke(Tile *tile, int current_bin)
{
return 0;
}
};
};
//---------------------------------------------------------------------
// Cycle Methods
//---------------------------------------------------------------------
/**
* DecodeKeys
*/
template <int CYCLE, int LOAD, int VEC, typename Cta>
__device__ __forceinline__ void DecodeKeys(Cta *cta)
{
Dispatch *dispatch = (Dispatch *) this;
// Update composite-counter
if (dispatch->template IsValid<CYCLE, LOAD, VEC>()) {
const int PADDED_BYTES_PER_LANE = KernelPolicy::Grid::ROWS_PER_LANE * KernelPolicy::Grid::PADDED_PARTIALS_PER_ROW * 4;
const int LOAD_OFFSET_BYTES = LOAD * KernelPolicy::SCAN_LANES_PER_LOAD * PADDED_BYTES_PER_LANE;
const KeyType COUNTER_BYTE_MASK = (KernelPolicy::LOG_BINS < 2) ? 0x1 : 0x3;
// Decode the bin for this key
key_bins[CYCLE][LOAD][VEC] = dispatch->DecodeBin(keys[CYCLE][LOAD][VEC], cta);
// Decode composite-counter lane and sub-counter from bin
int lane = key_bins[CYCLE][LOAD][VEC] >> 2; // extract composite counter lane
int sub_counter = key_bins[CYCLE][LOAD][VEC] & COUNTER_BYTE_MASK; // extract 8-bit counter offset
// Compute partial (because we overwrite, we need to accommodate all previous
// vec-elements if they have the same bin)
int partial = 1 + SameBinCount::template Iterate<CYCLE, LOAD, VEC>::Invoke(
dispatch,
key_bins[CYCLE][LOAD][VEC]);
// Counter offset in bytes from this thread's "base_composite_counter" location
counter_offsets[LOAD][VEC] =
LOAD_OFFSET_BYTES +
util::FastMul(lane, PADDED_BYTES_PER_LANE) +
sub_counter;
// Overwrite partial
unsigned char *base_partial_chars = (unsigned char *) cta->base_composite_counter;
base_partial_chars[counter_offsets[LOAD][VEC]] = partial;
} else {
key_bins[CYCLE][LOAD][VEC] = INVALID_BIN;
}
}
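// Composite-counter layout used above: each 32-bit scan-lane word packs four 8-bit bin counters,
// so a key's bin selects lane = bin >> 2 and sub_counter = bin & 0x3 (the byte within that word;
// the mask narrows to 0x1 when there are fewer than four bins). The byte offset of that counter
// from this thread's base_composite_counter is cached in counter_offsets so ExtractRanks can read
// the scanned value back, and the written partial counts this key plus any earlier vec-elements
// that landed in the same bin (the byte is overwritten, not accumulated).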
/**
* ExtractRanks
*/
template <int CYCLE, int LOAD, int VEC, typename Cta>
__device__ __forceinline__ void ExtractRanks(Cta *cta)
{
Dispatch *dispatch = (Dispatch *) this;
if (dispatch->template IsValid<CYCLE, LOAD, VEC>()) {
unsigned char *base_partial_chars = (unsigned char *) cta->base_composite_counter;
local_ranks[CYCLE][LOAD][VEC] = base_partial_chars[counter_offsets[LOAD][VEC]] +
SameBinCount::template Iterate<CYCLE, LOAD, VEC>::Invoke(
dispatch,
key_bins[CYCLE][LOAD][VEC]);
} else {
// Put invalid keys just after the end of the valid swap exchange.
local_ranks[CYCLE][LOAD][VEC] = KernelPolicy::TILE_ELEMENTS;
}
}
/**
* UpdateRanks
*/
template <int CYCLE, int LOAD, int VEC, typename Cta>
__device__ __forceinline__ void UpdateRanks(Cta *cta)
{
Dispatch *dispatch = (Dispatch *) this;
if (dispatch->template IsValid<CYCLE, LOAD, VEC>()) {
// Update this key's rank with the bin-prefix for its bin
local_ranks[CYCLE][LOAD][VEC] +=
cta->smem_storage.bin_prefixes[CYCLE][LOAD][key_bins[CYCLE][LOAD][VEC]];
}
}
/**
* UpdateGlobalOffsets
*/
template <int CYCLE, int LOAD, int VEC, typename Cta>
__device__ __forceinline__ void UpdateGlobalOffsets(Cta *cta)
{
Dispatch *dispatch = (Dispatch *) this;
if (dispatch->template IsValid<CYCLE, LOAD, VEC>()) {
// Update this key's global scatter offset with its
// cycle rank and with the bin-prefix for its bin
scatter_offsets[CYCLE][LOAD][VEC] =
local_ranks[CYCLE][LOAD][VEC] +
cta->smem_storage.bin_prefixes[CYCLE][LOAD][key_bins[CYCLE][LOAD][VEC]];
}
}
/**
* ResetLanes
*/
template <int LANE, typename Cta>
__device__ __forceinline__ void ResetLanes(Cta *cta)
{
cta->base_composite_counter[LANE][0] = 0;
}
//---------------------------------------------------------------------
// IterateCycleLanes Structures
//---------------------------------------------------------------------
/**
* Iterate next lane
*/
template <int LANE, int dummy = 0>
struct IterateCycleLanes
{
// ResetLanes
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ResetLanes(Cta *cta, Tile *tile)
{
tile->ResetLanes<LANE>(cta);
IterateCycleLanes<LANE + 1>::ResetLanes(cta, tile);
}
};
/**
* Terminate lane iteration
*/
template <int dummy>
struct IterateCycleLanes<SCAN_LANES_PER_CYCLE, dummy>
{
// ResetLanes
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ResetLanes(Cta *cta, Tile *tile) {}
};
//---------------------------------------------------------------------
// IterateCycleElements Structures
//---------------------------------------------------------------------
/**
* Iterate next vector element
*/
template <int CYCLE, int LOAD, int VEC, int dummy = 0>
struct IterateCycleElements
{
// DecodeKeys
template <typename Cta, typename Tile>
static __device__ __forceinline__ void DecodeKeys(Cta *cta, Tile *tile)
{
tile->DecodeKeys<CYCLE, LOAD, VEC>(cta);
IterateCycleElements<CYCLE, LOAD, VEC + 1>::DecodeKeys(cta, tile);
}
// ExtractRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ExtractRanks(Cta *cta, Tile *tile)
{
tile->ExtractRanks<CYCLE, LOAD, VEC>(cta);
IterateCycleElements<CYCLE, LOAD, VEC + 1>::ExtractRanks(cta, tile);
}
// UpdateRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateRanks(Cta *cta, Tile *tile)
{
tile->UpdateRanks<CYCLE, LOAD, VEC>(cta);
IterateCycleElements<CYCLE, LOAD, VEC + 1>::UpdateRanks(cta, tile);
}
// UpdateGlobalOffsets
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateGlobalOffsets(Cta *cta, Tile *tile)
{
tile->UpdateGlobalOffsets<CYCLE, LOAD, VEC>(cta);
IterateCycleElements<CYCLE, LOAD, VEC + 1>::UpdateGlobalOffsets(cta, tile);
}
};
/**
* IterateCycleElements next load
*/
template <int CYCLE, int LOAD, int dummy>
struct IterateCycleElements<CYCLE, LOAD, LOAD_VEC_SIZE, dummy>
{
// DecodeKeys
template <typename Cta, typename Tile>
static __device__ __forceinline__ void DecodeKeys(Cta *cta, Tile *tile)
{
IterateCycleElements<CYCLE, LOAD + 1, 0>::DecodeKeys(cta, tile);
}
// ExtractRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ExtractRanks(Cta *cta, Tile *tile)
{
IterateCycleElements<CYCLE, LOAD + 1, 0>::ExtractRanks(cta, tile);
}
// UpdateRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateRanks(Cta *cta, Tile *tile)
{
IterateCycleElements<CYCLE, LOAD + 1, 0>::UpdateRanks(cta, tile);
}
// UpdateGlobalOffsets
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateGlobalOffsets(Cta *cta, Tile *tile)
{
IterateCycleElements<CYCLE, LOAD + 1, 0>::UpdateGlobalOffsets(cta, tile);
}
};
/**
* Terminate iteration
*/
template <int CYCLE, int dummy>
struct IterateCycleElements<CYCLE, LOADS_PER_CYCLE, 0, dummy>
{
// DecodeKeys
template <typename Cta, typename Tile>
static __device__ __forceinline__ void DecodeKeys(Cta *cta, Tile *tile) {}
// ExtractRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ExtractRanks(Cta *cta, Tile *tile) {}
// UpdateRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateRanks(Cta *cta, Tile *tile) {}
// UpdateGlobalOffsets
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateGlobalOffsets(Cta *cta, Tile *tile) {}
};
//---------------------------------------------------------------------
// Tile Internal Methods
//---------------------------------------------------------------------
/**
* Scan Cycle
*/
template <int CYCLE, typename Cta>
__device__ __forceinline__ void ScanCycle(Cta *cta)
{
Dispatch *dispatch = (Dispatch*) this;
// Reset smem composite counters
IterateCycleLanes<0>::ResetLanes(cta, dispatch);
// Decode bins and update 8-bit composite counters for the keys in this cycle
IterateCycleElements<CYCLE, 0, 0>::DecodeKeys(cta, dispatch);
__syncthreads();
// Use our raking threads to, in aggregate, scan the composite counter lanes
if (threadIdx.x < KernelPolicy::Grid::RAKING_THREADS) {
// Upsweep rake
int partial = util::reduction::SerialReduce<KernelPolicy::Grid::PARTIALS_PER_SEG>::Invoke(
cta->raking_segment);
int warpscan_lane = threadIdx.x >> KernelPolicy::Grid::LOG_RAKING_THREADS_PER_LANE;
int warpscan_tid = threadIdx.x & (KernelPolicy::Grid::RAKING_THREADS_PER_LANE - 1);
// Inclusive warpscan in bin warpscan_lane
int inclusive_prefix = util::scan::WarpScan<KernelPolicy::Grid::LOG_RAKING_THREADS_PER_LANE, false>::Invoke(
partial,
cta->smem_storage.lanes_warpscan[warpscan_lane],
warpscan_tid);
int exclusive_prefix = inclusive_prefix - partial;
// Save off each lane's warpscan total for this cycle
if (warpscan_tid == KernelPolicy::Grid::RAKING_THREADS_PER_LANE - 1) {
cta->smem_storage.lane_totals[CYCLE][warpscan_lane][0] = exclusive_prefix;
cta->smem_storage.lane_totals[CYCLE][warpscan_lane][1] = partial;
}
// Downsweep rake
util::scan::SerialScan<KernelPolicy::Grid::PARTIALS_PER_SEG>::Invoke(
cta->raking_segment,
exclusive_prefix);
}
__syncthreads();
// Extract the local ranks of each key
IterateCycleElements<CYCLE, 0, 0>::ExtractRanks(cta, dispatch);
}
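// ScanCycle performs a two-level scan over the composite-counter lanes: each raking thread first
// serially reduces its segment of partials, the raking threads assigned to a lane then run an
// inclusive Kogge-Stone warpscan (the last thread of each lane saves its exclusive prefix and its
// partial in lane_totals, whose sum is the lane total recovered later per bin byte), and finally
// each raking thread serially scans its segment seeded with that exclusive prefix, so each packed
// 8-bit counter ends up holding the exclusive prefix for its bin within the cycle.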
/**
* RecoverBinCounts
*
* Called by threads [0, KernelPolicy::BINS)
*/
template <int CYCLE, int LOAD, typename Cta>
__device__ __forceinline__ void RecoverBinCounts(
int my_base_lane, int my_quad_byte, Cta *cta)
{
bin_counts[CYCLE][LOAD] =
cta->smem_storage.lane_totals_c[CYCLE][LOAD][my_base_lane][0][my_quad_byte] +
cta->smem_storage.lane_totals_c[CYCLE][LOAD][my_base_lane][1][my_quad_byte];
}
/**
* UpdateBinPrefixes
*
* Called by threads [0, KernelPolicy::BINS)
*/
template <int CYCLE, int LOAD, typename Cta>
__device__ __forceinline__ void UpdateBinPrefixes(int bin_prefix, Cta *cta)
{
cta->smem_storage.bin_prefixes[CYCLE][LOAD][threadIdx.x] = bin_counts[CYCLE][LOAD] + bin_prefix;
}
/**
* DecodeGlobalOffsets
*/
template <int ELEMENT, typename Cta>
__device__ __forceinline__ void DecodeGlobalOffsets(Cta *cta)
{
Dispatch *dispatch = (Dispatch*) this;
KeyType *linear_keys = (KeyType *) keys;
SizeT *linear_offsets = (SizeT *) scatter_offsets;
int bin = dispatch->DecodeBin(linear_keys[ELEMENT], cta);
linear_offsets[ELEMENT] =
cta->smem_storage.bin_carry[bin] +
(KernelPolicy::THREADS * ELEMENT) + threadIdx.x;
}
//---------------------------------------------------------------------
// IterateCycles Structures
//---------------------------------------------------------------------
/**
* Iterate next cycle
*/
template <int CYCLE, int dummy = 0>
struct IterateCycles
{
// UpdateRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateRanks(Cta *cta, Tile *tile)
{
IterateCycleElements<CYCLE, 0, 0>::UpdateRanks(cta, tile);
IterateCycles<CYCLE + 1>::UpdateRanks(cta, tile);
}
// UpdateGlobalOffsets
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateGlobalOffsets(Cta *cta, Tile *tile)
{
IterateCycleElements<CYCLE, 0, 0>::UpdateGlobalOffsets(cta, tile);
IterateCycles<CYCLE + 1>::UpdateGlobalOffsets(cta, tile);
}
// ScanCycles
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ScanCycles(Cta *cta, Tile *tile)
{
tile->ScanCycle<CYCLE>(cta);
IterateCycles<CYCLE + 1>::ScanCycles(cta, tile);
}
};
/**
* Terminate iteration
*/
template <int dummy>
struct IterateCycles<CYCLES_PER_TILE, dummy>
{
// UpdateRanks
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateRanks(Cta *cta, Tile *tile) {}
// UpdateGlobalOffsets
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateGlobalOffsets(Cta *cta, Tile *tile) {}
// ScanCycles
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ScanCycles(Cta *cta, Tile *tile) {}
};
//---------------------------------------------------------------------
// IterateCycleLoads Structures
//---------------------------------------------------------------------
/**
* Iterate next load
*/
template <int CYCLE, int LOAD, int dummy = 0>
struct IterateCycleLoads
{
// RecoverBinCounts
template <typename Cta, typename Tile>
static __device__ __forceinline__ void RecoverBinCounts(
int my_base_lane, int my_quad_byte, Cta *cta, Tile *tile)
{
tile->template RecoverBinCounts<CYCLE, LOAD>(my_base_lane, my_quad_byte, cta);
IterateCycleLoads<CYCLE, LOAD + 1>::RecoverBinCounts(my_base_lane, my_quad_byte, cta, tile);
}
// UpdateBinPrefixes
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateBinPrefixes(
int bin_prefix, Cta *cta, Tile *tile)
{
tile->template UpdateBinPrefixes<CYCLE, LOAD>(bin_prefix, cta);
IterateCycleLoads<CYCLE, LOAD + 1>::UpdateBinPrefixes(bin_prefix, cta, tile);
}
};
/**
* Iterate next cycle
*/
template <int CYCLE, int dummy>
struct IterateCycleLoads<CYCLE, LOADS_PER_CYCLE, dummy>
{
// RecoverBinCounts
template <typename Cta, typename Tile>
static __device__ __forceinline__ void RecoverBinCounts(
int my_base_lane, int my_quad_byte, Cta *cta, Tile *tile)
{
IterateCycleLoads<CYCLE + 1, 0>::RecoverBinCounts(my_base_lane, my_quad_byte, cta, tile);
}
// UpdateBinPrefixes
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateBinPrefixes(
int bin_prefix, Cta *cta, Tile *tile)
{
IterateCycleLoads<CYCLE + 1, 0>::UpdateBinPrefixes(bin_prefix, cta, tile);
}
};
/**
* Terminate iteration
*/
template <int dummy>
struct IterateCycleLoads<CYCLES_PER_TILE, 0, dummy>
{
// RecoverBinCounts
template <typename Cta, typename Tile>
static __device__ __forceinline__ void RecoverBinCounts(int my_base_lane, int my_quad_byte, Cta *cta, Tile *tile) {}
// UpdateBinPrefixes
template <typename Cta, typename Tile>
static __device__ __forceinline__ void UpdateBinPrefixes(int bin_prefix, Cta *cta, Tile *tile) {}
};
//---------------------------------------------------------------------
// IterateElements Structures
//---------------------------------------------------------------------
/**
* Iterate next tile element
*/
template <int ELEMENT, int dummy = 0>
struct IterateElements
{
// DecodeGlobalOffsets
template <typename Cta, typename Tile>
static __device__ __forceinline__ void DecodeGlobalOffsets(Cta *cta, Tile *tile)
{
tile->DecodeGlobalOffsets<ELEMENT>(cta);
IterateElements<ELEMENT + 1>::DecodeGlobalOffsets(cta, tile);
}
};
/**
* Terminate iteration
*/
template <int dummy>
struct IterateElements<TILE_ELEMENTS_PER_THREAD, dummy>
{
// DecodeGlobalOffsets
template <typename Cta, typename Tile>
static __device__ __forceinline__ void DecodeGlobalOffsets(Cta *cta, Tile *tile) {}
};
//---------------------------------------------------------------------
// Partition/scattering specializations
//---------------------------------------------------------------------
template <
ScatterStrategy SCATTER_STRATEGY,
int dummy = 0>
struct PartitionTile;
/**
* Specialized for two-phase scatter, keys-only
*/
template <
ScatterStrategy SCATTER_STRATEGY,
int dummy>
struct PartitionTile
{
enum {
MEM_BANKS = 1 << B40C_LOG_MEM_BANKS(__B40C_CUDA_ARCH__),
DIGITS_PER_SCATTER_PASS = KernelPolicy::WARPS * (B40C_WARP_THREADS(__B40C_CUDA_ARCH__) / (MEM_BANKS)),
SCATTER_PASSES = KernelPolicy::BINS / DIGITS_PER_SCATTER_PASS,
};
template <typename T>
static __device__ __forceinline__ void Nop(T &t) {}
/**
* Warp based scattering that does not cross alignment boundaries, e.g., for SM1.0-1.1
* coalescing rules
*/
template <int PASS, int SCATTER_PASSES>
struct WarpScatter
{
template <typename T, void Transform(T&), typename Cta>
static __device__ __forceinline__ void ScatterPass(
Cta *cta,
T *exchange,
T *d_out,
const SizeT &valid_elements)
{
const int LOG_STORE_TXN_THREADS = B40C_LOG_MEM_BANKS(__B40C_CUDA_ARCH__);
const int STORE_TXN_THREADS = 1 << LOG_STORE_TXN_THREADS;
int store_txn_idx = threadIdx.x & (STORE_TXN_THREADS - 1);
int store_txn_digit = threadIdx.x >> LOG_STORE_TXN_THREADS;
int my_digit = (PASS * DIGITS_PER_SCATTER_PASS) + store_txn_digit;
if (my_digit < KernelPolicy::BINS) {
int my_exclusive_scan = cta->smem_storage.bin_warpscan[1][my_digit - 1];
int my_inclusive_scan = cta->smem_storage.bin_warpscan[1][my_digit];
int my_digit_count = my_inclusive_scan - my_exclusive_scan;
int my_carry = cta->smem_storage.bin_carry[my_digit] + my_exclusive_scan;
int my_aligned_offset = store_txn_idx - (my_carry & (STORE_TXN_THREADS - 1));
while (my_aligned_offset < my_digit_count) {
if ((my_aligned_offset >= 0) && (my_exclusive_scan + my_aligned_offset < valid_elements)) {
T datum = exchange[my_exclusive_scan + my_aligned_offset];
Transform(datum);
d_out[my_carry + my_aligned_offset] = datum;
}
my_aligned_offset += STORE_TXN_THREADS;
}
}
WarpScatter<PASS + 1, SCATTER_PASSES>::template ScatterPass<T, Transform>(
cta,
exchange,
d_out,
valid_elements);
}
};
// Terminate
template <int SCATTER_PASSES>
struct WarpScatter<SCATTER_PASSES, SCATTER_PASSES>
{
template <typename T, void Transform(T&), typename Cta>
static __device__ __forceinline__ void ScatterPass(
Cta *cta,
T *exchange,
T *d_out,
const SizeT &valid_elements) {}
};
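// WarpScatter assigns one digit (bin) to each group of STORE_TXN_THREADS consecutive threads,
// where STORE_TXN_THREADS equals the number of memory banks. Within a group, the starting offset
// is skewed by the carry's misalignment (store_txn_idx - (my_carry & (STORE_TXN_THREADS - 1))) so
// stores never straddle an alignment boundary, satisfying the SM1.0-1.1 coalescing rules noted
// above; threads then stride by STORE_TXN_THREADS until the digit's run of elements is exhausted.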
template <bool KEYS_ONLY, int dummy2 = 0>
struct ScatterValues
{
template <typename Cta, typename Tile>
static __device__ __forceinline__ void Invoke(
SizeT cta_offset,
const SizeT &guarded_elements,
const SizeT &valid_elements,
Cta *cta,
Tile *tile)
{
// Load values
tile->LoadValues(cta, cta_offset, guarded_elements);
// Scatter values to smem by local rank
util::io::ScatterTile<
KernelPolicy::LOG_TILE_ELEMENTS_PER_THREAD,
0,
KernelPolicy::THREADS,
util::io::st::NONE>::Scatter(
cta->smem_storage.value_exchange,
(ValueType (*)[1]) tile->values,
(int (*)[1]) tile->local_ranks);
__syncthreads();
if (SCATTER_STRATEGY == SCATTER_WARP_TWO_PHASE) {
WarpScatter<0, SCATTER_PASSES>::template ScatterPass<ValueType, Nop<ValueType> >(
cta,
cta->smem_storage.value_exchange,
cta->d_out_values,
valid_elements);
__syncthreads();
} else {
// Gather values linearly from smem (vec-1)
util::io::LoadTile<
KernelPolicy::LOG_TILE_ELEMENTS_PER_THREAD,
0,
KernelPolicy::THREADS,
util::io::ld::NONE,
false>::LoadValid( // No need to check alignment
(ValueType (*)[1]) tile->values,
cta->smem_storage.value_exchange,
0);
__syncthreads();
// Scatter values to global bin partitions
tile->ScatterValues(cta, valid_elements);
}
}
};
template <int dummy2>
struct ScatterValues<true, dummy2>
{
template <typename Cta, typename Tile>
static __device__ __forceinline__ void Invoke(
SizeT cta_offset,
const SizeT &guarded_elements,
const SizeT &valid_elements,
Cta *cta,
Tile *tile) {}
};
template <typename Cta, typename Tile>
static __device__ __forceinline__ void Invoke(
SizeT cta_offset,
const SizeT &guarded_elements,
Cta *cta,
Tile *tile)
{
// Load keys
tile->LoadKeys(cta, cta_offset, guarded_elements);
// Scan cycles
IterateCycles<0>::ScanCycles(cta, tile);
// Scan across bins
if (threadIdx.x < KernelPolicy::BINS) {
// Recover bin-counts from lane totals
int my_base_lane = threadIdx.x >> 2;
int my_quad_byte = threadIdx.x & 3;
IterateCycleLoads<0, 0>::RecoverBinCounts(
my_base_lane, my_quad_byte, cta, tile);
// Scan across my bin counts for each load
int tile_bin_total = util::scan::SerialScan<KernelPolicy::LOADS_PER_TILE>::Invoke(
(int *) tile->bin_counts, 0);
// Add the previous tile's inclusive-scan to the running bin-carry
SizeT my_carry = cta->smem_storage.bin_carry[threadIdx.x] +
cta->smem_storage.bin_warpscan[1][threadIdx.x];
// Perform overflow-free inclusive SIMD Kogge-Stone across bins
int tile_bin_inclusive = util::scan::WarpScan<KernelPolicy::LOG_BINS, false>::Invoke(
tile_bin_total,
cta->smem_storage.bin_warpscan);
// Save inclusive scan in bin_warpscan
cta->smem_storage.bin_warpscan[1][threadIdx.x] = tile_bin_inclusive;
// Calculate exclusive scan
int tile_bin_exclusive = tile_bin_inclusive - tile_bin_total;
// Subtract the bin prefix from the running carry (to offset threadIdx during scatter)
cta->smem_storage.bin_carry[threadIdx.x] = my_carry - tile_bin_exclusive;
// Compute the bin prefixes for this tile for each load
IterateCycleLoads<0, 0>::UpdateBinPrefixes(tile_bin_exclusive, cta, tile);
}
__syncthreads();
// Update the local ranks in each load with the bin prefixes for the tile
IterateCycles<0>::UpdateRanks(cta, tile);
// Scatter keys to smem by local rank
util::io::ScatterTile<
KernelPolicy::LOG_TILE_ELEMENTS_PER_THREAD,
0,
KernelPolicy::THREADS,
util::io::st::NONE>::Scatter(
cta->smem_storage.key_exchange,
(KeyType (*)[1]) tile->keys,
(int (*)[1]) tile->local_ranks);
__syncthreads();
SizeT valid_elements = tile->ValidElements(cta, guarded_elements);
if (SCATTER_STRATEGY == SCATTER_WARP_TWO_PHASE) {
WarpScatter<0, SCATTER_PASSES>::template ScatterPass<KeyType, KernelPolicy::PostprocessKey>(
cta,
cta->smem_storage.key_exchange,
cta->d_out_keys,
valid_elements);
__syncthreads();
} else {
// Gather keys linearly from smem (vec-1)
util::io::LoadTile<
KernelPolicy::LOG_TILE_ELEMENTS_PER_THREAD,
0,
KernelPolicy::THREADS,
util::io::ld::NONE,
false>::LoadValid( // No need to check alignment
(KeyType (*)[1]) tile->keys,
cta->smem_storage.key_exchange,
0);
__syncthreads();
// Compute global scatter offsets for gathered keys
IterateElements<0>::DecodeGlobalOffsets(cta, tile);
// Scatter keys to global bin partitions
tile->ScatterKeys(cta, valid_elements);
}
// Partition values
ScatterValues<KernelPolicy::KEYS_ONLY>::Invoke(
cta_offset, guarded_elements, valid_elements, cta, tile);
}
};
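// Two-phase scatter pipeline (Invoke above): load keys, scan each cycle's composite counters,
// have the first BINS threads recover per-load bin counts, scan them across loads and across bins
// (folding in the running carry from previous tiles), and publish bin prefixes; local ranks are
// then updated, keys are exchanged through shared memory by local rank, and finally either the
// warp-aligned scatter writes directly from the exchange buffer, or keys are gathered linearly,
// their global offsets decoded from bin_carry, and scattered to the global partitions. Values,
// when present, follow the same exchange-and-scatter path via ScatterValues.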
/**
* Specialized for direct scatter
*/
template <int dummy>
struct PartitionTile<SCATTER_DIRECT, dummy>
{
template <bool KEYS_ONLY, int dummy2 = 0>
struct ScatterValues
{
template <typename Cta, typename Tile>
static __device__ __forceinline__ void Invoke(
SizeT cta_offset,
const SizeT &guarded_elements,
const SizeT &valid_elements,
Cta *cta,
Tile *tile)
{
// Load values
tile->LoadValues(cta, cta_offset, guarded_elements);
// Scatter values to global bin partitions
tile->ScatterValues(cta, valid_elements);
}
};
template <int dummy2>
struct ScatterValues<true, dummy2>
{
template <typename Cta, typename Tile>
static __device__ __forceinline__ void Invoke(
SizeT cta_offset,
const SizeT &guarded_elements,
const SizeT &valid_elements,
Cta *cta,
Tile *tile) {}
};
template <typename Cta, typename Tile>
static __device__ __forceinline__ void Invoke(
SizeT cta_offset,
const SizeT &guarded_elements,
Cta *cta,
Tile *tile)
{
// Load keys
tile->LoadKeys(cta, cta_offset, guarded_elements);
// Scan cycles
IterateCycles<0>::ScanCycles(cta, tile);
// Scan across bins
if (threadIdx.x < KernelPolicy::BINS) {
// Recover bin-counts from lane totals
int my_base_lane = threadIdx.x >> 2;
int my_quad_byte = threadIdx.x & 3;
IterateCycleLoads<0, 0>::RecoverBinCounts(
my_base_lane, my_quad_byte, cta, tile);
// Scan across my bin counts for each load
int tile_bin_total = util::scan::SerialScan<KernelPolicy::LOADS_PER_TILE>::Invoke(
(int *) tile->bin_counts, 0);
// Add the previous tile's inclusive-scan to the running bin-carry
SizeT my_carry = cta->smem_storage.bin_carry[threadIdx.x];
// Update bin prefixes with the incoming carry
IterateCycleLoads<0, 0>::UpdateBinPrefixes(my_carry, cta, tile);
// Update carry
cta->smem_storage.bin_carry[threadIdx.x] = my_carry + tile_bin_total;
}
__syncthreads();
SizeT valid_elements = tile->ValidElements(cta, guarded_elements);
// Update the scatter offsets in each load with the bin prefixes for the tile
IterateCycles<0>::UpdateGlobalOffsets(cta, tile);
// Scatter keys to global bin partitions
tile->ScatterKeys(cta, valid_elements);
// Partition values
ScatterValues<KernelPolicy::KEYS_ONLY>::Invoke(
cta_offset, guarded_elements, valid_elements, cta, tile);
}
};
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Loads, decodes, and scatters a tile into global partitions
*/
template <typename Cta>
__device__ __forceinline__ void Partition(
SizeT cta_offset,
const SizeT &guarded_elements,
Cta *cta)
{
PartitionTile<KernelPolicy::SCATTER_STRATEGY>::Invoke(
cta_offset,
guarded_elements,
cta,
(Dispatch *) this);
}
};
} // namespace downsweep
} // namespace partition
} // namespace b40c
#include "../traversal_common.cuh"
#include <thrust/host_vector.h>
#include <cub/cub.cuh>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <raft/cudart_utils.h>
#include <raft/integer_utils.h>
#include <raft/handle.hpp>
namespace cugraph {
namespace mg {
namespace detail {
template <typename degree_t>
constexpr int BitsPWrd = sizeof(degree_t) * 8;
template <typename degree_t>
constexpr int NumberBins = sizeof(degree_t) * 8 + 1;
template <typename return_t>
constexpr inline return_t number_of_words(return_t number_of_bits)
{
return raft::div_rounding_up_safe(number_of_bits, static_cast<return_t>(BitsPWrd<uint32_t>));
}
template <typename edge_t>
struct isDegreeZero {
edge_t const* offset_;
isDegreeZero(edge_t const* offset) : offset_(offset) {}
__device__ bool operator()(const edge_t& id) const { return (offset_[id + 1] == offset_[id]); }
};
struct set_nth_bit {
uint32_t* bmap_;
set_nth_bit(uint32_t* bmap) : bmap_(bmap) {}
template <typename return_t>
__device__ void operator()(const return_t& id)
{
atomicOr(bmap_ + (id / BitsPWrd<uint32_t>), (uint32_t{1} << (id % BitsPWrd<uint32_t>)));
}
};
template <typename vertex_t>
bool is_vertex_isolated(rmm::device_vector<uint32_t>& bmap, vertex_t id)
{
uint32_t word = bmap[id / BitsPWrd<uint32_t>];
uint32_t active_bit = static_cast<uint32_t>(1) << (id % BitsPWrd<uint32_t>);
// If idth bit of bmap is set to 1 then return true
return ((active_bit & word) != 0);
}
template <typename vertex_t, typename edge_t>
struct BFSStepNoDist {
uint32_t* output_frontier_;
uint32_t* visited_;
vertex_t* predecessors_;
BFSStepNoDist(uint32_t* output_frontier, uint32_t* visited, vertex_t* predecessors)
: output_frontier_(output_frontier), visited_(visited), predecessors_(predecessors)
{
}
__device__ bool operator()(vertex_t src, vertex_t dst)
{
uint32_t active_bit = static_cast<uint32_t>(1) << (dst % BitsPWrd<uint32_t>);
uint32_t prev_word = atomicOr(output_frontier_ + (dst / BitsPWrd<uint32_t>), active_bit);
bool dst_not_visited_earlier = !(active_bit & visited_[dst / BitsPWrd<uint32_t>]);
bool dst_not_visited_current = !(prev_word & active_bit);
// If this thread activates the frontier bitmap for a destination
// then the source is the predecessor of that destination
if (dst_not_visited_earlier && dst_not_visited_current) {
predecessors_[dst] = src;
return true;
} else {
return false;
}
}
// No-op
void increment_level(void) {}
};
template <typename vertex_t, typename edge_t>
struct BFSStep {
uint32_t* output_frontier_;
uint32_t* visited_;
vertex_t* predecessors_;
vertex_t* distances_;
vertex_t level_;
BFSStep(uint32_t* output_frontier, uint32_t* visited, vertex_t* predecessors, vertex_t* distances)
: output_frontier_(output_frontier),
visited_(visited),
predecessors_(predecessors),
distances_(distances),
level_(0)
{
}
__device__ bool operator()(vertex_t src, vertex_t dst)
{
uint32_t active_bit = static_cast<uint32_t>(1) << (dst % BitsPWrd<uint32_t>);
uint32_t prev_word = atomicOr(output_frontier_ + (dst / BitsPWrd<uint32_t>), active_bit);
bool dst_not_visited_earlier = !(active_bit & visited_[dst / BitsPWrd<uint32_t>]);
bool dst_not_visited_current = !(prev_word & active_bit);
// If this thread activates the frontier bitmap for a destination
// then the source is the predecessor of that destination
if (dst_not_visited_earlier && dst_not_visited_current) {
distances_[dst] = level_;
predecessors_[dst] = src;
return true;
} else {
return false;
}
}
void increment_level(void) { ++level_; }
};
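// BFSStep claims a destination vertex with a single atomicOr on the output-frontier bitmap: the
// returned previous word tells this thread whether it was the first to set the bit in this
// iteration, and the visited bitmap rules out vertices discovered in earlier iterations. Only the
// winning thread records the distance (current level) and predecessor, so each vertex gets exactly
// one predecessor even when many source vertices reach it in the same BFS step.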
template <typename vertex_t, typename edge_t, typename weight_t>
vertex_t populate_isolated_vertices(
raft::handle_t const& handle,
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
rmm::device_vector<vertex_t>& isolated_vertex_ids)
{
bool is_mg = (handle.comms_initialized() && (graph.local_vertices != nullptr) &&
(graph.local_offsets != nullptr));
edge_t vertex_begin_, vertex_end_;
if (is_mg) {
vertex_begin_ = graph.local_offsets[handle.get_comms().get_rank()];
vertex_end_ = graph.local_offsets[handle.get_comms().get_rank()] +
graph.local_vertices[handle.get_comms().get_rank()];
} else {
vertex_begin_ = 0;
vertex_end_ = graph.number_of_vertices;
}
auto count = thrust::copy_if(handle.get_thrust_policy(),
thrust::make_counting_iterator<vertex_t>(vertex_begin_),
thrust::make_counting_iterator<vertex_t>(vertex_end_),
thrust::make_counting_iterator<edge_t>(0),
isolated_vertex_ids.begin(),
isDegreeZero<edge_t>(graph.offsets)) -
isolated_vertex_ids.begin();
return static_cast<vertex_t>(count);
}
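// Note on the copy_if call above: it uses the stencil overload, so the predicate is evaluated on
// local vertex indices (the counting iterator starting at 0) against this rank's offsets array,
// while the values actually copied are the corresponding global vertex ids (the counting iterator
// starting at vertex_begin_). The result is the list of this rank's vertices with an empty
// adjacency list, and the returned count is the number of such isolated vertices.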
template <typename return_t>
return_t collect_vectors(raft::handle_t const& handle,
rmm::device_vector<size_t>& buffer_len,
rmm::device_vector<return_t>& local,
return_t local_count,
rmm::device_vector<return_t>& global)
{
CHECK_CUDA(handle.get_stream());
buffer_len.resize(handle.get_comms().get_size());
auto my_rank = handle.get_comms().get_rank();
buffer_len[my_rank] = static_cast<size_t>(local_count);
handle.get_comms().allgather(
buffer_len.data().get() + my_rank, buffer_len.data().get(), 1, handle.get_stream());
CHECK_CUDA(handle.get_stream());
// buffer_len now contains the lengths of all local buffers
// for all ranks
thrust::host_vector<size_t> h_buffer_len = buffer_len;
// h_buffer_offsets has to be int because raft allgatherv expects an
// int array for the displacement vector. This should be changed in
// raft so that the displacement type is templated
thrust::host_vector<size_t> h_buffer_offsets(h_buffer_len.size());
thrust::exclusive_scan(
thrust::host, h_buffer_len.begin(), h_buffer_len.end(), h_buffer_offsets.begin());
return_t global_buffer_len = h_buffer_len.back() + h_buffer_offsets.back();
handle.get_comms().allgatherv(local.data().get(),
global.data().get(),
h_buffer_len.data(),
h_buffer_offsets.data(),
handle.get_stream());
CHECK_CUDA(handle.get_stream());
return global_buffer_len;
}
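// A minimal, hypothetical illustration of how the allgatherv displacements are
// derived above: an exclusive scan of the per-rank lengths gives each rank's
// starting offset, and the last offset plus the last length is the global
// size. For lengths {3, 1, 4} this yields offsets {0, 3, 4} and a total of 8.
inline size_t example_allgatherv_total(thrust::host_vector<size_t> const& lengths,
                                       thrust::host_vector<size_t>& offsets)
{
  offsets.resize(lengths.size());
  thrust::exclusive_scan(thrust::host, lengths.begin(), lengths.end(), offsets.begin());
  return lengths.back() + offsets.back();
}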
template <typename return_t>
void add_to_bitmap(raft::handle_t const& handle,
rmm::device_vector<uint32_t>& bmap,
rmm::device_vector<return_t>& id,
return_t count)
{
cudaStream_t stream = handle.get_stream();
thrust::for_each(
handle.get_thrust_policy(), id.begin(), id.begin() + count, set_nth_bit(bmap.data().get()));
CHECK_CUDA(stream);
}
// For every vertex id i that is isolated (out-degree is 0), set the
// i-th bit of isolated_bmap to 1
template <typename vertex_t, typename edge_t, typename weight_t>
void create_isolated_bitmap(raft::handle_t const& handle,
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
rmm::device_vector<vertex_t>& local_isolated_ids,
rmm::device_vector<vertex_t>& global_isolated_ids,
rmm::device_vector<size_t>& temp_buffer_len,
rmm::device_vector<uint32_t>& isolated_bmap)
{
size_t word_count = detail::number_of_words(graph.number_of_vertices);
local_isolated_ids.resize(graph.number_of_vertices);
global_isolated_ids.resize(graph.number_of_vertices);
temp_buffer_len.resize(handle.get_comms().get_size());
isolated_bmap.resize(word_count);
vertex_t local_isolated_count = populate_isolated_vertices(handle, graph, local_isolated_ids);
vertex_t global_isolated_count = collect_vectors(
handle, temp_buffer_len, local_isolated_ids, local_isolated_count, global_isolated_ids);
add_to_bitmap(handle, isolated_bmap, global_isolated_ids, global_isolated_count);
}
template <typename return_t>
return_t remove_duplicates(raft::handle_t const& handle,
rmm::device_vector<return_t>& data,
return_t data_len)
{
cudaStream_t stream = handle.get_stream();
thrust::sort(handle.get_thrust_policy(), data.begin(), data.begin() + data_len);
auto unique_count =
thrust::unique(handle.get_thrust_policy(), data.begin(), data.begin() + data_len) -
data.begin();
return static_cast<return_t>(unique_count);
}
// Use the fact that any value in id array can only be in
// the range [id_begin, id_end) to create a unique set of
// ids. bmap is expected to be of the length
// id_end/BitsPWrd<uint32_t> and is set to 0 initially
template <uint32_t BLOCK_SIZE, typename return_t>
__global__ void remove_duplicates_kernel(uint32_t* bmap,
return_t* in_id,
return_t id_begin,
return_t id_end,
return_t count,
return_t* out_id,
return_t* out_count)
{
return_t tid = blockIdx.x * blockDim.x + threadIdx.x;
return_t id;
if (tid < count) {
id = in_id[tid];
} else {
// Invalid vertex id to avoid partial thread block execution
id = id_end;
}
int acceptable_vertex = 0;
// Only ids in the acceptable range [id_begin, id_end) can be accepted;
// out-of-range ids (including the id_end padding value) are ignored
if ((id >= id_begin) && (id < id_end)) {
uint32_t active_bit = static_cast<uint32_t>(1) << (id % BitsPWrd<uint32_t>);
uint32_t prev_word = atomicOr(bmap + (id / BitsPWrd<uint32_t>), active_bit);
// If bit was set by this thread then the id is unique
if (!(prev_word & active_bit)) { acceptable_vertex = 1; }
}
__shared__ return_t block_offset;
typedef cub::BlockScan<int, BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
int thread_write_offset;
int block_acceptable_vertex_count;
BlockScan(temp_storage)
.ExclusiveSum(acceptable_vertex, thread_write_offset, block_acceptable_vertex_count);
// If the block is not going to write unique ids then return
if (block_acceptable_vertex_count == 0) { return; }
if (threadIdx.x == 0) {
block_offset = cugraph::detail::traversal::atomicAdd(
out_count, static_cast<return_t>(block_acceptable_vertex_count));
}
__syncthreads();
if (acceptable_vertex) { out_id[block_offset + thread_write_offset] = id; }
}
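// A minimal sketch (hypothetical kernel, not used elsewhere) of the stream
// compaction pattern implemented above: each thread contributes a 0/1 flag,
// BlockScan turns the flags into per-thread write slots plus a block total, a
// single atomicAdd reserves a contiguous range in the output, and flagged
// threads write into their reserved slot.
template <uint32_t BLOCK_SIZE>
__global__ void example_compact_kernel(int const* flags, int const* values, int* out, int* out_count, int n)
{
  int tid  = blockIdx.x * blockDim.x + threadIdx.x;
  int flag = (tid < n) ? flags[tid] : 0;
  typedef cub::BlockScan<int, BLOCK_SIZE> BlockScan;
  __shared__ typename BlockScan::TempStorage temp_storage;
  __shared__ int block_offset;
  int slot, block_total;
  BlockScan(temp_storage).ExclusiveSum(flag, slot, block_total);
  if (block_total == 0) { return; }
  if (threadIdx.x == 0) { block_offset = atomicAdd(out_count, block_total); }
  __syncthreads();
  if (flag) { out[block_offset + slot] = values[tid]; }
}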
template <uint32_t BLOCK_SIZE, typename return_t>
__global__ void remove_duplicates_kernel(uint32_t* bmap,
uint32_t* isolated_bmap,
return_t* in_id,
return_t id_begin,
return_t id_end,
return_t count,
return_t* out_id,
return_t* out_count)
{
return_t tid = blockIdx.x * blockDim.x + threadIdx.x;
return_t id;
if (tid < count) {
id = in_id[tid];
} else {
// Invalid vertex id to avoid partial thread block execution
id = id_end;
}
int acceptable_vertex = 0;
// Only ids in the acceptable range [id_begin, id_end) can be accepted;
// out-of-range ids (including the id_end padding value) are ignored
if ((id >= id_begin) && (id < id_end)) {
uint32_t active_bit = static_cast<uint32_t>(1) << (id % BitsPWrd<uint32_t>);
uint32_t prev_word = atomicOr(bmap + (id / BitsPWrd<uint32_t>), active_bit);
// If bit was set by this thread then the id is unique
if (!(prev_word & active_bit)) {
// If id is isolated (out-degree == 0) then mark it as unacceptable
bool is_dst_isolated = active_bit & isolated_bmap[id / BitsPWrd<uint32_t>];
acceptable_vertex = !is_dst_isolated;
}
}
__shared__ return_t block_offset;
typedef cub::BlockScan<int, BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
int thread_write_offset;
int block_acceptable_vertex_count;
BlockScan(temp_storage)
.ExclusiveSum(acceptable_vertex, thread_write_offset, block_acceptable_vertex_count);
// If the block is not going to write unique ids then return
if (block_acceptable_vertex_count == 0) { return; }
if (threadIdx.x == 0) {
block_offset = cugraph::detail::traversal::atomicAdd(
out_count, static_cast<return_t>(block_acceptable_vertex_count));
}
__syncthreads();
if (acceptable_vertex) { out_id[block_offset + thread_write_offset] = id; }
}
template <typename return_t>
return_t remove_duplicates(raft::handle_t const& handle,
rmm::device_vector<uint32_t>& bmap,
rmm::device_vector<return_t>& data,
return_t data_len,
return_t data_begin,
return_t data_end,
rmm::device_vector<return_t>& out_data)
{
cudaStream_t stream = handle.get_stream();
rmm::device_vector<return_t> unique_count(1, 0);
thrust::fill(handle.get_thrust_policy(), bmap.begin(), bmap.end(), static_cast<uint32_t>(0));
constexpr return_t threads = 256;
return_t blocks = raft::div_rounding_up_safe(data_len, threads);
remove_duplicates_kernel<threads><<<blocks, threads, 0, stream>>>(bmap.data().get(),
data.data().get(),
data_begin,
data_end,
data_len,
out_data.data().get(),
unique_count.data().get());
CHECK_CUDA(stream);
return static_cast<return_t>(unique_count[0]);
}
template <typename vertex_t, typename edge_t, typename weight_t>
vertex_t preprocess_input_frontier(
raft::handle_t const& handle,
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
rmm::device_vector<uint32_t>& bmap,
rmm::device_vector<uint32_t>& isolated_bmap,
rmm::device_vector<vertex_t>& input_frontier,
vertex_t input_frontier_len,
rmm::device_vector<vertex_t>& output_frontier)
{
cudaStream_t stream = handle.get_stream();
vertex_t vertex_begin = graph.local_offsets[handle.get_comms().get_rank()];
vertex_t vertex_end = graph.local_offsets[handle.get_comms().get_rank()] +
graph.local_vertices[handle.get_comms().get_rank()];
rmm::device_vector<vertex_t> unique_count(1, 0);
thrust::fill(handle.get_thrust_policy(), bmap.begin(), bmap.end(), static_cast<uint32_t>(0));
constexpr vertex_t threads = 256;
vertex_t blocks = raft::div_rounding_up_safe(input_frontier_len, threads);
remove_duplicates_kernel<threads><<<blocks, threads, 0, stream>>>(bmap.data().get(),
isolated_bmap.data().get(),
input_frontier.data().get(),
vertex_begin,
vertex_end,
input_frontier_len,
output_frontier.data().get(),
unique_count.data().get());
CHECK_CUDA(stream);
return static_cast<vertex_t>(unique_count[0]);
}
template <typename vertex_t, typename edge_t, typename weight_t>
vertex_t preprocess_input_frontier(
raft::handle_t const& handle,
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
rmm::device_vector<uint32_t>& bmap,
rmm::device_vector<vertex_t>& input_frontier,
vertex_t input_frontier_len,
rmm::device_vector<vertex_t>& output_frontier)
{
cudaStream_t stream = handle.get_stream();
vertex_t vertex_begin = graph.local_offsets[handle.get_comms().get_rank()];
vertex_t vertex_end = graph.local_offsets[handle.get_comms().get_rank()] +
graph.local_vertices[handle.get_comms().get_rank()];
rmm::device_vector<vertex_t> unique_count(1, 0);
thrust::fill(handle.get_thrust_policy(), bmap.begin(), bmap.end(), static_cast<uint32_t>(0));
constexpr vertex_t threads = 256;
vertex_t blocks = raft::div_rounding_up_safe(input_frontier_len, threads);
remove_duplicates_kernel<threads><<<blocks, threads, 0, stream>>>(bmap.data().get(),
input_frontier.data().get(),
vertex_begin,
vertex_end,
input_frontier_len,
output_frontier.data().get(),
unique_count.data().get());
CHECK_CUDA(stream);
return static_cast<vertex_t>(unique_count[0]);
}
template <typename vertex_t>
__global__ void fill_kernel(vertex_t* distances, vertex_t count, vertex_t start_vertex)
{
vertex_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= count) { return; }
if (tid == start_vertex) {
distances[tid] = vertex_t{0};
} else {
distances[tid] = cugraph::detail::traversal::vec_t<vertex_t>::max;
}
}
template <typename vertex_t, typename edge_t, typename weight_t>
void fill_max_dist(raft::handle_t const& handle,
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
vertex_t start_vertex,
vertex_t global_number_of_vertices,
vertex_t* distances)
{
if (distances == nullptr) { return; }
vertex_t array_size = global_number_of_vertices;
constexpr vertex_t threads = 256;
vertex_t blocks = raft::div_rounding_up_safe(array_size, threads);
fill_kernel<<<blocks, threads, 0, handle.get_stream()>>>(distances, array_size, start_vertex);
}
template <typename vertex_t, typename edge_t, typename weight_t>
vertex_t get_global_vertex_count(
raft::handle_t const& handle,
cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph)
{
rmm::device_vector<vertex_t> id(1);
id[0] = *thrust::max_element(
handle.get_thrust_policy(), graph.indices, graph.indices + graph.number_of_edges);
handle.get_comms().allreduce(
id.data().get(), id.data().get(), 1, raft::comms::op_t::MAX, handle.get_stream());
vertex_t max_vertex_id = id[0];
if ((graph.number_of_vertices - 1) > max_vertex_id) {
max_vertex_id = graph.number_of_vertices - 1;
}
return max_vertex_id + 1;
}
} // namespace detail
} // namespace mg
} // namespace cugraph
|
the_stack
|
#include <torch/extension.h>
#include "cuda_util.cuh"
#include "data_spec_packed.cuh"
#include "render_util.cuh"
#include <iostream>
#include <cstdint>
#include <tuple>
namespace {
const int WARP_SIZE = 32;
const int TRACE_RAY_CUDA_THREADS = 128;
const int TRACE_RAY_CUDA_RAYS_PER_BLOCK = TRACE_RAY_CUDA_THREADS / WARP_SIZE;
const int TRACE_RAY_BKWD_CUDA_THREADS = 128;
const int TRACE_RAY_BKWD_CUDA_RAYS_PER_BLOCK = TRACE_RAY_BKWD_CUDA_THREADS / WARP_SIZE;
const int MIN_BLOCKS_PER_SM = 8;
typedef cub::WarpReduce<float> WarpReducef;
namespace device {
// * For ray rendering
__device__ __inline__ void trace_ray_nvol(
const PackedSparseGridSpec& __restrict__ grid,
SingleRaySpec& __restrict__ ray,
const RenderOptions& __restrict__ opt,
uint32_t lane_id,
float* __restrict__ sphfunc_val,
WarpReducef::TempStorage& __restrict__ temp_storage,
float* __restrict__ out) {
const uint32_t lane_colorgrp_id = lane_id % grid.basis_dim;
const uint32_t lane_colorgrp = lane_id / grid.basis_dim;
if (ray.tmin > ray.tmax) {
out[lane_colorgrp] = (grid.background_nlayers == 0) ? opt.background_brightness : 0.f;
return;
}
float t = ray.tmin;
float outv = 0.f;
float total_alpha = 0.f;
while (t <= ray.tmax) {
#pragma unroll 3
for (int j = 0; j < 3; ++j) {
ray.pos[j] = fmaf(t, ray.dir[j], ray.origin[j]);
ray.pos[j] = min(max(ray.pos[j], 0.f), grid.size[j] - 1.f);
ray.l[j] = min(static_cast<int32_t>(ray.pos[j]), grid.size[j] - 2);
ray.pos[j] -= static_cast<float>(ray.l[j]);
}
const float skip = compute_skip_dist(ray,
grid.links, grid.stride_x,
grid.size[2], 0);
if (skip >= opt.step_size) {
// For consistency, we skip forward by whole multiples of the step size
t += ceilf(skip / opt.step_size) * opt.step_size;
continue;
}
float sigma = trilerp_cuvol_one(
grid.links, grid.density_data,
grid.stride_x,
grid.size[2],
1,
ray.l, ray.pos,
0);
if (sigma > opt.sigma_thresh) {
float lane_color = trilerp_cuvol_one(
grid.links,
grid.sh_data,
grid.stride_x,
grid.size[2],
grid.sh_data_dim,
ray.l, ray.pos, lane_id);
lane_color *= sphfunc_val[lane_colorgrp_id];
const float new_total_alpha = fminf(total_alpha + 1.f - _EXP(
-ray.world_step * sigma), 1.f);
const float weight = new_total_alpha - total_alpha;
total_alpha = new_total_alpha;
float lane_color_total = WarpReducef(temp_storage).HeadSegmentedSum(
lane_color, lane_colorgrp_id == 0);
outv += weight * fmaxf(lane_color_total + 0.5f, 0.f); // Clamp to [+0, infty)
if (total_alpha >= 1.f) break;
}
t += opt.step_size;
}
if (grid.background_nlayers == 0) {
outv += (1.f - total_alpha) * opt.background_brightness;
}
if (lane_colorgrp_id == 0) {
out[lane_colorgrp] = outv;
}
}
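// A minimal sketch (hypothetical helper) of the per-sample weight used in the
// loop above: each sample contributes alpha = 1 - exp(-sigma * world_step) to
// the running opacity, clamped to 1, and its color weight is the resulting
// increase in opacity. Note this variant accumulates alpha additively rather
// than compositing transmittance multiplicatively.
__device__ __inline__ float example_sample_weight(float total_alpha, float sigma, float world_step)
{
  const float new_total_alpha = fminf(total_alpha + 1.f - _EXP(-world_step * sigma), 1.f);
  return new_total_alpha - total_alpha;
}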
__device__ __inline__ void trace_ray_nvol_backward(
const PackedSparseGridSpec& __restrict__ grid,
const float* __restrict__ grad_output,
const float* __restrict__ color_cache,
SingleRaySpec& __restrict__ ray,
const RenderOptions& __restrict__ opt,
uint32_t lane_id,
const float* __restrict__ sphfunc_val,
float* __restrict__ grad_sphfunc_val,
WarpReducef::TempStorage& __restrict__ temp_storage,
float sparsity_loss,
PackedGridOutputGrads& __restrict__ grads
) {
const uint32_t lane_colorgrp_id = lane_id % grid.basis_dim;
const uint32_t lane_colorgrp = lane_id / grid.basis_dim;
const uint32_t leader_mask = 1U | (1U << grid.basis_dim) | (1U << (2 * grid.basis_dim));
if (ray.tmin > ray.tmax) {
return;
}
float t = ray.tmin;
const float gout = grad_output[lane_colorgrp];
float total_alpha = 0.f;
float last_total_color = 0.f;
// remat samples
while (t <= ray.tmax) {
#pragma unroll 3
for (int j = 0; j < 3; ++j) {
ray.pos[j] = fmaf(t, ray.dir[j], ray.origin[j]);
ray.pos[j] = min(max(ray.pos[j], 0.f), grid.size[j] - 1.f);
ray.l[j] = min(static_cast<int32_t>(ray.pos[j]), grid.size[j] - 2);
ray.pos[j] -= static_cast<float>(ray.l[j]);
}
const float skip = compute_skip_dist(ray,
grid.links, grid.stride_x,
grid.size[2], 0);
if (skip >= opt.step_size) {
// For consistency, we skip forward by whole multiples of the step size
t += ceilf(skip / opt.step_size) * opt.step_size;
continue;
}
float sigma = trilerp_cuvol_one(
grid.links,
grid.density_data,
grid.stride_x,
grid.size[2],
1,
ray.l, ray.pos,
0);
if (sigma > opt.sigma_thresh) {
float lane_color = trilerp_cuvol_one(
grid.links,
grid.sh_data,
grid.stride_x,
grid.size[2],
grid.sh_data_dim,
ray.l, ray.pos, lane_id);
float weighted_lane_color = lane_color * sphfunc_val[lane_colorgrp_id];
const float curr_transmit = _EXP(-ray.world_step * sigma);
const float new_total_alpha = fminf(total_alpha + 1.f - curr_transmit, 1.f);
const float weight = new_total_alpha - total_alpha;
bool not_last = new_total_alpha < 1.f;
total_alpha = new_total_alpha;
const float lane_color_total = WarpReducef(temp_storage).HeadSegmentedSum(
weighted_lane_color, lane_colorgrp_id == 0) + 0.5f;
float total_color = fmaxf(lane_color_total, 0.f);
float color_in_01 = total_color == lane_color_total;
total_color *= gout; // Scale by the upstream gradient for this color channel
float total_color_c1 = __shfl_sync(leader_mask, total_color, grid.basis_dim);
total_color += __shfl_sync(leader_mask, total_color, 2 * grid.basis_dim);
total_color += total_color_c1;
color_in_01 = __shfl_sync((1U << grid.sh_data_dim) - 1, color_in_01, lane_colorgrp * grid.basis_dim);
const float grad_common = weight * color_in_01 * gout;
const float curr_grad_color = sphfunc_val[lane_colorgrp_id] * grad_common;
if (grid.basis_type != BASIS_TYPE_SH) {
float curr_grad_sphfunc = lane_color * grad_common;
const float curr_grad_up2 = __shfl_down_sync((1U << grid.sh_data_dim) - 1,
curr_grad_sphfunc, 2 * grid.basis_dim);
curr_grad_sphfunc += __shfl_down_sync((1U << grid.sh_data_dim) - 1,
curr_grad_sphfunc, grid.basis_dim);
curr_grad_sphfunc += curr_grad_up2;
if (lane_id < grid.basis_dim) {
grad_sphfunc_val[lane_id] += curr_grad_sphfunc;
}
}
trilerp_backward_cuvol_one(grid.links, grads.grad_sh_out,
grid.stride_x,
grid.size[2],
grid.sh_data_dim,
ray.l, ray.pos,
curr_grad_color, lane_id);
if (not_last) {
float curr_grad_sigma = ray.world_step * curr_transmit * total_color;
if (sparsity_loss > 0.f) {
// Cauchy version (from SNeRG)
curr_grad_sigma += sparsity_loss * (4 * sigma / (1 + 2 * (sigma * sigma)));
// Alpha version (from PlenOctrees)
// curr_grad_sigma += sparsity_loss * _EXP(-pcnt) * ray.world_step;
}
if (lane_id == 0) {
trilerp_backward_cuvol_one_density(
grid.links,
grads.grad_density_out,
grads.mask_out,
grid.stride_x,
grid.size[2],
ray.l, ray.pos, curr_grad_sigma);
}
} else {
ray.tmax = t;
last_total_color = total_color;
break;
}
}
t += opt.step_size;
}
if (total_alpha < 1.f) {
// Never saturated
last_total_color = opt.background_brightness * (
grad_output[0] + grad_output[1] + grad_output[2]);
}
if (last_total_color != 0.f) {
t = ray.tmin;
total_alpha = 0.f;
while (t <= ray.tmax) {
#pragma unroll 3
for (int j = 0; j < 3; ++j) {
ray.pos[j] = fmaf(t, ray.dir[j], ray.origin[j]);
ray.pos[j] = min(max(ray.pos[j], 0.f), grid.size[j] - 1.f);
ray.l[j] = min(static_cast<int32_t>(ray.pos[j]), grid.size[j] - 2);
ray.pos[j] -= static_cast<float>(ray.l[j]);
}
const float skip = compute_skip_dist(ray,
grid.links, grid.stride_x,
grid.size[2], 0);
if (skip >= opt.step_size) {
// For consistency, we skip forward by whole multiples of the step size
t += ceilf(skip / opt.step_size) * opt.step_size;
continue;
}
float sigma = trilerp_cuvol_one(
grid.links,
grid.density_data,
grid.stride_x,
grid.size[2],
1,
ray.l, ray.pos,
0);
const float curr_transmit = _EXP(-ray.world_step * sigma);
total_alpha = fminf(total_alpha + 1.f - curr_transmit, 1.f);
// const float weight = new_total_alpha - total_alpha;
// total_alpha = new_total_alpha;
if (total_alpha >= 1.f) break;
float curr_grad_sigma = -ray.world_step * curr_transmit * last_total_color;
if (lane_id == 0) {
trilerp_backward_cuvol_one_density(
grid.links,
grads.grad_density_out,
grads.mask_out,
grid.stride_x,
grid.size[2],
ray.l, ray.pos, curr_grad_sigma);
}
t += opt.step_size;
}
}
}
// BEGIN KERNELS
__launch_bounds__(TRACE_RAY_CUDA_THREADS, MIN_BLOCKS_PER_SM)
__global__ void render_ray_kernel(
PackedSparseGridSpec grid,
PackedRaysSpec rays,
RenderOptions opt,
torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> out) {
CUDA_GET_THREAD_ID(tid, int(rays.origins.size(0)) * WARP_SIZE);
const int ray_id = tid >> 5;
const int ray_blk_id = threadIdx.x >> 5;
const int lane_id = threadIdx.x & 0x1F;
if (lane_id >= grid.sh_data_dim)
return;
__shared__ float sphfunc_val[TRACE_RAY_CUDA_RAYS_PER_BLOCK][10];
__shared__ SingleRaySpec ray_spec[TRACE_RAY_CUDA_RAYS_PER_BLOCK];
__shared__ typename WarpReducef::TempStorage temp_storage[
TRACE_RAY_CUDA_RAYS_PER_BLOCK];
ray_spec[ray_blk_id].set(rays.origins[ray_id].data(),
rays.dirs[ray_id].data());
calc_sphfunc(grid, lane_id,
ray_id,
ray_spec[ray_blk_id].dir,
sphfunc_val[ray_blk_id]);
ray_find_bounds(ray_spec[ray_blk_id], grid, opt, ray_id);
__syncwarp((1U << grid.sh_data_dim) - 1);
trace_ray_nvol(
grid,
ray_spec[ray_blk_id],
opt,
lane_id,
sphfunc_val[ray_blk_id],
temp_storage[ray_blk_id],
out[ray_id].data());
}
__launch_bounds__(TRACE_RAY_BKWD_CUDA_THREADS, MIN_BLOCKS_PER_SM)
__global__ void render_ray_backward_kernel(
PackedSparseGridSpec grid,
const float* __restrict__ grad_output,
const float* __restrict__ color_cache,
PackedRaysSpec rays,
RenderOptions opt,
bool grad_out_is_rgb,
float sparsity_loss,
PackedGridOutputGrads grads) {
CUDA_GET_THREAD_ID(tid, int(rays.origins.size(0)) * WARP_SIZE);
const int ray_id = tid >> 5;
const int ray_blk_id = threadIdx.x >> 5;
const int lane_id = threadIdx.x & 0x1F;
if (lane_id >= grid.sh_data_dim)
return;
__shared__ float sphfunc_val[TRACE_RAY_BKWD_CUDA_RAYS_PER_BLOCK][10];
__shared__ float grad_sphfunc_val[TRACE_RAY_CUDA_RAYS_PER_BLOCK][10];
__shared__ SingleRaySpec ray_spec[TRACE_RAY_BKWD_CUDA_RAYS_PER_BLOCK];
__shared__ typename WarpReducef::TempStorage temp_storage[
TRACE_RAY_CUDA_RAYS_PER_BLOCK];
ray_spec[ray_blk_id].set(rays.origins[ray_id].data(),
rays.dirs[ray_id].data());
const float vdir[3] = {ray_spec[ray_blk_id].dir[0],
ray_spec[ray_blk_id].dir[1],
ray_spec[ray_blk_id].dir[2] };
if (lane_id < grid.basis_dim) {
grad_sphfunc_val[ray_blk_id][lane_id] = 0.f;
}
calc_sphfunc(grid, lane_id,
ray_id,
vdir, sphfunc_val[ray_blk_id]);
if (lane_id == 0) {
ray_find_bounds(ray_spec[ray_blk_id], grid, opt, ray_id);
}
float grad_out[3];
if (grad_out_is_rgb) {
const float norm_factor = 2.f / (3 * int(rays.origins.size(0)));
#pragma unroll 3
for (int i = 0; i < 3; ++i) {
const float resid = color_cache[ray_id * 3 + i] - grad_output[ray_id * 3 + i];
grad_out[i] = resid * norm_factor;
}
} else {
#pragma unroll 3
for (int i = 0; i < 3; ++i) {
grad_out[i] = grad_output[ray_id * 3 + i];
}
}
__syncwarp((1U << grid.sh_data_dim) - 1);
trace_ray_nvol_backward(
grid,
grad_out,
color_cache + ray_id * 3,
ray_spec[ray_blk_id],
opt,
lane_id,
sphfunc_val[ray_blk_id],
grad_sphfunc_val[ray_blk_id],
temp_storage[ray_blk_id],
sparsity_loss,
grads);
calc_sphfunc_backward(
grid, lane_id,
ray_id,
vdir,
sphfunc_val[ray_blk_id],
grad_sphfunc_val[ray_blk_id],
grads.grad_basis_out);
}
} // namespace device
} // namespace
torch::Tensor volume_render_nvol(SparseGridSpec& grid, RaysSpec& rays, RenderOptions& opt) {
DEVICE_GUARD(grid.sh_data);
grid.check();
rays.check();
const auto Q = rays.origins.size(0);
torch::Tensor results = torch::empty_like(rays.origins);
const int cuda_n_threads = TRACE_RAY_CUDA_THREADS;
const int blocks = CUDA_N_BLOCKS_NEEDED(Q * WARP_SIZE, cuda_n_threads);
device::render_ray_kernel<<<blocks, cuda_n_threads>>>(
grid, rays, opt,
// Output
results.packed_accessor32<float, 2, torch::RestrictPtrTraits>());
CUDA_CHECK_ERRORS;
return results;
}
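// A small sketch (hypothetical helper) of the launch arithmetic used above:
// each ray owns one full warp, so with TRACE_RAY_CUDA_THREADS = 128 a block
// renders 4 rays and the grid needs ceil(Q * 32 / 128) blocks. Assumes a warp
// size of 32 lanes.
static inline int example_blocks_for_rays(long long num_rays, int threads_per_block)
{
  const long long total_threads = num_rays * 32;  // one 32-lane warp per ray
  return (int)((total_threads + threads_per_block - 1) / threads_per_block);
}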
void volume_render_nvol_backward(
SparseGridSpec& grid,
RaysSpec& rays,
RenderOptions& opt,
torch::Tensor grad_out,
torch::Tensor color_cache,
GridOutputGrads& grads) {
DEVICE_GUARD(grid.sh_data);
grid.check();
rays.check();
grads.check();
const auto Q = rays.origins.size(0);
const int cuda_n_threads_render_backward = TRACE_RAY_BKWD_CUDA_THREADS;
const int blocks = CUDA_N_BLOCKS_NEEDED(Q * WARP_SIZE, cuda_n_threads_render_backward);
device::render_ray_backward_kernel<<<blocks,
cuda_n_threads_render_backward>>>(
grid,
grad_out.data_ptr<float>(),
color_cache.data_ptr<float>(),
rays, opt,
false,
0.f,
// Output
grads);
CUDA_CHECK_ERRORS;
}
void volume_render_nvol_fused(
SparseGridSpec& grid,
RaysSpec& rays,
RenderOptions& opt,
torch::Tensor rgb_gt,
float _, // not supported
float sparsity_loss,
torch::Tensor rgb_out,
GridOutputGrads& grads) {
DEVICE_GUARD(grid.sh_data);
CHECK_INPUT(rgb_gt);
CHECK_INPUT(rgb_out);
grid.check();
rays.check();
grads.check();
const auto Q = rays.origins.size(0);
{
const int blocks = CUDA_N_BLOCKS_NEEDED(Q * WARP_SIZE, TRACE_RAY_CUDA_THREADS);
device::render_ray_kernel<<<blocks, TRACE_RAY_CUDA_THREADS>>>(
grid, rays, opt,
// Output
rgb_out.packed_accessor32<float, 2, torch::RestrictPtrTraits>());
}
{
const int blocks = CUDA_N_BLOCKS_NEEDED(Q * WARP_SIZE, TRACE_RAY_BKWD_CUDA_THREADS);
device::render_ray_backward_kernel<<<blocks, TRACE_RAY_BKWD_CUDA_THREADS>>>(
grid,
rgb_gt.data_ptr<float>(),
rgb_out.data_ptr<float>(),
rays,
opt,
true,
sparsity_loss,
// Output
grads);
}
CUDA_CHECK_ERRORS;
}
|
the_stack
|
#include <pybind11/pybind11.h>
#include <iostream>
#include <torch/extension.h>
#include <functional>
#include <pybind11/stl.h>
#include <pybind11/functional.h>
#include <torch/extension.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/detail/DeviceThreadHandles.h>
#include <ATen/cuda/CUDAContext.h>
__device__ int bisect_index(const int* values, int len, int needle) {
int a = 0, b = len;
while (b > a + 1) {
int m = (a + b) / 2;
if(values[m] > needle) {
b = m;
} else {
a = m;
}
}
if(values[a] != needle) {
printf("Error!! needle %d not found in array of length %d\n", needle, len);
}
return a;
}
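// A small host-side reference (hypothetical, for illustration/testing only) of
// the same lower-bound style search as bisect_index above. For
// values = {0, 2, 5, 7} and needle = 5 the interval [a, b) narrows
// 0..4 -> 2..4 -> 2..3 and the function returns index 2.
static inline int example_bisect_index_host(const int* values, int len, int needle)
{
  int a = 0, b = len;
  while (b > a + 1) {
    int m = (a + b) / 2;
    if (values[m] > needle) { b = m; } else { a = m; }
  }
  return a;  // the caller is expected to guarantee that needle is present
}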
__global__ void mult_MtM_kernel(int batchSize,
int M_numRows,
int M_nnz,
const int* M_rowPtr,
const int* M_colInd,
const double* Ms_val,
int MtM_numRows,
int MtM_nnz,
const int* MtM_rowPtr,
const int* MtM_colInd,
double* MtMs_val) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y * blockDim.y + threadIdx.y;
if(batchIndex >= batchSize || row >= M_numRows) {
return;
}
// matrices are in CSR format:
// rowPtr determines begin/end of row data,
// colInd determines the column index
int srcRow_offset = M_rowPtr[row];
int srcRow_len = M_rowPtr[row+1] - srcRow_offset;
const int* srcRow_colInd = M_colInd + srcRow_offset;
const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset;
double* MtMs_batch_val = MtMs_val + batchIndex * MtM_nnz;
for(int i = 0; i < srcRow_len; i++) {
int dstRow = srcRow_colInd[i];
int dstRow_offset = MtM_rowPtr[dstRow];
int dstRow_len = MtM_rowPtr[dstRow + 1] - MtM_rowPtr[dstRow];
const int* dstRow_colInd = MtM_colInd + dstRow_offset;
double* dstRow_val = MtMs_batch_val + dstRow_offset;
for(int j = 0; j < srcRow_len; j++) {
double val = srcRow_val[i] * srcRow_val[j];
int dstCol = srcRow_colInd[j];
// The result has a different sparsity pattern, so we have to find the
// position within destination row `dstRow` whose `colInd` entry equals
// `dstCol`
int positionInDstRow = bisect_index(dstRow_colInd, dstRow_len, dstCol);
atomicAdd(dstRow_val + positionInDstRow, val);
}
}
}
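// A minimal dense reference (hypothetical, host-side) of what the kernel above
// accumulates: (M^T M)[c1][c2] = sum over rows r of M[r][c1] * M[r][c2]. The
// CSR kernel realizes the same sum per pair of nonzeros within each row,
// locating the destination slot with bisect_index because MtM has its own
// sparsity pattern.
static inline void example_dense_MtM(const double* M, int rows, int cols, double* MtM /* cols x cols, zero-initialized */)
{
  for (int r = 0; r < rows; r++) {
    for (int c1 = 0; c1 < cols; c1++) {
      for (int c2 = 0; c2 < cols; c2++) {
        MtM[c1 * cols + c2] += M[r * cols + c1] * M[r * cols + c2];
      }
    }
  }
}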
torch::Tensor mult_MtM(int batchSize,
const torch::Tensor& M_rowPtr,
const torch::Tensor& M_colInd,
const torch::Tensor& Ms_val,
const torch::Tensor& MtM_rowPtr,
const torch::Tensor& MtM_colInd) {
int64_t M_numRows = M_rowPtr.size(0) - 1;
int64_t M_nnz = M_colInd.size(0);
TORCH_CHECK(M_rowPtr.device().is_cuda());
TORCH_CHECK(M_colInd.device().is_cuda());
TORCH_CHECK(Ms_val.device().is_cuda());
TORCH_CHECK(M_rowPtr.dtype() == torch::kInt);
TORCH_CHECK(M_colInd.dtype() == torch::kInt);
TORCH_CHECK(Ms_val.dtype() == torch::kDouble); // TODO: add support for float
TORCH_CHECK(M_rowPtr.dim() == 1);
TORCH_CHECK(M_colInd.dim() == 1);
TORCH_CHECK(Ms_val.dim() == 2);
TORCH_CHECK(Ms_val.size(0) == batchSize);
TORCH_CHECK(Ms_val.size(1) == M_nnz);
int64_t MtM_numRows = MtM_rowPtr.size(0) - 1;
int64_t MtM_nnz = MtM_colInd.size(0);
TORCH_CHECK(MtM_rowPtr.device().is_cuda());
TORCH_CHECK(MtM_colInd.device().is_cuda());
TORCH_CHECK(MtM_rowPtr.dim() == 1);
TORCH_CHECK(MtM_colInd.dim() == 1);
auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device());
torch::Tensor MtMs_val = torch::zeros({(long)batchSize, (long)MtM_nnz}, xOptions);
// TODO: do experiments on choice of work group size
dim3 wgs(1, 16);
dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y);
M_rowPtr.data_ptr<int>();
M_colInd.data_ptr<int>();
Ms_val.data_ptr<double>();
MtM_rowPtr.data_ptr<int>();
MtM_colInd.data_ptr<int>();
MtMs_val.data_ptr<double>();
// TODO: set stream according to torch
mult_MtM_kernel<<<numBlocks, wgs>>>(batchSize,
M_numRows,
M_nnz,
M_rowPtr.data_ptr<int>(),
M_colInd.data_ptr<int>(),
Ms_val.data_ptr<double>(),
MtM_numRows,
MtM_nnz,
MtM_rowPtr.data_ptr<int>(),
MtM_colInd.data_ptr<int>(),
MtMs_val.data_ptr<double>());
return MtMs_val;
}
__global__ void mat_vec_kernel(int batchSize,
int M_numRows,
int M_numCols,
int M_nnz,
const int* M_rowPtr,
const int* M_colInd,
const double* Ms_val,
const double* vec,
double* retv) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y * blockDim.y + threadIdx.y;
if(batchIndex >= batchSize || row >= M_numRows) {
return;
}
int srcRow_offset = M_rowPtr[row];
int srcRow_len = M_rowPtr[row+1] - srcRow_offset;
const int* srcRow_colInd = M_colInd + srcRow_offset;
const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset;
const double* srcVec = vec + batchIndex * M_numCols;
double value = 0.0;
for(int i = 0; i < srcRow_len; i++) {
value += srcRow_val[i] * srcVec[srcRow_colInd[i]];
}
*(retv + batchIndex * M_numRows + row) = value;
}
torch::Tensor mat_vec(int batchSize,
int M_numCols,
const torch::Tensor& M_rowPtr,
const torch::Tensor& M_colInd,
const torch::Tensor& Ms_val,
const torch::Tensor& vec) {
int64_t M_numRows = M_rowPtr.size(0) - 1;
int64_t M_nnz = M_colInd.size(0);
TORCH_CHECK(M_rowPtr.device().is_cuda());
TORCH_CHECK(M_colInd.device().is_cuda());
TORCH_CHECK(Ms_val.device().is_cuda());
TORCH_CHECK(M_rowPtr.dtype() == torch::kInt);
TORCH_CHECK(M_colInd.dtype() == torch::kInt);
TORCH_CHECK(Ms_val.dtype() == torch::kDouble); // TODO: add support for float
TORCH_CHECK(M_rowPtr.dim() == 1);
TORCH_CHECK(M_colInd.dim() == 1);
TORCH_CHECK(Ms_val.dim() == 2);
TORCH_CHECK(Ms_val.size(0) == batchSize);
TORCH_CHECK(Ms_val.size(1) == M_nnz);
TORCH_CHECK(vec.device().is_cuda());
TORCH_CHECK(vec.dim() == 2);
TORCH_CHECK(vec.size(0) == batchSize);
TORCH_CHECK(vec.size(1) == M_numCols);
auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device());
torch::Tensor retv = torch::empty({(long)batchSize, (long)M_numRows}, xOptions);
// TODO: do experiments on choice of work group size
dim3 wgs(1, 16);
dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y);
mat_vec_kernel<<<numBlocks, wgs>>>(batchSize,
M_numRows,
M_numCols,
M_nnz,
M_rowPtr.data_ptr<int>(),
M_colInd.data_ptr<int>(),
Ms_val.data_ptr<double>(),
vec.data_ptr<double>(),
retv.data_ptr<double>());
return retv;
}
__global__ void tmat_vec_kernel(int batchSize,
int M_numRows,
int M_numCols,
int M_nnz,
const int* M_rowPtr,
const int* M_colInd,
const double* Ms_val,
const double* vec,
double* retv) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y * blockDim.y + threadIdx.y;
if(batchIndex >= batchSize || row >= M_numRows) {
return;
}
int srcRow_offset = M_rowPtr[row];
int srcRow_len = M_rowPtr[row+1] - srcRow_offset;
const int* srcRow_colInd = M_colInd + srcRow_offset;
const double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset;
double vecVal = vec[batchIndex * M_numRows + row];
double* dstVec = retv + batchIndex * M_numCols;
for(int i = 0; i < srcRow_len; i++) {
atomicAdd(dstVec + srcRow_colInd[i], vecVal * srcRow_val[i]);
}
}
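// A minimal dense reference (hypothetical, host-side) of the transposed
// product computed by tmat_vec_kernel above: retv[c] = sum over rows r of
// M[r][c] * vec[r]. Because each CSR row scatters into many output columns,
// the kernel uses atomicAdd instead of a per-row reduction.
static inline void example_dense_tmat_vec(const double* M, int rows, int cols, const double* vec, double* retv /* zero-initialized, length cols */)
{
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      retv[c] += M[r * cols + c] * vec[r];
    }
  }
}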
torch::Tensor tmat_vec(int batchSize,
int M_numCols,
const torch::Tensor& M_rowPtr,
const torch::Tensor& M_colInd,
const torch::Tensor& Ms_val,
const torch::Tensor& vec) {
int64_t M_numRows = M_rowPtr.size(0) - 1;
int64_t M_nnz = M_colInd.size(0);
TORCH_CHECK(M_rowPtr.device().is_cuda());
TORCH_CHECK(M_colInd.device().is_cuda());
TORCH_CHECK(Ms_val.device().is_cuda());
TORCH_CHECK(M_rowPtr.dtype() == torch::kInt);
TORCH_CHECK(M_colInd.dtype() == torch::kInt);
TORCH_CHECK(Ms_val.dtype() == torch::kDouble); // TODO: add support for float
TORCH_CHECK(M_rowPtr.dim() == 1);
TORCH_CHECK(M_colInd.dim() == 1);
TORCH_CHECK(Ms_val.dim() == 2);
TORCH_CHECK(Ms_val.size(0) == batchSize);
TORCH_CHECK(Ms_val.size(1) == M_nnz);
TORCH_CHECK(vec.device().is_cuda());
TORCH_CHECK(vec.dim() == 2);
TORCH_CHECK(vec.size(0) == batchSize);
TORCH_CHECK(vec.size(1) == M_numRows);
auto xOptions = torch::TensorOptions().dtype(torch::kDouble).device(Ms_val.device());
torch::Tensor retv = torch::zeros({(long)batchSize, (long)M_numCols}, xOptions);
// TODO: do experiments on choice of work group size
dim3 wgs(1, 16);
dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y);
tmat_vec_kernel<<<numBlocks, wgs>>>(batchSize,
M_numRows,
M_numCols,
M_nnz,
M_rowPtr.data_ptr<int>(),
M_colInd.data_ptr<int>(),
Ms_val.data_ptr<double>(),
vec.data_ptr<double>(),
retv.data_ptr<double>());
return retv;
}
__global__ void apply_damping_kernel(int batchSize,
int M_numRows,
int M_numCols,
int M_nnz,
const int* M_rowPtr,
const int* M_colInd,
double* Ms_val,
double alpha,
double beta) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y * blockDim.y + threadIdx.y;
if(batchIndex >= batchSize || row >= M_numRows) {
return;
}
int srcRow_offset = M_rowPtr[row];
int srcRow_len = M_rowPtr[row+1] - srcRow_offset;
const int* srcRow_colInd = M_colInd + srcRow_offset;
double* srcRow_val = Ms_val + batchIndex * M_nnz + srcRow_offset;
for(int i = 0; i < srcRow_len; i++) {
if(srcRow_colInd[i] == row) {
srcRow_val[i] += alpha * srcRow_val[i] + beta;
}
}
}
void apply_damping(int batchSize,
int M_numCols,
const torch::Tensor& M_rowPtr,
const torch::Tensor& M_colInd,
const torch::Tensor& Ms_val,
double alpha,
double beta) {
int64_t M_numRows = M_rowPtr.size(0) - 1;
int64_t M_nnz = M_colInd.size(0);
TORCH_CHECK(M_rowPtr.device().is_cuda());
TORCH_CHECK(M_colInd.device().is_cuda());
TORCH_CHECK(Ms_val.device().is_cuda());
TORCH_CHECK(M_rowPtr.dtype() == torch::kInt);
TORCH_CHECK(M_colInd.dtype() == torch::kInt);
TORCH_CHECK(Ms_val.dtype() == torch::kDouble); // TODO: add support for float
TORCH_CHECK(M_rowPtr.dim() == 1);
TORCH_CHECK(M_colInd.dim() == 1);
TORCH_CHECK(Ms_val.dim() == 2);
TORCH_CHECK(Ms_val.size(0) == batchSize);
TORCH_CHECK(Ms_val.size(1) == M_nnz);
// TODO: do experiments on choice of work group size
dim3 wgs(1, 16);
dim3 numBlocks((M_numRows + wgs.x - 1) / wgs.x, (batchSize + wgs.y - 1) / wgs.y);
apply_damping_kernel<<<numBlocks, wgs>>>(batchSize,
M_numRows,
M_numCols,
M_nnz,
M_rowPtr.data_ptr<int>(),
M_colInd.data_ptr<int>(),
Ms_val.data_ptr<double>(),
alpha,
beta);
}
PYBIND11_MODULE(mat_mult, m) {
m.doc() = "Python bindings for batched mat operations";
m.def("mult_MtM", &mult_MtM,
"Batched multiplication of mat by transpose: Mt * M\n"
"The sparse structure of the result must be computed\n"
"beforehand and supplied as MtM_rowPtr, MtM_colInd",
py::arg("batch_size"),
py::arg("M_rowPtr"),
py::arg("M_colInd"),
py::arg("Ms_val"),
py::arg("MtM_rowPtr"),
py::arg("MtM_colInd")
);
m.def("mat_vec", &mat_vec,
"Batched multiplication of mat by vector: M * v",
py::arg("batch_size"),
py::arg("M_numCols"),
py::arg("M_rowPtr"),
py::arg("M_colInd"),
py::arg("Ms_val"),
py::arg("vec")
);
m.def("tmat_vec", &tmat_vec,
"Batched multiplication of transposed mat by vector: Mt * v",
py::arg("batch_size"),
py::arg("M_numCols"),
py::arg("M_rowPtr"),
py::arg("M_colInd"),
py::arg("Ms_val"),
py::arg("vec")
);
m.def("apply_damping", &apply_damping,
"M.diagonal() += M.diagonal() * alpha + beta",
py::arg("batch_size"),
py::arg("M_numCols"),
py::arg("M_rowPtr"),
py::arg("M_colInd"),
py::arg("Ms_val"),
py::arg("alpha"),
py::arg("beta")
);
};
|
the_stack
|
#if __CUDA_ARCH__ >= 300
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
void setsizes(long long N, dim3 *gridp, int *nthreadsp);
__forceinline__ __device__ int solve1(int j) {
float v = sqrtf((float)j);
#pragma unroll
for (int k = 0; k < 5; k++) {
v = v - (v*(v+1)-2*j)/(2*v+1); // Newton iterations to find first index.
}
return (int)(v+2e-5f);
}
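// A host-side sketch (hypothetical, exact but slow) of the pair index that
// solve1 recovers: pairs (j1, j2) with j2 <= j1 are enumerated linearly as
// j = j1*(j1+1)/2 + j2, so j1 is the largest integer with j1*(j1+1)/2 <= j and
// j2 is the remainder. For j = 7 this yields (j1, j2) = (3, 1). solve1
// approximates the same j1 with a few Newton steps on v*(v+1) - 2*j = 0.
static inline void example_pair_from_linear_index(int j, int& j1, int& j2)
{
  j1 = 0;
  while ((j1 + 1) * (j1 + 2) / 2 <= j) { ++j1; }  // exact reference for the Newton solve
  j2 = j - j1 * (j1 + 1) / 2;
}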
__forceinline__ __device__ void solvex(int n, int v, int &i, int &j) {
int n1 = ((n >> 1) << 1) + 1;
int n2 = (n + 1) >> 1;
int even = (n1 != n);
j = v / n1;
i = v - n1 * j;
if (j > i - even) {
i = n1 - i - 1;
j = n2 + n2 - j + 1;
} else {
i = i - even;
}
}
// Feature hashing multiply and multiply-transpose.
// This one enumerates, hashes and multiplies all pairs of features.
//
// NOTE: The single-matrix version (hashmult) uses a fast lookup recurrence which is only valid up to 3000 base features per column (approx 4.5 million pairs)
// Given dense A and sparse B, for each column of B, enumerate all pairs of features, hash to a single feature index, and multiply by A into C
__global__ void __hashmult(int nrows, int nfeats, int bncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {
bool doit = false;
int istart = ((long long)blockIdx.x) * bncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * bncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i]; // Range of nz rows in this column
int jend = Bjc[i+1];
int nr = jend - jstart; // Number of nz rows
int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs)
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = solve1(j); // Compute the first and second indices
int j2 = j - j1*(j1+1)/2;
// int j1, j2;
// solvex(todo, j, j1, j2);
float f1 = Bdata[jstart + j1]; // Get the two features
float f2 = Bdata[jstart + j2];
int r1 = Bir[jstart + j1]; // And their row indices
int r2 = Bir[jstart + j2];
long long rank = r1 + 1;
float prod = f1;
if (j1 == j2) {
doit = (rank < brows1);
} else {
prod *= f2;
rank *= r2 + 1;
doit = (rank < brows2);
}
if (doit) {
int ind = mmhash2(r1, r2, nfeats); // Hash the indices
if (transpose > 0) {
float sum = A[threadIdx.x + nrows * i] * prod; // Do the product
atomicAdd(&C[threadIdx.x + nrows * ind], sum);
} else {
float sum = A[threadIdx.x + nrows * ind] * prod; // Do the product
atomicAdd(&C[threadIdx.x + nrows * i], sum);
}
}
}
}
}
__forceinline__ __device__ int hash2(int a, int b, int modulus) {
return (((a * 453423453) + b) * 34143242142) % modulus;
}
#if __CUDA_ARCH__ >= 300
// This version is designed for few (or one) rows in A. It allocates one warp per column
__global__ void __hashmult2(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {
bool doit = false;
int istart = ((long long)blockIdx.x) * ncols / gridDim.x;
int iend = ((long long)(blockIdx.x+1)) * ncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i]; // Range of nz rows in this column
int jend = Bjc[i+1];
int nr = jend - jstart; // Number of nz rows
for (int j1 = 0; j1 < nr; j1 += blockDim.x) { // work on a block of data
float f1 = 0;
int r1 = -1;
if (j1 + threadIdx.x < nr) {
f1 = Bdata[jstart + j1 + threadIdx.x]; // Get the two features
r1 = Bir[jstart + j1 + threadIdx.x]; // And their row indices
}
for (int j2 = j1; j2 < nr; j2 += blockDim.x) { // work on a block of data
float f2 = 0;
int r2 = -1;
if (j2 + threadIdx.x < nr) {
f2 = Bdata[jstart + j2 + threadIdx.x];
r2 = Bir[jstart + j2 + threadIdx.x];
}
for (int k = 0; k < 32; k++) {
float f2shift = __shfl(f2, k);
int r2shift = __shfl(r2, k);
if (j2 + k < nr && r1 >= 0) {
long long rank = r1 + 1;
float prod = f1;
doit = false;
if (j1 + threadIdx.x == j2 + k) {
doit = (rank < brows1);
} else if (j1 + threadIdx.x < j2 + k) {
prod *= f2shift;
rank *= r2shift + 1;
doit = (rank < brows2);
}
if (doit) {
int ind = mmhash2(r1, r2shift, nfeats); // Hash the indices
if (transpose > 0) {
for (int m = 0; m < nrows; m++) {
float sum = A[m + nrows * i] * prod; // Do the product
atomicAdd(&C[m + nrows * ind], sum);
// atomicAdd(&C[0], sum);
}
} else {
for (int m = 0; m < nrows; m++) {
float sum = A[m + nrows * ind] * prod; // Do the product
atomicAdd(&C[m + nrows * i], sum);
// atomicAdd(&C[0], sum);
}
}
}
}
}
}
}
}
}
#else
__global__ void __hashmult2(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {}
#endif
int hashmult(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {
if (nrows >= 0) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, ncols);
__hashmult<<<nblocks,threadDim>>>(nrows, nfeats, ncols, brows1, brows2, A, Bdata, Bir, Bjc, C, transpose);
} else {
dim3 threadDim(32, 1, 1);
int nblocks = min(MAXXGRID, ncols);
__hashmult2<<<nblocks,threadDim>>>(nrows, nfeats, ncols, brows1, brows2, A, Bdata, Bir, Bjc, C, transpose);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
//__forceinline__ __device__ long long __pairembed(long long r1, int r2) {
// return ((r1+r2)*(r1+r2+1) >> 1) + r2;
//}
// The pair embedding function assumes r1x > r2x >= 0;
__forceinline__ __device__ long long __pairembed(long long r1x, int r2x) {
long long r1 = r1x+1;
int r2 = r2x+1;
float loc1 = (float) r1;
float loc2 = (float) r2;
int nbits1 = ((*(int *)(&loc1)) >> 23) - 126;
int nbits2 = ((*(int *)(&loc2)) >> 23) - 126;
int len = nbits1 + nbits2 - 2;
float loc3 = (float) len;
int lenbits = 0;
if (len > 1) lenbits = ((*(int *)(&loc3)) >> 23) - 127;
r2 = r2 & ((1 << (nbits2-1)) - 1);
long long x = (((r1 << (nbits2-1)) | r2) << lenbits) | (nbits2-1);
return max((long long)0,x-2);
}
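// A worked trace (illustration only) of __pairembed for r1x = 2, r2x = 1:
// r1 = 3 ("11", nbits1 = 2), r2 = 2 ("10", nbits2 = 2), len = 2, lenbits = 1;
// the top bit of r2 is dropped (r2 -> 0) and the packed code is
// (((3 << 1) | 0) << 1) | 1 = 13, returned as 13 - 2 = 11. The hypothetical
// helper below isolates the float-exponent trick used above to get the bit
// length of a positive integer, e.g. 3 -> 2 bits, 8 -> 4 bits.
static inline int example_float_bit_length(int v)
{
  float f = (float)v;
  return ((*(int *)(&f)) >> 23) - 126;
}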
__global__ void __dopairembed(int *r1, int *r2, long long *res, int n) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < n; i += blockDim.x * gridDim.x * gridDim.y) {
res[i] = __pairembed(r1[i], r2[i]);
}
}
int pairembed(int *r1, int *r2, long long *res, int n) {
int nthreads;
dim3 griddims;
setsizes(n, &griddims, &nthreads);
__dopairembed<<<griddims,nthreads>>>(r1, r2, res, n);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// Pair mult multiplies base features and pairs of features.
//
// NOTE: The single-matrix version uses a fast lookup recurrence which is only valid up to 3000 base features per column (approx 4.5 million pairs)
// Given dense A and sparse B, for each column of B, enumerate all pairs of features, hash to a single feature index, and multiply by A into C
// todo: fix offsets
__global__ void __pairmult(int nrows, int bncols, int brows1, int brows2, float *A, int lda, float *A2, int lda2,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, float *C, int ldc, int transpose) {
bool doit = false;
int istart = ((long long)blockIdx.x) * bncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * bncols / gridDim.x;
float *AX;
int ldax;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i + bcoff]; // Range of nz rows in this column
int jend = Bjc[i+1 + bcoff];
int nr = jend - jstart; // Number of nz rows
int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs)
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = solve1(j); // Compute the first and second indices
int j2 = j - j1*(j1+1)/2;
// int j1, j2;
// solvex(todo, j, j1, j2);
float f1 = Bdata[jstart + j1]; // Get the two features
float f2 = Bdata[jstart + j2];
int r1 = Bir[jstart + j1] - broff; // And their row indices
int r2 = Bir[jstart + j2] - broff;
long long rank = r1;
float prod = f1;
doit = (r1 >= 0 && r2 >= 0);
if (j1 == j2) {
doit = doit && r1 < brows1;
AX = A;
ldax = lda;
} else {
rank = __pairembed(r1, r2);
doit = doit && (rank >= 0 && rank < brows2);
if (doit) {
prod = f1*f2/(abs(f1)+abs(f2)+1.0e-7f);
AX = A2;
ldax = lda2;
}
}
if (doit) {
if (transpose > 0) {
float sum = AX[threadIdx.x + ldax * i] * prod; // Do the product
atomicAdd(&C[threadIdx.x + ldc * rank], sum);
} else {
float sum = AX[threadIdx.x + ldax * rank] * prod; // Do the product
atomicAdd(&C[threadIdx.x + ldc * i], sum);
}
}
}
}
}
#if __CUDA_ARCH__ >= 300
// This version is designed for few (or one) rows in A. It allocates one warp per column
// todo: implement the offsets.
__global__ void __pairmult2(int nrows, int bncols, int brows1, int brows2, float *A, int lda, float *A2, int lda2,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, float *C, int ldc, int transpose) {
bool doit = false;
int istart = ((long long)blockIdx.x) * bncols / gridDim.x;
int iend = ((long long)(blockIdx.x+1)) * bncols / gridDim.x;
float *AX;
int ldax;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i + bcoff]; // Range of nz rows in this column
int jend = Bjc[i+1 + bcoff];
int nr = jend - jstart; // Number of nz rows
for (int j1 = 0; j1 < nr; j1 += blockDim.x) { // work on a block of data
float f1 = 0;
int r1 = -1;
if (j1 + threadIdx.x < nr) {
f1 = Bdata[jstart + j1 + threadIdx.x]; // Get the two features
r1 = Bir[jstart + j1 + threadIdx.x] - broff; // And their row indices
}
for (int j2 = j1; j2 < nr; j2 += blockDim.x) { // work on a block of data
float f2 = 0;
int r2 = -1;
if (j2 + threadIdx.x < nr) {
f2 = Bdata[jstart + j2 + threadIdx.x];
r2 = Bir[jstart + j2 + threadIdx.x] - broff;
}
for (int k = 0; k < 32; k++) {
float f2shift = __shfl(f2, k);
int r2shift = __shfl(r2, k);
if (j2 + k < nr && r1 >= 0) {
long long rank = r1;
float prod = f1;
doit = (r1 >= 0 && r1 < brows1 && r2 >= 0 && r2 < brows1);
if (j1 + threadIdx.x == j2 + k) {
AX = A;
ldax = lda;
} else if (j1 + threadIdx.x < j2 + k) {
rank = __pairembed(r1, r2);
doit = doit && (rank < brows2);
if (doit) {
prod *= f2shift;
AX = A2;
ldax = lda2;
}
}
if (doit) {
if (transpose > 0) {
for (int m = 0; m < nrows; m++) {
float sum = AX[m + ldax * i] * prod; // Do the product
atomicAdd(&C[m + ldc * rank], sum);
// atomicAdd(&C[0], sum);
}
} else {
for (int m = 0; m < nrows; m++) {
float sum = AX[m + ldax * rank] * prod; // Do the product
atomicAdd(&C[m + ldc * i], sum);
// atomicAdd(&C[0], sum);
}
}
}
}
}
}
}
}
}
#else
__global__ void __pairmult2(int nrows, int bncols, int brows1, int brows2, float *A, int lda, float *A2, int lda2,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, float *C, int ldc, int transpose) {}
#endif
int pairMultTile(int nrows, int bncols, int brows1, int brows2, float *A, int lda, float *A2, int lda2,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, float *C, int ldc, int transpose) {
if (nrows >= 0) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, bncols);
__pairmult<<<nblocks,threadDim>>>(nrows, bncols, brows1, brows2, A, lda, A2, lda2, Bdata, Bir, Bjc, broff, bcoff, C, ldc, transpose);
} else {
dim3 threadDim(32, 1, 1);
int nblocks = min(MAXXGRID, bncols);
__pairmult2<<<nblocks,threadDim>>>(nrows, bncols, brows1, brows2, A, lda, A2, lda2, Bdata, Bir, Bjc, broff, bcoff, C, ldc, transpose);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__forceinline__ __device__ void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float lr, ve, te, pve, ste, ngrad, ssq, ssqnew;
ssq = Sumsq[ihere];
ssqnew = hypotf(grad,ssq);
atomicAdd(&Sumsq[ihere], ssqnew - ssq);
ssq = ssqnew * sqrtf(istep);
if (addgrad) {
lr = (lrlen > 1) ? lrate[i] : lrate[0];
ve = (vexplen > 1) ? vexp[i] : vexp[0];
te = (texplen > 1) ? texp[i] : texp[0];
pve = (ve == 0.5f) ? ssq : ((ve == 0) ? 1.0f : pow(ssq, 2*ve));
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
atomicAdd(&MM[ihere], ngrad);
}
if (Mask != NULL) {
if (maskrows > 1) {
if (Mask[ihere] == 0) MM[ihere] = 0;
} else {
if (Mask[jhere] == 0) MM[ihere] = 0;
}
}
}
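// A scalar sketch (hypothetical, host-side) of the ADAGrad-style step applied
// by __gupdate above for the common case ve = 0.5, te = 0: a running root of
// the sum of squared gradients is maintained via hypot, and the applied step
// is lr * grad / (sqrt(sum_i grad_i^2) * sqrt(istep)). Other ve/te values
// generalize the exponents on the denominator and on the time step.
static inline float example_adagrad_step(float grad, float& sumsq_root, float lr, float istep)
{
  sumsq_root = hypotf(grad, sumsq_root);           // sqrt(old_sumsq_root^2 + grad^2)
  return lr * grad / (sumsq_root * sqrtf(istep));  // scaled ADAGrad step
}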
/*
__forceinline__ __device__ void __gupdate(float grad, int i, int ithere, int jthere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float lr, ve, te, pve, ste, ngrad;
Sumsq[ithere] += grad * grad + epsilon;
if (addgrad) {
lr = (lrlen > 1) ? lrate[i] : lrate[0];
ve = (vexplen > 1) ? vexp[i] : vexp[0];
te = (texplen > 1) ? texp[i] : texp[0];
pve = (ve == 0) ? 1.0f : pow(Sumsq[ithere] * istep, ve);
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
atomicAdd(&MM[ithere], ngrad);
}
if (Mask != NULL) {
if (maskrows > 1) {
if (Mask[ithere] == 0) MM[ithere] = 0;
} else {
if (Mask[jthere] == 0) MM[ithere] = 0;
}
}
}*/
__global__ void __hashmultADAGrad(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, int transpose,
float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
bool doit = false;
int ihere, ithere, jthere;
float grad;
int istart = ((long long)blockIdx.x) * ncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i]; // Range of nz rows in this column
int jend = Bjc[i+1];
int nr = jend - jstart; // Number of nz rows
int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs)
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = solve1(j); // Compute the first and second indices
int j2 = j - j1*(j1+1)/2;
// int j1, j2;
// solvex(todo, j, j1, j2);
float f1 = Bdata[jstart + j1]; // Get the two features
float f2 = Bdata[jstart + j2];
int r1 = Bir[jstart + j1]; // And their row indices
int r2 = Bir[jstart + j2];
long long rank = r1 + 1;
float prod = f1;
if (j1 == j2) {
doit = (rank < brows1);
} else {
prod *= f2;
rank *= r2 + 1;
doit = (rank < brows2);
}
if (doit) {
int ind = mmhash2(r1, r2, nfeats); // Hash the indices
if (transpose > 0) {
ihere = threadIdx.x + nrows * i;
ithere = threadIdx.x + nrows * ind;
jthere = ind;
} else {
ithere = threadIdx.x + nrows * i;
jthere = i;
ihere = threadIdx.x + nrows * ind;
}
grad = A[ihere] * prod; // raw gradient
__gupdate(grad, threadIdx.x, ithere, jthere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
}
}
}
int hashmultADAGrad(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, int transpose,
float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, ncols);
__hashmultADAGrad<<<nblocks,threadDim>>>(nrows, nfeats, ncols, brows1, brows2, A, Bdata, Bir, Bjc, transpose,
MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
//
// nrows = rows of MM (and other model mats)
// ncols = columns of B = columns of A
//
__global__ void __pairMultADAGradTile(int nrows, int bncols, int brows1, int brows2, float *A, int lda, int aroff, int acoff,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, int transpose,
float *MM, int ldmm, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
bool doit = false;
int ihere, ithere, jhere, jthere;
float grad;
int istart = ((long long)blockIdx.x) * bncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * bncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i+bcoff]; // Range of nz rows in this column
int jend = Bjc[i+1+bcoff];
int nr = jend - jstart; // Number of nz rows
int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs)
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = solve1(j); // Compute the first and second indices
int j2 = j - j1*(j1+1)/2;
// int j1, j2;
// solvex(todo, j, j1, j2);
float f1 = Bdata[jstart + j1]; // Get the two features
float f2 = Bdata[jstart + j2];
int r1 = Bir[jstart + j1]-broff; // And their row indices
int r2 = Bir[jstart + j2]-broff;
long long rank = r1;
float prod = f1;
doit = (r1 >= 0 && r2 >= 0);
if (doit) {
if (j1 == j2) {
doit = doit && r1 < brows1;
ithere = 0;
jthere = 0;
} else {
rank = __pairembed(r1, r2);
doit = doit && (rank < brows2);
if (doit) {
prod = f1*f2/(abs(f1)+abs(f2)+1.0e-7f);
ithere = ldmm;
jthere = 1;
}
}
}
if (doit) {
if (transpose > 0) {
ihere = threadIdx.x + aroff + lda * (i + acoff);
jhere = threadIdx.x + aroff;
ithere += threadIdx.x + 2 * ldmm * rank;
jthere += 2 * rank;
} else {
ihere = threadIdx.x + aroff + lda * (rank + acoff);
jhere = threadIdx.x + aroff;
ithere += threadIdx.x + 2 * ldmm * i;
jthere += 2 * i;
}
grad = A[ihere] * prod; // raw gradient
__gupdate(grad, jhere, ithere, jthere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
}
}
}
}
int pairMultADAGradTile(int nrows, int bncols, int brows1, int brows2, float *A, int lda, int aroff, int acoff,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, int transpose,
float *MM, int ldmm, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, bncols);
__pairMultADAGradTile<<<nblocks,threadDim>>>(nrows, bncols, brows1, brows2, A, lda, aroff, acoff, Bdata, Bir, Bjc, broff, bcoff, transpose,
MM, ldmm, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __hashcross(int nrows, int nfeats, int ncols,
float *A,
float *Bdata, int *Bir, int *Bjc,
float *Cdata, int *Cir, int *Cjc,
float *D, int transpose) {
int r1, r2, ind;
int istart = ((long long)blockIdx.x) * ncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart1 = Bjc[i]; // Range of nz rows in this column of B
int jend1 = Bjc[i+1];
int jstart2 = Cjc[i]; // Range of nz rows in this column of C
int jend2 = Cjc[i+1];
int nr1 = jend1 - jstart1; // Number of nz rows
int nr2 = jend2 - jstart2; // Number of nz rows
int todo = (nr1+1) * (nr2+1) - 1; // Number of pairs + singletons to process
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = j / nr2;
int j2 = j - j1 * nr2;
float prod = 1.0f;
int hash = seed;
if (j1 < nr1) {
prod *= Bdata[jstart1 + j1]; // Get the two features
r1 = Bir[jstart1 + j1]; // And their row indices
hash = h1(r1, hash);
}
if (j2 < nr2) {
prod *= Cdata[jstart2 + j2];
r2 = Cir[jstart2 + j2];
hash = h1(r2, hash); // Hash the indices
}
ind = mmhashend(hash, nfeats);
if (transpose > 0) {
float sum = A[threadIdx.x + nrows * i] * prod; // Do the product
atomicAdd(&D[threadIdx.x + nrows * ind], sum);
} else {
float sum = A[threadIdx.x + nrows * ind] * prod;
atomicAdd(&D[threadIdx.x + nrows * i], sum);
}
}
}
}
int hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, ncols);
__hashcross<<<nblocks,threadDim>>>(nrows, nfeats, ncols, A, Bdata, Bir, Bjc, Cdata, Cir, Cjc, D, transpose);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
|
the_stack
|
using namespace sd;
/**
 * This is a utility kernel that updates the given special buffer with proper values in device memory
*/
extern "C" SD_KERNEL void prepareShapeBuffer(int* dimension, int* maxDimension, sd::LongType* specialPointer, int rows,
sd::DataType dataType) {
sd::LongType tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > 0) return;
dimension[0] = 0;
maxDimension[0] = 1;
specialPointer[0] = 2;
specialPointer[1] = rows;
specialPointer[2] = 1;
specialPointer[3] = 1;
specialPointer[4] = 1;
specialPointer[5] = 0;
specialPointer[6] = 1;
specialPointer[7] = 99;
ArrayOptions::setDataType(specialPointer, dataType);
// printf("special[0]: [%lld]\n", (long long) specialPointer[0]);
// shape::printShapeInfoLinear("prepareShapeBuffer", specialPointer);
}
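// A hedged annotation of the rank-2 shape-info layout written above (length
// 2 * rank + 4 = 8): index 0 holds the rank, indices 1..2 the shape
// {rows, 1}, indices 3..4 the strides {1, 1}, index 5 the extra/flags word
// that ArrayOptions uses to encode the data type, index 6 the element-wise
// stride and index 7 the order (99 == 'c'). The hypothetical host helper
// below only restates that layout.
static inline void example_fill_row_vector_shape(sd::LongType* shapeInfo, sd::LongType rows)
{
  shapeInfo[0] = 2;     // rank
  shapeInfo[1] = rows;  // shape[0]
  shapeInfo[2] = 1;     // shape[1]
  shapeInfo[3] = 1;     // stride[0]
  shapeInfo[4] = 1;     // stride[1]
  shapeInfo[5] = 0;     // extra / flags (data type is set separately via ArrayOptions)
  shapeInfo[6] = 1;     // element-wise stride
  shapeInfo[7] = 99;    // order: 'c'
}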
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execPairwiseTransform(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void* extraParams) {
if (lc == nullptr)
throw std::runtime_error("NativeOpExecutioner::execPairwiseTransform: launch context cannot be nullptr !");
auto stream = lc->getCudaStream();
if (stream == nullptr)
throw std::runtime_error("NativeOpExecutioner::execPairwiseTransform: CUDA stream cannot be nullptr !");
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
if (xType != zType && yType != zType)
throw std::runtime_error(
"NativeOpExecutioner::execPairwiseTransform requires Z operand to have either X or Y type");
dim3 launchDims(256, 1024, 8192);
#ifdef SD_EXPERIMENTAL_ENABLED
BUILD_PAIRWISE_SELECTOR(
xType, yType, zType, functions::pairwise_transforms::PairWiseTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams),
SD_COMMON_TYPES, SD_COMMON_TYPES)
#else
BUILD_SINGLE_SELECTOR_THRICE(
xType, functions::pairwise_transforms::PairWiseTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams),
SD_COMMON_TYPES)
#endif
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execPairwiseTransform failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execPairwiseBoolTransform(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void* extraParams) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
if (!DataTypeUtils::isB(zType))
throw sd::datatype_exception::build("NativeOpExecutioner::execPairwiseBoolTransform wrong Z operand data type",
sd::DataType::BOOL, zType);
if (yType != xType)
throw sd::datatype_exception::build(
"NativeOpExecutioner::execPairwiseBoolTransform both operands must have same data type", xType, yType);
dim3 launchDims(256, 1024, 16384);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::pairwise_transforms::PairWiseBoolTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams),
SD_COMMON_TYPES, SD_BOOL_TYPES)
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execPairwiseBoolTransform failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execPairwiseIntTransform(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void* extraParams) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
  if (!DataTypeUtils::isZ(zType))
    throw sd::datatype_exception::build("NativeOpExecutioner::execPairwiseIntTransform wrong Z operand data type",
                                        sd::DataType::INT32, zType);
  if (yType != xType || zType != xType)
    throw sd::datatype_exception::build(
        "NativeOpExecutioner::execPairwiseIntTransform X, Y and Z operands must have the same data type", xType, yType);
dim3 launchDims(256, 1024, 16384);
BUILD_SINGLE_SELECTOR(
xType, functions::pairwise_transforms::PairWiseIntTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams),
SD_INTEGER_TYPES)
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execPairwiseIntTransform failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execSummaryStatsScalar(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, bool biasCorrected) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::summarystats::SummaryStatsReduce,
::execSummaryStatsReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ,
dZShapeInfo, hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execSummaryStatsScalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execBroadcastBool(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void* extraParams,
int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ,
sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires Z operand to have BOOL type");
if (yType != xType)
throw std::runtime_error("NativeOpExecutioner::execBroadcastBool requires both X & Y operands to have same type");
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F3B opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 1024);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::broadcast::BroadcastBool,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams,
dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES, SD_BOOL_TYPES)
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execBroadcastBool(sd::LaunchContext* lc, const int opNum, const void* hX,
const sd::LongType* hXShapeInfo, const void* dX,
const sd::LongType* dXShapeInfo, const void* hY,
const sd::LongType* hYShapeInfo, const void* dY,
const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo,
void* dZ, const sd::LongType* dZShapeInfo, void* extraParams) {
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
dim3 launchDims;
launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock
launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid
launchDims.z = 1024; // shared memory
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::broadcast::BroadcastBool,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams),
SD_COMMON_TYPES, SD_BOOL_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execBroadcastBool failed", res);
}
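// The element-wise overloads above size their grid from the output length: a fixed threadsPerBlock
// (SD_MAX_NUM_THREADS / 4) and a ceiling-divided blocksPerGrid. A small sketch of that pattern is
// restated below for reference; the helper name is hypothetical and not part of the original file.
static dim3 elementWiseLaunchDimsExample(const sd::LongType* hZShapeInfo) {
  dim3 launchDims;
  launchDims.y = SD_MAX_NUM_THREADS / 4;                                          // threadsPerBlock
  launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y;  // blocksPerGrid (ceil)
  launchDims.z = 1024;                                                            // shared memory, bytes
  return launchDims;
}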
void NativeOpExecutioner::execInverseBroadcastBool(
sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void* extraParams, int* dimension, int dimensionLength,
sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ,
sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
  if (!DataTypeUtils::isB(zType))
    throw std::runtime_error("NativeOpExecutioner::execInverseBroadcastBool requires Z operand to have BOOL type");
  if (yType != xType)
    throw std::runtime_error(
        "NativeOpExecutioner::execInverseBroadcastBool requires both X & Y operands to have same type");
dim3 launchDims(256, 256, 1024);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::broadcast::BroadcastBool,
::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraParams,
dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES, SD_BOOL_TYPES)
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execInverseBroadcastBool failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execBroadcastInt(
sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
if (!DataTypeUtils::isZ(zType))
throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type");
if (yType != xType || zType != xType)
throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type");
dim3 launchDims(256, 256, 1024);
BUILD_SINGLE_SELECTOR(
xType, functions::broadcast::BroadcastInt,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension,
dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_INTEGER_TYPES)
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
  if (res != 0) throw cuda_exception::build("execBroadcastInt failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execBroadcastInt(sd::LaunchContext* lc, const int opNum, const void* hX,
const sd::LongType* hXShapeInfo, const void* dX,
const sd::LongType* dXShapeInfo, const void* hY,
const sd::LongType* hYShapeInfo, const void* dY,
const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo,
void* dZ, const sd::LongType* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
if (!DataTypeUtils::isZ(zType))
throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires Z operand to have INT type");
if (yType != xType || zType != xType)
throw std::runtime_error("NativeOpExecutioner::execBroadcastInt requires both X & Y operands to have same type");
dim3 launchDims;
launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock
launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid
launchDims.z = 1024; // shared memory
BUILD_SINGLE_SELECTOR(xType, functions::broadcast::BroadcastInt,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo),
SD_INTEGER_TYPES)
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
  if (res != 0) throw cuda_exception::build("execBroadcastInt failed", res);
}
void NativeOpExecutioner::execInverseBroadcastInt(
sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
  if (!DataTypeUtils::isZ(zType))
    throw std::runtime_error("NativeOpExecutioner::execInverseBroadcastInt requires Z operand to have INT type");
  if (yType != xType || zType != xType)
    throw std::runtime_error(
        "NativeOpExecutioner::execInverseBroadcastInt requires both X & Y operands to have same type");
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F3BI opNum:[%i]\n", opNum);
dim3 launchDims(256, 256, 1024);
BUILD_SINGLE_SELECTOR(
xType, functions::broadcast::BroadcastInt,
::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension,
dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_INTEGER_TYPES)
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execInverseBroadcastInt failed", res);
}
////////////////////////////////////////////////////////////////////////
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOpExecutioner::execBroadcast(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength,
sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets,
sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
dim3 launchDims(256, 256, 1024);
#ifdef SD_EXPERIMENTAL_ENABLED
BUILD_PAIRWISE_SELECTOR(
xType, yType, zType, functions::broadcast::Broadcast,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension,
dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(
xType, functions::broadcast::Broadcast,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension,
dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES);
#endif
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execBroadcast failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execBroadcast(sd::LaunchContext* lc, const int opNum, const void* hX,
const sd::LongType* hXShapeInfo, const void* dX,
const sd::LongType* dXShapeInfo, const void* hY,
const sd::LongType* hYShapeInfo, const void* dY,
const sd::LongType* dYShapeInfo, void* hZ, const sd::LongType* hZShapeInfo,
void* dZ, const sd::LongType* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
dim3 launchDims;
launchDims.y = SD_MAX_NUM_THREADS / 4; // threadsPerBlock
launchDims.x = (shape::length(hZShapeInfo) + launchDims.y - 1) / launchDims.y; // blocksPerGrid
launchDims.z = 1024; // shared memory
#ifdef SD_EXPERIMENTAL_ENABLED
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::broadcast::Broadcast,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo),
SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(
xType, functions::broadcast::Broadcast,
::execBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo), SD_COMMON_TYPES);
#endif
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execBroadcast failed", res);
}
void NativeOpExecutioner::execInverseBroadcast(
sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void const* hY, sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadOnlyShapeInfo,
sd::LongType const* tadOffsets, sd::LongType const* tadOnlyShapeInfoZ, sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hYShapeInfo)) return;
dim3 launchDims(256, 256, 1024);
#ifdef SD_EXPERIMENTAL_ENABLED
BUILD_PAIRWISE_SELECTOR(
xType, yType, zType, functions::broadcast::Broadcast,
::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension,
dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(
xType, functions::broadcast::Broadcast,
::execInverseBroadcast(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, dimension,
dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES);
#endif
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execInverseBroadcast failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduceSame(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
int* dimension, int dimensionLength) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("SF7 opNum:[%i]\n", opNum);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != xType)
throw datatype_exception::build(
"NativeOpExecutioner::execReduceSame requires both X & Z operands to have same type", xType, zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction,
::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams,
reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension),
SD_COMMON_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceSame failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduceLong(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
int* dimension, int dimensionLength) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("LF7 opNum:[%i]\n", opNum);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::INT64)
throw datatype_exception::build("NativeOpExecutioner::execReduceLong wrong Z data type", sd::DataType::INT64,
zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams,
reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension),
SD_COMMON_TYPES, SD_LONG_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceLong failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduceBool(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
int* dimension, int dimensionLength) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("BF7 opNum:[%i]\n", opNum);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::BOOL)
throw std::runtime_error("NativeOpExecutioner::execReduceBool requires Z operand to have BOOL type");
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams,
reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension),
SD_COMMON_TYPES, SD_BOOL_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceBool failed", res);
}
////////////////////////////////////////////////////////////////////////
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
void NativeOpExecutioner::execReduceFloat(sd::LaunchContext* lc, int opNum, const void* hX,
const sd::LongType* hXShapeInfo, const void* dX,
const sd::LongType* dXShapeInfo, void* extraParams, void* hZ,
const sd::LongType* hZShapeInfo, void* dZ, const sd::LongType* dZShapeInfo,
int* dimension, int dimensionLength) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F8 opNum:[%i]\n", opNum);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, 256, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction,
::execReduceXD(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams,
reductionPointer, dZ, dZShapeInfo, hZShapeInfo, dimension),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceFloat failed", res);
}
////////////////////////////////////////////////////////////////////////
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOpExecutioner::execIndexReduce(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo,
sd::LongType const* tadOffsets) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto allocationPointer = lc->getAllocationPointer();
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F2 opNum:[%i]\n", opNum);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
auto numBlocks = shape::length(hZShapeInfo);
auto tadLength = shape::length(hXShapeInfo) / numBlocks;
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, tadLength < SD_CUDA_BLOCK_SIZE ? tadLength : SD_CUDA_BLOCK_SIZE,
1024);
if (zType != sd::DataType::INT64 && zType != sd::DataType::INT32)
throw datatype_exception::build("NativeOpExecutioner::execIndexReduce requires Z operand to have INT32/INT64 type",
zType);
auto dz = reinterpret_cast<sd::LongType*>(dZ);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::indexreduce::IndexReduce,
::executeIndexReduce(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz,
dZShapeInfo, shape::rank(hZShapeInfo), dimension, dimensionLength, 1, allocationPointer,
reductionPointer, tadShapeInfo, tadOffsets),
SD_COMMON_TYPES, SD_INDEXING_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execIndexReduce failed", res);
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execIndexReduceScalar(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo) {
if (sd::Environment::getInstance().isDebug()) printf("F1 opNum:[%i]\n", opNum);
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto allocationPointer = lc->getAllocationPointer();
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024);
if (sd::Environment::getInstance().isDebugAndVerbose() && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
// FIXME: we want Z to be one of integer types
// if (!DataTypeUtils::isZ(zType))
// throw sd::datatype_exception("NativeOpExecutioner::execIndexReduceScalar requires Z operand to have one of
// integer types")
if (zType != sd::DataType::INT64 && zType != sd::DataType::INT32)
throw sd::datatype_exception::build(
"NativeOpExecutioner::execIndexReduceScalar requires Z operand to have INT32/INT64 data type", zType);
auto dz = reinterpret_cast<sd::LongType*>(dZ);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::indexreduce::IndexReduce,
::executeIndexReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, shape::rank(hXShapeInfo), extraParams, dz,
dZShapeInfo, 0, nullptr, 0, 1, allocationPointer, reductionPointer, nullptr, nullptr),
SD_COMMON_TYPES, SD_INDEXING_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execIndexReduceScalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduceFloatScalar(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceFloatFunction,
::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ,
dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceFloatScalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduceBoolScalar(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::BOOL)
throw std::runtime_error("NativeOpExecutioner::execReduceBoolScalar requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = SD_CUDA_BLOCK_SIZE;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ,
dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr),
SD_COMMON_TYPES, SD_BOOL_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceBoolScalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduceSameScalar(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != xType)
throw datatype_exception::build(
"NativeOpExecutioner::execReduceSameScalar requires both X & Z operands to have same type", xType, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = SD_CUDA_BLOCK_SIZE;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024);
BUILD_SINGLE_SELECTOR(xType, functions::reduce::ReduceSameFunction,
::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ,
dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr),
SD_COMMON_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceSameScalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduceLongScalar(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::INT64)
throw datatype_exception::build("NativeOpExecutioner::execReduceLongScalar wrong Z data type", sd::DataType::INT64,
zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = SD_CUDA_BLOCK_SIZE;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
::execReduceScalar(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ,
dZShapeInfo, hZShapeInfo, nullptr, 0, reductionPointer, nullptr),
SD_COMMON_TYPES, SD_LONG_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduceLongScalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execTransformSame(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void* extraParams,
sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) {
auto stream = lc->getCudaStream();
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo)) {
return;
}
if (xType != zType) {
throw std::runtime_error("NativeOpExecutioner::execTransformSame requires X & Z to have same type");
}
dim3 launchDims(512, 512, 16384);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformSame,
::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ,
dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr),
SD_COMMON_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execTransformSame failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execTransformBool(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void* extraParams,
sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) {
auto stream = lc->getCudaStream();
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo)) {
return;
}
  if (!DataTypeUtils::isB(zType)) {
    throw std::runtime_error("NativeOpExecutioner::execTransformBool requires Z operand to have BOOL type");
  }
dim3 launchDims(512, 512, 16384);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformBool,
::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ,
dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr),
SD_COMMON_TYPES, SD_BOOL_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execTransformBool failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execTransformAny(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void* extraParams,
sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
bool allowParallelism) {
auto stream = lc->getCudaStream();
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo)) return;
if (opNum == sd::transform::Assign && shape::order(hXShapeInfo) == shape::order(hZShapeInfo) &&
shape::order(hXShapeInfo) == 'c' && xType == zType && shape::elementWiseStride(hXShapeInfo) == 1 &&
shape::elementWiseStride(hZShapeInfo) == 1) {
cudaMemcpyAsync(dZ, dX, shape::length(hXShapeInfo) * sd::DataTypeUtils::sizeOfElement(xType),
cudaMemcpyDeviceToDevice, *stream);
} else {
dim3 launchDims(512, 512, 2048);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformAny,
::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ,
dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr),
SD_COMMON_TYPES, SD_COMMON_TYPES);
}
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execTransformAny failed", res);
}
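// execTransformAny above short-circuits the Assign op into a flat cudaMemcpyAsync whenever both
// buffers are 'c'-ordered, same-typed and have element-wise stride 1. That predicate is restated
// below as a small standalone sketch; the helper name is hypothetical and not part of the original file.
static bool canUseFlatCopyExample(const sd::LongType* hXShapeInfo, const sd::LongType* hZShapeInfo) {
  return shape::order(hXShapeInfo) == shape::order(hZShapeInfo) &&
         shape::order(hXShapeInfo) == 'c' &&
         ArrayOptions::dataType(hXShapeInfo) == ArrayOptions::dataType(hZShapeInfo) &&
         shape::elementWiseStride(hXShapeInfo) == 1 &&
         shape::elementWiseStride(hZShapeInfo) == 1;
}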
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execTransformStrict(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void* extraParams,
sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) {
auto stream = lc->getCudaStream();
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo)) {
return;
}
if (xType != zType || !DataTypeUtils::isR(xType)) {
throw datatype_exception::build(
"NativeOpExecutioner::execTransformStrict requires X & Z to have same floating point type", xType, zType);
}
dim3 launchDims(512, 512, 16384);
BUILD_SINGLE_SELECTOR(xType, functions::transform::TransformStrict,
::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ,
dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr),
SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execTransformStrict failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execTransformFloat(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void* extraParams,
sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto xRank = shape::rank(hXShapeInfo);
auto zRank = shape::rank(hZShapeInfo);
auto xType = ArrayOptions::dataType(hXShapeInfo);
auto zType = ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo)) return;
if (!DataTypeUtils::isR(zType))
throw datatype_exception::build("NativeOpExecutioner::execTransformFloat requires Z to have floating point type",
zType);
dim3 launchDims(512, 512, 2048);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::transform::TransformFloat,
::executeTransformShaped(launchDims, stream, opNum, dX, dXShapeInfo, xRank, extraParams, dZ,
dZShapeInfo, zRank, nullptr, nullptr, nullptr, nullptr),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execTransformFloat failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execSummaryStats(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
bool biasCorrected) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw sd::datatype_exception::build(
"NativeOpExecutioner::execSummaryStats requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::summarystats::SummaryStatsReduce,
::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams, dZ, dZShapeInfo,
hZShapeInfo, nullptr, nullptr, biasCorrected, reductionPointer),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execSummaryStats A failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execSummaryStats(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo,
sd::LongType const* tadOffsets, bool biasCorrected) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
dim3 launchDims = dim3(256, SD_CUDA_BLOCK_SIZE, 1024);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (!DataTypeUtils::isR(zType))
throw sd::datatype_exception::build(
"NativeOpExecutioner::execSummaryStats requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::summarystats::SummaryStatsReduce,
::execSummaryStatsReduce(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, extraParams,
dZ, dZShapeInfo, hZShapeInfo, dimension, dimensionLength, tadShapeInfo,
tadOffsets, biasCorrected, reductionPointer),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execSummaryStats B failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduce3(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo,
void const* dX, sd::LongType const* dXShapeInfo, void* extraParams,
void const* hY, sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto reductionPointer = lc->getReductionPointer();
auto allocationPointer = lc->getAllocationPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
auto blockWidth = SD_CUDA_BLOCK_SIZE;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(shape::length(hXShapeInfo), blockWidth);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024);
if (xType != yType)
throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3 requires Y operand to have X type", xType,
yType);
if (!DataTypeUtils::isR(zType))
throw sd::datatype_exception::build(
"NativeOpExecutioner::execReduce3 requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3,
::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ,
dZShapeInfo, allocationPointer, reductionPointer, nullptr),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduce3 failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduce3(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo,
void const* dX, sd::LongType const* dXShapeInfo, void* extraParams,
void const* hY, sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength,
sd::LongType const* tadOnlyShapeInfo, sd::LongType const* tadOffsets,
sd::LongType const* yTadOnlyShapeInfo, sd::LongType const* yTadOffsets) {
if (shape::isScalar(hZShapeInfo)) {
NativeOpExecutioner::execReduce3(lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY,
dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo);
return;
}
auto stream = lc->getCudaStream();
auto allocationPointer = lc->getAllocationPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType)
throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3 requires Y operand to have X type", xType,
yType);
if (!DataTypeUtils::isR(zType))
throw sd::datatype_exception::build(
"NativeOpExecutioner::execReduce3 requires Z operand to have floating point data type", zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::reduce3::Reduce3,
::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension,
dimensionLength, 1, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduce3 B failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduce3Scalar(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo) {
auto stream = lc->getCudaStream();
auto allocationPointer = lc->getAllocationPointer();
auto reductionPointer = lc->getReductionPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = SD_CUDA_BLOCK_SIZE;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, blockWidth, 1024);
if (xType != yType)
throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3Scalar requires Y operand to have X type",
xType, yType);
if (!DataTypeUtils::isR(zType))
throw sd::datatype_exception::build(
"NativeOpExecutioner::execReduce3Scalar requires Z operand to have floating point data type", zType);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce3::Reduce3,
::execScalar(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ,
dZShapeInfo, allocationPointer, reductionPointer, nullptr),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduce3Scalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execScalarBool(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar,
sd::LongType const* hScalarShapeInfo, void const* dScalar,
sd::LongType const* dScalarShapeInfo, void* extraParams,
bool allowParallelism) {
auto stream = lc->getCudaStream();
dim3 launchDims = dim3(256, 512, 8192);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return;
if (xType != yType) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires X & Y to have same type");
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOpExecutioner::execScalarBool requires Z operand to have BOOL type");
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::scalar::ScalarBoolTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams),
SD_COMMON_TYPES, SD_BOOL_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execScalarBool failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execScalarBool(
sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars,
sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo,
sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
dim3 launchDims(256, 512, 8192);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return;
if (xType != yType) throw std::runtime_error("NativeOpExecutioner::execScalarBool requires X & Y to have same type");
if (!DataTypeUtils::isB(zType))
throw std::runtime_error("NativeOpExecutioner::execScalarBool requires Z operand to have BOOL type");
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::scalar::ScalarBoolTransform,
::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams,
dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES, SD_BOOL_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execScalarBool B failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execScalarInt(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void const* hScalar,
sd::LongType const* hScalarShapeInfo, void const* dScalar,
sd::LongType const* dScalarShapeInfo, void* extraParams,
bool allowParallelism) {
auto stream = lc->getCudaStream();
dim3 launchDims = dim3(256, 512, 8192);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return;
  if (xType != yType || zType != xType)
    throw std::runtime_error("NativeOpExecutioner::execScalarInt requires X, Y and Z to have the same type");
if (!DataTypeUtils::isZ(zType))
throw std::runtime_error("NativeOpExecutioner::execScalarInt requires Z operand to have INT type");
BUILD_SINGLE_SELECTOR(
xType, functions::scalar::ScalarIntTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalar, extraParams),
SD_INTEGER_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execScalarInt failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execScalarInt(
sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void* hZ, sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars,
sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo,
sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
dim3 launchDims(256, 512, 8192);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return;
  if (xType != yType || zType != xType)
    throw std::runtime_error("NativeOpExecutioner::execScalarInt requires X, Y and Z to have the same type");
if (!DataTypeUtils::isZ(zType))
throw std::runtime_error("NativeOpExecutioner::execScalarInt requires Z operand to have INT type");
BUILD_SINGLE_SELECTOR(
xType, functions::scalar::ScalarIntTransform,
::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams,
dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
SD_INTEGER_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execScalarInt B failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo,
void const* dX, sd::LongType const* dXShapeInfo, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
void const* hScalar, sd::LongType const* hScalarShapeInfo, void const* dScalar,
sd::LongType const* dScalarShapeInfo, void* extraParams, bool allowParallelism) {
auto stream = lc->getCudaStream();
dim3 launchDims(256, 512, 8192);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return;
#ifdef SD_EXPERIMENTAL_ENABLED
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ, dZShapeInfo,
hZShapeInfo, dScalar, extraParams),
SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform,
::executeCudaShaped(launchDims, stream, opNum, dX, dXShapeInfo, hXShapeInfo, dZ,
dZShapeInfo, hZShapeInfo, dScalar, extraParams),
SD_COMMON_TYPES);
#endif
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execScalar failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execScalar(sd::LaunchContext* lc, int opNum, void const* hX, sd::LongType const* hXShapeInfo,
void const* dX, sd::LongType const* dXShapeInfo, void* extraParams, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
void const* hScalars, sd::LongType const* hScalarShapeInfo, void const* dScalars,
sd::LongType const* dScalarShapeInfo, int* dimension, int dimensionLength,
sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) {
auto stream = lc->getCudaStream();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (shape::isEmpty(hXShapeInfo) || shape::isEmpty(hScalarShapeInfo)) return;
dim3 launchDims(256, 256, 16384);
#ifdef SD_EXPERIMENTAL_ENABLED
BUILD_PAIRWISE_SELECTOR(
xType, yType, zType, functions::scalar::ScalarTransform,
::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams,
dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(
xType, functions::scalar::ScalarTransform,
::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams,
dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
SD_COMMON_TYPES);
#endif
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execScalar B failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void* hZ,
sd::LongType const* hZShapeInfo, void* dZ, sd::LongType const* dZShapeInfo,
void* extraArguments) {
auto stream = lc->getCudaStream();
auto sizeOf = sizeof(sd::graph::RandomGenerator);
sd::Pointer stateDevice;
cudaError_t res = cudaMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf);
checkCudaErrors(cudaStreamSynchronize(*stream));
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
dim3 launchDims = dim3(512, 512, 32768);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost);
// functions::random::RandomFunction<float>::executeCudaSingle(launchDims, extraPointers, opNum, stateHost, dZ,
// dZShapeInfo, extraArguments),
BUILD_SINGLE_SELECTOR(zType, functions::random::RandomFunction,
::executeCudaSingle(launchDims, stream, opNum, stateDevice, dZ, dZShapeInfo, extraArguments),
SD_FLOAT_TYPES);
res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execRandom X failed", res);
cudaFree(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo,
void* hZ, sd::LongType const* hZShapeInfo, void* dZ,
sd::LongType const* dZShapeInfo, void* extraArguments) {
auto stream = lc->getCudaStream();
auto sizeOf = sizeof(sd::graph::RandomGenerator);
sd::Pointer stateDevice;
cudaError_t res = cudaMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf);
checkCudaErrors(cudaStreamSynchronize(*stream));
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost);
dim3 launchDims = dim3(512, 512, 32768);
auto xType = sd::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaDouble(launchDims, extraPointers, opNum, stateHost, dX,
// dXShapeInfo, dZ, dZShapeInfo, extraArguments);
BUILD_SINGLE_SELECTOR(
xType, functions::random::RandomFunction,
::executeCudaDouble(launchDims, stream, opNum, stateDevice, dX, dXShapeInfo, dZ, dZShapeInfo, extraArguments),
SD_FLOAT_TYPES);
res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execRandom XY failed", res);
cudaFree(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execRandom(sd::LaunchContext* lc, int opNum, sd::Pointer stateHost, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX, sd::LongType const* dXShapeInfo,
void const* hY, sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, void* extraArguments) {
auto stream = lc->getCudaStream();
auto sizeOf = sizeof(sd::graph::RandomGenerator);
sd::Pointer stateDevice;
cudaError_t res = cudaMalloc(reinterpret_cast<void**>(&stateDevice), sizeOf);
checkCudaErrors(cudaStreamSynchronize(*stream));
checkCudaErrors(cudaMemcpyAsync(stateDevice, stateHost, sizeOf, cudaMemcpyHostToDevice, *stream));
auto rng = reinterpret_cast<sd::graph::RandomGenerator*>(stateHost);
dim3 launchDims = dim3(512, 512, 32768);
auto xType = sd::ArrayOptions::dataType(hZShapeInfo);
// functions::random::RandomFunction<float>::executeCudaTriple(launchDims, extraPointers, opNum, stateHost, dX,
// dXShapeInfo, dY, dYShapeInfo, dZ, dZShapeInfo, extraArguments);
BUILD_SINGLE_SELECTOR(xType, functions::random::RandomFunction,
::executeCudaTriple(launchDims, stream, opNum, stateDevice, dX, dXShapeInfo, dY, dYShapeInfo,
dZ, dZShapeInfo, extraArguments),
SD_FLOAT_TYPES);
res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execRandom XYZ failed", res);
cudaFree(stateDevice);
rng->rewindH(shape::length(hZShapeInfo));
}
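////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not library code: the three execRandom overloads above
// share one state round trip -- allocate a device copy of the RandomGenerator
// state, cudaMemcpyAsync it onto the launch stream, run the kernel against the
// device copy, synchronize, free the copy, and finally rewind the host
// generator. A generic restatement of that pattern (withDeviceStateSketch and
// RunKernel are placeholders, not part of the library):
template <typename State, typename RunKernel>
static void withDeviceStateSketch(cudaStream_t stream, State* hostState, RunKernel runKernel) {
  State* deviceState = nullptr;
  checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&deviceState), sizeof(State)));
  checkCudaErrors(cudaMemcpyAsync(deviceState, hostState, sizeof(State), cudaMemcpyHostToDevice, stream));
  runKernel(deviceState);                           // kernel launch on the same stream
  checkCudaErrors(cudaStreamSynchronize(stream));   // ensure the kernel no longer touches the state
  checkCudaErrors(cudaFree(deviceState));
}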
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduce3All(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParamsVals, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength,
sd::LongType const* xTadShapeInfo, sd::LongType const* xOffsets,
sd::LongType const* yTadShapeInfo, sd::LongType const* yOffsets) {
auto stream = lc->getCudaStream();
auto allocationPointer = lc->getAllocationPointer();
auto reductionPointer = lc->getReductionPointer();
if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D119 opNum:[%i]\n", opNum);
dim3 launchDims(shape::length(hZShapeInfo), SD_CUDA_BLOCK_SIZE / 2, 1024);
if (sd::Environment::getInstance().isVerbose() && launchDims.x == 1) printf("AD119 opNum:[%i]\n", opNum);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType)
throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3All both operands must have same data type",
xType, yType);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::reduce3::Reduce3,
::execAll(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParamsVals, dZ, dZShapeInfo,
dimension, dimensionLength, 1, allocationPointer, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduce3All failed", res);
}
////////////////////////////////////////////////////////////////////////
void NativeOpExecutioner::execReduce3TAD(sd::LaunchContext* lc, int opNum, void const* hX,
sd::LongType const* hXShapeInfo, void const* dX,
sd::LongType const* dXShapeInfo, void* extraParams, void const* hY,
sd::LongType const* hYShapeInfo, void const* dY,
sd::LongType const* dYShapeInfo, void* hZ, sd::LongType const* hZShapeInfo,
void* dZ, sd::LongType const* dZShapeInfo, int* dimension, int dimensionLength,
sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
sd::LongType const* yTadShapeInfo, sd::LongType const* yTadOffsets) {
if (shape::isScalar(hZShapeInfo)) {
NativeOpExecutioner::execReduce3(lc, opNum, hX, hXShapeInfo, dX, dXShapeInfo, extraParams, hY, hYShapeInfo, dY,
dYShapeInfo, hZ, hZShapeInfo, dZ, dZShapeInfo);
return;
}
auto stream = lc->getCudaStream();
auto allocationPointer = lc->getAllocationPointer();
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (xType != yType)
throw sd::datatype_exception::build("NativeOpExecutioner::execReduce3TAD requires Y operand to have X type", xType,
yType);
if (!DataTypeUtils::isR(zType))
throw sd::datatype_exception::build(
"NativeOpExecutioner::execReduce3TAD requires Z operand to have floating point data type", zType);
auto numBlocks = shape::length(hZShapeInfo);
dim3 launchDims(numBlocks == 0 ? 1 : numBlocks, SD_CUDA_BLOCK_SIZE, 1024);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::reduce3::Reduce3,
::exec(launchDims, stream, opNum, dX, dXShapeInfo, dY, dYShapeInfo, extraParams, dZ, dZShapeInfo, dimension,
dimensionLength, 1, allocationPointer, tadShapeInfo, tadOffsets, yTadShapeInfo, yTadOffsets),
SD_COMMON_TYPES, SD_FLOAT_TYPES);
// TODO: remove after the release
auto res = cudaStreamSynchronize(*stream);
if (res != 0) throw cuda_exception::build("execReduce3TAD failed", res);
}
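////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not library code: both reduce3 paths above launch with
// grid.x equal to shape::length(hZShapeInfo), i.e. one block per output
// element (one TAD pair), and reduce inside the block through shared memory.
// A stripped-down float analogue of that layout, assuming contiguous
// fixed-length TADs, a power-of-two block size, a sum-of-products op, and a
// launch with blockDim.x * sizeof(float) bytes of dynamic shared memory:
__global__ void reduce3TadSketch(const float* x, const float* y, float* z, sd::LongType tadLength) {
  extern __shared__ float sharedAcc[];
  const float* xTad = x + (sd::LongType)blockIdx.x * tadLength;
  const float* yTad = y + (sd::LongType)blockIdx.x * tadLength;
  float acc = 0.f;
  for (sd::LongType i = threadIdx.x; i < tadLength; i += blockDim.x)
    acc += xTad[i] * yTad[i];  // stand-in for the actual reduce3 op
  sharedAcc[threadIdx.x] = acc;
  __syncthreads();
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {  // in-block tree reduction
    if (threadIdx.x < s) sharedAcc[threadIdx.x] += sharedAcc[threadIdx.x + s];
    __syncthreads();
  }
  if (threadIdx.x == 0) z[blockIdx.x] = sharedAcc[0];
}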
//////////////////////////////////////////////////////////////////////////////
// ---------------------------- new source file ------------------------------
//////////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <iostream>
#include <fstream>
#include "cuda_kernel_utils.h"
#define EXECUTION_BLOCK_MLP_SIZE 128
////////////////////////////////////////////////////////////////////////////////// GPU
__device__ void evaluateMLP(
const int pThreadId,
const int pOffset,
const int pTotalBlocks,
const int pNumFeatures,
const int pNumOutFeatures,
const int pNumNeuronsOut,
const int pFeatureIndex,
const int pOutFeatureIndex,
const float pNumSamples,
const float pCurrentPDF,
const float pCurrPointCoords[3],
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pFeatures,
float* __restrict__ pTmpVector1,
float* __restrict__ pTmpVector2,
float* __restrict__ pOutFeatures)
{
//Compute output first layer.
pTmpVector1[pThreadId] = max(pCurrPointCoords[0]*pWeightsHidd1[pThreadId*3] +
pCurrPointCoords[1]*pWeightsHidd1[pThreadId*3 + 1] +
pCurrPointCoords[2]*pWeightsHidd1[pThreadId*3 + 2] +
pBiasHidd1[pThreadId], 0.0);
__syncthreads();
//Compute output second layer.
float auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
auxResult += pTmpVector1[j]*pWeightsHidd2[pThreadId*BLOCK_MLP_SIZE + j];
}
pTmpVector2[pThreadId] = max(auxResult + pBiasHidd2[pThreadId], 0.0);
__syncthreads();
//Compute output layer.
if((pOffset+pThreadId) < pNumNeuronsOut){
auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
auxResult += pTmpVector2[j]*pWeightsOut[pThreadId*BLOCK_MLP_SIZE + j];
}
auxResult = auxResult + pBiasOut[pThreadId];
int currInFeatureIndex = (pOffset+pThreadId)%pNumFeatures;
int currOutFeatureIndex = (pOffset+pThreadId)/pNumFeatures;
atomicAdd(&pOutFeatures[pOutFeatureIndex+currOutFeatureIndex],
(pFeatures[pFeatureIndex+currInFeatureIndex]*auxResult)/(pCurrentPDF*pNumSamples));
}
}
/**
* Method to evaluate the MLP.
 * @param pAvg Boolean that indicates whether the result is divided by the number of neighbors.
* @param pScaleInv Boolean that indicates if the radius is defined relative to the bounding box.
* @param pNumPoints Number of points.
* @param pNumNeighbors Number of neighboring points.
* @param pNumFeatures Number of input features per point.
* @param pNumOutFeatures Number of output features per point.
* @param pRadius Radius of the convolution.
* @param pWeightsHidd1 Weights of the neurons in the first hidden layer.
* @param pWeightsHidd2 Weights of the neurons in the second hidden layer.
* @param pWeightsOut Weights of the neurons in the output layer.
* @param pBiasHidd1 Biases of the neurons in the first hidden layer.
* @param pBiasHidd2 Biases of the neurons in the second hidden layer.
* @param pBiasOut Biases of the neurons in the output layer.
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pFeatures List of input features.
* @param pStartIndexs List of start indices for each point.
 * @param pNeigbors List of neighbors of each point.
* @param pPDFs List of the pdf values.
* @param pOutFeatures Output parameter with the list of output features.
*/
__global__ void evaluateMLPKernel(
const bool pAvg,
const bool pScaleInv,
const int pNumPoints,
const int pNumNeighbors,
const int pNumFeatures,
const int pNumOutFeatures,
const float pRadius,
const float* __restrict__ pAABBMin,
const float* __restrict__ pAABBMax,
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pSamples,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const float* __restrict__ pFeatures,
const int* __restrict__ pStartIndexs,
const int* __restrict__ pNeigbors,
const float* __restrict__ pPDFs,
float* __restrict__ pOutFeatures)
{
extern __shared__ float mlpIntermediateRes[];
int neuronsOut = pNumOutFeatures*pNumFeatures;
int numBlocksXNeigh = neuronsOut/BLOCK_MLP_SIZE;
numBlocksXNeigh += (neuronsOut%BLOCK_MLP_SIZE != 0)?1:0;
unsigned long long int currentIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
int currentNeighborIndex = currentIndex/(numBlocksXNeigh*BLOCK_MLP_SIZE);
int offset = currentIndex%(numBlocksXNeigh*BLOCK_MLP_SIZE);
offset = offset - offset%BLOCK_MLP_SIZE;
int threadId = threadIdx.x%BLOCK_MLP_SIZE;
int threadOffset = threadIdx.x - threadId;
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
int currBatchId = pBatchIds[currentPointIndex];
float maxAabbSize = max(max(
pAABBMax[currBatchId*3] - pAABBMin[currBatchId*3],
pAABBMax[currBatchId*3+1] - pAABBMin[currBatchId*3+1]),
pAABBMax[currBatchId*3+2] - pAABBMin[currBatchId*3+2]);
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float currPointCoords[3] = {
(pPoints[currentPointIndex*3] - pSamples[centralPointIndex*3])/scaledRadius,
(pPoints[currentPointIndex*3+1] - pSamples[centralPointIndex*3+1])/scaledRadius,
(pPoints[currentPointIndex*3+2] - pSamples[centralPointIndex*3+2])/scaledRadius};
float currPDF = pPDFs[currentNeighborIndex];
int initIter = pStartIndexs[centralPointIndex];
int endIter = (centralPointIndex < pNumPoints-1)?pStartIndexs[centralPointIndex+1]:pNumNeighbors;
float numNeighbors = (pAvg)?(float)(endIter-initIter):1.0;
int featureIndex = currentPointIndex*pNumFeatures;
int outFeatureIndex = centralPointIndex*pNumOutFeatures;
float* temporalMemory1 = &mlpIntermediateRes[threadOffset];
float* temporalMemory2 = &mlpIntermediateRes[EXECUTION_BLOCK_MLP_SIZE + threadOffset];
evaluateMLP(threadId, offset, numBlocksXNeigh, pNumFeatures, pNumOutFeatures, neuronsOut,
featureIndex, outFeatureIndex, numNeighbors, currPDF, currPointCoords,
&pWeightsHidd1[offset*3], &pWeightsHidd2[offset*BLOCK_MLP_SIZE], &pWeightsOut[offset*BLOCK_MLP_SIZE],
&pBiasHidd1[offset], &pBiasHidd2[offset], &pBiasOut[offset],
pFeatures, temporalMemory1, temporalMemory2, pOutFeatures);
}
}
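/**
 * Illustrative note, not part of the library: every neighbor is assigned
 * numBlocksXNeigh logical blocks of BLOCK_MLP_SIZE consecutive threads, so the
 * flat thread index splits into (neighbor, neuron-block offset, lane). The
 * host-side restatement below is handy for unit-testing that mapping; it
 * matches the kernels in this file as long as EXECUTION_BLOCK_MLP_SIZE is a
 * multiple of BLOCK_MLP_SIZE (pMlpBlockSize stands in for BLOCK_MLP_SIZE).
 */
struct MLPThreadCoords { int neighbor; int offset; int lane; };

inline MLPThreadCoords decomposeMLPIndex(unsigned long long int pFlatIndex, int pNumBlocksXNeigh, int pMlpBlockSize)
{
    unsigned long long int threadsPerNeighbor = (unsigned long long int)pNumBlocksXNeigh*pMlpBlockSize;
    MLPThreadCoords coords;
    coords.neighbor = (int)(pFlatIndex/threadsPerNeighbor);
    int within = (int)(pFlatIndex%threadsPerNeighbor);
    coords.offset = within - within%pMlpBlockSize; //First neuron handled by this logical block.
    coords.lane = within%pMlpBlockSize;            //Neuron within the logical block.
    return coords;
}
//Example: with pMlpBlockSize = 8 and pNumBlocksXNeigh = 3, pFlatIndex = 27
//maps to neighbor 1, offset 0, lane 3, and pFlatIndex = 47 maps to
//neighbor 1, offset 16, lane 7.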
__device__ void evaluateMLPNoComb(
const int pThreadId,
const int pOffset,
const int pTotalBlocks,
const int pNumFeatures,
const int pFeatureIndex,
const int pOutFeatureIndex,
const float pNumSamples,
const float pCurrentPDF,
const float pCurrPointCoords[3],
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pFeatures,
float* __restrict__ pTmpVector1,
float* __restrict__ pTmpVector2,
float* __restrict__ pOutFeatures)
{
//Compute output first layer.
pTmpVector1[pThreadId] = max(pCurrPointCoords[0]*pWeightsHidd1[pThreadId*3] +
pCurrPointCoords[1]*pWeightsHidd1[pThreadId*3 + 1] +
pCurrPointCoords[2]*pWeightsHidd1[pThreadId*3 + 2] +
pBiasHidd1[pThreadId], 0.0);
__syncthreads();
//Compute output second layer.
float auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
auxResult += pTmpVector1[j]*pWeightsHidd2[pThreadId*BLOCK_MLP_SIZE + j];
}
pTmpVector2[pThreadId] = max(auxResult + pBiasHidd2[pThreadId], 0.0);
__syncthreads();
//Compute output layer.
if((pOffset+pThreadId) < pNumFeatures){
auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
auxResult += pTmpVector2[j]*pWeightsOut[pThreadId*BLOCK_MLP_SIZE + j];
}
auxResult = auxResult + pBiasOut[pThreadId];
int currFeatureIndex = (pOffset+pThreadId)%pNumFeatures;
atomicAdd(&pOutFeatures[pOutFeatureIndex+currFeatureIndex],
(pFeatures[pFeatureIndex+currFeatureIndex]*auxResult)/(pCurrentPDF*pNumSamples));
}
}
/**
* Method to evaluate the MLP.
 * @param pAvg Boolean that indicates whether the result is divided by the number of neighbors.
* @param pScaleInv Boolean that indicates if the radius is defined relative to the bounding box.
* @param pNumPoints Number of points.
* @param pNumNeighbors Number of neighboring points.
* @param pNumFeatures Number of input features per point.
* @param pRadius Radius of the convolution.
* @param pWeightsHidd1 Weights of the neurons in the first hidden layer.
* @param pWeightsHidd2 Weights of the neurons in the second hidden layer.
* @param pWeightsOut Weights of the neurons in the output layer.
* @param pBiasHidd1 Biases of the neurons in the first hidden layer.
* @param pBiasHidd2 Biases of the neurons in the second hidden layer.
* @param pBiasOut Biases of the neurons in the output layer.
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pFeatures List of input features.
* @param pStartIndexs List of start indices for each point.
 * @param pNeigbors List of neighbors of each point.
* @param pPDFs List of the pdf values.
* @param pOutFeatures Output parameter with the list of output features.
*/
__global__ void evaluateMLPNoCombinKernel(
const bool pAvg,
const bool pScaleInv,
const int pNumPoints,
const int pNumNeighbors,
const int pNumFeatures,
const float pRadius,
const float* __restrict__ pAABBMin,
const float* __restrict__ pAABBMax,
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pSamples,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const float* __restrict__ pFeatures,
const int* __restrict__ pStartIndexs,
const int* __restrict__ pNeigbors,
const float* __restrict__ pPDFs,
float* __restrict__ pOutFeatures)
{
extern __shared__ float mlpIntermediateRes[];
int neuronsOut = pNumFeatures;
int numBlocksXNeigh = neuronsOut/BLOCK_MLP_SIZE;
numBlocksXNeigh += (neuronsOut%BLOCK_MLP_SIZE != 0)?1:0;
unsigned long long int currentIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
int currentNeighborIndex = currentIndex/(numBlocksXNeigh*BLOCK_MLP_SIZE);
int offset = currentIndex%(numBlocksXNeigh*BLOCK_MLP_SIZE);
offset = offset - offset%BLOCK_MLP_SIZE;
int threadId = threadIdx.x%BLOCK_MLP_SIZE;
int threadOffset = threadIdx.x - threadId;
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
int currBatchId = pBatchIds[currentPointIndex];
float maxAabbSize = max(max(
pAABBMax[currBatchId*3] - pAABBMin[currBatchId*3],
pAABBMax[currBatchId*3+1] - pAABBMin[currBatchId*3+1]),
pAABBMax[currBatchId*3+2] - pAABBMin[currBatchId*3+2]);
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float currPointCoords[3] = {
(pPoints[currentPointIndex*3] - pSamples[centralPointIndex*3])/scaledRadius,
(pPoints[currentPointIndex*3+1] - pSamples[centralPointIndex*3+1])/scaledRadius,
(pPoints[currentPointIndex*3+2] - pSamples[centralPointIndex*3+2])/scaledRadius};
float currPDF = pPDFs[currentNeighborIndex];
int initIter = pStartIndexs[centralPointIndex];
int endIter = (centralPointIndex < pNumPoints-1)?pStartIndexs[centralPointIndex+1]:pNumNeighbors;
float numNeighbors = (pAvg)?(float)(endIter-initIter):1.0;
int featureIndex = currentPointIndex*pNumFeatures;
int outFeatureIndex = centralPointIndex*pNumFeatures;
float* temporalMemory1 = &mlpIntermediateRes[threadOffset];
float* temporalMemory2 = &mlpIntermediateRes[EXECUTION_BLOCK_MLP_SIZE + threadOffset];
evaluateMLPNoComb(threadId, offset, numBlocksXNeigh, pNumFeatures,
featureIndex, outFeatureIndex, numNeighbors, currPDF, currPointCoords,
&pWeightsHidd1[offset*3], &pWeightsHidd2[offset*BLOCK_MLP_SIZE], &pWeightsOut[offset*BLOCK_MLP_SIZE],
&pBiasHidd1[offset], &pBiasHidd2[offset], &pBiasOut[offset],
pFeatures, temporalMemory1, temporalMemory2, pOutFeatures);
}
}
__device__ void computedconvj_d(
const int pThreadId,
const int pOffset,
const int pTotalBlocks,
const int pNumNeuronsOut,
const int pNumFeatures,
const int pPointIndex,
const int pCentralPointIndex,
const int pInFeatureIndex,
const int pOutFeatureIndex,
const float pNumSamples,
const float pCurrentPDF,
const float pCurrPointCoords[3],
const float pRadius,
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pInFeatures,
const float* __restrict__ pOutFeatureGradients,
float* __restrict__ pTmpVector1,
float* __restrict__ pTmpVector2,
float* __restrict__ pTmpVector3,
float* __restrict__ pTmpVector4,
float* __restrict__ pWeightsHidd1Grad,
float* __restrict__ pWeightsHidd2Grad,
float* __restrict__ pWeightsOutGrad,
float* __restrict__ pBiasHidd1Grad,
float* __restrict__ pBiasHidd2Grad,
float* __restrict__ pBiasOutGrad,
float* __restrict__ pFeaturesGrads)
{
//Compute output first layer.
pTmpVector1[pThreadId] = pCurrPointCoords[0]*pWeightsHidd1[pThreadId*3] +
pCurrPointCoords[1]*pWeightsHidd1[pThreadId*3 + 1] +
pCurrPointCoords[2]*pWeightsHidd1[pThreadId*3 + 2] +
pBiasHidd1[pThreadId];
__syncthreads();
//Compute output second layer.
float auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
auxResult += max(pTmpVector1[j], 0.0)*pWeightsHidd2[pThreadId*BLOCK_MLP_SIZE + j];
}
    pTmpVector2[pThreadId] = auxResult + pBiasHidd2[pThreadId];
__syncthreads();
//Gradients computation
//Gradients of the output layer parameters w and b and in features.
if((pOffset+pThreadId) < pNumNeuronsOut){
int currInFeatureIndex = (pOffset+pThreadId)%pNumFeatures;
int currOutFeatureIndex = (pOffset+pThreadId)/pNumFeatures;
float currFeature = pInFeatures[pInFeatureIndex + currInFeatureIndex];
float outGradient = pOutFeatureGradients[pOutFeatureIndex+currOutFeatureIndex];
float commonFactor = (currFeature*outGradient)/(pCurrentPDF*pNumSamples);
auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
atomicAdd(&pWeightsOutGrad[pThreadId*BLOCK_MLP_SIZE + j], commonFactor*max(pTmpVector2[j], 0.0));
auxResult += max(pTmpVector2[j], 0.0)*pWeightsOut[pThreadId*BLOCK_MLP_SIZE + j];
}
atomicAdd(&pBiasOutGrad[pThreadId], commonFactor);
//In features gradient update.
auxResult = auxResult + pBiasOut[pThreadId];
atomicAdd(&pFeaturesGrads[pInFeatureIndex + currInFeatureIndex], outGradient*auxResult/(pCurrentPDF*pNumSamples));
}
    //Gradients of the second hidden layer.
auxResult = 0.0;
float commonFactor = (pTmpVector2[pThreadId] >= 0.0)?1.0:0.0;
int numOutsBlock = min(pNumNeuronsOut - pOffset, BLOCK_MLP_SIZE);
for(int i = 0; i < numOutsBlock; ++i)
{
int currInFeatureIndex = (i+pOffset)%pNumFeatures;
int currOutFeatureIndex = (i+pOffset)/pNumFeatures;
float currFeature = pInFeatures[pInFeatureIndex + currInFeatureIndex];
float outGradient = pOutFeatureGradients[pOutFeatureIndex+currOutFeatureIndex];
auxResult += outGradient*currFeature*pWeightsOut[pThreadId + i*BLOCK_MLP_SIZE];
}
pTmpVector3[pThreadId] = (commonFactor*auxResult)/(pCurrentPDF*pNumSamples);
__syncthreads();
    //Gradients of the second hidden layer parameters w and b.
commonFactor = pTmpVector3[pThreadId];
for(int i = 0; i < BLOCK_MLP_SIZE; ++i)
{
atomicAdd(&pWeightsHidd2Grad[pThreadId*BLOCK_MLP_SIZE + i],
commonFactor*max(pTmpVector1[i], 0.0));
}
atomicAdd(&pBiasHidd2Grad[pThreadId], commonFactor);
    //Gradients of the first hidden layer.
auxResult = 0.0;
commonFactor = (pTmpVector1[pThreadId] >= 0.0)?1.0:0.0;
for(int i = 0; i < BLOCK_MLP_SIZE; ++i)
{
        auxResult += pTmpVector3[i]*pWeightsHidd2[pThreadId + i*BLOCK_MLP_SIZE];
}
pTmpVector4[pThreadId] = commonFactor*auxResult;
__syncthreads();
    //Gradients of the first hidden layer parameters w and b.
commonFactor = pTmpVector4[pThreadId];
for(int i = 0; i < 3; ++i)
{
atomicAdd(&pWeightsHidd1Grad[pThreadId*3 + i], commonFactor*pCurrPointCoords[i]);
}
atomicAdd(&pBiasHidd1Grad[pThreadId], commonFactor);
}
/**
 * Method to compute the gradients of the MLP convolution.
 * @param pAvg Boolean that indicates whether the result is divided by the number of neighbors.
* @param pScaleInv Boolean that indicates if the radius is defined relative to the bounding box.
* @param pNumPoints Number of points.
* @param pNumNeighbors Number of neighboring points.
* @param pNumFeatures Number of input features per point.
* @param pNumOutFeatures Number of output features per point.
* @param pRadius Radius of the convolution.
* @param pWeightsHidd1 Weights of the neurons in the first hidden layer.
* @param pWeightsHidd2 Weights of the neurons in the second hidden layer.
* @param pWeightsOut Weights of the neurons in the output layer.
* @param pBiasHidd1 Biases of the neurons in the first hidden layer.
* @param pBiasHidd2 Biases of the neurons in the second hidden layer.
* @param pBiasOut Biases of the neurons in the output layer.
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pFeatures List of input features.
* @param pOutFeaturesGrads Gradients of the output convolutions.
* @param pStartIndexs List of start indices for each point.
 * @param pNeigbors List of neighbors of each point.
* @param pPDFs List of the pdf values.
* @param pWeightsHidd1Grads Output parameter with the list of gradients for the weights of the first hidden layer.
* @param pWeightsHidd2Grads Output parameter with the list of gradients for the weights of the second hidden layer.
 * @param pWeightsOutGrads Output parameter with the list of gradients for the weights of the output layer.
* @param pBiasHidd1Grads Output parameter with the list of gradients for the biases of the first hidden layer.
* @param pBiasHidd2Grads Output parameter with the list of gradients for the biases of the second hidden layer.
* @param pBiasOutGrads Output parameter with the list of gradients for the biases of the output layer.
 * @param pFeaturesGrads Output parameter with the list of gradients for the input features.
*/
__global__ void computedconvj_dKernel(
const bool pAvg,
const bool pScaleInv,
const int pNumPoints,
const int pNumNeighbors,
const int pNumFeatures,
const int pNumOutFeatures,
const float pRadius,
const float* __restrict__ pAABBMin,
const float* __restrict__ pAABBMax,
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pSamples,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const float* __restrict__ pFeatures,
const float* __restrict__ pOutFeaturesGrads,
const int* __restrict__ pStartIndexs,
const int* __restrict__ pNeigbors,
const float* __restrict__ pPDFs,
float* __restrict__ pWeightsHidd1Grads,
float* __restrict__ pWeightsHidd2Grads,
float* __restrict__ pWeightsOutGrads,
float* __restrict__ pBiasHidd1Grads,
float* __restrict__ pBiasHidd2Grads,
float* __restrict__ pBiasOutGrads,
float* __restrict__ pFeaturesGrads)
{
extern __shared__ float mlpdconvjIntermediateRes[];
int neuronsOut = pNumOutFeatures*pNumFeatures;
int numBlocksXNeigh = neuronsOut/BLOCK_MLP_SIZE;
numBlocksXNeigh += (neuronsOut%BLOCK_MLP_SIZE != 0)?1:0;
unsigned long long int currentIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
int currentNeighborIndex = currentIndex/(numBlocksXNeigh*BLOCK_MLP_SIZE);
int offset = currentIndex%(numBlocksXNeigh*BLOCK_MLP_SIZE);
offset = offset - offset%BLOCK_MLP_SIZE;
int threadId = threadIdx.x%BLOCK_MLP_SIZE;
int threadOffset = threadIdx.x - threadId;
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
int currBatchId = pBatchIds[currentPointIndex];
float maxAabbSize = max(max(
pAABBMax[currBatchId*3] - pAABBMin[currBatchId*3],
pAABBMax[currBatchId*3+1] - pAABBMin[currBatchId*3+1]),
pAABBMax[currBatchId*3+2] - pAABBMin[currBatchId*3+2]);
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float currPointCoords[3] = {
(pPoints[currentPointIndex*3] - pSamples[centralPointIndex*3])/scaledRadius,
(pPoints[currentPointIndex*3+1] - pSamples[centralPointIndex*3+1])/scaledRadius,
(pPoints[currentPointIndex*3+2] - pSamples[centralPointIndex*3+2])/scaledRadius};
float currPDF = pPDFs[currentNeighborIndex];
int initIter = pStartIndexs[centralPointIndex];
int endIter = (centralPointIndex < pNumPoints-1)?pStartIndexs[centralPointIndex+1]:pNumNeighbors;
float numNeighbors = (pAvg)?(float)(endIter-initIter):1.0;
int featureIndex = currentPointIndex*pNumFeatures;
int outFeatureIndex = centralPointIndex*pNumOutFeatures;
float* temporalMemory1 = &(mlpdconvjIntermediateRes[threadOffset]);
float* temporalMemory2 = &(mlpdconvjIntermediateRes[EXECUTION_BLOCK_MLP_SIZE + threadOffset]);
float* temporalMemory3 = &(mlpdconvjIntermediateRes[EXECUTION_BLOCK_MLP_SIZE*2 + threadOffset]);
float* temporalMemory4 = &(mlpdconvjIntermediateRes[EXECUTION_BLOCK_MLP_SIZE*3 + threadOffset]);
computedconvj_d(threadId, offset, numBlocksXNeigh, neuronsOut, pNumFeatures, currentPointIndex, centralPointIndex,
featureIndex, outFeatureIndex, numNeighbors, currPDF, currPointCoords, scaledRadius,
&pWeightsHidd1[offset*3], &pWeightsHidd2[offset*BLOCK_MLP_SIZE], &pWeightsOut[offset*BLOCK_MLP_SIZE],
&pBiasHidd1[offset], &pBiasHidd2[offset], &pBiasOut[offset],
pFeatures, pOutFeaturesGrads, temporalMemory1, temporalMemory2, temporalMemory3, temporalMemory4,
&pWeightsHidd1Grads[offset*3], &pWeightsHidd2Grads[offset*BLOCK_MLP_SIZE],
&pWeightsOutGrads[offset*BLOCK_MLP_SIZE], &pBiasHidd1Grads[offset],
&pBiasHidd2Grads[offset], &pBiasOutGrads[offset], pFeaturesGrads);
}
}
__device__ void computedconvj_dNoCombin(
const int pThreadId,
const int pOffset,
const int pTotalBlocks,
const int pNumFeatures,
const int pPointIndex,
const int pCentralPointIndex,
const int pInFeatureIndex,
const int pOutFeatureIndex,
const float pNumSamples,
const float pCurrentPDF,
const float pCurrPointCoords[3],
const float pRadius,
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pInFeatures,
const float* __restrict__ pOutFeatureGradients,
float* __restrict__ pTmpVector1,
float* __restrict__ pTmpVector2,
float* __restrict__ pTmpVector3,
float* __restrict__ pTmpVector4,
float* __restrict__ pWeightsHidd1Grad,
float* __restrict__ pWeightsHidd2Grad,
float* __restrict__ pWeightsOutGrad,
float* __restrict__ pBiasHidd1Grad,
float* __restrict__ pBiasHidd2Grad,
float* __restrict__ pBiasOutGrad,
float* __restrict__ pFeaturesGrads)
{
//Compute output first layer.
pTmpVector1[pThreadId] = pCurrPointCoords[0]*pWeightsHidd1[pThreadId*3] +
pCurrPointCoords[1]*pWeightsHidd1[pThreadId*3 + 1] +
pCurrPointCoords[2]*pWeightsHidd1[pThreadId*3 + 2] +
pBiasHidd1[pThreadId];
__syncthreads();
//Compute output second layer.
float auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
auxResult += max(pTmpVector1[j], 0.0)*pWeightsHidd2[pThreadId*BLOCK_MLP_SIZE + j];
}
    pTmpVector2[pThreadId] = auxResult + pBiasHidd2[pThreadId];
__syncthreads();
//Gradients computation
//Gradients of the output layer parameters w and b and in features.
if((pOffset+pThreadId) < pNumFeatures){
int currFeatureIndex = (pOffset+pThreadId)%pNumFeatures;
float currFeature = pInFeatures[pInFeatureIndex + currFeatureIndex];
float outGradient = pOutFeatureGradients[pOutFeatureIndex+currFeatureIndex];
float commonFactor = (currFeature*outGradient)/(pCurrentPDF*pNumSamples);
auxResult = 0.0;
for(int j = 0; j < BLOCK_MLP_SIZE; ++j)
{
atomicAdd(&pWeightsOutGrad[pThreadId*BLOCK_MLP_SIZE + j], commonFactor*max(pTmpVector2[j], 0.0));
auxResult += max(pTmpVector2[j], 0.0)*pWeightsOut[pThreadId*BLOCK_MLP_SIZE + j];
}
atomicAdd(&pBiasOutGrad[pThreadId], commonFactor);
//In features gradient update.
auxResult = auxResult + pBiasOut[pThreadId];
atomicAdd(&pFeaturesGrads[pInFeatureIndex + currFeatureIndex], outGradient*auxResult/(pCurrentPDF*pNumSamples));
}
    //Gradients of the second hidden layer.
auxResult = 0.0;
float commonFactor = (pTmpVector2[pThreadId] >= 0.0)?1.0:0.0;
int numOutsBlock = min(pNumFeatures - pOffset, BLOCK_MLP_SIZE);
for(int i = 0; i < numOutsBlock; ++i)
{
int currFeatureIndex = (i+pOffset)%pNumFeatures;
float currFeature = pInFeatures[pInFeatureIndex + currFeatureIndex];
float outGradient = pOutFeatureGradients[pOutFeatureIndex+currFeatureIndex];
auxResult += outGradient*currFeature*pWeightsOut[pThreadId + i*BLOCK_MLP_SIZE];
}
pTmpVector3[pThreadId] = (commonFactor*auxResult)/(pCurrentPDF*pNumSamples);
__syncthreads();
    //Gradients of the second hidden layer parameters w and b.
commonFactor = pTmpVector3[pThreadId];
for(int i = 0; i < BLOCK_MLP_SIZE; ++i)
{
atomicAdd(&pWeightsHidd2Grad[pThreadId*BLOCK_MLP_SIZE + i],
commonFactor*max(pTmpVector1[i], 0.0));
}
atomicAdd(&pBiasHidd2Grad[pThreadId], commonFactor);
    //Gradients of the first hidden layer.
auxResult = 0.0;
commonFactor = (pTmpVector1[pThreadId] >= 0.0)?1.0:0.0;
for(int i = 0; i < BLOCK_MLP_SIZE; ++i)
{
        auxResult += pTmpVector3[i]*pWeightsHidd2[pThreadId + i*BLOCK_MLP_SIZE];
}
pTmpVector4[pThreadId] = commonFactor*auxResult;
__syncthreads();
    //Gradients of the first hidden layer parameters w and b.
commonFactor = pTmpVector4[pThreadId];
for(int i = 0; i < 3; ++i)
{
atomicAdd(&pWeightsHidd1Grad[pThreadId*3 + i], commonFactor*pCurrPointCoords[i]);
}
atomicAdd(&pBiasHidd1Grad[pThreadId], commonFactor);
}
/**
 * Method to compute the gradients of the MLP convolution.
 * @param pAvg Boolean that indicates whether the result is divided by the number of neighbors.
* @param pScaleInv Boolean that indicates if the radius is defined relative to the bounding box.
* @param pNumPoints Number of points.
* @param pNumNeighbors Number of neighboring points.
* @param pNumFeatures Number of input features per point.
* @param pRadius Radius of the convolution.
* @param pWeightsHidd1 Weights of the neurons in the first hidden layer.
* @param pWeightsHidd2 Weights of the neurons in the second hidden layer.
* @param pWeightsOut Weights of the neurons in the output layer.
* @param pBiasHidd1 Biases of the neurons in the first hidden layer.
* @param pBiasHidd2 Biases of the neurons in the second hidden layer.
* @param pBiasOut Biases of the neurons in the output layer.
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pFeatures List of input features.
* @param pOutFeaturesGrads Gradients of the output convolutions.
* @param pStartIndexs List of start indices for each point.
 * @param pNeigbors List of neighbors of each point.
* @param pPDFs List of the pdf values.
* @param pWeightsHidd1Grads Output parameter with the list of gradients for the weights of the first hidden layer.
* @param pWeightsHidd2Grads Output parameter with the list of gradients for the weights of the second hidden layer.
 * @param pWeightsOutGrads Output parameter with the list of gradients for the weights of the output layer.
* @param pBiasHidd1Grads Output parameter with the list of gradients for the biases of the first hidden layer.
* @param pBiasHidd2Grads Output parameter with the list of gradients for the biases of the second hidden layer.
* @param pBiasOutGrads Output parameter with the list of gradients for the biases of the output layer.
 * @param pFeaturesGrads Output parameter with the list of gradients for the input features.
*/
__global__ void computedconvj_dNoCombinKernel(
const bool pAvg,
const bool pScaleInv,
const int pNumPoints,
const int pNumNeighbors,
const int pNumFeatures,
const float pRadius,
const float* __restrict__ pAABBMin,
const float* __restrict__ pAABBMax,
const float* __restrict__ pWeightsHidd1,
const float* __restrict__ pWeightsHidd2,
const float* __restrict__ pWeightsOut,
const float* __restrict__ pBiasHidd1,
const float* __restrict__ pBiasHidd2,
const float* __restrict__ pBiasOut,
const float* __restrict__ pSamples,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const float* __restrict__ pFeatures,
const float* __restrict__ pOutFeaturesGrads,
const int* __restrict__ pStartIndexs,
const int* __restrict__ pNeigbors,
const float* __restrict__ pPDFs,
float* __restrict__ pWeightsHidd1Grads,
float* __restrict__ pWeightsHidd2Grads,
float* __restrict__ pWeightsOutGrads,
float* __restrict__ pBiasHidd1Grads,
float* __restrict__ pBiasHidd2Grads,
float* __restrict__ pBiasOutGrads,
float* __restrict__ pFeaturesGrads)
{
extern __shared__ float mlpdconvjIntermediateRes[];
int neuronsOut = pNumFeatures;
int numBlocksXNeigh = neuronsOut/BLOCK_MLP_SIZE;
numBlocksXNeigh += (neuronsOut%BLOCK_MLP_SIZE != 0)?1:0;
unsigned long long int currentIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
int currentNeighborIndex = currentIndex/(numBlocksXNeigh*BLOCK_MLP_SIZE);
int offset = currentIndex%(numBlocksXNeigh*BLOCK_MLP_SIZE);
offset = offset - offset%BLOCK_MLP_SIZE;
int threadId = threadIdx.x%BLOCK_MLP_SIZE;
int threadOffset = threadIdx.x - threadId;
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
int currBatchId = pBatchIds[currentPointIndex];
float maxAabbSize = max(max(
pAABBMax[currBatchId*3] - pAABBMin[currBatchId*3],
pAABBMax[currBatchId*3+1] - pAABBMin[currBatchId*3+1]),
pAABBMax[currBatchId*3+2] - pAABBMin[currBatchId*3+2]);
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float currPointCoords[3] = {
(pPoints[currentPointIndex*3] - pSamples[centralPointIndex*3])/scaledRadius,
(pPoints[currentPointIndex*3+1] - pSamples[centralPointIndex*3+1])/scaledRadius,
(pPoints[currentPointIndex*3+2] - pSamples[centralPointIndex*3+2])/scaledRadius};
float currPDF = pPDFs[currentNeighborIndex];
int initIter = pStartIndexs[centralPointIndex];
int endIter = (centralPointIndex < pNumPoints-1)?pStartIndexs[centralPointIndex+1]:pNumNeighbors;
float numNeighbors = (pAvg)?(float)(endIter-initIter):1.0;
int featureIndex = currentPointIndex*pNumFeatures;
int outFeatureIndex = centralPointIndex*pNumFeatures;
float* temporalMemory1 = &(mlpdconvjIntermediateRes[threadOffset]);
float* temporalMemory2 = &(mlpdconvjIntermediateRes[EXECUTION_BLOCK_MLP_SIZE + threadOffset]);
float* temporalMemory3 = &(mlpdconvjIntermediateRes[EXECUTION_BLOCK_MLP_SIZE*2 + threadOffset]);
float* temporalMemory4 = &(mlpdconvjIntermediateRes[EXECUTION_BLOCK_MLP_SIZE*3 + threadOffset]);
computedconvj_dNoCombin(threadId, offset, numBlocksXNeigh, pNumFeatures, currentPointIndex, centralPointIndex,
featureIndex, outFeatureIndex, numNeighbors, currPDF, currPointCoords, scaledRadius,
&pWeightsHidd1[offset*3], &pWeightsHidd2[offset*BLOCK_MLP_SIZE], &pWeightsOut[offset*BLOCK_MLP_SIZE],
&pBiasHidd1[offset], &pBiasHidd2[offset], &pBiasOut[offset],
pFeatures, pOutFeaturesGrads, temporalMemory1, temporalMemory2, temporalMemory3, temporalMemory4,
&pWeightsHidd1Grads[offset*3], &pWeightsHidd2Grads[offset*BLOCK_MLP_SIZE],
&pWeightsOutGrads[offset*BLOCK_MLP_SIZE], &pBiasHidd1Grads[offset],
&pBiasHidd2Grads[offset], &pBiasOutGrads[offset], pFeaturesGrads);
}
}
////////////////////////////////////////////////////////////////////////////////// CPU
void spatialConvCPU(
bool pAvg,
bool pScaleInv,
int pNumNeighbors,
int pNumInFeatures,
int pNumOutFeatures,
int pNumSamples,
bool pCombin,
float pRadius,
const float* pInPoints,
const int* pBatchIds,
const float* pInFeatures,
const float* pPDFs,
const float* pSamples,
const int* pStartIndexs,
const int* pPackedNeighs,
const float* pAABBMin,
const float* pAABBMax,
const float* pWeights1,
const float* pBiases1,
const float* pWeights2,
const float* pBiases2,
const float* pWeightsOut,
const float* pBiasesOut,
float* pOutFeatues)
{
#ifdef PRINT_CONV_INFO
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
#endif
//Evaluate MLP.
if(pCombin){
cudaMemset(pOutFeatues, 0, pNumOutFeatures*pNumSamples*sizeof(float));
int numBlocksPerPoint = (pNumOutFeatures*pNumInFeatures)/BLOCK_MLP_SIZE;
numBlocksPerPoint += ((pNumOutFeatures*pNumInFeatures)%BLOCK_MLP_SIZE != 0)?1:0;
dim3 gridDimension = computeBlockGrid(
(unsigned long long int)pNumNeighbors*
(unsigned long long int)numBlocksPerPoint*
(unsigned long long int)BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE);
evaluateMLPKernel<<<gridDimension, EXECUTION_BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE*2*sizeof(float)>>>(
pAvg, pScaleInv, pNumSamples, pNumNeighbors, pNumInFeatures, pNumOutFeatures, pRadius, pAABBMin, pAABBMax,
pWeights1, pWeights2, pWeightsOut, pBiases1, pBiases2, pBiasesOut, pSamples, pInPoints, pBatchIds,
pInFeatures, pStartIndexs, pPackedNeighs, pPDFs, pOutFeatues);
gpuErrchk(cudaPeekAtLastError());
}else{
cudaMemset(pOutFeatues, 0, pNumInFeatures*pNumSamples*sizeof(float));
int numBlocksPerPoint = (pNumInFeatures)/BLOCK_MLP_SIZE;
numBlocksPerPoint += ((pNumInFeatures)%BLOCK_MLP_SIZE != 0)?1:0;
dim3 gridDimension = computeBlockGrid(
(unsigned long long int)pNumNeighbors*
(unsigned long long int)numBlocksPerPoint*
(unsigned long long int)BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE);
evaluateMLPNoCombinKernel<<<gridDimension, EXECUTION_BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE*2*sizeof(float)>>>(
pAvg, pScaleInv, pNumSamples, pNumNeighbors, pNumInFeatures, pRadius, pAABBMin, pAABBMax,
pWeights1, pWeights2, pWeightsOut, pBiases1, pBiases2, pBiasesOut, pSamples, pInPoints, pBatchIds,
pInFeatures, pStartIndexs, pPackedNeighs, pPDFs, pOutFeatues);
gpuErrchk(cudaPeekAtLastError());
}
#ifdef PRINT_CONV_INFO
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Forward Num points: %d | Neighbors: %d | Time %f\n", pNumSamples, pNumNeighbors, milliseconds);
#endif
}
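/**
 * Illustrative sketch: computeBlockGrid is declared in cuda_kernel_utils.h and
 * the launches above only rely on it turning a total thread count into a grid
 * of EXECUTION_BLOCK_MLP_SIZE-sized blocks without exceeding the per-dimension
 * grid limits. One plausible implementation (an assumption, not the actual
 * helper) spreads the blocks over x, then y, then z; any surplus blocks from
 * the rounding are discarded by the currentNeighborIndex < pNumNeighbors guard
 * inside the kernels, which recover the flat block index as
 * blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y.
 */
inline dim3 computeBlockGridSketch(unsigned long long int pTotalThreads, unsigned int pBlockSize)
{
    const unsigned long long int maxDimSize = 65535;
    unsigned long long int numBlocks = (pTotalThreads + pBlockSize - 1)/pBlockSize; //Ceil division.
    dim3 gridDimension(1, 1, 1);
    gridDimension.x = (unsigned int)((numBlocks > maxDimSize)?maxDimSize:numBlocks);
    numBlocks = (numBlocks + gridDimension.x - 1)/gridDimension.x; //Blocks still to place.
    gridDimension.y = (unsigned int)((numBlocks > maxDimSize)?maxDimSize:numBlocks);
    numBlocks = (numBlocks + gridDimension.y - 1)/gridDimension.y;
    gridDimension.z = (unsigned int)numBlocks;
    return gridDimension;
}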
void spatialConvGradsCPU(
bool pAvg,
bool pScaleInv,
int pNumNeighbors,
int pNumInFeatures,
int pNumOutFeatures,
int pNumSamples,
int pNumPoints,
bool pCombin,
float pRadius,
const float* pInPoints,
const int* pBatchIds,
const float* pInFeatures,
const float* pPDFs,
const float* pSamples,
const int* pStartIndexs,
const int* pPackedNeighs,
const float* pAABBMin,
const float* pAABBMax,
const float* pWeights1,
const float* pBiases1,
const float* pWeights2,
const float* pBiases2,
const float* pWeightsOut,
const float* pBiasesOut,
const float* pInOutFeatueGrads,
float* pOutFeatureGrads,
float* pWeights1Grads,
float* pWeight2Grads,
float* pWeightOutGrads,
float* pBiases1Grads,
float* pBiases2Grads,
float* pBiasesOutGrads)
{
#ifdef PRINT_CONV_INFO
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
#endif
//Compute dconv_j_d.
if(pCombin){
int numBlocksPerPoint = (pNumOutFeatures*pNumInFeatures)/BLOCK_MLP_SIZE;
numBlocksPerPoint += ((pNumOutFeatures*pNumInFeatures)%BLOCK_MLP_SIZE != 0)?1:0;
cudaMemset(pWeights1Grads, 0, sizeof(float)*3*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pWeight2Grads, 0, sizeof(float)*BLOCK_MLP_SIZE*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pWeightOutGrads, 0, sizeof(float)*(pNumOutFeatures*pNumInFeatures)*BLOCK_MLP_SIZE);
cudaMemset(pBiases1Grads, 0, sizeof(float)*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pBiases2Grads, 0, sizeof(float)*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pBiasesOutGrads, 0, sizeof(float)*(pNumOutFeatures*pNumInFeatures));
cudaMemset(pOutFeatureGrads, 0, sizeof(float)*pNumPoints*pNumInFeatures);
dim3 gridDimension = computeBlockGrid(pNumNeighbors*numBlocksPerPoint*BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE);
computedconvj_dKernel<<<gridDimension, EXECUTION_BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE*4*sizeof(float)>>>(
pAvg, pScaleInv, pNumSamples, pNumNeighbors, pNumInFeatures,
pNumOutFeatures, pRadius, pAABBMin, pAABBMax, pWeights1, pWeights2, pWeightsOut, pBiases1, pBiases2, pBiasesOut,
pSamples, pInPoints, pBatchIds, pInFeatures, pInOutFeatueGrads, pStartIndexs, pPackedNeighs, pPDFs, pWeights1Grads,
pWeight2Grads, pWeightOutGrads, pBiases1Grads, pBiases2Grads, pBiasesOutGrads, pOutFeatureGrads);
gpuErrchk(cudaPeekAtLastError());
}else{
int numBlocksPerPoint = (pNumInFeatures)/BLOCK_MLP_SIZE;
numBlocksPerPoint += ((pNumInFeatures)%BLOCK_MLP_SIZE != 0)?1:0;
cudaMemset(pWeights1Grads, 0, sizeof(float)*3*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pWeight2Grads, 0, sizeof(float)*BLOCK_MLP_SIZE*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pWeightOutGrads, 0, sizeof(float)*pNumInFeatures*BLOCK_MLP_SIZE);
cudaMemset(pBiases1Grads, 0, sizeof(float)*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pBiases2Grads, 0, sizeof(float)*numBlocksPerPoint*BLOCK_MLP_SIZE);
cudaMemset(pBiasesOutGrads, 0, sizeof(float)*pNumInFeatures);
cudaMemset(pOutFeatureGrads, 0, sizeof(float)*pNumPoints*pNumInFeatures);
dim3 gridDimension = computeBlockGrid(pNumNeighbors*numBlocksPerPoint*BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE);
computedconvj_dNoCombinKernel<<<gridDimension, EXECUTION_BLOCK_MLP_SIZE, EXECUTION_BLOCK_MLP_SIZE*4*sizeof(float)>>>(
pAvg, pScaleInv, pNumSamples, pNumNeighbors, pNumInFeatures,
pRadius, pAABBMin, pAABBMax, pWeights1, pWeights2, pWeightsOut, pBiases1, pBiases2, pBiasesOut,
pSamples, pInPoints, pBatchIds, pInFeatures, pInOutFeatueGrads, pStartIndexs, pPackedNeighs, pPDFs, pWeights1Grads,
pWeight2Grads, pWeightOutGrads, pBiases1Grads, pBiases2Grads, pBiasesOutGrads, pOutFeatureGrads);
gpuErrchk(cudaPeekAtLastError());
}
#ifdef PRINT_CONV_INFO
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Backward Num points: %d | Neighbors: %d | Time %f\n", pNumSamples, pNumNeighbors, milliseconds);
#endif
}
//////////////////////////////////////////////////////////////////////////////
// ---------------------------- new source file ------------------------------
//////////////////////////////////////////////////////////////////////////////
namespace VGUGV
{
namespace Common
{
texture<uchar, cudaTextureType2D, cudaReadModeElementType> FrameCuda_GrayImageTexture;
texture<uchar, cudaTextureType2D, cudaReadModeElementType> FrameCuda_MaskImageTexture;
__device__ bool pixelLieOutsideImageMask(CUDA_PitchMemory<uchar> maskImage, int x, int y);
__global__ void kernel_imagePyramid(uchar* pTargetImage, size_t pitch, uint step);
__global__ void kernel_imageGradient(CUDA_PitchMemory<unsigned char> maskImage,
CUDA_PitchMemory<float> gradientMagMap,
CUDA_PitchMemory<Eigen::Vector2f> gradientVecMap,
int nCols,
int nRows,
size_t scale);
template<class T_FeatureType, class T_FeatureDescriptorType>
Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::Frame_CUDA(int frameId, const CameraBase::Ptr& camera, const unsigned char* imageData, const unsigned char* maskImage, int nRows, int nCols, int nChannels)
: Base(frameId, camera, imageData, maskImage, nRows, nCols, nChannels)
, mpPyramidImages_CUDA(NULL)
, mpPyramidImageGradientMag_CUDA(NULL)
, mpPyramidImageGradientVec_CUDA(NULL)
{
// always copy gray image to cuda device at the moment
CUDA_SAFE_CALL(cudaMallocPitch((void**)&mpImageData_CUDA.dataPtr, &mpImageData_CUDA.pitch, mnCols, mnRows));
CUDA_SAFE_CALL(cudaMemcpy2D(mpImageData_CUDA.dataPtr, mpImageData_CUDA.pitch, mpGrayImageData_CPU, mnCols, mnCols, mnRows, cudaMemcpyHostToDevice));
if(maskImage != NULL)
{
CUDA_SAFE_CALL(cudaMallocPitch((void**)&mpImageMaskData_CUDA.dataPtr, &mpImageMaskData_CUDA.pitch, mnCols, mnRows));
CUDA_SAFE_CALL(cudaMemcpy2D(mpImageMaskData_CUDA.dataPtr, mpImageMaskData_CUDA.pitch, maskImage, mnCols, mnCols, mnRows, cudaMemcpyHostToDevice));
}
// initiate texture
FrameCuda_GrayImageTexture.addressMode[0] = cudaAddressModeWrap;
FrameCuda_GrayImageTexture.addressMode[1] = cudaAddressModeWrap;
FrameCuda_GrayImageTexture.filterMode = cudaFilterModePoint;
FrameCuda_GrayImageTexture.normalized = false;
FrameCuda_MaskImageTexture.addressMode[0] = cudaAddressModeWrap;
FrameCuda_MaskImageTexture.addressMode[1] = cudaAddressModeWrap;
FrameCuda_MaskImageTexture.filterMode = cudaFilterModePoint;
FrameCuda_MaskImageTexture.normalized = false;
mFrameCuda_uchar1ChannelDesc = cudaCreateChannelDesc<uchar>();
}
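    // Illustrative note, not part of Frame_CUDA: the constructor above keeps
    // every image in pitched device memory. cudaMallocPitch pads each row to a
    // pitch (in bytes) that may exceed the logical width, and cudaMemcpy2D
    // copies row by row between that padded layout and the tightly packed host
    // buffer, so addressing always goes through the pitch. A minimal helper
    // (an assumption, not part of this class) showing the row arithmetic that
    // kernel_imageGradient applies to its CUDA_PitchMemory arguments:
    template<typename T>
    __host__ __device__ inline T* pitchedRowPtr(T* basePtr, size_t pitchInBytes, int row)
    {
        return reinterpret_cast<T*>(reinterpret_cast<char*>(basePtr) + (size_t)row * pitchInBytes);
    }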
template<class T_FeatureType, class T_FeatureDescriptorType>
Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::~Frame_CUDA()
{
CUDA_SAFE_CALL(cudaFree(mpImageData_CUDA.dataPtr));
CUDA_SAFE_CALL(cudaFree(mpImageMaskData_CUDA.dataPtr));
for(int i = 0; i < mnPyramidLevels; i++)
{
if(mpPyramidImages_CUDA != NULL) CUDA_SAFE_CALL(cudaFree(mpPyramidImages_CUDA[i].dataPtr));
if(mpPyramidImageGradientMag_CUDA != NULL) CUDA_SAFE_CALL(cudaFree(mpPyramidImageGradientMag_CUDA[i].dataPtr));
if(mpPyramidImageGradientVec_CUDA != NULL) CUDA_SAFE_CALL(cudaFree(mpPyramidImageGradientVec_CUDA[i].dataPtr));
if(mpPyramidImages != NULL)
{
delete [] mpPyramidImages[i];
mpPyramidImages[i] = NULL;
}
if(mpPyramidImageGradientMag != NULL)
{
delete [] mpPyramidImageGradientMag[i];
mpPyramidImageGradientMag[i] = NULL;
}
if(mpPyramidImageGradientVec != NULL)
{
delete [] mpPyramidImageGradientVec[i];
mpPyramidImageGradientVec[i] = NULL;
}
}
delete [] mpPyramidImages_CUDA; mpPyramidImages_CUDA = NULL;
delete [] mpPyramidImageGradientMag_CUDA; mpPyramidImageGradientMag_CUDA = NULL;
delete [] mpPyramidImageGradientVec_CUDA; mpPyramidImageGradientVec_CUDA = NULL;
delete [] mpPyramidImages; mpPyramidImages = NULL;
delete [] mpPyramidImageGradientMag; mpPyramidImageGradientMag = NULL;
delete [] mpPyramidImageGradientVec; mpPyramidImageGradientVec = NULL;
}
template<class T_FeatureType, class T_FeatureDescriptorType>
void Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::computeImagePyramids(int nTotalLevels)
{
if(nTotalLevels < 1) return;
if(mpPyramidImages_CUDA != NULL) return;
mnPyramidLevels = nTotalLevels;
mpPyramidImages_CUDA = new CUDA_PitchMemory<unsigned char>[nTotalLevels];
CUDA_SAFE_CALL(cudaMallocPitch(&mpPyramidImages_CUDA[0].dataPtr, &mpPyramidImages_CUDA[0].pitch, mnCols, mnRows));
CUDA_SAFE_CALL(cudaMemcpy2D(mpPyramidImages_CUDA[0].dataPtr, mpPyramidImages_CUDA[0].pitch, mpImageData_CUDA.dataPtr, mpImageData_CUDA.pitch, mnCols, mnRows, cudaMemcpyDeviceToDevice));
for(int i = 1; i < nTotalLevels; i++)
{
int scale = 1 << i;
int nRows = mnRows / scale;
int nCols = mnCols / scale;
// bind texture with lower level imageData
CUDA_SAFE_CALL( cudaBindTexture2D(0,
FrameCuda_GrayImageTexture,
mpPyramidImages_CUDA[i-1].dataPtr,
mFrameCuda_uchar1ChannelDesc,
nCols * 2,
nRows * 2,
mpPyramidImages_CUDA[i-1].pitch));
CUDA_SAFE_CALL(cudaMallocPitch(&mpPyramidImages_CUDA[i].dataPtr, &mpPyramidImages_CUDA[i].pitch, nCols, nRows));
// invoke cuda kernel
int step = 4;
int thread_x = 8;
int thread_y = 8;
dim3 blockDim(thread_x, thread_y);
dim3 gridDim(intergerDivUp(nCols, (step * thread_x)), intergerDivUp(nRows, thread_y));
kernel_imagePyramid<<<gridDim, blockDim>>>(mpPyramidImages_CUDA[i].dataPtr, mpPyramidImages_CUDA[i].pitch, step);
// unbind texture
CUDA_SAFE_CALL(cudaUnbindTexture(FrameCuda_GrayImageTexture));
}
}
template<class T_FeatureType, class T_FeatureDescriptorType>
void Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::computeImagePyramidsGradients(int nTotalLevels)
{
if(nTotalLevels < 1) return;
if(mpPyramidImageGradientMag_CUDA != NULL) return;
mnPyramidLevels = nTotalLevels;
mpPyramidImageGradientMag_CUDA = new CUDA_PitchMemory<float>[nTotalLevels];
mpPyramidImageGradientVec_CUDA = new CUDA_PitchMemory<Eigen::Vector2f>[nTotalLevels];
if(mpImageMaskData_CUDA.dataPtr != NULL)
{
CUDA_SAFE_CALL(cudaBindTexture2D(0,
FrameCuda_MaskImageTexture,
mpImageMaskData_CUDA.dataPtr,
mFrameCuda_uchar1ChannelDesc,
mnCols,
mnRows,
mpImageMaskData_CUDA.pitch));
}
for(int i = 0; i < nTotalLevels; i++)
{
int scale = 1 << i;
int nRows = mnRows / scale;
int nCols = mnCols / scale;
// bind texture with lower level imageData
CUDA_SAFE_CALL( cudaBindTexture2D(0,
FrameCuda_GrayImageTexture,
mpPyramidImages_CUDA[i].dataPtr,
mFrameCuda_uchar1ChannelDesc,
nCols,
nRows,
mpPyramidImages_CUDA[i].pitch));
CUDA_SAFE_CALL( cudaMallocPitch(&mpPyramidImageGradientMag_CUDA[i].dataPtr, &mpPyramidImageGradientMag_CUDA[i].pitch, nCols * sizeof(float), nRows));
CUDA_SAFE_CALL( cudaMallocPitch(&mpPyramidImageGradientVec_CUDA[i].dataPtr, &mpPyramidImageGradientVec_CUDA[i].pitch, nCols * sizeof(Eigen::Vector2f), nRows));
// invoke kernel
int thread_x = 8;
int thread_y = 8;
dim3 blockDim(thread_x, thread_y);
dim3 gridDim(intergerDivUp(nCols, thread_x), intergerDivUp(nRows, thread_y));
kernel_imageGradient<<<gridDim, blockDim>>>(mpImageMaskData_CUDA, mpPyramidImageGradientMag_CUDA[i], mpPyramidImageGradientVec_CUDA[i], nCols, nRows, 1 << i);
// unbind texture
CUDA_SAFE_CALL(cudaUnbindTexture(FrameCuda_GrayImageTexture));
}
CUDA_SAFE_CALL(cudaUnbindTexture(FrameCuda_MaskImageTexture));
}
template<class T_FeatureType, class T_FeatureDescriptorType>
bool Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::pixelLieOutsideImageMask(int r, int c)
{
if(mpImageMaskData_CPU == NULL) return true;
int index = r * mnCols + c;
return (mpImageMaskData_CPU[index] > 100);
}
template<class T_FeatureType, class T_FeatureDescriptorType>
unsigned char* Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::getGrayImage(DEVICE_TYPE device)
{
if(device == DEVICE_TYPE::CPU)
{
return mpGrayImageData_CPU;
}
else
{
return mpImageData_CUDA.dataPtr;
}
}
template<class T_FeatureType, class T_FeatureDescriptorType>
size_t Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::getGrayImageCUDAPitch()
{
return mpImageData_CUDA.pitch;
}
template<class T_FeatureType, class T_FeatureDescriptorType>
unsigned char* Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::getPyramidImage(int level, DEVICE_TYPE device)
{
if(device == DEVICE_TYPE::CPU)
{
// download data from GPU
if(mpPyramidImages == NULL)
{
mpPyramidImages = new unsigned char*[mnPyramidLevels];
for (int i = 0; i < mnPyramidLevels; i++)
{
int scale = 1 << i;
int nRows = mnRows / scale;
int nCols = mnCols / scale;
int nSize = nRows * nCols;
mpPyramidImages[i] = new unsigned char[nSize];
CUDA_SAFE_CALL(cudaMemcpy2D(mpPyramidImages[i],
nCols,
mpPyramidImages_CUDA[i].dataPtr,
mpPyramidImages_CUDA[i].pitch,
nCols,
nRows,
cudaMemcpyDeviceToHost));
}
}
return mpPyramidImages[level];
}
else
{
return mpPyramidImages_CUDA[level].dataPtr;
}
}
template<class T_FeatureType, class T_FeatureDescriptorType>
float* Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::getPyramidImageGradientMag(int level, DEVICE_TYPE device)
{
if(device == DEVICE_TYPE::CPU)
{
// download data from GPU
if(mpPyramidImageGradientMag == NULL)
{
mpPyramidImageGradientMag = new float*[mnPyramidLevels];
for (int i = 0; i < mnPyramidLevels; i++)
{
int scale = 1 << i;
int nRows = mnRows / scale;
int nCols = mnCols / scale;
int nSize = nRows * nCols;
mpPyramidImageGradientMag[i] = new float[nSize];
CUDA_SAFE_CALL(cudaMemcpy2D(mpPyramidImageGradientMag[i],
nCols * sizeof(float),
mpPyramidImageGradientMag_CUDA[i].dataPtr,
mpPyramidImageGradientMag_CUDA[i].pitch,
nCols * sizeof(float),
nRows,
cudaMemcpyDeviceToHost));
}
}
return mpPyramidImageGradientMag[level];
}
else
{
return mpPyramidImageGradientMag_CUDA[level].dataPtr;
}
}
template<class T_FeatureType, class T_FeatureDescriptorType>
Eigen::Vector2f* Frame_CUDA<T_FeatureType, T_FeatureDescriptorType>::getPyramidImageGradientVec(int nLevel, DEVICE_TYPE device)
{
if(device == DEVICE_TYPE::CPU)
{
// download data from GPU
if(mpPyramidImageGradientVec == NULL)
{
mpPyramidImageGradientVec = new Eigen::Vector2f*[mnPyramidLevels];
for (int i = 0; i < mnPyramidLevels; i++)
{
int scale = 1 << i;
int nRows = mnRows / scale;
int nCols = mnCols / scale;
int nSize = nRows * nCols;
mpPyramidImageGradientVec[i] = new Eigen::Vector2f[nSize];
CUDA_SAFE_CALL(cudaMemcpy2D(mpPyramidImageGradientVec[i],
nCols * sizeof(Eigen::Vector2f),
mpPyramidImageGradientVec_CUDA[i].dataPtr,
mpPyramidImageGradientVec_CUDA[i].pitch,
nCols * sizeof(Eigen::Vector2f),
nRows,
cudaMemcpyDeviceToHost));
}
}
return mpPyramidImageGradientVec[nLevel];
}
else
{
return mpPyramidImageGradientVec_CUDA[nLevel].dataPtr;
}
}
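// note: the three CPU-side getters above download and cache *all* pyramid levels on
// the first CPU request; subsequent calls return the cached host buffers without
// re-synchronising, so they can go stale if the GPU pyramids are rebuilt afterwards
// (unless the caches are reset elsewhere in the class).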
/* instantiate template class */
template class Frame_CUDA<Feature_depthMap<DepthHypothesis_GMM>, DepthHypothesis_GMM>;
/*****************************************************************************************************************
* **************************************** Implement CUDA kernels ***********************************************
* **************************************************************************************************************/
__device__ bool pixelLieOutsideImageMask(CUDA_PitchMemory<uchar> maskImage, int x, int y)
{
if(maskImage.dataPtr == NULL) return true;
float intensity = tex2D(FrameCuda_MaskImageTexture, x, y);
if(intensity < 100) return false;
return true;
}
__global__ void kernel_imagePyramid(uchar* pTargetImage, size_t pitch, uint step)
{
// step must be <= 8 (size of the targetVal4 buffer below)
uint x = (blockIdx.x * blockDim.x + threadIdx.x) * step;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
// compute pixel coordinate at lower level
uint y0 = 2 * y + 1;
uint x0 = 2 * x + 1;
// compute pixel coordinates surrounding (x0, y0)
uint2 p00 = make_uint2(x0 - 1, y0 - 1);
uint2 p01 = make_uint2(x0 - 1, y0 + 1);
float I00 = tex2D(FrameCuda_GrayImageTexture, p00.x, p00.y);
float I01 = tex2D(FrameCuda_GrayImageTexture, p01.x, p01.y);
uchar targetVal4[8];
for(int i = 0; i < step; ++i)
{
uint2 p10 = make_uint2(p00.x + 2, p00.y);
uint2 p11 = make_uint2(p01.x + 2, p01.y);
float I10 = tex2D(FrameCuda_GrayImageTexture, p10.x, p10.y);
float I11 = tex2D(FrameCuda_GrayImageTexture, p11.x, p11.y);
targetVal4[i] = (uchar)((I00 + I01 + I10 + I11) * 0.25f);
I00 = I10; I01 = I11;
p00 = p10; p01 = p11;
}
// write to target image
memcpy(pTargetImage + y * pitch + x, targetVal4, step);
}
__global__ void kernel_imageGradient(CUDA_PitchMemory<uchar> maskImage,
CUDA_PitchMemory<float> gradientMagMap,
CUDA_PitchMemory<Eigen::Vector2f> gradientVecMap,
int nCols,
int nRows,
size_t scale)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
int xInTopLevel = scale * x + scale - 1;
int yInTopLevel = scale * y + scale - 1;
float* pGradientMag = (float*)( (char*)gradientMagMap.dataPtr + y * gradientMagMap.pitch) + x;
Eigen::Vector2f* pGradientVec = (Eigen::Vector2f*)( (char*)gradientVecMap.dataPtr + y * gradientVecMap.pitch) + x;
if(x == 0 || y == 0 || x == nCols - 1 || y == nRows - 1 || !pixelLieOutsideImageMask(maskImage, xInTopLevel, yInTopLevel))
{
pGradientMag[0] = 0;
pGradientVec[0] = Eigen::Vector2f(0, 0);
return;
}
// fetch 4 neighbour pixels
float top = tex2D(FrameCuda_GrayImageTexture, x, y - 1);
float left = tex2D(FrameCuda_GrayImageTexture, x - 1, y);
float rght = tex2D(FrameCuda_GrayImageTexture, x + 1, y);
float bot = tex2D(FrameCuda_GrayImageTexture, x, y + 1);
float dx = (rght - left) * 0.5f;
float dy = (bot - top) * 0.5f;
float mag = sqrt(dx * dx + dy * dy);
pGradientMag[0] = mag;
pGradientVec[0] = Eigen::Vector2f(dx, dy);
}
}
}
NAMESPACE_BEGIN(enoki)
extern uint32_t cuda_log_level();
__global__ void arange(uint32_t n, uint32_t *out) {
for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
out[i] = i;
}
ENOKI_EXPORT
size_t cuda_partition(size_t size, const void **ptrs_, void ***ptrs_unique_out,
uint32_t **counts_out, uint32_t ***perm_out) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_partition(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
uint32_t *perm = (uint32_t *) cuda_malloc(size * sizeof(uint32_t)),
*perm_sorted = (uint32_t *) cuda_malloc(size * sizeof(uint32_t));
uintptr_t *ptrs = (uintptr_t *) ptrs_,
*ptrs_sorted = (uintptr_t *) cuda_malloc(size * sizeof(uintptr_t));
arange<<<256, 256>>>((uint32_t) size, perm);
// Sort the key array
cuda_check(cub::DeviceRadixSort::SortPairs(
temp, temp_size, ptrs, ptrs_sorted, perm, perm_sorted, size));
temp = cuda_malloc(temp_size);
cuda_check_maybe_redo(cub::DeviceRadixSort::SortPairs(
temp, temp_size, ptrs, ptrs_sorted, perm, perm_sorted, size));
// Release memory that is no longer needed
cuda_free(temp);
cuda_free(perm);
temp_size = 0; temp = nullptr;
uintptr_t *ptrs_unique = (uintptr_t *) cuda_malloc(size * sizeof(uintptr_t));
uint32_t *counts = (uint32_t *) cuda_malloc(size * sizeof(uint32_t));
size_t *num_runs = (size_t *) cuda_malloc(sizeof(size_t));
// RLE-encode the sorted pointer list
cuda_check(cub::DeviceRunLengthEncode::Encode(
temp, temp_size, ptrs_sorted, ptrs_unique, counts, num_runs, size));
temp = cuda_malloc(temp_size);
cuda_check_maybe_redo(cub::DeviceRunLengthEncode::Encode(
temp, temp_size, ptrs_sorted, ptrs_unique, counts, num_runs, size));
// Release memory that is no longer needed
cuda_free(temp);
cuda_free(ptrs_sorted);
size_t num_runs_out = 0;
cuda_check(cudaMemcpy(&num_runs_out, num_runs, sizeof(size_t), cudaMemcpyDeviceToHost));
*ptrs_unique_out = (void **) malloc(sizeof(void *) * num_runs_out);
*counts_out = (uint32_t *) malloc(sizeof(uint32_t) * num_runs_out);
*perm_out = (uint32_t **) malloc(sizeof(uint32_t *) * num_runs_out);
cuda_check(cudaMemcpy(*ptrs_unique_out, ptrs_unique, num_runs_out * sizeof(void *), cudaMemcpyDeviceToHost));
cuda_check(cudaMemcpy(*counts_out, counts, num_runs_out * sizeof(uint32_t), cudaMemcpyDeviceToHost));
uint32_t *ptr = perm_sorted;
for (size_t i = 0; i < num_runs_out; ++i) {
size_t size = (*counts_out)[i];
(*perm_out)[i] = (uint32_t *) cuda_malloc(size * sizeof(uint32_t));
cuda_check(cudaMemcpyAsync((*perm_out)[i], ptr,
size * sizeof(uint32_t),
cudaMemcpyDeviceToDevice));
ptr += size;
}
cuda_free(num_runs);
cuda_free(ptrs_unique);
cuda_free(perm_sorted);
cuda_free(counts);
return num_runs_out;
}
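// Hedged usage sketch (hypothetical helper, not part of the original API): the caller
// owns every output of cuda_partition() -- ptrs_unique/counts/perm are host
// allocations from malloc(), while each perm[i] is a device allocation from
// cuda_malloc() holding counts[i] indices -- and must release them once the
// partition has been consumed.
static inline void example_consume_partition(size_t size, const void **ptrs) {
    void **ptrs_unique = nullptr;
    uint32_t *counts = nullptr, **perm = nullptr;
    size_t num_runs = cuda_partition(size, ptrs, &ptrs_unique, &counts, &perm);
    for (size_t i = 0; i < num_runs; ++i)
        cuda_free(perm[i]); // indices of entries whose pointer equals ptrs_unique[i]
    free(perm); free(counts); free(ptrs_unique);
}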
template <typename T, std::enable_if_t<std::is_unsigned<T>::value, int> = 0>
void cuda_compress_impl(size_t size, const T *data, const bool *mask, T **out_data, size_t *out_size) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_compress(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0,
*out_size_p = nullptr;
void *temp = nullptr;
T *result_p = nullptr;
*out_data = (T *) cuda_malloc(size * sizeof(T));
out_size_p = (size_t *) cuda_malloc(sizeof(size_t));
cuda_check(cub::DeviceSelect::Flagged(temp, temp_size, data, mask, result_p, out_size_p, size));
temp = cuda_malloc(temp_size);
cuda_check_maybe_redo(cub::DeviceSelect::Flagged(temp, temp_size, data, mask, *out_data, out_size_p, size));
cuda_check(cudaMemcpy(out_size, out_size_p, sizeof(size_t), cudaMemcpyDeviceToHost));
cuda_free(temp);
cuda_free(out_size_p);
}
template <typename T, std::enable_if_t<!std::is_unsigned<T>::value && sizeof(T) == 4, int> = 0>
void cuda_compress_impl(size_t size, const T *data, const bool *mask, T **out_data, size_t *out_size) {
cuda_compress_impl(size, (const uint32_t *) data, mask, (uint32_t **) out_data, out_size);
}
template <typename T, std::enable_if_t<!std::is_unsigned<T>::value && sizeof(T) == 8, int> = 0>
void cuda_compress_impl(size_t size, const T *data, const bool *mask, T **out_data, size_t *out_size) {
cuda_compress_impl(size, (const uint64_t *) data, mask, (uint64_t **) out_data, out_size);
}
template <typename T> void cuda_compress(size_t size, const T *data, const bool *mask, T **out_data, size_t *out_size) {
cuda_compress_impl(size, data, mask, out_data, out_size);
}
template <typename T> T* cuda_hsum(size_t size, const T *data) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_hsum(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
T *result_p = nullptr;
cuda_check(cub::DeviceReduce::Sum(temp, temp_size, data, result_p, size));
temp = cuda_malloc(temp_size);
result_p = (T *) cuda_malloc(sizeof(T));
cuda_check_maybe_redo(cub::DeviceReduce::Sum(temp, temp_size, data, result_p, size));
cuda_free(temp);
return result_p;
}
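// Hedged usage sketch (hypothetical helper, not part of the original API): the
// reducers in this file follow CUB's two-phase pattern -- the first call with
// temp == nullptr only fills temp_size, the second call writes the result into a
// device-side scalar -- so a caller that wants the value on the host still has to
// copy it back and free the device allocation:
static inline float example_hsum_to_host(size_t n, const float *d_data) {
    float *d_sum = cuda_hsum(n, d_data);   // device pointer returned by cuda_hsum
    float h_sum = 0.f;
    cuda_check(cudaMemcpy(&h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost));
    cuda_free(d_sum);
    return h_sum;
}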
struct ReductionOpMul {
template <typename T>
__device__ __forceinline__
T operator()(T a, T b) const {
return a * b;
}
};
template <typename T> T* cuda_hprod(size_t size, const T *data) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_hprod(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
T *result_p = nullptr;
ReductionOpMul mul_op;
cuda_check(cub::DeviceReduce::Reduce(temp, temp_size, data, result_p, size,
mul_op, T(1)));
temp = cuda_malloc(temp_size);
result_p = (T *) cuda_malloc(sizeof(T));
cuda_check_maybe_redo(cub::DeviceReduce::Reduce(temp, temp_size, data, result_p, size,
mul_op, T(1)));
cuda_free(temp);
return result_p;
}
template <typename T> T* cuda_hmax(size_t size, const T *data) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_hmax(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
T *result_p = nullptr;
cuda_check(cub::DeviceReduce::Max(temp, temp_size, data, result_p, size));
temp = cuda_malloc(temp_size);
result_p = (T *) cuda_malloc(sizeof(T));
cuda_check_maybe_redo(cub::DeviceReduce::Max(temp, temp_size, data, result_p, size));
cuda_free(temp);
return result_p;
}
template <typename T> T* cuda_hmin(size_t size, const T *data) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_hmin(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
T *result_p = nullptr;
cuda_check(cub::DeviceReduce::Min(temp, temp_size, data, result_p, size));
temp = cuda_malloc(temp_size);
result_p = (T *) cuda_malloc(sizeof(T));
cuda_check_maybe_redo(cub::DeviceReduce::Min(temp, temp_size, data, result_p, size));
cuda_free(temp);
return result_p;
}
struct ReductionOpAll {
__device__ __forceinline__
bool operator()(bool a, bool b) const {
return a && b;
}
};
struct ReductionOpAny {
__device__ __forceinline__
bool operator()(bool a, bool b) const {
return a || b;
}
};
ENOKI_EXPORT bool cuda_all(size_t size, const bool *data) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_all(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
bool result = false, *result_p = nullptr;
ReductionOpAll all_op;
cuda_check(cub::DeviceReduce::Reduce(temp, temp_size, data, result_p, size,
all_op, true));
temp = cuda_malloc(temp_size);
result_p = (bool *) cuda_malloc(sizeof(bool));
cuda_check_maybe_redo(cub::DeviceReduce::Reduce(temp, temp_size, data, result_p, size,
all_op, true));
cuda_free(temp);
cuda_check(cudaMemcpy(&result, result_p, sizeof(bool), cudaMemcpyDeviceToHost));
cuda_free(result_p);
return result;
}
ENOKI_EXPORT bool cuda_any(size_t size, const bool *data) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_any(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
bool result = false, *result_p = nullptr;
ReductionOpAny any_op;
cuda_check(cub::DeviceReduce::Reduce(temp, temp_size, data, result_p, size,
any_op, false));
temp = cuda_malloc(temp_size);
result_p = (bool *) cuda_malloc(sizeof(bool));
cuda_check_maybe_redo(cub::DeviceReduce::Reduce(temp, temp_size, data, result_p, size,
any_op, false));
cuda_free(temp);
cuda_check(cudaMemcpy(&result, result_p, sizeof(bool), cudaMemcpyDeviceToHost));
cuda_free(result_p);
return result;
}
ENOKI_EXPORT size_t cuda_count(size_t size, const bool *data) {
#if !defined(NDEBUG)
if (cuda_log_level() >= 4)
std::cerr << "cuda_count(size=" << size << ")" << std::endl;
#endif
size_t temp_size = 0;
void *temp = nullptr;
size_t result = 0, *result_p = nullptr;
cuda_check(cub::DeviceReduce::Sum(temp, temp_size, data, result_p, size));
temp = cuda_malloc(temp_size);
result_p = (size_t *) cuda_malloc(sizeof(size_t));
cuda_check_maybe_redo(cub::DeviceReduce::Sum(temp, temp_size, data, result_p, size));
cuda_free(temp);
cuda_check(cudaMemcpy(&result, result_p, sizeof(size_t), cudaMemcpyDeviceToHost));
cuda_free(result_p);
return result;
}
template ENOKI_EXPORT int32_t* cuda_hsum(size_t, const int32_t *);
template ENOKI_EXPORT uint32_t* cuda_hsum(size_t, const uint32_t *);
template ENOKI_EXPORT int64_t* cuda_hsum(size_t, const int64_t *);
template ENOKI_EXPORT uint64_t* cuda_hsum(size_t, const uint64_t *);
template ENOKI_EXPORT float* cuda_hsum(size_t, const float *);
template ENOKI_EXPORT double* cuda_hsum(size_t, const double *);
template ENOKI_EXPORT int32_t* cuda_hprod(size_t, const int32_t *);
template ENOKI_EXPORT uint32_t* cuda_hprod(size_t, const uint32_t *);
template ENOKI_EXPORT int64_t* cuda_hprod(size_t, const int64_t *);
template ENOKI_EXPORT uint64_t* cuda_hprod(size_t, const uint64_t *);
template ENOKI_EXPORT float* cuda_hprod(size_t, const float *);
template ENOKI_EXPORT double* cuda_hprod(size_t, const double *);
template ENOKI_EXPORT int32_t* cuda_hmax(size_t, const int32_t *);
template ENOKI_EXPORT uint32_t* cuda_hmax(size_t, const uint32_t *);
template ENOKI_EXPORT int64_t* cuda_hmax(size_t, const int64_t *);
template ENOKI_EXPORT uint64_t* cuda_hmax(size_t, const uint64_t *);
template ENOKI_EXPORT float* cuda_hmax(size_t, const float *);
template ENOKI_EXPORT double* cuda_hmax(size_t, const double *);
template ENOKI_EXPORT int32_t* cuda_hmin(size_t, const int32_t *);
template ENOKI_EXPORT uint32_t* cuda_hmin(size_t, const uint32_t *);
template ENOKI_EXPORT int64_t* cuda_hmin(size_t, const int64_t *);
template ENOKI_EXPORT uint64_t* cuda_hmin(size_t, const uint64_t *);
template ENOKI_EXPORT float* cuda_hmin(size_t, const float *);
template ENOKI_EXPORT double* cuda_hmin(size_t, const double *);
template ENOKI_EXPORT void cuda_compress(size_t, const bool *, const bool *mask, bool **out_ptr, size_t *out_size);
template ENOKI_EXPORT void cuda_compress(size_t, const int32_t *, const bool *mask, int32_t **out_ptr, size_t *out_size);
template ENOKI_EXPORT void cuda_compress(size_t, const uint32_t *, const bool *mask, uint32_t **out_ptr, size_t *out_size);
template ENOKI_EXPORT void cuda_compress(size_t, const int64_t *, const bool *mask, int64_t **out_ptr, size_t *out_size);
template ENOKI_EXPORT void cuda_compress(size_t, const uint64_t *, const bool *mask, uint64_t **out_ptr, size_t *out_size);
template ENOKI_EXPORT void cuda_compress(size_t, const float *, const bool *mask, float **out_ptr, size_t *out_size);
template ENOKI_EXPORT void cuda_compress(size_t, const double *, const bool *mask, double **out_ptr, size_t *out_size);
NAMESPACE_END(enoki)
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
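// Hedged host-side sketch (hypothetical, not part of the original file): idx is laid
// out as (b, m, nsample) and pts_cnt as (b, m), so the device buffers need
// b*m*nsample and b*m ints respectively before launching one block per batch
// element, exactly as queryBallPointLauncher() does further below.
static void example_query_ball_point(int b, int n, int m, float radius, int nsample,
                                     const float *d_xyz1, const float *d_xyz2,
                                     int **d_idx_out, int **d_cnt_out) {
    cudaMalloc((void**)d_idx_out, sizeof(int) * b * m * nsample);
    cudaMalloc((void**)d_cnt_out, sizeof(int) * b * m);
    query_ball_point_gpu<<<b, 256>>>(b, n, m, radius, nsample, d_xyz1, d_xyz2,
                                     *d_idx_out, *d_cnt_out);
    cudaDeviceSynchronize();
}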
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx1 (b,m,nsample), idx2 (b,m,nsample), idx3 (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_level_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx1, int *idx2, int *idx3, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx1 += m*nsample*batch_index;
idx2 += m*nsample*batch_index;
idx3 += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt1 = 0;
int cnt2 = 0;
int cnt3 = 0;
int idxs = -1;
for (int k=0;k<n;++k) {
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d < 1e-5)
idxs = k;
//0.577
if (d<radius*0.577) {
if (cnt1 < nsample) {
if (cnt1==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx1[j*nsample+l] = k;
}
idx1[j*nsample+cnt1] = k;
cnt1+=1;
}
}
//0.816
if (d<radius*0.816 && d >= radius * 0.577) {
if (cnt2 < nsample) {
if (cnt2==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx2[j*nsample+l] = k;
}
idx2[j*nsample+cnt2] = k;
cnt2+=1;
}
}
if (d<radius && d >= radius * 0.816) {
if (cnt3 < nsample) {
if (cnt3==0) {
for (int l=0;l<nsample;++l)
idx3[j*nsample+l] = k;
}
idx3[j*nsample+cnt3] = k;
cnt3+=1;
}
}
}
idxs = -(idxs + 1);
if (cnt1 == 0) {
for (int l = 0; l < nsample; ++l)
idx1[j*nsample+l] = idxs;
}
if (cnt2 == 0) {
for (int l = 0; l < nsample; ++l)
idx2[j*nsample+l] = idxs;
}
if (cnt3 == 0) {
for (int l = 0; l < nsample; ++l)
idx3[j*nsample+l] = idxs;
}
pts_cnt[j] = cnt1+cnt2+cnt3;
}
}
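// note: the shell thresholds above are 0.577 ~= sqrt(1/3) and 0.816 ~= sqrt(2/3),
// i.e. the squared radius is split into three equal parts, giving an inner ball
// (idx1) and two surrounding shells (idx2, idx3); empty shells fall back to the
// -(center_index+1) encoding that is resolved later by group_point_gpu.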
// input: radius (1), nsample (1), tangent (b,n,m,2), group (b,n,m)
// output: idx1 (b,n,nsample), idx2 (b,n,nsample), idx3 (b,n,nsample), pts_cnt (b,n)
__global__ void query_tangent_point_level_gpu(int b, int n, int m, float radius, int nsample, const float *tangent, const int* group, int *idx1, int *idx2, int *idx3, int *pts_cnt) {
int batch_index = blockIdx.x;
tangent += n*m*2*batch_index;
group += n*m*batch_index;
idx1 += n*nsample*batch_index;
idx2 += n*nsample*batch_index;
idx3 += n*nsample*batch_index;
pts_cnt += n*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<n;j+=stride) {
int cnt1 = 0;
int cnt2 = 0;
int cnt3 = 0;
int idxs = -1;
for (int k=0;k<m;++k) {
float tx = std::abs(tangent[(j*m+k)*2]);
float ty = std::abs(tangent[(j*m+k)*2+1]);
if (tx < 1e-5 && ty < 1e-5)
idxs = group[j*m+k];
// normalise by radius, then split into inner square / corner / edge regions
tx /= radius;
ty /= radius;
if (tx <= 0.5 && ty <= 0.5) {
if (cnt1 < nsample) {
if (cnt1==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx1[j*nsample+l] = group[j*m+k];
}
idx1[j*nsample+cnt1] = group[j*m+k];
cnt1+=1;
}
}
// corner region: tx > 0.5 and ty > 0.5
else if (tx > 0.5 && ty > 0.5) {
if (cnt2 < nsample) {
if (cnt2==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx2[j*nsample+l] = group[j*m+k];
}
idx2[j*nsample+cnt2] = group[j*m+k];
cnt2+=1;
}
}
else {
if (cnt3 < nsample) {
if (cnt3==0) {
for (int l=0;l<nsample;++l)
idx3[j*nsample+l] = group[j*m+k];
}
idx3[j*nsample+cnt3] = group[j*m+k];
cnt3+=1;
}
}
}
idxs = -(idxs + 1);
if (cnt1 == 0) {
for (int l = 0; l < nsample; ++l)
idx1[j*nsample+l] = idxs;
}
if (cnt2 == 0) {
for (int l = 0; l < nsample; ++l)
idx2[j*nsample+l] = idxs;
}
if (cnt3 == 0) {
for (int l = 0; l < nsample; ++l)
idx3[j*nsample+l] = idxs;
}
pts_cnt[j] = cnt1+cnt2+cnt3;
}
}
// input: radius (1), nsample (1), tangent (b,n,m,2), group (b,n,m)
// output: idx1 (b,n,nsample), idx2 (b,n,nsample), idx3 (b,n,nsample), pts_cnt (b,n)
__global__ void query_radius_point_level_gpu(int b, int n, int m, float radius, int nsample, const float *tangent, const int* group, int *idx1, int *idx2, int *idx3, int *pts_cnt) {
int batch_index = blockIdx.x;
tangent += n*m*2*batch_index;
group += n*m*batch_index;
idx1 += n*nsample*batch_index;
idx2 += n*nsample*batch_index;
idx3 += n*nsample*batch_index;
pts_cnt += n*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<n;j+=stride) {
int cnt1 = 0;
int cnt2 = 0;
int cnt3 = 0;
int idxs = -1;
for (int k=0;k<m;++k) {
float tx = std::abs(tangent[(j*m+k)*2]);
float ty = std::abs(tangent[(j*m+k)*2+1]);
if (tx < 1e-5 && ty < 1e-5)
idxs = group[j*m+k];
//0.577
tx /= radius;
ty /= radius;
float sum_r = sqrt(tx * tx + ty * ty);
if (sum_r < 0.577) {
if (cnt1 < nsample) {
if (cnt1==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx1[j*nsample+l] = group[j*m+k];
}
idx1[j*nsample+cnt1] = group[j*m+k];
cnt1+=1;
}
}
//0.816
else if (tx > 0.816) {
if (cnt2 < nsample) {
if (cnt2==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx2[j*nsample+l] = group[j*m+k];
}
idx2[j*nsample+cnt2] = group[j*m+k];
cnt2+=1;
}
}
else {
if (cnt3 < nsample) {
if (cnt3==0) {
for (int l=0;l<nsample;++l)
idx3[j*nsample+l] = group[j*m+k];
}
idx3[j*nsample+cnt3] = group[j*m+k];
cnt3+=1;
}
}
}
idxs = -(idxs + 1);
if (cnt1 == 0) {
for (int l = 0; l < nsample; ++l)
idx1[j*nsample+l] = idxs;
}
if (cnt2 == 0) {
for (int l = 0; l < nsample; ++l)
idx2[j*nsample+l] = idxs;
}
if (cnt3 == 0) {
for (int l = 0; l < nsample; ++l)
idx3[j*nsample+l] = idxs;
}
pts_cnt[j] = cnt1+cnt2+cnt3;
}
}
// input: start_angle (1), radius (1), nsample (1), tangent (b,n,m,2), group (b,n,m)
// output: idx1..idx9 (b,n,nsample), pts_cnt (b,n)
__global__ void query_radius_angle_point_level_gpu(int b, int n, int m, float start_angle, float radius, int nsample, const float *tangent, const int* group, int *idx1, int *idx2, int *idx3, int *idx4, int *idx5, int *idx6, int *idx7, int *idx8, int *idx9, int *pts_cnt) {
int batch_index = blockIdx.x;
tangent += n*m*2*batch_index;
group += n*m*batch_index;
idx1 += n*nsample*batch_index;
idx2 += n*nsample*batch_index;
idx3 += n*nsample*batch_index;
idx4 += n*nsample*batch_index;
idx5 += n*nsample*batch_index;
idx6 += n*nsample*batch_index;
idx7 += n*nsample*batch_index;
idx8 += n*nsample*batch_index;
idx9 += n*nsample*batch_index;
pts_cnt += n*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<n;j+=stride) {
int cnt1 = 0;
int cnt2 = 0;
int cnt3 = 0;
int cnt4 = 0;
int cnt5 = 0;
int cnt6 = 0;
int cnt7 = 0;
int cnt8 = 0;
int cnt9 = 0;
int idxs = -1;
for (int k=0;k<m;++k) {
float angle = atan2(tangent[(j*m+k)*2],tangent[(j*m+k)*2+1]) / 3.141592654 * 180.0 + start_angle;
int angle_diff = (int)angle % 360 / 120;
float tx = std::abs(tangent[(j*m+k)*2]);
float ty = std::abs(tangent[(j*m+k)*2+1]);
if (tx < 1e-5 && ty < 1e-5)
idxs = group[j*m+k];
//0.577
tx /= radius;
ty /= radius;
float sum_r = sqrt(tx * tx + ty * ty);
if (sum_r < 0.577) {
if (angle_diff == 0) {
if (cnt1 < nsample) {
if (cnt1==0) {
for (int l=0;l<nsample;++l)
idx1[j*nsample+l] = group[j*m+k];
}
idx1[j*nsample+cnt1] = group[j*m+k], cnt1 += 1;
}
}
else if (angle_diff == 1) {
if (cnt2 < nsample) {
if (cnt2==0) {
for (int l=0;l<nsample;++l)
idx2[j*nsample+l] = group[j*m+k];
}
idx2[j*nsample+cnt2] = group[j*m+k], cnt2 += 1;
}
}
else {
if (cnt3 < nsample) {
if (cnt3==0) {
for (int l=0;l<nsample;++l)
idx3[j*nsample+l] = group[j*m+k];
}
idx3[j*nsample+cnt3] = group[j*m+k], cnt3 += 1;
}
}
}
//0.816
else if (tx > 0.816) {
if (angle_diff == 0) {
if (cnt4 < nsample) {
if (cnt4==0) {
for (int l=0;l<nsample;++l)
idx4[j*nsample+l] = group[j*m+k];
}
idx4[j*nsample+cnt4] = group[j*m+k], cnt4 += 1;
}
}
else if (angle_diff == 1) {
if (cnt5 < nsample) {
if (cnt5==0) {
for (int l=0;l<nsample;++l)
idx5[j*nsample+l] = group[j*m+k];
}
idx5[j*nsample+cnt5] = group[j*m+k], cnt5 += 1;
}
}
else {
if (cnt6 < nsample) {
if (cnt6==0) {
for (int l=0;l<nsample;++l)
idx6[j*nsample+l] = group[j*m+k];
}
idx6[j*nsample+cnt6] = group[j*m+k], cnt6 += 1;
}
}
}
else {
if (angle_diff == 0) {
if (cnt7 < nsample) {
if (cnt7==0) {
for (int l=0;l<nsample;++l)
idx7[j*nsample+l] = group[j*m+k];
}
idx7[j*nsample+cnt7] = group[j*m+k], cnt7 += 1;
}
}
else if (angle_diff == 1) {
if (cnt8 < nsample) {
if (cnt8==0) {
for (int l=0;l<nsample;++l)
idx8[j*nsample+l] = group[j*m+k];
}
idx8[j*nsample+cnt8] = group[j*m+k], cnt8 += 1;
}
}
else {
if (cnt9 < nsample) {
if (cnt9==0) {
for (int l=0;l<nsample;++l)
idx9[j*nsample+l] = group[j*m+k];
}
idx9[j*nsample+cnt9] = group[j*m+k], cnt9 += 1;
}
}
}
}
idxs = -(idxs + 1);
if (cnt1 == 0) {
for (int l = 0; l < nsample; ++l)
idx1[j*nsample+l] = idxs;
}
if (cnt2 == 0) {
for (int l = 0; l < nsample; ++l)
idx2[j*nsample+l] = idxs;
}
if (cnt3 == 0) {
for (int l = 0; l < nsample; ++l)
idx3[j*nsample+l] = idxs;
}
if (cnt4 == 0) {
for (int l = 0; l < nsample; ++l)
idx4[j*nsample+l] = idxs;
}
if (cnt5 == 0) {
for (int l = 0; l < nsample; ++l)
idx5[j*nsample+l] = idxs;
}
if (cnt6 == 0) {
for (int l = 0; l < nsample; ++l)
idx6[j*nsample+l] = idxs;
}
if (cnt7 == 0) {
for (int l = 0; l < nsample; ++l)
idx7[j*nsample+l] = idxs;
}
if (cnt8 == 0) {
for (int l = 0; l < nsample; ++l)
idx8[j*nsample+l] = idxs;
}
if (cnt9 == 0) {
for (int l = 0; l < nsample; ++l)
idx9[j*nsample+l] = idxs;
}
pts_cnt[j] = cnt1+cnt2+cnt3+cnt4+cnt5+cnt6+cnt7+cnt8+cnt9;
}
}
// input: start_angle (1), radius (1), nsample (1), tangent (b,n,m,2), group (b,n,m)
// output: idx1..idx9 (b,n,nsample), pts_cnt (b,n)
__global__ void query_tangent9_point_level_gpu(int b, int n, int m, float start_angle, float radius, int nsample, const float *tangent, const int* group, int *idx1, int *idx2, int *idx3, int *idx4, int *idx5, int *idx6, int *idx7, int *idx8, int *idx9, int *pts_cnt) {
int batch_index = blockIdx.x;
tangent += n*m*2*batch_index;
group += n*m*batch_index;
idx1 += n*nsample*batch_index;
idx2 += n*nsample*batch_index;
idx3 += n*nsample*batch_index;
idx4 += n*nsample*batch_index;
idx5 += n*nsample*batch_index;
idx6 += n*nsample*batch_index;
idx7 += n*nsample*batch_index;
idx8 += n*nsample*batch_index;
idx9 += n*nsample*batch_index;
pts_cnt += n*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<n;j+=stride) {
int cnt1 = 0;
int cnt2 = 0;
int cnt3 = 0;
int cnt4 = 0;
int cnt5 = 0;
int cnt6 = 0;
int cnt7 = 0;
int cnt8 = 0;
int cnt9 = 0;
int idxs = -1;
for (int k=0;k<m;++k) {
float tx = tangent[(j*m+k)*2];
float ty = tangent[(j*m+k)*2+1];
if (abs(tx) < 1e-5 && abs(ty) < 1e-5)
idxs = group[j*m+k];
// normalise by radius, then bin into a 3x3 grid of cells by (tx, ty)
tx /= radius;
ty /= radius;
int angle_diff = 0;
if (tx > -0.5)
angle_diff = 1;
if (tx > 0.5)
angle_diff = 2;
if (ty < -0.5) {
if (angle_diff == 0) {
if (cnt1 < nsample) {
if (cnt1==0) {
for (int l=0;l<nsample;++l)
idx1[j*nsample+l] = group[j*m+k];
}
idx1[j*nsample+cnt1] = group[j*m+k], cnt1 += 1;
}
}
else if (angle_diff == 1) {
if (cnt2 < nsample) {
if (cnt2==0) {
for (int l=0;l<nsample;++l)
idx2[j*nsample+l] = group[j*m+k];
}
idx2[j*nsample+cnt2] = group[j*m+k], cnt2 += 1;
}
}
else {
if (cnt3 < nsample) {
if (cnt3==0) {
for (int l=0;l<nsample;++l)
idx3[j*nsample+l] = group[j*m+k];
}
idx3[j*nsample+cnt3] = group[j*m+k], cnt3 += 1;
}
}
}
// top row of the 3x3 grid: ty > 0.5
else if (ty > 0.5) {
if (angle_diff == 0) {
if (cnt4 < nsample) {
if (cnt4==0) {
for (int l=0;l<nsample;++l)
idx4[j*nsample+l] = group[j*m+k];
}
idx4[j*nsample+cnt4] = group[j*m+k], cnt4 += 1;
}
}
else if (angle_diff == 1) {
if (cnt5 < nsample) {
if (cnt5==0) {
for (int l=0;l<nsample;++l)
idx5[j*nsample+l] = group[j*m+k];
}
idx5[j*nsample+cnt5] = group[j*m+k], cnt5 += 1;
}
}
else {
if (cnt6 < nsample) {
if (cnt6==0) {
for (int l=0;l<nsample;++l)
idx6[j*nsample+l] = group[j*m+k];
}
idx6[j*nsample+cnt6] = group[j*m+k], cnt6 += 1;
}
}
}
else {
if (angle_diff == 0) {
if (cnt7 < nsample) {
if (cnt7==0) {
for (int l=0;l<nsample;++l)
idx7[j*nsample+l] = group[j*m+k];
}
idx7[j*nsample+cnt7] = group[j*m+k], cnt7 += 1;
}
}
else if (angle_diff == 1) {
if (cnt8 < nsample) {
if (cnt8==0) {
for (int l=0;l<nsample;++l)
idx8[j*nsample+l] = group[j*m+k];
}
idx8[j*nsample+cnt8] = group[j*m+k], cnt8 += 1;
}
}
else {
if (cnt9 < nsample) {
if (cnt9==0) {
for (int l=0;l<nsample;++l)
idx9[j*nsample+l] = group[j*m+k];
}
idx9[j*nsample+cnt9] = group[j*m+k], cnt9 += 1;
}
}
}
}
idxs = -(idxs + 1);
if (cnt1 == 0) {
for (int l = 0; l < nsample; ++l)
idx1[j*nsample+l] = idxs;
}
if (cnt2 == 0) {
for (int l = 0; l < nsample; ++l)
idx2[j*nsample+l] = idxs;
}
if (cnt3 == 0) {
for (int l = 0; l < nsample; ++l)
idx3[j*nsample+l] = idxs;
}
if (cnt4 == 0) {
for (int l = 0; l < nsample; ++l)
idx4[j*nsample+l] = idxs;
}
if (cnt5 == 0) {
for (int l = 0; l < nsample; ++l)
idx5[j*nsample+l] = idxs;
}
if (cnt6 == 0) {
for (int l = 0; l < nsample; ++l)
idx6[j*nsample+l] = idxs;
}
if (cnt7 == 0) {
for (int l = 0; l < nsample; ++l)
idx7[j*nsample+l] = idxs;
}
if (cnt8 == 0) {
for (int l = 0; l < nsample; ++l)
idx8[j*nsample+l] = idxs;
}
if (cnt9 == 0) {
for (int l = 0; l < nsample; ++l)
idx9[j*nsample+l] = idxs;
}
pts_cnt[j] = cnt1+cnt2+cnt3+cnt4+cnt5+cnt6+cnt7+cnt8+cnt9;
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
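// note: a negative index ii encodes -(center_index+1), the fallback written by the
// query_*_level kernels above when a region is empty; 'relative' selects whether
// such slots are zero-filled (relative == 0) or copy the center point's features.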
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out, int relative) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
if (ii < 0) {
if (relative == 0)
out[j * nsample * c + k * c + l] = 0;
else
out[j * nsample * c + k * c + l] = points[(-ii-1)*c+l];
} else {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points, int relative) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
if (ii < 0) {
if (relative == 1)
atomicAdd(&grad_points[(-ii-1)*c+l], grad_out[j*nsample*c+k*c+l]);
} else {
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
}
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
__global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
int batch_index = blockIdx.x;
dist+=m*n*batch_index;
outi+=m*n*batch_index;
out+=m*n*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// copy from dist to dist_out
for (int j=index;j<m;j+=stride) {
for (int s=0;s<n;++s) {
out[j*n+s] = dist[j*n+s];
outi[j*n+s] = s;
}
}
float *p_dist;
for (int j=index;j<m;j+=stride) {
p_dist = out+j*n;
// selection sort for the first k elements
for (int s=0;s<k;++s) {
int min=s;
// find the min
for (int t=s+1;t<n;++t) {
if (p_dist[t]<p_dist[min]) {
min = t;
}
}
// swap min-th and i-th element
if (min!=s) {
float tmp = p_dist[min];
p_dist[min] = p_dist[s];
p_dist[s] = tmp;
int tmpi = outi[j*n+min];
outi[j*n+min] = outi[j*n+s];
outi[j*n+s] = tmpi;
}
}
}
}
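// note: this is a partial selection sort -- only the first k entries of each row of
// out/outi end up sorted in ascending order of distance (O(k*n) work per row), which
// is all the k-NN grouping step needs; entries beyond k are left partially shuffled.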
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
//cudaDeviceSynchronize();
}
void queryBallPointLevelLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx1, int* idx2, int* idx3, int *pts_cnt) {
query_ball_point_level_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx1,idx2,idx3,pts_cnt);
//cudaDeviceSynchronize();
}
void queryTangentPointLevelLauncher(int b, int n, int m, float radius, int nsample, const float *tangent, const int* group, int *idx1, int* idx2, int* idx3, int *pts_cnt) {
query_tangent_point_level_gpu<<<b,256>>>(b,n,m,radius,nsample,tangent,group,idx1,idx2,idx3,pts_cnt);
//cudaDeviceSynchronize();
}
void queryRadiusPointLevelLauncher(int b, int n, int m, float radius, int nsample, const float *tangent, const int* group, int *idx1, int* idx2, int* idx3, int *pts_cnt) {
query_radius_point_level_gpu<<<b,256>>>(b,n,m,radius,nsample,tangent,group,idx1,idx2,idx3,pts_cnt);
//cudaDeviceSynchronize();
}
void queryRadiusAnglePointLevelLauncher(int b, int n, int m, float start_angle, float radius, int nsample, const float *tangent, const int* group,
int *idx1, int *idx2, int *idx3, int* idx4, int* idx5, int* idx6, int* idx7, int* idx8, int* idx9, int *pts_cnt)
{
query_radius_angle_point_level_gpu<<<b,256>>>(b,n,m,start_angle,radius,nsample,tangent,group,idx1,idx2,idx3,idx4,idx5,idx6,idx7,idx8,idx9,pts_cnt);
}
void queryTangent9PointLevelLauncher(int b, int n, int m, float start_angle, float radius, int nsample, const float *tangent, const int* group,
int *idx1, int *idx2, int *idx3, int* idx4, int* idx5, int* idx6, int* idx7, int* idx8, int* idx9, int *pts_cnt)
{
query_tangent9_point_level_gpu<<<b,256>>>(b,n,m,start_angle,radius,nsample,tangent,group,idx1,idx2,idx3,idx4,idx5,idx6,idx7,idx8,idx9,pts_cnt);
}
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
selection_sort_gpu<<<b,256>>>(b,n,m,k,dist,outi,out);
//cudaDeviceSynchronize();
}
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out, int relative){
group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out,relative);
//cudaDeviceSynchronize();
}
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points, int relative){
group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points,relative);
//group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
//cudaDeviceSynchronize();
}
#include "affinity.cuh"
__constant__ int g_anDispRange[AFF_MAP_DISP_RANGE_MAX];
namespace impl {
__device__ inline void l2dist_lu_exec(const device::DistCalcLUT* aDistCalcLUTs, float* aDescCalcLUT, float* aAffArray, int nOffsets, int nDescSize) {
const int nThreads = blockDim.x;
const int nThreadIdx = threadIdx.x;
const int nStepPerDesc = __float2int_ru(float(nDescSize)/nThreads);
assert(nStepPerDesc>0);
const int nDescSize_LUT = nStepPerDesc*nThreads;
assert(nDescSize_LUT>=nDescSize);
for(int nOffsetIdx=0; nOffsetIdx<nOffsets; ++nOffsetIdx) {
const device::DistCalcLUT& aDistCalcLUT = aDistCalcLUTs[nOffsetIdx];
if(aDistCalcLUT.aDesc1) {
for(int nStep=0; nStep<nStepPerDesc; ++nStep) {
const int nDescBinIdx = nThreads*nStep + nThreadIdx;
assert(nDescBinIdx<nDescSize_LUT);
if(nDescBinIdx<nDescSize) {
const float fDescBinDiff = aDistCalcLUT.aDesc1[nDescBinIdx]-aDistCalcLUT.aDesc2[nDescBinIdx];
aDescCalcLUT[nDescBinIdx] = fDescBinDiff*fDescBinDiff;
}
else
aDescCalcLUT[nDescBinIdx] = 0.0f;
}
__syncthreads();
assert((nDescSize_LUT%nThreads)==0);
assert(lv::isPow2(nThreads));
if(nDescSize_LUT>nThreads) {
assert(nDescSize_LUT>=nThreads*2);
for(int nStep=nThreads; nStep<nDescSize_LUT; nStep+=nThreads)
aDescCalcLUT[nThreadIdx] += aDescCalcLUT[nThreadIdx+nStep];
__syncthreads();
for(int nStep=nThreads/2; nStep>0; nStep>>=1) {
// barrier-less reduction works in sc desc impl, but not here...?
float tmp = aDescCalcLUT[nThreadIdx+nStep];
__syncthreads();
aDescCalcLUT[nThreadIdx] += tmp;
__syncthreads();
}
}
else {
assert(nDescSize_LUT==nThreads);
for(int nStep=nThreads/2; nStep>0; nStep>>=1) {
if(nThreadIdx<nStep)
aDescCalcLUT[nThreadIdx] += aDescCalcLUT[nThreadIdx+nStep];
__syncthreads();
}
}
if(nThreadIdx==0)
aAffArray[nOffsetIdx] = sqrtf(aDescCalcLUT[0]);
}
}
}
__global__ void compute_desc_affinity_l2(const cv::cuda::PtrStep<float> oDescMap1,
const cv::cuda::PtrStep<float> oDescMap2,
cv::cuda::PtrStep<float> oAffinityMap,
int nOffsets, int nDescSize) {
assert((blockDim.x%warpSize)==0 && blockDim.y==1 && blockDim.z==1 && gridDim.z==1);
assert(nDescSize>0 && nOffsets>0 && nOffsets<=blockDim.x && nOffsets<=AFF_MAP_DISP_RANGE_MAX);
const int nCols = gridDim.x;
const int nRowIdx = blockIdx.y;
const int nColIdx = blockIdx.x;
const int nThreads = blockDim.x;
const int nThreadIdx = threadIdx.x;
const int nStepPerPixel = __float2int_ru(float(nOffsets)/nThreads);
assert(nStepPerPixel>0);
const int nOffsets_LUT = nStepPerPixel*nThreads;
assert(nOffsets_LUT>=nOffsets);
float* aAffArray = oAffinityMap.ptr(nRowIdx*nCols+nColIdx);
extern __shared__ int aTmpCommon_l2[];
device::DistCalcLUT* aDistCalcLUTs = (device::DistCalcLUT*)aTmpCommon_l2;
for(int nStep=0; nStep<nStepPerPixel; ++nStep) {
const int nOffsetIdx = nThreads*nStep + nThreadIdx;
assert(nOffsetIdx<nOffsets_LUT);
device::DistCalcLUT& aDistCalcLUT = aDistCalcLUTs[nOffsetIdx];
aDistCalcLUT.aDesc1 = nullptr;
if(nOffsetIdx<nOffsets) {
const int nOffsetColIdx = nColIdx+g_anDispRange[nOffsetIdx];
if(nOffsetColIdx>=0 && nOffsetColIdx<nCols) {
aDistCalcLUT.aDesc1 = oDescMap1.ptr(nRowIdx*nCols+nColIdx);
aDistCalcLUT.aDesc2 = oDescMap2.ptr(nRowIdx*nCols+nOffsetColIdx);
}
else
aAffArray[nOffsetIdx] = -1.0f; // default value for OOB pixels
}
}
__syncthreads();
float* aDescCalcLUT = (float*)(aDistCalcLUTs+nOffsets_LUT);
l2dist_lu_exec(aDistCalcLUTs,aDescCalcLUT,aAffArray,nOffsets,nDescSize);
}
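// note (inferred from the kernel body above, not stated explicitly in this file): the
// dynamic shared memory supplied at launch must cover both scratch regions, i.e.
//   nOffsets_LUT * sizeof(device::DistCalcLUT) + nDescSize_LUT * sizeof(float)
// where nOffsets_LUT  = ceil(nOffsets  / blockDim.x) * blockDim.x and
//       nDescSize_LUT = ceil(nDescSize / blockDim.x) * blockDim.x.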
__global__ void compute_desc_affinity_l2_roi(const cv::cuda::PtrStep<float> oDescMap1,
const cv::cuda::PtrStep<uchar> oROI1,
const cv::cuda::PtrStep<float> oDescMap2,
const cv::cuda::PtrStep<uchar> oROI2,
cv::cuda::PtrStep<float> oAffinityMap,
int nOffsets, int nDescSize) {
assert((blockDim.x%warpSize)==0 && blockDim.y==1 && blockDim.z==1 && gridDim.z==1);
assert(nDescSize>0 && nOffsets>0 && nOffsets<=blockDim.x && nOffsets<=AFF_MAP_DISP_RANGE_MAX);
const int nCols = gridDim.x;
const int nRowIdx = blockIdx.y;
const int nColIdx = blockIdx.x;
const int nThreads = blockDim.x;
const int nThreadIdx = threadIdx.x;
const int nStepPerPixel = __float2int_ru(float(nOffsets)/nThreads);
assert(nStepPerPixel>0);
const int nOffsets_LUT = nStepPerPixel*nThreads;
assert(nOffsets_LUT>=nOffsets);
float* aAffArray = oAffinityMap.ptr(nRowIdx*nCols+nColIdx);
if(oROI1(nRowIdx,nColIdx)==0) {
for(int nStep=0; nStep<nStepPerPixel; ++nStep) {
const int nOffsetIdx = nThreads*nStep+nThreadIdx;
assert(nOffsetIdx<nOffsets_LUT);
if(nOffsetIdx<nOffsets)
aAffArray[nOffsetIdx] = -1.0f; // default value for pixels outside the reference ROI
}
return;
}
const uchar* aROI2Array = oROI2.ptr(nRowIdx);
extern __shared__ int aTmpCommon_l2_roi[];
device::DistCalcLUT* aDistCalcLUTs = (device::DistCalcLUT*)aTmpCommon_l2_roi;
for(int nStep=0; nStep<nStepPerPixel; ++nStep) {
const int nOffsetIdx = nThreads*nStep + nThreadIdx;
assert(nOffsetIdx<nOffsets_LUT);
device::DistCalcLUT& aDistCalcLUT = aDistCalcLUTs[nOffsetIdx];
aDistCalcLUT.aDesc1 = nullptr;
if(nOffsetIdx<nOffsets) {
const int nOffsetColIdx = nColIdx+g_anDispRange[nOffsetIdx];
if(nOffsetColIdx>=0 && nOffsetColIdx<nCols && aROI2Array[nOffsetColIdx]!=0) {
aDistCalcLUT.aDesc1 = oDescMap1.ptr(nRowIdx*nCols+nColIdx);
aDistCalcLUT.aDesc2 = oDescMap2.ptr(nRowIdx*nCols+nOffsetColIdx);
}
else
aAffArray[nOffsetIdx] = -1.0f; // default value for OOB pixels
}
}
__syncthreads();
float* aDescCalcLUT = (float*)(aDistCalcLUTs+nOffsets_LUT);
l2dist_lu_exec(aDistCalcLUTs,aDescCalcLUT,aAffArray,nOffsets,nDescSize);
}
__global__ void compute_desc_affinity_patch(const cv::cuda::PtrStep<float> oRawAffinityMap,
cv::cuda::PtrStep<float> oAffinityMap, int nPatchSize) {
assert((blockDim.x%warpSize)==0 && blockDim.y==1 && blockDim.z==1);
assert(nPatchSize*nPatchSize<=blockDim.x);
const int nRows = gridDim.y;
const int nCols = gridDim.x;
const int nRowIdx = blockIdx.y;
const int nColIdx = blockIdx.x;
const int nOffsetIdx = blockIdx.z;
const int nThreads = blockDim.x;
const int nThreadIdx = threadIdx.x;
const int nPatchRadius = nPatchSize/2;
const int nPatchRowIdx = nThreadIdx/nPatchSize;
const int nOffsetRowIdx = nRowIdx+nPatchRowIdx-nPatchRadius;
const int nOffsetColIdx = nColIdx+nThreadIdx%nPatchSize-nPatchRadius;
const bool bValid = nPatchRowIdx<nPatchSize && nOffsetRowIdx>=0 && nOffsetRowIdx<nRows && nOffsetColIdx>=0 && nOffsetColIdx<nCols;
extern __shared__ int aTmpCommon_patch[];
int* anCounts = aTmpCommon_patch;
float* afAffinities = (float*)(anCounts+nThreads);
float fRawAffinity;
if(bValid && (fRawAffinity = oRawAffinityMap(nOffsetRowIdx*nCols+nOffsetColIdx,nOffsetIdx))!=-1.0f) {
anCounts[nThreadIdx] = 1;
afAffinities[nThreadIdx] = fRawAffinity;
}
else {
anCounts[nThreadIdx] = 0;
afAffinities[nThreadIdx] = 0.0f;
}
assert(lv::isPow2(nThreads));
for(int nStep=nThreads/2; nStep>0; nStep>>=1) {
const bool bInRange = (nThreadIdx+nStep)<nThreads;
__syncthreads();
const int nCurrCount = bInRange?anCounts[nThreadIdx+nStep]:0;
const float fCurrAff = bInRange?afAffinities[nThreadIdx+nStep]:0.0f;
__syncthreads();
anCounts[nThreadIdx] += nCurrCount;
afAffinities[nThreadIdx] += fCurrAff;
}
if(nThreadIdx==0)
oAffinityMap(nRowIdx*nCols+nColIdx,nOffsetIdx) = anCounts[0]?(afAffinities[0]/anCounts[0]):-1.0f;
}
} // namespace impl
/////////////////////////////////////////////////////////////////////////
void device::compute_desc_affinity_l2(const lv::cuda::KernelParams& oKParams, const cv::cuda::PtrStep<float> oDescMap1, const cv::cuda::PtrStep<float> oDescMap2, cv::cuda::PtrStep<float> oAffinityMap, int nOffsets, int nDescSize) {
cudaKernelWrap(compute_desc_affinity_l2,oKParams,oDescMap1,oDescMap2,oAffinityMap,nOffsets,nDescSize);
}
void device::compute_desc_affinity_l2_roi(const lv::cuda::KernelParams& oKParams, const cv::cuda::PtrStep<float> oDescMap1, const cv::cuda::PtrStep<uchar> oROI1, const cv::cuda::PtrStep<float> oDescMap2, const cv::cuda::PtrStep<uchar> oROI2, cv::cuda::PtrStep<float> oAffinityMap, int nOffsets, int nDescSize) {
cudaKernelWrap(compute_desc_affinity_l2_roi,oKParams,oDescMap1,oROI1,oDescMap2,oROI2,oAffinityMap,nOffsets,nDescSize);
}
void device::compute_desc_affinity_patch(const lv::cuda::KernelParams& oKParams, const cv::cuda::PtrStep<float> oRawAffinityMap, cv::cuda::PtrStep<float> oAffinityMap, int nPatchSize) {
cudaKernelWrap(compute_desc_affinity_patch,oKParams,oRawAffinityMap,oAffinityMap,nPatchSize);
}
void device::setDisparityRange(const std::array<int,AFF_MAP_DISP_RANGE_MAX>& aDispRange) {
cudaErrorCheck_(cudaMemcpyToSymbol(g_anDispRange,aDispRange.data(),sizeof(int)*aDispRange.size()));
}
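// The kernel below advances the cell model one time step with a Rush-Larsen /
// exponential-Euler update: every state y with dy/dt = f(y) is stepped as
//   y_{n+1} = y_n + (exp(dt*a) - 1) * f(y_n) / a,   a ~= df/dy (the *_linearized terms),
// which for the gating variables (dy/dt = (y_inf - y)/tau, a = -1/tau) is the exact
// update y_inf + (y_n - y_inf)*exp(-dt/tau); the fabs(a) > 1e-8 tests fall back to a
// plain forward-Euler step dt*f(y_n) when the linearisation is numerically negligible.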
__global__
void k_forward_rush_larsen(double* states, const double t, const double dt,
const double* parameters, const int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
// Assign states
const double Xr1 = states[n * STATE_Xr1 + i];
const double Xr2 = states[n * STATE_Xr2 + i];
const double Xs = states[n * STATE_Xs + i];
const double m = states[n * STATE_m + i];
const double h = states[n * STATE_h + i];
const double j = states[n * STATE_j + i];
const double d = states[n * STATE_d + i];
const double f = states[n * STATE_f + i];
const double f2 = states[n * STATE_f2 + i];
const double fCass = states[n * STATE_fCass + i];
const double s = states[n * STATE_s + i];
const double r = states[n * STATE_r + i];
const double Ca_i = states[n * STATE_Ca_i + i];
const double R_prime = states[n * STATE_R_prime + i];
const double Ca_SR = states[n * STATE_Ca_SR + i];
const double Ca_ss = states[n * STATE_Ca_ss + i];
const double Na_i = states[n * STATE_Na_i + i];
const double V = states[n * STATE_V + i];
const double K_i = states[n * STATE_K_i + i];
// Assign parameters
const double P_kna = parameters[n * PARAM_P_kna + i];
const double g_K1 = parameters[n * PARAM_g_K1 + i];
const double g_Kr = parameters[n * PARAM_g_Kr + i];
const double g_Ks = parameters[n * PARAM_g_Ks + i];
const double g_Na = parameters[n * PARAM_g_Na + i];
const double g_bna = parameters[n * PARAM_g_bna + i];
const double g_CaL = parameters[n * PARAM_g_CaL + i];
const double g_bca = parameters[n * PARAM_g_bca + i];
const double g_to = parameters[n * PARAM_g_to + i];
const double K_mNa = parameters[n * PARAM_K_mNa + i];
const double K_mk = parameters[n * PARAM_K_mk + i];
const double P_NaK = parameters[n * PARAM_P_NaK + i];
const double K_NaCa = parameters[n * PARAM_K_NaCa + i];
const double K_sat = parameters[n * PARAM_K_sat + i];
const double Km_Ca = parameters[n * PARAM_Km_Ca + i];
const double Km_Nai = parameters[n * PARAM_Km_Nai + i];
const double alpha = parameters[n * PARAM_alpha + i];
const double gamma = parameters[n * PARAM_gamma + i];
const double K_pCa = parameters[n * PARAM_K_pCa + i];
const double g_pCa = parameters[n * PARAM_g_pCa + i];
const double g_pK = parameters[n * PARAM_g_pK + i];
const double Buf_c = parameters[n * PARAM_Buf_c + i];
const double Buf_sr = parameters[n * PARAM_Buf_sr + i];
const double Buf_ss = parameters[n * PARAM_Buf_ss + i];
const double Ca_o = parameters[n * PARAM_Ca_o + i];
const double EC = parameters[n * PARAM_EC + i];
const double K_buf_c = parameters[n * PARAM_K_buf_c + i];
const double K_buf_sr = parameters[n * PARAM_K_buf_sr + i];
const double K_buf_ss = parameters[n * PARAM_K_buf_ss + i];
const double K_up = parameters[n * PARAM_K_up + i];
const double V_leak = parameters[n * PARAM_V_leak + i];
const double V_rel = parameters[n * PARAM_V_rel + i];
const double V_sr = parameters[n * PARAM_V_sr + i];
const double V_ss = parameters[n * PARAM_V_ss + i];
const double V_xfer = parameters[n * PARAM_V_xfer + i];
const double Vmax_up = parameters[n * PARAM_Vmax_up + i];
const double k1_prime = parameters[n * PARAM_k1_prime + i];
const double k2_prime = parameters[n * PARAM_k2_prime + i];
const double k3 = parameters[n * PARAM_k3 + i];
const double k4 = parameters[n * PARAM_k4 + i];
const double max_sr = parameters[n * PARAM_max_sr + i];
const double min_sr = parameters[n * PARAM_min_sr + i];
const double Na_o = parameters[n * PARAM_Na_o + i];
const double Cm = parameters[n * PARAM_Cm + i];
const double F = parameters[n * PARAM_F + i];
const double R = parameters[n * PARAM_R + i];
const double T = parameters[n * PARAM_T + i];
const double V_c = parameters[n * PARAM_V_c + i];
const double stim_amplitude = parameters[n * PARAM_stim_amplitude + i];
const double stim_duration = parameters[n * PARAM_stim_duration + i];
const double stim_period = parameters[n * PARAM_stim_period + i];
const double stim_start = parameters[n * PARAM_stim_start + i];
const double K_o = parameters[n * PARAM_K_o + i];
// Expressions for the Reversal potentials component
const double E_Na = R*T*log(Na_o/Na_i)/F;
const double E_K = R*T*log(K_o/K_i)/F;
const double E_Ks = R*T*log((K_o + Na_o*P_kna)/(P_kna*Na_i + K_i))/F;
const double E_Ca = 0.5*R*T*log(Ca_o/Ca_i)/F;
// Expressions for the Inward rectifier potassium current component
const double alpha_K1 = 0.1/(1. + 6.14421235332821e-6*exp(0.06*V -
0.06*E_K));
const double beta_K1 = (0.367879441171442*exp(0.1*V - 0.1*E_K) +
3.06060402008027*exp(0.0002*V - 0.0002*E_K))/(1. + exp(0.5*E_K
- 0.5*V));
const double xK1_inf = alpha_K1/(alpha_K1 + beta_K1);
const double i_K1 = 0.430331482911935*g_K1*sqrt(K_o)*(-E_K + V)*xK1_inf;
// Expressions for the Rapid time dependent potassium current component
const double i_Kr = 0.430331482911935*g_Kr*sqrt(K_o)*(-E_K + V)*Xr1*Xr2;
// Expressions for the Xr1 gate component
const double xr1_inf = 1.0/(1. + exp(-26./7. - V/7.));
const double alpha_xr1 = 450./(1. + exp(-9./2. - V/10.));
const double beta_xr1 = 6./(1. +
13.5813245225782*exp(0.0869565217391304*V));
const double tau_xr1 = alpha_xr1*beta_xr1;
const double dXr1_dt = (-Xr1 + xr1_inf)/tau_xr1;
const double dXr1_dt_linearized = -1./tau_xr1;
states[n * STATE_Xr1 + i] = (fabs(dXr1_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dXr1_dt_linearized))*dXr1_dt/dXr1_dt_linearized : dt*dXr1_dt)
+ Xr1;
// Expressions for the Xr2 gate component
const double xr2_inf = 1.0/(1. + exp(11./3. + V/24.));
const double alpha_xr2 = 3./(1. + exp(-3. - V/20.));
const double beta_xr2 = 1.12/(1. + exp(-3. + V/20.));
const double tau_xr2 = alpha_xr2*beta_xr2;
const double dXr2_dt = (-Xr2 + xr2_inf)/tau_xr2;
const double dXr2_dt_linearized = -1./tau_xr2;
states[n * STATE_Xr2 + i] = (fabs(dXr2_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dXr2_dt_linearized))*dXr2_dt/dXr2_dt_linearized : dt*dXr2_dt)
+ Xr2;
// Expressions for the Slow time dependent potassium current component
const double i_Ks = g_Ks*(Xs*Xs)*(-E_Ks + V);
// Expressions for the Xs gate component
const double xs_inf = 1.0/(1. + exp(-5./14. - V/14.));
const double alpha_xs = 1400./sqrt(1. + exp(5./6. - V/6.));
const double beta_xs = 1.0/(1. + exp(-7./3. + V/15.));
const double tau_xs = 80. + alpha_xs*beta_xs;
const double dXs_dt = (-Xs + xs_inf)/tau_xs;
const double dXs_dt_linearized = -1./tau_xs;
states[n * STATE_Xs + i] = (fabs(dXs_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dXs_dt_linearized))*dXs_dt/dXs_dt_linearized : dt*dXs_dt) +
Xs;
// Expressions for the Fast sodium current component
const double i_Na = g_Na*(m*m*m)*(-E_Na + V)*h*j;
// Expressions for the m gate component
const double m_inf = 1.0/((1. +
0.00184221158116513*exp(-0.110741971207087*V))*(1. +
0.00184221158116513*exp(-0.110741971207087*V)));
const double alpha_m = 1.0/(1. + exp(-12. - V/5.));
const double beta_m = 0.1/(1. + exp(7. + V/5.)) + 0.1/(1. +
exp(-1./4. + V/200.));
const double tau_m = alpha_m*beta_m;
const double dm_dt = (-m + m_inf)/tau_m;
const double dm_dt_linearized = -1./tau_m;
states[n * STATE_m + i] = (fabs(dm_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dm_dt_linearized))*dm_dt/dm_dt_linearized : dt*dm_dt) + m;
// Expressions for the h gate component
const double h_inf = 1.0/((1. +
15212.5932856544*exp(0.134589502018843*V))*(1. +
15212.5932856544*exp(0.134589502018843*V)));
const double alpha_h = (V < -40. ?
4.43126792958051e-7*exp(-0.147058823529412*V) : 0.);
const double beta_h = (V < -40. ? 310000.*exp(0.3485*V) +
2.7*exp(0.079*V) : 0.77/(0.13 +
0.0497581410839387*exp(-0.0900900900900901*V)));
const double tau_h = 1.0/(alpha_h + beta_h);
const double dh_dt = (-h + h_inf)/tau_h;
const double dh_dt_linearized = -1./tau_h;
states[n * STATE_h + i] = (fabs(dh_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dh_dt_linearized))*dh_dt/dh_dt_linearized : dt*dh_dt) + h;
// Expressions for the j gate component
const double j_inf = 1.0/((1. +
15212.5932856544*exp(0.134589502018843*V))*(1. +
15212.5932856544*exp(0.134589502018843*V)));
const double alpha_j = (V < -40. ? (37.78 + V)*(-25428.*exp(0.2444*V)
- 6.948e-6*exp(-0.04391*V))/(1. + 50262745825.954*exp(0.311*V))
: 0.);
const double beta_j = (V < -40. ? 0.02424*exp(-0.01052*V)/(1. +
0.00396086833990426*exp(-0.1378*V)) : 0.6*exp(0.057*V)/(1. +
0.0407622039783662*exp(-0.1*V)));
const double tau_j = 1.0/(alpha_j + beta_j);
const double dj_dt = (-j + j_inf)/tau_j;
const double dj_dt_linearized = -1./tau_j;
states[n * STATE_j + i] = (fabs(dj_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dj_dt_linearized))*dj_dt/dj_dt_linearized : dt*dj_dt) + j;
// Expressions for the Sodium background current component
const double i_b_Na = g_bna*(-E_Na + V);
// Expressions for the L_type Ca current component
const double V_eff = (fabs(-15. + V) < 0.01 ? 0.01 : -15. + V);
const double i_CaL = 4.*g_CaL*(F*F)*(-Ca_o +
0.25*Ca_ss*exp(2.*F*V_eff/(R*T)))*V_eff*d*f*f2*fCass/(R*T*(-1. +
exp(2.*F*V_eff/(R*T))));
// Expressions for the d gate component
const double d_inf = 1.0/(1. +
0.344153786865412*exp(-0.133333333333333*V));
const double alpha_d = 0.25 + 1.4/(1. + exp(-35./13. - V/13.));
const double beta_d = 1.4/(1. + exp(1. + V/5.));
const double gamma_d = 1.0/(1. + exp(5./2. - V/20.));
const double tau_d = alpha_d*beta_d + gamma_d;
const double dd_dt = (-d + d_inf)/tau_d;
const double dd_dt_linearized = -1./tau_d;
states[n * STATE_d + i] = (fabs(dd_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dd_dt_linearized))*dd_dt/dd_dt_linearized : dt*dd_dt) + d;
// Expressions for the f gate component
const double f_inf = 1.0/(1. + exp(20./7. + V/7.));
const double tau_f = 20. + 180./(1. + exp(3. + V/10.)) + 200./(1. +
exp(13./10. - V/10.)) + 1102.5*exp(-((27. + V)*(27. + V))/225.);
const double df_dt = (-f + f_inf)/tau_f;
const double df_dt_linearized = -1./tau_f;
states[n * STATE_f + i] = (fabs(df_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*df_dt_linearized))*df_dt/df_dt_linearized : dt*df_dt) + f;
// Expressions for the F2 gate component
const double f2_inf = 0.33 + 0.67/(1. + exp(5. + V/7.));
const double tau_f2 = 31./(1. + exp(5./2. - V/10.)) + 80./(1. +
exp(3. + V/10.)) + 562.*exp(-((27. + V)*(27. + V))/240.);
const double df2_dt = (-f2 + f2_inf)/tau_f2;
const double df2_dt_linearized = -1./tau_f2;
states[n * STATE_f2 + i] = (fabs(df2_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*df2_dt_linearized))*df2_dt/df2_dt_linearized : dt*df2_dt) +
f2;
// Expressions for the FCass gate component
const double fCass_inf = 0.4 + 0.6/(1. + 400.0*(Ca_ss*Ca_ss));
const double tau_fCass = 2. + 80./(1. + 400.0*(Ca_ss*Ca_ss));
const double dfCass_dt = (-fCass + fCass_inf)/tau_fCass;
const double dfCass_dt_linearized = -1./tau_fCass;
states[n * STATE_fCass + i] = (fabs(dfCass_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dfCass_dt_linearized))*dfCass_dt/dfCass_dt_linearized :
dt*dfCass_dt) + fCass;
// Expressions for the Calcium background current component
const double i_b_Ca = g_bca*(-E_Ca + V);
// Expressions for the Transient outward current component
const double i_to = g_to*(-E_K + V)*r*s;
// Expressions for the s gate component
const double s_inf = 1.0/(1. + exp(4. + V/5.));
const double tau_s = 3. + 5./(1. + exp(-4. + V/5.)) +
85.*exp(-((45. + V)*(45. + V))/320.);
const double ds_dt = (-s + s_inf)/tau_s;
const double ds_dt_linearized = -1./tau_s;
states[n * STATE_s + i] = (fabs(ds_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*ds_dt_linearized))*ds_dt/ds_dt_linearized : dt*ds_dt) + s;
// Expressions for the r gate component
const double r_inf = 1.0/(1. + exp(10./3. - V/6.));
const double tau_r = 0.8 + 9.5*exp(-((40. + V)*(40. + V))/1800.);
const double dr_dt = (-r + r_inf)/tau_r;
const double dr_dt_linearized = -1./tau_r;
states[n * STATE_r + i] = (fabs(dr_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dr_dt_linearized))*dr_dt/dr_dt_linearized : dt*dr_dt) + r;
// Expressions for the Sodium potassium pump current component
const double i_NaK = K_o*P_NaK*Na_i/((K_mNa + Na_i)*(K_mk + K_o)*(1. +
0.0353*exp(-F*V/(R*T)) + 0.1245*exp(-0.1*F*V/(R*T))));
// Expressions for the Sodium calcium exchanger current component
const double i_NaCa =
K_NaCa*(Ca_o*(Na_i*Na_i*Na_i)*exp(F*gamma*V/(R*T)) -
alpha*(Na_o*Na_o*Na_o)*Ca_i*exp(F*(-1. + gamma)*V/(R*T)))/((1. +
K_sat*exp(F*(-1. + gamma)*V/(R*T)))*(Ca_o +
Km_Ca)*((Km_Nai*Km_Nai*Km_Nai) + (Na_o*Na_o*Na_o)));
// Expressions for the Calcium pump current component
const double i_p_Ca = g_pCa*Ca_i/(K_pCa + Ca_i);
// Expressions for the Potassium pump current component
const double i_p_K = g_pK*(-E_K + V)/(1. +
65.4052157419383*exp(-0.167224080267559*V));
// Expressions for the Calcium dynamics component
const double i_up = Vmax_up/(1. + (K_up*K_up)/(Ca_i*Ca_i));
const double i_leak = V_leak*(-Ca_i + Ca_SR);
const double i_xfer = V_xfer*(-Ca_i + Ca_ss);
const double kcasr = max_sr - (max_sr - min_sr)/(1. + (EC*EC)/(Ca_SR*Ca_SR));
const double Ca_i_bufc = 1.0/(1. + Buf_c*K_buf_c/((K_buf_c + Ca_i)*(K_buf_c
+ Ca_i)));
const double Ca_sr_bufsr = 1.0/(1. + Buf_sr*K_buf_sr/((K_buf_sr +
Ca_SR)*(K_buf_sr + Ca_SR)));
const double Ca_ss_bufss = 1.0/(1. + Buf_ss*K_buf_ss/((K_buf_ss +
Ca_ss)*(K_buf_ss + Ca_ss)));
const double dCa_i_dt = (V_sr*(-i_up + i_leak)/V_c - Cm*(-2.*i_NaCa +
i_b_Ca + i_p_Ca)/(2.*F*V_c) + i_xfer)*Ca_i_bufc;
const double dCa_i_bufc_dCa_i = 2.*Buf_c*K_buf_c/(((1. +
Buf_c*K_buf_c/((K_buf_c + Ca_i)*(K_buf_c + Ca_i)))*(1. +
Buf_c*K_buf_c/((K_buf_c + Ca_i)*(K_buf_c + Ca_i))))*((K_buf_c +
Ca_i)*(K_buf_c + Ca_i)*(K_buf_c + Ca_i)));
const double di_NaCa_dCa_i = -K_NaCa*alpha*(Na_o*Na_o*Na_o)*exp(F*(-1.
+ gamma)*V/(R*T))/((1. + K_sat*exp(F*(-1. + gamma)*V/(R*T)))*(Ca_o +
Km_Ca)*((Km_Nai*Km_Nai*Km_Nai) + (Na_o*Na_o*Na_o)));
const double di_up_dCa_i = 2.*Vmax_up*(K_up*K_up)/(((1. +
(K_up*K_up)/(Ca_i*Ca_i))*(1. +
(K_up*K_up)/(Ca_i*Ca_i)))*(Ca_i*Ca_i*Ca_i));
const double di_p_Ca_dCa_i = g_pCa/(K_pCa + Ca_i) - g_pCa*Ca_i/((K_pCa +
Ca_i)*(K_pCa + Ca_i));
const double dE_Ca_dCa_i = -0.5*R*T/(F*Ca_i);
const double dCa_i_dt_linearized = (-V_xfer + V_sr*(-V_leak -
di_up_dCa_i)/V_c - Cm*(-2.*di_NaCa_dCa_i - g_bca*dE_Ca_dCa_i +
di_p_Ca_dCa_i)/(2.*F*V_c))*Ca_i_bufc + (V_sr*(-i_up + i_leak)/V_c -
Cm*(-2.*i_NaCa + i_b_Ca + i_p_Ca)/(2.*F*V_c) + i_xfer)*dCa_i_bufc_dCa_i;
states[n * STATE_Ca_i + i] = Ca_i + (fabs(dCa_i_dt_linearized) > 1.0e-8 ?
(-1.0 + exp(dt*dCa_i_dt_linearized))*dCa_i_dt/dCa_i_dt_linearized :
dt*dCa_i_dt);
const double k1 = k1_prime/kcasr;
const double k2 = k2_prime*kcasr;
const double O = (Ca_ss*Ca_ss)*R_prime*k1/(k3 + (Ca_ss*Ca_ss)*k1);
const double dR_prime_dt = k4*(1. - R_prime) - Ca_ss*R_prime*k2;
const double dR_prime_dt_linearized = -k4 - Ca_ss*k2;
states[n * STATE_R_prime + i] = (fabs(dR_prime_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dR_prime_dt_linearized))*dR_prime_dt/dR_prime_dt_linearized :
dt*dR_prime_dt) + R_prime;
const double i_rel = V_rel*(-Ca_ss + Ca_SR)*O;
const double dCa_SR_dt = (-i_leak - i_rel + i_up)*Ca_sr_bufsr;
const double dkcasr_dCa_SR = -2.*(EC*EC)*(max_sr - min_sr)/(((1. +
(EC*EC)/(Ca_SR*Ca_SR))*(1. + (EC*EC)/(Ca_SR*Ca_SR)))*(Ca_SR*Ca_SR*Ca_SR));
const double dCa_sr_bufsr_dCa_SR = 2.*Buf_sr*K_buf_sr/(((1. +
Buf_sr*K_buf_sr/((K_buf_sr + Ca_SR)*(K_buf_sr + Ca_SR)))*(1. +
Buf_sr*K_buf_sr/((K_buf_sr + Ca_SR)*(K_buf_sr + Ca_SR))))*((K_buf_sr +
Ca_SR)*(K_buf_sr + Ca_SR)*(K_buf_sr + Ca_SR)));
const double di_rel_dO = V_rel*(-Ca_ss + Ca_SR);
const double dk1_dkcasr = -k1_prime/(kcasr*kcasr);
const double dO_dk1 = (Ca_ss*Ca_ss)*R_prime/(k3 + (Ca_ss*Ca_ss)*k1) -
pow(Ca_ss, 4.)*R_prime*k1/((k3 + (Ca_ss*Ca_ss)*k1)*(k3 +
(Ca_ss*Ca_ss)*k1));
const double di_rel_dCa_SR = V_rel*O + V_rel*(-Ca_ss +
Ca_SR)*dO_dk1*dk1_dkcasr*dkcasr_dCa_SR;
const double dCa_SR_dt_linearized = (-V_leak - di_rel_dCa_SR -
dO_dk1*di_rel_dO*dk1_dkcasr*dkcasr_dCa_SR)*Ca_sr_bufsr + (-i_leak - i_rel
+ i_up)*dCa_sr_bufsr_dCa_SR;
states[n * STATE_Ca_SR + i] = Ca_SR + (fabs(dCa_SR_dt_linearized) > 1.0e-8 ?
(-1.0 + exp(dt*dCa_SR_dt_linearized))*dCa_SR_dt/dCa_SR_dt_linearized
: dt*dCa_SR_dt);
const double dCa_ss_dt = (V_sr*i_rel/V_ss - V_c*i_xfer/V_ss -
Cm*i_CaL/(2.*F*V_ss))*Ca_ss_bufss;
const double dO_dCa_ss = -2.*(Ca_ss*Ca_ss*Ca_ss)*(k1*k1)*R_prime/((k3 +
(Ca_ss*Ca_ss)*k1)*(k3 + (Ca_ss*Ca_ss)*k1)) + 2.*Ca_ss*R_prime*k1/(k3 +
(Ca_ss*Ca_ss)*k1);
const double di_rel_dCa_ss = -V_rel*O + V_rel*(-Ca_ss + Ca_SR)*dO_dCa_ss;
const double dCa_ss_bufss_dCa_ss = 2.*Buf_ss*K_buf_ss/(((1. +
Buf_ss*K_buf_ss/((K_buf_ss + Ca_ss)*(K_buf_ss + Ca_ss)))*(1. +
Buf_ss*K_buf_ss/((K_buf_ss + Ca_ss)*(K_buf_ss + Ca_ss))))*((K_buf_ss +
Ca_ss)*(K_buf_ss + Ca_ss)*(K_buf_ss + Ca_ss)));
const double di_CaL_dCa_ss =
1.0*g_CaL*(F*F)*V_eff*d*exp(2.*F*V_eff/(R*T))*f*f2*fCass/(R*T*(-1. +
exp(2.*F*V_eff/(R*T))));
const double dCa_ss_dt_linearized = (V_sr*(dO_dCa_ss*di_rel_dO +
di_rel_dCa_ss)/V_ss - V_c*V_xfer/V_ss -
Cm*di_CaL_dCa_ss/(2.*F*V_ss))*Ca_ss_bufss + (V_sr*i_rel/V_ss -
V_c*i_xfer/V_ss - Cm*i_CaL/(2.*F*V_ss))*dCa_ss_bufss_dCa_ss;
states[n * STATE_Ca_ss + i] = Ca_ss + (fabs(dCa_ss_dt_linearized) > 1.0e-8 ?
(-1.0 + exp(dt*dCa_ss_dt_linearized))*dCa_ss_dt/dCa_ss_dt_linearized
: dt*dCa_ss_dt);
// Expressions for the Sodium dynamics component
const double dNa_i_dt = Cm*(-i_Na - i_b_Na - 3.*i_NaCa - 3.*i_NaK)/(F*V_c);
const double dE_Na_dNa_i = -R*T/(F*Na_i);
const double di_NaCa_dNa_i =
3.*Ca_o*K_NaCa*(Na_i*Na_i)*exp(F*gamma*V/(R*T))/((1. +
K_sat*exp(F*(-1. + gamma)*V/(R*T)))*(Ca_o +
Km_Ca)*((Km_Nai*Km_Nai*Km_Nai) + (Na_o*Na_o*Na_o)));
const double di_Na_dE_Na = -g_Na*(m*m*m)*h*j;
const double di_NaK_dNa_i = K_o*P_NaK/((K_mNa + Na_i)*(K_mk + K_o)*(1. +
0.0353*exp(-F*V/(R*T)) + 0.1245*exp(-0.1*F*V/(R*T)))) -
K_o*P_NaK*Na_i/(((K_mNa + Na_i)*(K_mNa + Na_i))*(K_mk + K_o)*(1. +
0.0353*exp(-F*V/(R*T)) + 0.1245*exp(-0.1*F*V/(R*T))));
const double dNa_i_dt_linearized = Cm*(-3.*di_NaCa_dNa_i - 3.*di_NaK_dNa_i
+ g_bna*dE_Na_dNa_i - dE_Na_dNa_i*di_Na_dE_Na)/(F*V_c);
states[n * STATE_Na_i + i] = Na_i + (fabs(dNa_i_dt_linearized) > 1.0e-8 ?
(-1.0 + exp(dt*dNa_i_dt_linearized))*dNa_i_dt/dNa_i_dt_linearized :
dt*dNa_i_dt);
// Expressions for the Membrane component
const double i_Stim = (t - stim_period*floor(t/stim_period) <=
stim_duration + stim_start && t - stim_period*floor(t/stim_period)
>= stim_start ? -stim_amplitude : 0.);
const double dV_dt = -i_CaL - i_K1 - i_Kr - i_Ks - i_Na - i_NaCa - i_NaK -
i_Stim - i_b_Ca - i_b_Na - i_p_Ca - i_p_K - i_to;
const double dalpha_K1_dV = -3.68652741199693e-8*exp(0.06*V -
0.06*E_K)/((1. + 6.14421235332821e-6*exp(0.06*V - 0.06*E_K))*(1. +
6.14421235332821e-6*exp(0.06*V - 0.06*E_K)));
const double di_CaL_dV_eff = 4.*g_CaL*(F*F)*(-Ca_o +
0.25*Ca_ss*exp(2.*F*V_eff/(R*T)))*d*f*f2*fCass/(R*T*(-1. +
exp(2.*F*V_eff/(R*T)))) - 8.*g_CaL*(F*F*F)*(-Ca_o +
0.25*Ca_ss*exp(2.*F*V_eff/(R*T)))*V_eff*d*exp(2.*F*V_eff/(R*T))*f*f2*fCass/((R*R)*(T*T)*((-1.
+ exp(2.*F*V_eff/(R*T)))*(-1. + exp(2.*F*V_eff/(R*T))))) +
2.0*g_CaL*(F*F*F)*Ca_ss*V_eff*d*exp(2.*F*V_eff/(R*T))*f*f2*fCass/((R*R)*(T*T)*(-1.
+ exp(2.*F*V_eff/(R*T))));
const double di_Ks_dV = g_Ks*(Xs*Xs);
const double di_p_K_dV = g_pK/(1. +
65.4052157419383*exp(-0.167224080267559*V)) +
10.9373270471469*g_pK*(-E_K + V)*exp(-0.167224080267559*V)/((1. +
65.4052157419383*exp(-0.167224080267559*V))*(1. +
65.4052157419383*exp(-0.167224080267559*V)));
const double di_to_dV = g_to*r*s;
const double dxK1_inf_dbeta_K1 = -alpha_K1/((alpha_K1 + beta_K1)*(alpha_K1 +
beta_K1));
const double dxK1_inf_dalpha_K1 = 1.0/(alpha_K1 + beta_K1) -
alpha_K1/((alpha_K1 + beta_K1)*(alpha_K1 + beta_K1));
const double dbeta_K1_dV = (0.000612120804016053*exp(0.0002*V -
0.0002*E_K) + 0.0367879441171442*exp(0.1*V - 0.1*E_K))/(1. +
exp(0.5*E_K - 0.5*V)) + 0.5*(0.367879441171442*exp(0.1*V -
0.1*E_K) + 3.06060402008027*exp(0.0002*V -
0.0002*E_K))*exp(0.5*E_K - 0.5*V)/((1. + exp(0.5*E_K -
0.5*V))*(1. + exp(0.5*E_K - 0.5*V)));
const double di_K1_dV = 0.430331482911935*g_K1*sqrt(K_o)*xK1_inf +
0.430331482911935*g_K1*sqrt(K_o)*(-E_K +
V)*(dalpha_K1_dV*dxK1_inf_dalpha_K1 + dbeta_K1_dV*dxK1_inf_dbeta_K1);
const double dV_eff_dV = (fabs(-15. + V) < 0.01 ? 0. : 1.);
const double di_Na_dV = g_Na*(m*m*m)*h*j;
const double di_Kr_dV = 0.430331482911935*g_Kr*sqrt(K_o)*Xr1*Xr2;
const double di_NaK_dV = K_o*P_NaK*(0.0353*F*exp(-F*V/(R*T))/(R*T) +
0.01245*F*exp(-0.1*F*V/(R*T))/(R*T))*Na_i/((K_mNa + Na_i)*(K_mk +
K_o)*((1. + 0.0353*exp(-F*V/(R*T)) +
0.1245*exp(-0.1*F*V/(R*T)))*(1. + 0.0353*exp(-F*V/(R*T)) +
0.1245*exp(-0.1*F*V/(R*T)))));
const double di_K1_dxK1_inf = 0.430331482911935*g_K1*sqrt(K_o)*(-E_K +
V);
const double di_NaCa_dV =
K_NaCa*(Ca_o*F*gamma*(Na_i*Na_i*Na_i)*exp(F*gamma*V/(R*T))/(R*T) -
F*alpha*(Na_o*Na_o*Na_o)*(-1. + gamma)*Ca_i*exp(F*(-1. +
gamma)*V/(R*T))/(R*T))/((1. + K_sat*exp(F*(-1. +
gamma)*V/(R*T)))*(Ca_o + Km_Ca)*((Km_Nai*Km_Nai*Km_Nai) +
(Na_o*Na_o*Na_o))) - F*K_NaCa*K_sat*(-1. +
gamma)*(Ca_o*(Na_i*Na_i*Na_i)*exp(F*gamma*V/(R*T)) -
alpha*(Na_o*Na_o*Na_o)*Ca_i*exp(F*(-1. +
gamma)*V/(R*T)))*exp(F*(-1. + gamma)*V/(R*T))/(R*T*((1. +
K_sat*exp(F*(-1. + gamma)*V/(R*T)))*(1. + K_sat*exp(F*(-1. +
gamma)*V/(R*T))))*(Ca_o + Km_Ca)*((Km_Nai*Km_Nai*Km_Nai) +
(Na_o*Na_o*Na_o)));
const double dV_dt_linearized = -g_bca - g_bna - di_K1_dV - di_Kr_dV -
di_Ks_dV - di_NaCa_dV - di_NaK_dV - di_Na_dV - di_p_K_dV - di_to_dV -
(dalpha_K1_dV*dxK1_inf_dalpha_K1 +
dbeta_K1_dV*dxK1_inf_dbeta_K1)*di_K1_dxK1_inf - dV_eff_dV*di_CaL_dV_eff;
states[n * STATE_V + i] = (fabs(dV_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dV_dt_linearized))*dV_dt/dV_dt_linearized : dt*dV_dt) + V;
// Expressions for the Potassium dynamics component
const double dK_i_dt = Cm*(-i_K1 - i_Kr - i_Ks - i_Stim - i_p_K - i_to +
2.*i_NaK)/(F*V_c);
const double dE_Ks_dK_i = -R*T/(F*(P_kna*Na_i + K_i));
const double dbeta_K1_dE_K = (-0.000612120804016053*exp(0.0002*V -
0.0002*E_K) - 0.0367879441171442*exp(0.1*V - 0.1*E_K))/(1. +
exp(0.5*E_K - 0.5*V)) - 0.5*(0.367879441171442*exp(0.1*V -
0.1*E_K) + 3.06060402008027*exp(0.0002*V -
0.0002*E_K))*exp(0.5*E_K - 0.5*V)/((1. + exp(0.5*E_K -
0.5*V))*(1. + exp(0.5*E_K - 0.5*V)));
const double di_Kr_dE_K = -0.430331482911935*g_Kr*sqrt(K_o)*Xr1*Xr2;
const double dE_K_dK_i = -R*T/(F*K_i);
const double di_Ks_dE_Ks = -g_Ks*(Xs*Xs);
const double di_to_dE_K = -g_to*r*s;
const double dalpha_K1_dE_K = 3.68652741199693e-8*exp(0.06*V -
0.06*E_K)/((1. + 6.14421235332821e-6*exp(0.06*V - 0.06*E_K))*(1. +
6.14421235332821e-6*exp(0.06*V - 0.06*E_K)));
const double di_K1_dE_K = -0.430331482911935*g_K1*sqrt(K_o)*xK1_inf +
0.430331482911935*g_K1*sqrt(K_o)*(-E_K +
V)*(dalpha_K1_dE_K*dxK1_inf_dalpha_K1 + dbeta_K1_dE_K*dxK1_inf_dbeta_K1);
const double di_p_K_dE_K = -g_pK/(1. +
65.4052157419383*exp(-0.167224080267559*V));
const double dK_i_dt_linearized =
Cm*(-(dE_K_dK_i*dalpha_K1_dE_K*dxK1_inf_dalpha_K1 +
dE_K_dK_i*dbeta_K1_dE_K*dxK1_inf_dbeta_K1)*di_K1_dxK1_inf -
dE_K_dK_i*di_K1_dE_K - dE_K_dK_i*di_Kr_dE_K - dE_K_dK_i*di_p_K_dE_K -
dE_K_dK_i*di_to_dE_K - dE_Ks_dK_i*di_Ks_dE_Ks)/(F*V_c);
states[n * STATE_K_i + i] = K_i + (fabs(dK_i_dt_linearized) > 1.0e-8 ? (-1.0 +
exp(dt*dK_i_dt_linearized))*dK_i_dt/dK_i_dt_linearized : dt*dK_i_dt);
}
}
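// Note (added): every state update above applies the same linearized exponential
// (Rush-Larsen style) step.  With f = dx/dt evaluated at the current state and J an
// approximation of df/dx, the update is
//   x_new = x + (exp(dt*J) - 1) * f / J   if |J| > 1e-8
//   x_new = x + dt * f                    otherwise (forward-Euler limit)
// A minimal standalone sketch of that guard, assuming <math.h>; the helper name is
// an assumption and not part of the generated code:
static inline double rush_larsen_step(const double x, const double f,
                                      const double J, const double dt)
{
  // exponential integrator when the linearization is well conditioned,
  // explicit Euler otherwise
  return fabs(J) > 1.0e-8 ? x + (exp(dt * J) - 1.0) * f / J : x + dt * f;
}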
|
the_stack
|
namespace surfelwarp { namespace device {
enum {
window_halfsize = 1,
};
__device__ __forceinline__ float computeAlignmentErrorWindowSearch(
cudaTextureObject_t depth_vertex_confid_map,
cudaTextureObject_t depth_normal_radius_map,
cudaTextureObject_t filter_foreground_mask,
cudaTextureObject_t reference_vertex_map,
cudaTextureObject_t reference_normal_map,
cudaTextureObject_t index_map,
const DeviceArrayView2D<KNNAndWeight> knn_map,
const DualQuaternion* device_warp_field,
const Intrinsic& intrinsic, const mat34& world2camera
) {
const auto x = threadIdx.x + blockDim.x*blockIdx.x;
const auto y = threadIdx.y + blockDim.y*blockIdx.y;
if (x >= knn_map.Cols() || y >= knn_map.Rows()) return 0.0f;
//The residual value
float alignment_error = 0.0f;
const auto surfel_index = tex2D<unsigned>(index_map, x, y);
if(surfel_index != d_invalid_index)
{
//Get the vertex
const float4 can_vertex4 = tex2D<float4>(reference_vertex_map, x, y);
const float4 can_normal4 = tex2D<float4>(reference_normal_map, x, y);
const KNNAndWeight knn = knn_map(y, x);
DualQuaternion dq_average = averageDualQuaternion(device_warp_field, knn.knn, knn.weight);
const mat34 se3 = dq_average.se3_matrix();
//And warp it
const float3 warped_vertex = se3.rot * can_vertex4 + se3.trans;
const float3 warped_normal = se3.rot * can_normal4;
//Transfer to the camera frame
const float3 warped_vertex_camera = world2camera.rot * warped_vertex + world2camera.trans;
const float3 warped_normal_camera = world2camera.rot * warped_normal;
//Project the vertex into image
const int2 img_coord = {
__float2int_rn(((warped_vertex_camera.x / (warped_vertex_camera.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x),
__float2int_rn(((warped_vertex_camera.y / (warped_vertex_camera.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y)
};
//Use window search
alignment_error = d_maximum_alignment_error;
bool depth_vertex_found = false;
for(auto depth_y = img_coord.y - window_halfsize; depth_y <= img_coord.y + window_halfsize; depth_y++) {
for(auto depth_x = img_coord.x - window_halfsize; depth_x <= img_coord.x + window_halfsize; depth_x++) {
const float4 depth_vertex = tex2D<float4>(depth_vertex_confid_map, depth_x, depth_y);
const float4 depth_normal = tex2D<float4>(depth_normal_radius_map, depth_x, depth_y);
if(!is_zero_vertex(depth_vertex) && dotxyz(warped_normal_camera, depth_normal) > 0.3f)
depth_vertex_found = true;
const auto error = fabsf_diff_xyz(warped_vertex_camera, depth_vertex);
if(error < alignment_error)
alignment_error = error;
}
}
//If there is no depth pixel, check the foreground mask
if(!depth_vertex_found) {
const float filter_foreground_value = tex2D<float>(filter_foreground_mask, img_coord.x, img_coord.y);
if(filter_foreground_value < 0.9) { //This is on boundary or foreground
//0.05 [m] (5 cm) is the approximate maximum value (corresponding to a foreground value of 1.0)
//if the surfel is on the boundary of the image.
alignment_error = 0.03f * filter_foreground_value;
}
}
}
//Return the value for further processing
return alignment_error;
}
__global__ void computeAlignmentErrorMapKernel(
cudaTextureObject_t depth_vertex_confid_map,
cudaTextureObject_t depth_normal_radius_map,
cudaTextureObject_t filter_foreground_mask,
cudaTextureObject_t reference_vertex_map,
cudaTextureObject_t reference_normal_map,
cudaTextureObject_t index_map,
const DeviceArrayView2D<KNNAndWeight> knn_map,
const DualQuaternion* device_warp_field,
const Intrinsic intrinsic, const mat34 world2camera,
//the output
cudaSurfaceObject_t alignment_error_map
) {
const auto x = threadIdx.x + blockDim.x*blockIdx.x;
const auto y = threadIdx.y + blockDim.y*blockIdx.y;
if (x >= knn_map.Cols() || y >= knn_map.Rows()) return;
//The residual value
const float alignment_error = computeAlignmentErrorWindowSearch(
depth_vertex_confid_map,
depth_normal_radius_map,
filter_foreground_mask,
reference_vertex_map,
reference_normal_map,
index_map,
knn_map,
device_warp_field,
intrinsic, world2camera
);
//Write the value to surface
surf2Dwrite(alignment_error, alignment_error_map, x * sizeof(float), y);
}
__global__ void computeNodeAlignmentErrorFromMapKernel(
cudaTextureObject_t depth_vertex_confid_map,
cudaTextureObject_t depth_normal_radius_map,
cudaTextureObject_t filter_foreground_mask,
cudaTextureObject_t reference_vertex_map,
cudaTextureObject_t reference_normal_map,
cudaTextureObject_t index_map,
const DeviceArrayView2D<KNNAndWeight> knn_map,
const DualQuaternion* device_warp_field,
const Intrinsic intrinsic, const mat34 world2camera,
//the output
float* node_alignment_error,
float* node_accumlate_weight
) {
const auto x = threadIdx.x + blockDim.x*blockIdx.x;
const auto y = threadIdx.y + blockDim.y*blockIdx.y;
if (x >= knn_map.Cols() || y >= knn_map.Rows()) return;
//The residual value
const float alignment_error = computeAlignmentErrorWindowSearch(
depth_vertex_confid_map,
depth_normal_radius_map,
filter_foreground_mask,
reference_vertex_map,
reference_normal_map,
index_map,
knn_map,
device_warp_field,
intrinsic, world2camera
);
//The knn and weight are used to interpolate
const KNNAndWeight knn = knn_map(y, x);
const unsigned short* node_array = (const unsigned short*)(&knn.knn);
const float* node_weight_array = (const float*)(&knn.weight);
if(alignment_error > 1e-6f)
{
for(auto i = 0; i < 4; i++) {
const auto node = node_array[i];
const auto node_weight = node_weight_array[i];
atomicAdd(&(node_alignment_error[node]), node_weight * alignment_error);
atomicAdd(&(node_accumlate_weight[node]), node_weight);
}
}
}
__global__ void collectAlignmentErrorMapFromNodeKernel(
const float* node_alignment_error,
const float* node_accumlate_weight,
cudaTextureObject_t index_map,
const DeviceArrayView2D<KNNAndWeight> knn_map,
//the output
cudaSurfaceObject_t alignment_error_map
) {
const auto x = threadIdx.x + blockDim.x*blockIdx.x;
const auto y = threadIdx.y + blockDim.y*blockIdx.y;
if (x >= knn_map.Cols() || y >= knn_map.Rows()) return;
float filter_alignment_error = 0.0f;
const auto surfel_index = tex2D<unsigned>(index_map, x, y);
if(surfel_index != d_invalid_index)
{
//The knn and weight are used to interpolate
const KNNAndWeight knn = knn_map(y, x);
const unsigned short* node_array = (const unsigned short*)(&knn.knn);
const float* node_weight_array = (const float*)(&knn.weight);
//Load from node
float accumlate_error = 0.0f;
float accumlate_weight = 0.0f;
for(auto i = 0; i < 4; i++) {
const auto node = node_array[i];
const auto node_weight = node_weight_array[i];
const float node_error = node_alignment_error[node];
const float node_total_weight = node_accumlate_weight[node];
const float node_unit_error = node_error / node_total_weight;
accumlate_error += node_unit_error * node_weight;
accumlate_weight += node_weight;
}
//Write to output value
filter_alignment_error = accumlate_error / (accumlate_weight + 1e-4f);
}
//Write the value to surface
surf2Dwrite(filter_alignment_error, alignment_error_map, x * sizeof(float), y);
}
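// Note (added): the gather above forms a KNN-weighted average.  For a pixel with
// neighbouring nodes i = 0..3, interpolation weights w_i, and per-node accumulated
// error E_i and weight W_i (filled by computeNodeAlignmentErrorFromMapKernel), the
// output is
//   filter_alignment_error = sum_i w_i * (E_i / W_i) / (sum_i w_i + 1e-4)
// where the 1e-4 term guards against a zero total weight.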
} // device
} // surfelwarp
void surfelwarp::DenseDepthHandler::ComputeAlignmentErrorMapDirect(
const DeviceArrayView<DualQuaternion> &node_se3,
const mat34& world2camera,
cudaTextureObject_t filter_foreground_mask,
cudaStream_t stream
) {
//Check the size
SURFELWARP_CHECK(m_node_se3.Size() == node_se3.Size());
dim3 blk(16, 16);
dim3 grid(divUp(m_image_width, blk.x), divUp(m_image_height, blk.y));
device::computeAlignmentErrorMapKernel<<<grid, blk, 0, stream>>>(
m_depth_observation.vertex_map,
m_depth_observation.normal_map,
filter_foreground_mask,
m_geometry_maps.reference_vertex_map,
m_geometry_maps.reference_normal_map,
m_geometry_maps.index_map,
m_knn_map,
node_se3.RawPtr(),
m_project_intrinsic, world2camera,
m_alignment_error_map.surface
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
//First compute the error on the nodes, then distribute it back onto the map
void surfelwarp::DenseDepthHandler::ComputeNodewiseError(
const DeviceArrayView<DualQuaternion> &node_se3,
const mat34 &world2camera,
cudaTextureObject_t filter_foreground_mask,
cudaStream_t stream
) {
const auto num_nodes = m_node_se3.Size();
SURFELWARP_CHECK(node_se3.Size() == num_nodes);
//Correct the size
m_node_accumlate_error.ResizeArrayOrException(num_nodes);
m_node_accumlate_weight.ResizeArrayOrException(num_nodes);
//Clear the value
cudaSafeCall(cudaMemsetAsync(m_node_accumlate_error.Ptr(), 0, sizeof(float) * num_nodes, stream));
cudaSafeCall(cudaMemsetAsync(m_node_accumlate_weight.Ptr(), 0, sizeof(float) * num_nodes, stream));
//First scatter the value to nodes
dim3 blk(16, 16);
dim3 grid(divUp(m_image_width, blk.x), divUp(m_image_height, blk.y));
device::computeNodeAlignmentErrorFromMapKernel<<<grid, blk, 0, stream>>>(
m_depth_observation.vertex_map,
m_depth_observation.normal_map,
filter_foreground_mask,
m_geometry_maps.reference_vertex_map,
m_geometry_maps.reference_normal_map,
m_geometry_maps.index_map,
m_knn_map,
node_se3.RawPtr(),
m_project_intrinsic, world2camera,
m_node_accumlate_error.Ptr(),
m_node_accumlate_weight.Ptr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::DenseDepthHandler::distributeNodeErrorOnMap(cudaStream_t stream) {
dim3 blk(16, 16);
dim3 grid(divUp(m_image_width, blk.x), divUp(m_image_height, blk.y));
device::collectAlignmentErrorMapFromNodeKernel<<<grid, blk, 0, stream>>>(
m_node_accumlate_error.Ptr(),
m_node_accumlate_weight.Ptr(),
m_geometry_maps.index_map,
m_knn_map,
m_alignment_error_map.surface
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
}
void surfelwarp::DenseDepthHandler::ComputeAlignmentErrorMapFromNode(
const DeviceArrayView<DualQuaternion>& node_se3,
const mat34 & world2camera,
cudaTextureObject_t filter_foreground_mask,
cudaStream_t stream
) {
ComputeNodewiseError(node_se3, world2camera, filter_foreground_mask, stream);
distributeNodeErrorOnMap(stream);
}
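// A hedged usage sketch (added): assuming a fully configured DenseDepthHandler
// named `handler`, the current warp field `node_se3`, a world-to-camera transform
// `world2camera`, and a filtered foreground mask, the two error-map paths above
// could be driven as follows (stream management is an assumption):
//
//   cudaStream_t stream;
//   cudaSafeCall(cudaStreamCreate(&stream));
//   // per-pixel error written directly into the alignment error map
//   handler.ComputeAlignmentErrorMapDirect(node_se3, world2camera, foreground_mask, stream);
//   // or: accumulate per-node errors first, then distribute them back onto the map
//   handler.ComputeAlignmentErrorMapFromNode(node_se3, world2camera, foreground_mask, stream);
//   cudaSafeCall(cudaStreamSynchronize(stream));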
|
the_stack
|
#include "eaconv/src/EAConv2d_kernel.h"
#include "eaconv/src/cuda_check.h"
#include "eaconv/src/handle.h"
#include "eaconv/src/conv_params.h"
struct Workspace {
Workspace(THCState* state, size_t size) :
state(state), size(size), data(NULL) {
checkCUDA(THCudaMalloc(state, &data, size));
}
Workspace(const Workspace&) = delete;
Workspace(Workspace&&) = default;
~Workspace() {
if (data) {
THCudaFree(state, data);
}
}
THCState* state;
size_t size;
void* data;
};
#ifdef __cplusplus
extern "C" {
#endif
void EAConv2d_cudnn_forward_bias(cudnnHandle_t cudnn,
float *bias, float* output,
int output_batch_size,
int output_channels,
int output_h,
int output_w) {
Convolution_Params params(1, 1,
0, 0,
1, 1,
3, 3, 3, 3,
3, 3, 3, 3,
output_batch_size,
output_channels,
output_h,
output_w);
const float alpha = 1;
checkCUDNN(cudnnAddTensor(cudnn, &alpha,
params.bias_desc, bias,
&alpha,
params.output_desc, output));
}
void EAConv2d_cudnn_backward_bias(cudnnHandle_t cudnn,
float *grad_bias, float* gradOutput,
int output_batch_size,
int output_channels,
int output_h,
int output_w) {
Convolution_Params params(1, 1,
0, 0,
1, 1,
3, 3, 3, 3,
3, 3, 3, 3,
output_batch_size,
output_channels,
output_h,
output_w);
const float alpha = 1;
checkCUDNN(cudnnConvolutionBackwardBias(cudnn, &alpha,
params.output_desc, gradOutput,
&alpha,
params.bias_desc, grad_bias));
}
void EAConv2d_cudnn_forward(THCState* state,
cudnnHandle_t cudnn,
float* input,
float* weight,
float* output,
int stride_x,
int stride_y,
int padding_x,
int padding_y,
int dilation_x,
int dilation_y,
int groups,
int input_batch_size,
int input_channels,
int input_h,
int input_w,
int kernel_out,
int kernel_in,
int kernel_h,
int kernel_w,
int output_batch_size,
int output_channels,
int output_h,
int output_w) {
Convolution_Params params(stride_x,
stride_y,
padding_x,
padding_y,
dilation_x,
dilation_y,
input_batch_size,
input_channels,
input_h,
input_w,
kernel_out,
kernel_in,
kernel_h,
kernel_w,
output_batch_size,
output_channels,
output_h,
output_w);
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
params.input_desc,
params.kernel_desc,
params.conv_desc,
params.output_desc,
// CUDNN_CONVOLUTION_FWD_NO_WORKSPACE,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
size_t workspace_bytes = 0;
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
params.input_desc,
params.kernel_desc,
params.conv_desc,
params.output_desc,
convolution_algorithm,
&workspace_bytes));
Workspace cur_ws(state, workspace_bytes);
const float alpha = 1;
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
params.input_desc,
input,
params.kernel_desc,
weight,
params.conv_desc,
convolution_algorithm,
cur_ws.data,
cur_ws.size,
&alpha,
params.output_desc,
output));
}
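// Note (added): the output spatial size implied by Convolution_Params follows the
// standard cuDNN cross-correlation output formula.  A minimal sketch; the helper
// name is an assumption and not part of this file's API:
static inline int EAConv2d_output_dim(int in, int pad, int dilation,
                                      int kernel, int stride) {
  // out = floor((in + 2*pad - dilation*(kernel-1) - 1) / stride) + 1
  return (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1;
}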
void EAConv2d_cudnn_backward(THCState* state,
cudnnHandle_t cudnn,
float* grad_input,
float* grad_weight,
float* gradOutput,
float* input,
float* weight,
int stride_x,
int stride_y,
int padding_x,
int padding_y,
int dilation_x,
int dilation_y,
int groups,
int input_batch_size,
int input_channels,
int input_h,
int input_w,
int kernel_out,
int kernel_in,
int kernel_h,
int kernel_w,
int output_batch_size,
int output_channels,
int output_h,
int output_w) {
Convolution_Params params(stride_x,
stride_y,
padding_x,
padding_y,
dilation_x,
dilation_y,
input_batch_size,
input_channels,
input_h,
input_w,
kernel_out,
kernel_in,
kernel_h,
kernel_w,
output_batch_size,
output_channels,
output_h,
output_w);
// backward filter
cudnnConvolutionBwdFilterAlgo_t convolution_filter_algorithm;
checkCUDNN(
cudnnGetConvolutionBackwardFilterAlgorithm(
cudnn,
params.input_desc,
params.output_desc,
params.conv_desc,
params.kernel_desc,
// CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_filter_algorithm));
size_t filter_workspace_bytes = 0;
checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(
cudnn,
params.input_desc,
params.output_desc,
params.conv_desc,
params.kernel_desc,
convolution_filter_algorithm,
&filter_workspace_bytes));
// backward data
cudnnConvolutionBwdDataAlgo_t convolution_data_algorithm;
checkCUDNN(
cudnnGetConvolutionBackwardDataAlgorithm(
cudnn,
params.kernel_desc,
params.output_desc,
params.conv_desc,
params.input_desc,
// CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_data_algorithm));
size_t data_workspace_bytes = 0;
checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(
cudnn,
params.kernel_desc,
params.output_desc,
params.conv_desc,
params.input_desc,
convolution_data_algorithm,
&data_workspace_bytes));
Workspace filter_ws(state, filter_workspace_bytes);
Workspace data_ws(state, data_workspace_bytes);
const float alpha = 1;
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn,
&alpha,
params.input_desc,
input,
params.output_desc,
gradOutput,
params.conv_desc,
convolution_filter_algorithm,
filter_ws.data,
filter_ws.size,
&alpha,
params.kernel_desc,
grad_weight));
checkCUDNN(cudnnConvolutionBackwardData(cudnn,
&alpha,
params.kernel_desc,
weight,
params.output_desc,
gradOutput,
params.conv_desc,
convolution_data_algorithm,
data_ws.data,
data_ws.size,
&alpha,
params.input_desc,
grad_input));
}
#ifdef __cplusplus
}
#endif
|
the_stack
|
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <array>
#include <algorithm>
#include <random>
#include <hip/hip_runtime.h>
#define ITERATION 100
typedef unsigned char uint8_t;
static const uint8_t sbox[16] = {
0xC0, 0x50, 0x60, 0xB0, 0x90, 0x00, 0xA0, 0xD0, 0x30, 0xE0, 0xF0, 0x80, 0x40, 0x70, 0x10, 0x20,
};
// look-up tables for speeding up permutation layer
static const uint8_t sbox_pmt_3[256] = {
0xF0, 0xB1, 0xB4, 0xE5, 0xE1, 0xA0, 0xE4, 0xF1, 0xA5, 0xF4, 0xF5, 0xE0, 0xB0, 0xB5, 0xA1, 0xA4,
0x72, 0x33, 0x36, 0x67, 0x63, 0x22, 0x66, 0x73, 0x27, 0x76, 0x77, 0x62, 0x32, 0x37, 0x23, 0x26,
0x78, 0x39, 0x3C, 0x6D, 0x69, 0x28, 0x6C, 0x79, 0x2D, 0x7C, 0x7D, 0x68, 0x38, 0x3D, 0x29, 0x2C,
0xDA, 0x9B, 0x9E, 0xCF, 0xCB, 0x8A, 0xCE, 0xDB, 0x8F, 0xDE, 0xDF, 0xCA, 0x9A, 0x9F, 0x8B, 0x8E,
0xD2, 0x93, 0x96, 0xC7, 0xC3, 0x82, 0xC6, 0xD3, 0x87, 0xD6, 0xD7, 0xC2, 0x92, 0x97, 0x83, 0x86,
0x50, 0x11, 0x14, 0x45, 0x41, 0x00, 0x44, 0x51, 0x05, 0x54, 0x55, 0x40, 0x10, 0x15, 0x01, 0x04,
0xD8, 0x99, 0x9C, 0xCD, 0xC9, 0x88, 0xCC, 0xD9, 0x8D, 0xDC, 0xDD, 0xC8, 0x98, 0x9D, 0x89, 0x8C,
0xF2, 0xB3, 0xB6, 0xE7, 0xE3, 0xA2, 0xE6, 0xF3, 0xA7, 0xF6, 0xF7, 0xE2, 0xB2, 0xB7, 0xA3, 0xA6,
0x5A, 0x1B, 0x1E, 0x4F, 0x4B, 0x0A, 0x4E, 0x5B, 0x0F, 0x5E, 0x5F, 0x4A, 0x1A, 0x1F, 0x0B, 0x0E,
0xF8, 0xB9, 0xBC, 0xED, 0xE9, 0xA8, 0xEC, 0xF9, 0xAD, 0xFC, 0xFD, 0xE8, 0xB8, 0xBD, 0xA9, 0xAC,
0xFA, 0xBB, 0xBE, 0xEF, 0xEB, 0xAA, 0xEE, 0xFB, 0xAF, 0xFE, 0xFF, 0xEA, 0xBA, 0xBF, 0xAB, 0xAE,
0xD0, 0x91, 0x94, 0xC5, 0xC1, 0x80, 0xC4, 0xD1, 0x85, 0xD4, 0xD5, 0xC0, 0x90, 0x95, 0x81, 0x84,
0x70, 0x31, 0x34, 0x65, 0x61, 0x20, 0x64, 0x71, 0x25, 0x74, 0x75, 0x60, 0x30, 0x35, 0x21, 0x24,
0x7A, 0x3B, 0x3E, 0x6F, 0x6B, 0x2A, 0x6E, 0x7B, 0x2F, 0x7E, 0x7F, 0x6A, 0x3A, 0x3F, 0x2B, 0x2E,
0x52, 0x13, 0x16, 0x47, 0x43, 0x02, 0x46, 0x53, 0x07, 0x56, 0x57, 0x42, 0x12, 0x17, 0x03, 0x06,
0x58, 0x19, 0x1C, 0x4D, 0x49, 0x08, 0x4C, 0x59, 0x0D, 0x5C, 0x5D, 0x48, 0x18, 0x1D, 0x09, 0x0C,
};
static const uint8_t sbox_pmt_2[256] = {
0x3C, 0x6C, 0x2D, 0x79, 0x78, 0x28, 0x39, 0x7C, 0x69, 0x3D, 0x7D, 0x38, 0x2C, 0x6D, 0x68, 0x29,
0x9C, 0xCC, 0x8D, 0xD9, 0xD8, 0x88, 0x99, 0xDC, 0xC9, 0x9D, 0xDD, 0x98, 0x8C, 0xCD, 0xC8, 0x89,
0x1E, 0x4E, 0x0F, 0x5B, 0x5A, 0x0A, 0x1B, 0x5E, 0x4B, 0x1F, 0x5F, 0x1A, 0x0E, 0x4F, 0x4A, 0x0B,
0xB6, 0xE6, 0xA7, 0xF3, 0xF2, 0xA2, 0xB3, 0xF6, 0xE3, 0xB7, 0xF7, 0xB2, 0xA6, 0xE7, 0xE2, 0xA3,
0xB4, 0xE4, 0xA5, 0xF1, 0xF0, 0xA0, 0xB1, 0xF4, 0xE1, 0xB5, 0xF5, 0xB0, 0xA4, 0xE5, 0xE0, 0xA1,
0x14, 0x44, 0x05, 0x51, 0x50, 0x00, 0x11, 0x54, 0x41, 0x15, 0x55, 0x10, 0x04, 0x45, 0x40, 0x01,
0x36, 0x66, 0x27, 0x73, 0x72, 0x22, 0x33, 0x76, 0x63, 0x37, 0x77, 0x32, 0x26, 0x67, 0x62, 0x23,
0xBC, 0xEC, 0xAD, 0xF9, 0xF8, 0xA8, 0xB9, 0xFC, 0xE9, 0xBD, 0xFD, 0xB8, 0xAC, 0xED, 0xE8, 0xA9,
0x96, 0xC6, 0x87, 0xD3, 0xD2, 0x82, 0x93, 0xD6, 0xC3, 0x97, 0xD7, 0x92, 0x86, 0xC7, 0xC2, 0x83,
0x3E, 0x6E, 0x2F, 0x7B, 0x7A, 0x2A, 0x3B, 0x7E, 0x6B, 0x3F, 0x7F, 0x3A, 0x2E, 0x6F, 0x6A, 0x2B,
0xBE, 0xEE, 0xAF, 0xFB, 0xFA, 0xAA, 0xBB, 0xFE, 0xEB, 0xBF, 0xFF, 0xBA, 0xAE, 0xEF, 0xEA, 0xAB,
0x34, 0x64, 0x25, 0x71, 0x70, 0x20, 0x31, 0x74, 0x61, 0x35, 0x75, 0x30, 0x24, 0x65, 0x60, 0x21,
0x1C, 0x4C, 0x0D, 0x59, 0x58, 0x08, 0x19, 0x5C, 0x49, 0x1D, 0x5D, 0x18, 0x0C, 0x4D, 0x48, 0x09,
0x9E, 0xCE, 0x8F, 0xDB, 0xDA, 0x8A, 0x9B, 0xDE, 0xCB, 0x9F, 0xDF, 0x9A, 0x8E, 0xCF, 0xCA, 0x8B,
0x94, 0xC4, 0x85, 0xD1, 0xD0, 0x80, 0x91, 0xD4, 0xC1, 0x95, 0xD5, 0x90, 0x84, 0xC5, 0xC0, 0x81,
0x16, 0x46, 0x07, 0x53, 0x52, 0x02, 0x13, 0x56, 0x43, 0x17, 0x57, 0x12, 0x06, 0x47, 0x42, 0x03,
};
static const uint8_t sbox_pmt_1[256] = {
0x0F, 0x1B, 0x4B, 0x5E, 0x1E, 0x0A, 0x4E, 0x1F, 0x5A, 0x4F, 0x5F, 0x0E, 0x0B, 0x5B, 0x1A, 0x4A,
0x27, 0x33, 0x63, 0x76, 0x36, 0x22, 0x66, 0x37, 0x72, 0x67, 0x77, 0x26, 0x23, 0x73, 0x32, 0x62,
0x87, 0x93, 0xC3, 0xD6, 0x96, 0x82, 0xC6, 0x97, 0xD2, 0xC7, 0xD7, 0x86, 0x83, 0xD3, 0x92, 0xC2,
0xAD, 0xB9, 0xE9, 0xFC, 0xBC, 0xA8, 0xEC, 0xBD, 0xF8, 0xED, 0xFD, 0xAC, 0xA9, 0xF9, 0xB8, 0xE8,
0x2D, 0x39, 0x69, 0x7C, 0x3C, 0x28, 0x6C, 0x3D, 0x78, 0x6D, 0x7D, 0x2C, 0x29, 0x79, 0x38, 0x68,
0x05, 0x11, 0x41, 0x54, 0x14, 0x00, 0x44, 0x15, 0x50, 0x45, 0x55, 0x04, 0x01, 0x51, 0x10, 0x40,
0x8D, 0x99, 0xC9, 0xDC, 0x9C, 0x88, 0xCC, 0x9D, 0xD8, 0xCD, 0xDD, 0x8C, 0x89, 0xD9, 0x98, 0xC8,
0x2F, 0x3B, 0x6B, 0x7E, 0x3E, 0x2A, 0x6E, 0x3F, 0x7A, 0x6F, 0x7F, 0x2E, 0x2B, 0x7B, 0x3A, 0x6A,
0xA5, 0xB1, 0xE1, 0xF4, 0xB4, 0xA0, 0xE4, 0xB5, 0xF0, 0xE5, 0xF5, 0xA4, 0xA1, 0xF1, 0xB0, 0xE0,
0x8F, 0x9B, 0xCB, 0xDE, 0x9E, 0x8A, 0xCE, 0x9F, 0xDA, 0xCF, 0xDF, 0x8E, 0x8B, 0xDB, 0x9A, 0xCA,
0xAF, 0xBB, 0xEB, 0xFE, 0xBE, 0xAA, 0xEE, 0xBF, 0xFA, 0xEF, 0xFF, 0xAE, 0xAB, 0xFB, 0xBA, 0xEA,
0x0D, 0x19, 0x49, 0x5C, 0x1C, 0x08, 0x4C, 0x1D, 0x58, 0x4D, 0x5D, 0x0C, 0x09, 0x59, 0x18, 0x48,
0x07, 0x13, 0x43, 0x56, 0x16, 0x02, 0x46, 0x17, 0x52, 0x47, 0x57, 0x06, 0x03, 0x53, 0x12, 0x42,
0xA7, 0xB3, 0xE3, 0xF6, 0xB6, 0xA2, 0xE6, 0xB7, 0xF2, 0xE7, 0xF7, 0xA6, 0xA3, 0xF3, 0xB2, 0xE2,
0x25, 0x31, 0x61, 0x74, 0x34, 0x20, 0x64, 0x35, 0x70, 0x65, 0x75, 0x24, 0x21, 0x71, 0x30, 0x60,
0x85, 0x91, 0xC1, 0xD4, 0x94, 0x80, 0xC4, 0x95, 0xD0, 0xC5, 0xD5, 0x84, 0x81, 0xD1, 0x90, 0xC0,
};
static const uint8_t sbox_pmt_0[256] = {
0xC3, 0xC6, 0xD2, 0x97, 0x87, 0x82, 0x93, 0xC7, 0x96, 0xD3, 0xD7, 0x83, 0xC2, 0xD6, 0x86, 0x92,
0xC9, 0xCC, 0xD8, 0x9D, 0x8D, 0x88, 0x99, 0xCD, 0x9C, 0xD9, 0xDD, 0x89, 0xC8, 0xDC, 0x8C, 0x98,
0xE1, 0xE4, 0xF0, 0xB5, 0xA5, 0xA0, 0xB1, 0xE5, 0xB4, 0xF1, 0xF5, 0xA1, 0xE0, 0xF4, 0xA4, 0xB0,
0x6B, 0x6E, 0x7A, 0x3F, 0x2F, 0x2A, 0x3B, 0x6F, 0x3E, 0x7B, 0x7F, 0x2B, 0x6A, 0x7E, 0x2E, 0x3A,
0x4B, 0x4E, 0x5A, 0x1F, 0x0F, 0x0A, 0x1B, 0x4F, 0x1E, 0x5B, 0x5F, 0x0B, 0x4A, 0x5E, 0x0E, 0x1A,
0x41, 0x44, 0x50, 0x15, 0x05, 0x00, 0x11, 0x45, 0x14, 0x51, 0x55, 0x01, 0x40, 0x54, 0x04, 0x10,
0x63, 0x66, 0x72, 0x37, 0x27, 0x22, 0x33, 0x67, 0x36, 0x73, 0x77, 0x23, 0x62, 0x76, 0x26, 0x32,
0xCB, 0xCE, 0xDA, 0x9F, 0x8F, 0x8A, 0x9B, 0xCF, 0x9E, 0xDB, 0xDF, 0x8B, 0xCA, 0xDE, 0x8E, 0x9A,
0x69, 0x6C, 0x78, 0x3D, 0x2D, 0x28, 0x39, 0x6D, 0x3C, 0x79, 0x7D, 0x29, 0x68, 0x7C, 0x2C, 0x38,
0xE3, 0xE6, 0xF2, 0xB7, 0xA7, 0xA2, 0xB3, 0xE7, 0xB6, 0xF3, 0xF7, 0xA3, 0xE2, 0xF6, 0xA6, 0xB2,
0xEB, 0xEE, 0xFA, 0xBF, 0xAF, 0xAA, 0xBB, 0xEF, 0xBE, 0xFB, 0xFF, 0xAB, 0xEA, 0xFE, 0xAE, 0xBA,
0x43, 0x46, 0x52, 0x17, 0x07, 0x02, 0x13, 0x47, 0x16, 0x53, 0x57, 0x03, 0x42, 0x56, 0x06, 0x12,
0xC1, 0xC4, 0xD0, 0x95, 0x85, 0x80, 0x91, 0xC5, 0x94, 0xD1, 0xD5, 0x81, 0xC0, 0xD4, 0x84, 0x90,
0xE9, 0xEC, 0xF8, 0xBD, 0xAD, 0xA8, 0xB9, 0xED, 0xBC, 0xF9, 0xFD, 0xA9, 0xE8, 0xFC, 0xAC, 0xB8,
0x49, 0x4C, 0x58, 0x1D, 0x0D, 0x08, 0x19, 0x4D, 0x1C, 0x59, 0x5D, 0x09, 0x48, 0x5C, 0x0C, 0x18,
0x61, 0x64, 0x70, 0x35, 0x25, 0x20, 0x31, 0x65, 0x34, 0x71, 0x75, 0x21, 0x60, 0x74, 0x24, 0x30,
};
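// Note (added): each sbox_pmt_k table fuses the 4-bit S-box with the PRESENT
// permutation layer -- entry [b] holds the permuted S-box image of byte b,
// pre-shifted so that a fixed 2-bit mask extracts its contribution to one output
// byte.  An output byte is therefore assembled from four input bytes:
//   cipher[0] = (sbox_pmt_3[state[0]] & 0xC0) | (sbox_pmt_2[state[1]] & 0x30)
//             | (sbox_pmt_1[state[2]] & 0x0C) | (sbox_pmt_0[state[3]] & 0x03);
// which is exactly the pattern used in present_rounds() and in the kernel below.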
// the full number of rounds is 31, i.e. rounds = 31
// plain and cipher may overlap, and so may key and cipher
void present_rounds(const uint8_t *plain, const uint8_t *key,
const uint8_t rounds, uint8_t *cipher)
{
uint8_t rounh_counter = 1;
uint8_t state[8];
uint8_t rounh_key[10];
// add key
state[0] = plain[0] ^ key[0];
state[1] = plain[1] ^ key[1];
state[2] = plain[2] ^ key[2];
state[3] = plain[3] ^ key[3];
state[4] = plain[4] ^ key[4];
state[5] = plain[5] ^ key[5];
state[6] = plain[6] ^ key[6];
state[7] = plain[7] ^ key[7];
// update key
rounh_key[9] = key[6] << 5 | key[7] >> 3;
rounh_key[8] = key[5] << 5 | key[6] >> 3;
rounh_key[7] = key[4] << 5 | key[5] >> 3;
rounh_key[6] = key[3] << 5 | key[4] >> 3;
rounh_key[5] = key[2] << 5 | key[3] >> 3;
rounh_key[4] = key[1] << 5 | key[2] >> 3;
rounh_key[3] = key[0] << 5 | key[1] >> 3;
rounh_key[2] = key[9] << 5 | key[0] >> 3;
rounh_key[1] = key[8] << 5 | key[9] >> 3;
rounh_key[0] = key[7] << 5 | key[8] >> 3;
rounh_key[0] = (rounh_key[0] & 0x0F) | sbox[rounh_key[0] >> 4];
rounh_key[7] ^= rounh_counter >> 1;
rounh_key[8] ^= rounh_counter << 7;
// substitution and permutation
cipher[0] =
(sbox_pmt_3[state[0]] & 0xC0) |
(sbox_pmt_2[state[1]] & 0x30) |
(sbox_pmt_1[state[2]] & 0x0C) |
(sbox_pmt_0[state[3]] & 0x03);
cipher[1] =
(sbox_pmt_3[state[4]] & 0xC0) |
(sbox_pmt_2[state[5]] & 0x30) |
(sbox_pmt_1[state[6]] & 0x0C) |
(sbox_pmt_0[state[7]] & 0x03);
cipher[2] =
(sbox_pmt_0[state[0]] & 0xC0) |
(sbox_pmt_3[state[1]] & 0x30) |
(sbox_pmt_2[state[2]] & 0x0C) |
(sbox_pmt_1[state[3]] & 0x03);
cipher[3] =
(sbox_pmt_0[state[4]] & 0xC0) |
(sbox_pmt_3[state[5]] & 0x30) |
(sbox_pmt_2[state[6]] & 0x0C) |
(sbox_pmt_1[state[7]] & 0x03);
cipher[4] =
(sbox_pmt_1[state[0]] & 0xC0) |
(sbox_pmt_0[state[1]] & 0x30) |
(sbox_pmt_3[state[2]] & 0x0C) |
(sbox_pmt_2[state[3]] & 0x03);
cipher[5] =
(sbox_pmt_1[state[4]] & 0xC0) |
(sbox_pmt_0[state[5]] & 0x30) |
(sbox_pmt_3[state[6]] & 0x0C) |
(sbox_pmt_2[state[7]] & 0x03);
cipher[6] =
(sbox_pmt_2[state[0]] & 0xC0) |
(sbox_pmt_1[state[1]] & 0x30) |
(sbox_pmt_0[state[2]] & 0x0C) |
(sbox_pmt_3[state[3]] & 0x03);
cipher[7] =
(sbox_pmt_2[state[4]] & 0xC0) |
(sbox_pmt_1[state[5]] & 0x30) |
(sbox_pmt_0[state[6]] & 0x0C) |
(sbox_pmt_3[state[7]] & 0x03);
for (rounh_counter = 2; rounh_counter <= rounds; rounh_counter++) {
state[0] = cipher[0] ^ rounh_key[0];
state[1] = cipher[1] ^ rounh_key[1];
state[2] = cipher[2] ^ rounh_key[2];
state[3] = cipher[3] ^ rounh_key[3];
state[4] = cipher[4] ^ rounh_key[4];
state[5] = cipher[5] ^ rounh_key[5];
state[6] = cipher[6] ^ rounh_key[6];
state[7] = cipher[7] ^ rounh_key[7];
cipher[0] =
(sbox_pmt_3[state[0]] & 0xC0) |
(sbox_pmt_2[state[1]] & 0x30) |
(sbox_pmt_1[state[2]] & 0x0C) |
(sbox_pmt_0[state[3]] & 0x03);
cipher[1] =
(sbox_pmt_3[state[4]] & 0xC0) |
(sbox_pmt_2[state[5]] & 0x30) |
(sbox_pmt_1[state[6]] & 0x0C) |
(sbox_pmt_0[state[7]] & 0x03);
cipher[2] =
(sbox_pmt_0[state[0]] & 0xC0) |
(sbox_pmt_3[state[1]] & 0x30) |
(sbox_pmt_2[state[2]] & 0x0C) |
(sbox_pmt_1[state[3]] & 0x03);
cipher[3] =
(sbox_pmt_0[state[4]] & 0xC0) |
(sbox_pmt_3[state[5]] & 0x30) |
(sbox_pmt_2[state[6]] & 0x0C) |
(sbox_pmt_1[state[7]] & 0x03);
cipher[4] =
(sbox_pmt_1[state[0]] & 0xC0) |
(sbox_pmt_0[state[1]] & 0x30) |
(sbox_pmt_3[state[2]] & 0x0C) |
(sbox_pmt_2[state[3]] & 0x03);
cipher[5] =
(sbox_pmt_1[state[4]] & 0xC0) |
(sbox_pmt_0[state[5]] & 0x30) |
(sbox_pmt_3[state[6]] & 0x0C) |
(sbox_pmt_2[state[7]] & 0x03);
cipher[6] =
(sbox_pmt_2[state[0]] & 0xC0) |
(sbox_pmt_1[state[1]] & 0x30) |
(sbox_pmt_0[state[2]] & 0x0C) |
(sbox_pmt_3[state[3]] & 0x03);
cipher[7] =
(sbox_pmt_2[state[4]] & 0xC0) |
(sbox_pmt_1[state[5]] & 0x30) |
(sbox_pmt_0[state[6]] & 0x0C) |
(sbox_pmt_3[state[7]] & 0x03);
rounh_key[5] ^= rounh_counter << 2; // do this first, which may be faster
// use state[] for temporary storage
state[2] = rounh_key[9];
state[1] = rounh_key[8];
state[0] = rounh_key[7];
rounh_key[9] = rounh_key[6] << 5 | rounh_key[7] >> 3;
rounh_key[8] = rounh_key[5] << 5 | rounh_key[6] >> 3;
rounh_key[7] = rounh_key[4] << 5 | rounh_key[5] >> 3;
rounh_key[6] = rounh_key[3] << 5 | rounh_key[4] >> 3;
rounh_key[5] = rounh_key[2] << 5 | rounh_key[3] >> 3;
rounh_key[4] = rounh_key[1] << 5 | rounh_key[2] >> 3;
rounh_key[3] = rounh_key[0] << 5 | rounh_key[1] >> 3;
rounh_key[2] = state[2] << 5 | rounh_key[0] >> 3;
rounh_key[1] = state[1] << 5 | state[2] >> 3;
rounh_key[0] = state[0] << 5 | state[1] >> 3;
rounh_key[0] = (rounh_key[0] & 0x0F) | sbox[rounh_key[0] >> 4];
}
// if rounds is not 31, skip the final key addition;
// this is useful when building PRESENT-based constructions such as a MAC
if (31 == rounds) {
cipher[0] ^= rounh_key[0];
cipher[1] ^= rounh_key[1];
cipher[2] ^= rounh_key[2];
cipher[3] ^= rounh_key[3];
cipher[4] ^= rounh_key[4];
cipher[5] ^= rounh_key[5];
cipher[6] ^= rounh_key[6];
cipher[7] ^= rounh_key[7];
}
}
__global__ void present(
const int num,
const int rounds,
const uint8_t *plains,
const uint8_t *keys,
uint8_t *ciphers,
const uint8_t *sbox,
const uint8_t *sbox_pmt_0,
const uint8_t *sbox_pmt_1,
const uint8_t *sbox_pmt_2,
const uint8_t *sbox_pmt_3)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= num) return;
const uint8_t *plain = plains + gid * 8;
const uint8_t *key = keys + gid * 10;
uint8_t *cipher = ciphers + gid * 8;
uint8_t rounh_counter = 1;
uint8_t state[8];
uint8_t rounh_key[10];
// add key
state[0] = plain[0] ^ key[0];
state[1] = plain[1] ^ key[1];
state[2] = plain[2] ^ key[2];
state[3] = plain[3] ^ key[3];
state[4] = plain[4] ^ key[4];
state[5] = plain[5] ^ key[5];
state[6] = plain[6] ^ key[6];
state[7] = plain[7] ^ key[7];
// update key
rounh_key[9] = key[6] << 5 | key[7] >> 3;
rounh_key[8] = key[5] << 5 | key[6] >> 3;
rounh_key[7] = key[4] << 5 | key[5] >> 3;
rounh_key[6] = key[3] << 5 | key[4] >> 3;
rounh_key[5] = key[2] << 5 | key[3] >> 3;
rounh_key[4] = key[1] << 5 | key[2] >> 3;
rounh_key[3] = key[0] << 5 | key[1] >> 3;
rounh_key[2] = key[9] << 5 | key[0] >> 3;
rounh_key[1] = key[8] << 5 | key[9] >> 3;
rounh_key[0] = key[7] << 5 | key[8] >> 3;
rounh_key[0] = (rounh_key[0] & 0x0F) | sbox[rounh_key[0] >> 4];
rounh_key[7] ^= rounh_counter >> 1;
rounh_key[8] ^= rounh_counter << 7;
// substitution and permutation
cipher[0] =
(sbox_pmt_3[state[0]] & 0xC0) |
(sbox_pmt_2[state[1]] & 0x30) |
(sbox_pmt_1[state[2]] & 0x0C) |
(sbox_pmt_0[state[3]] & 0x03);
cipher[1] =
(sbox_pmt_3[state[4]] & 0xC0) |
(sbox_pmt_2[state[5]] & 0x30) |
(sbox_pmt_1[state[6]] & 0x0C) |
(sbox_pmt_0[state[7]] & 0x03);
cipher[2] =
(sbox_pmt_0[state[0]] & 0xC0) |
(sbox_pmt_3[state[1]] & 0x30) |
(sbox_pmt_2[state[2]] & 0x0C) |
(sbox_pmt_1[state[3]] & 0x03);
cipher[3] =
(sbox_pmt_0[state[4]] & 0xC0) |
(sbox_pmt_3[state[5]] & 0x30) |
(sbox_pmt_2[state[6]] & 0x0C) |
(sbox_pmt_1[state[7]] & 0x03);
cipher[4] =
(sbox_pmt_1[state[0]] & 0xC0) |
(sbox_pmt_0[state[1]] & 0x30) |
(sbox_pmt_3[state[2]] & 0x0C) |
(sbox_pmt_2[state[3]] & 0x03);
cipher[5] =
(sbox_pmt_1[state[4]] & 0xC0) |
(sbox_pmt_0[state[5]] & 0x30) |
(sbox_pmt_3[state[6]] & 0x0C) |
(sbox_pmt_2[state[7]] & 0x03);
cipher[6] =
(sbox_pmt_2[state[0]] & 0xC0) |
(sbox_pmt_1[state[1]] & 0x30) |
(sbox_pmt_0[state[2]] & 0x0C) |
(sbox_pmt_3[state[3]] & 0x03);
cipher[7] =
(sbox_pmt_2[state[4]] & 0xC0) |
(sbox_pmt_1[state[5]] & 0x30) |
(sbox_pmt_0[state[6]] & 0x0C) |
(sbox_pmt_3[state[7]] & 0x03);
for (rounh_counter = 2; rounh_counter <= rounds; rounh_counter++) {
state[0] = cipher[0] ^ rounh_key[0];
state[1] = cipher[1] ^ rounh_key[1];
state[2] = cipher[2] ^ rounh_key[2];
state[3] = cipher[3] ^ rounh_key[3];
state[4] = cipher[4] ^ rounh_key[4];
state[5] = cipher[5] ^ rounh_key[5];
state[6] = cipher[6] ^ rounh_key[6];
state[7] = cipher[7] ^ rounh_key[7];
cipher[0] =
(sbox_pmt_3[state[0]] & 0xC0) |
(sbox_pmt_2[state[1]] & 0x30) |
(sbox_pmt_1[state[2]] & 0x0C) |
(sbox_pmt_0[state[3]] & 0x03);
cipher[1] =
(sbox_pmt_3[state[4]] & 0xC0) |
(sbox_pmt_2[state[5]] & 0x30) |
(sbox_pmt_1[state[6]] & 0x0C) |
(sbox_pmt_0[state[7]] & 0x03);
cipher[2] =
(sbox_pmt_0[state[0]] & 0xC0) |
(sbox_pmt_3[state[1]] & 0x30) |
(sbox_pmt_2[state[2]] & 0x0C) |
(sbox_pmt_1[state[3]] & 0x03);
cipher[3] =
(sbox_pmt_0[state[4]] & 0xC0) |
(sbox_pmt_3[state[5]] & 0x30) |
(sbox_pmt_2[state[6]] & 0x0C) |
(sbox_pmt_1[state[7]] & 0x03);
cipher[4] =
(sbox_pmt_1[state[0]] & 0xC0) |
(sbox_pmt_0[state[1]] & 0x30) |
(sbox_pmt_3[state[2]] & 0x0C) |
(sbox_pmt_2[state[3]] & 0x03);
cipher[5] =
(sbox_pmt_1[state[4]] & 0xC0) |
(sbox_pmt_0[state[5]] & 0x30) |
(sbox_pmt_3[state[6]] & 0x0C) |
(sbox_pmt_2[state[7]] & 0x03);
cipher[6] =
(sbox_pmt_2[state[0]] & 0xC0) |
(sbox_pmt_1[state[1]] & 0x30) |
(sbox_pmt_0[state[2]] & 0x0C) |
(sbox_pmt_3[state[3]] & 0x03);
cipher[7] =
(sbox_pmt_2[state[4]] & 0xC0) |
(sbox_pmt_1[state[5]] & 0x30) |
(sbox_pmt_0[state[6]] & 0x0C) |
(sbox_pmt_3[state[7]] & 0x03);
rounh_key[5] ^= rounh_counter << 2; // do this first, which may be faster
// use state[] for temporary storage
state[2] = rounh_key[9];
state[1] = rounh_key[8];
state[0] = rounh_key[7];
rounh_key[9] = rounh_key[6] << 5 | rounh_key[7] >> 3;
rounh_key[8] = rounh_key[5] << 5 | rounh_key[6] >> 3;
rounh_key[7] = rounh_key[4] << 5 | rounh_key[5] >> 3;
rounh_key[6] = rounh_key[3] << 5 | rounh_key[4] >> 3;
rounh_key[5] = rounh_key[2] << 5 | rounh_key[3] >> 3;
rounh_key[4] = rounh_key[1] << 5 | rounh_key[2] >> 3;
rounh_key[3] = rounh_key[0] << 5 | rounh_key[1] >> 3;
rounh_key[2] = state[2] << 5 | rounh_key[0] >> 3;
rounh_key[1] = state[1] << 5 | state[2] >> 3;
rounh_key[0] = state[0] << 5 | state[1] >> 3;
rounh_key[0] = (rounh_key[0] & 0x0F) | sbox[rounh_key[0] >> 4];
}
// if rounds is not 31, skip the final key addition;
// this is useful when building PRESENT-based constructions such as a MAC
if (31 == rounds) {
cipher[0] ^= rounh_key[0];
cipher[1] ^= rounh_key[1];
cipher[2] ^= rounh_key[2];
cipher[3] ^= rounh_key[3];
cipher[4] ^= rounh_key[4];
cipher[5] ^= rounh_key[5];
cipher[6] ^= rounh_key[6];
cipher[7] ^= rounh_key[7];
}
}
int main(int argc, char** argv) {
if (argc < 2) { printf("usage: %s <number_of_plain_texts>\n", argv[0]); return 1; }
const int num = atoi(argv[1]); // number of plain texts
const unsigned int seed = 8;
srand(seed);
// Initial 8-byte plain text
std::array<uint8_t, 8> plain {'P', 'R', 'E', 'S', 'E', 'N', 'T', '\0'};
// 80-bit key
uint8_t key[10];
// prepare data for offloading
uint8_t* h_plain = (uint8_t*) malloc (sizeof(uint8_t) * 8 * num);
uint8_t* h_key = (uint8_t*) malloc (sizeof(uint8_t) * 10 * num);
uint8_t* h_cipher = (uint8_t*) malloc (sizeof(uint8_t) * 8 * num);
// full rounds
const int rounds = 31;
for (int i = 0; i < num; i++) {
// set a random key for each text
for (int k = 0; k < 10; k++) key[k] = rand() % 256;
memcpy(h_key+i*10, key, 10);
memcpy(h_plain+i*8, plain.data(), 8);
// shuffle the text
std::shuffle(plain.begin(), plain.end(), std::default_random_engine(seed));
}
// use checksum for verification
size_t h_checksum = 0;
for (int n = 0; n < ITERATION; n++) {
for (int i = 0; i < num; i++) {
present_rounds(h_plain+i*8, h_key+i*10, rounds, h_cipher+i*8);
for (int k = 0; k < 8; k++) h_checksum += h_cipher[i*8+k];
}
}
uint8_t* d_plain;
uint8_t* d_key;
uint8_t* d_cipher;
uint8_t* d_sbox;
uint8_t* d_sbox_pmt_3;
uint8_t* d_sbox_pmt_2;
uint8_t* d_sbox_pmt_1;
uint8_t* d_sbox_pmt_0;
hipMalloc((void**)&d_plain, 8*num);
hipMemcpyAsync(d_plain, h_plain, 8*num, hipMemcpyHostToDevice, 0);
hipMalloc((void**)&d_key, 10*num);
hipMemcpyAsync(d_key, h_key, 10*num, hipMemcpyHostToDevice, 0);
hipMalloc((void**)&d_cipher, 8*num);
hipMalloc((void**)&d_sbox, 16);
hipMemcpyAsync(d_sbox, sbox, 16, hipMemcpyHostToDevice, 0);
hipMalloc((void**)&d_sbox_pmt_3, 256);
hipMemcpyAsync(d_sbox_pmt_3, sbox_pmt_3, 256, hipMemcpyHostToDevice, 0);
hipMalloc((void**)&d_sbox_pmt_2, 256);
hipMemcpyAsync(d_sbox_pmt_2, sbox_pmt_2, 256, hipMemcpyHostToDevice, 0);
hipMalloc((void**)&d_sbox_pmt_1, 256);
hipMemcpyAsync(d_sbox_pmt_1, sbox_pmt_1, 256, hipMemcpyHostToDevice, 0);
hipMalloc((void**)&d_sbox_pmt_0, 256);
hipMemcpyAsync(d_sbox_pmt_0, sbox_pmt_0, 256, hipMemcpyHostToDevice, 0);
dim3 grid ((num+255)/256);
dim3 block (256);
size_t d_checksum = 0;
for (int n = 0; n < ITERATION; n++) {
hipLaunchKernelGGL(present, dim3(grid), dim3(block), 0, 0, num, rounds, d_plain, d_key, d_cipher, d_sbox,
d_sbox_pmt_0, d_sbox_pmt_1, d_sbox_pmt_2, d_sbox_pmt_3);
hipMemcpy(h_cipher, d_cipher, num * 8, hipMemcpyDeviceToHost);
for (int i = 0; i < num*8; i++) d_checksum += h_cipher[i];
}
if (h_checksum != d_checksum)
printf("FAILED\n");
else
printf("SUCCESS\n");
free(h_plain);
free(h_key);
free(h_cipher);
hipFree(d_plain);
hipFree(d_key);
hipFree(d_cipher);
hipFree(d_sbox);
hipFree(d_sbox_pmt_3);
hipFree(d_sbox_pmt_2);
hipFree(d_sbox_pmt_1);
hipFree(d_sbox_pmt_0);
}
|
the_stack
|
#include "CUFLU.h"
#if ( MODEL == HYDRO && defined MHD )
// external functions
#ifdef __CUDACC__
#include "CUFLU_Shared_FluUtility.cu"
#else // #ifdef __CUDACC__
void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset );
void Hydro_Con2Flux( const int XYZ, real Flux[], const real In[], const real MinPres,
const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[],
const real *const EoS_Table[EOS_NTABLE_MAX], const real* const PresIn );
void Hydro_Con2Pri( const real In[], real Out[], const real MinPres,
const bool FracPassive, const int NFrac, const int FracIdx[],
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2E_t EoS_DensPres2Eint,
const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[],
const real *const EoS_Table[EOS_NTABLE_MAX], real* const EintOut );
#endif // #ifdef __CUDACC__ ... else ...
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_RiemannSolver_HLLD
// Description : Approximate Riemann solver of Harten, Lax, and van Leer extended to support MHD
//
// Note : 1. Input data should be conserved variables
// 2. Ref : (a) Riemann Solvers and Numerical Methods for Fluid Dynamics - A Practical Introduction
// ~ by Eleuterio F. Toro
// (b) Stone et al., ApJS, 178, 137 (2008)
// (c) Batten et al., SIAM J. Sci. Comput., 18, 1553 (1997)
// (d) Miyoshi & Kusano, JCP, 208, 315 (2005)
// (e) Davis, SIAM J. Sci. Statist. Comput. 9, 445 (1988)
// 3. Wave-speed estimator is set by HLLD_WAVESPEED in CUFLU.h
// 4. Support general EoS
// 5. This function is shared by MHM, MHM_RP, and CTU schemes
//
// Parameter : XYZ : Target spatial direction : (0/1/2) --> (x/y/z)
// Flux_Out : Array to store the output flux
// L_In : Input left state (conserved variables)
// R_In : Input right state (conserved variables)
// MinDens/Pres : Density and pressure floors
// EoS_DensEint2Pres : EoS routine to compute the gas pressure
// EoS_DensPres2CSqr : EoS routine to compute the sound speed squared
// EoS_AuxArray_* : Auxiliary arrays for the EoS routines
// EoS_Table : EoS tables
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE
void Hydro_RiemannSolver_HLLD( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[],
const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres,
const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] )
{
// check
# if ( HLLD_WAVESPEED != HLL_WAVESPEED_DAVIS )
# error : ERROR : HLLD_WAVESPEED only supports HLL_WAVESPEED_DAVIS !!
# endif
const real MaxErr2 = SQR(MAX_ERROR);
const real ZERO = (real)0.0;
const real ONE = (real)1.0;
const real _TWO = (real)0.5;
const bool FracPassive_No = false;
const bool JeansMinPres_No = false;
const int IdxBx = MAG_OFFSET + 0;
const int IdxBy = MAG_OFFSET + 1;
const int IdxBz = MAG_OFFSET + 2;
real Con_L[NCOMP_TOTAL_PLUS_MAG], Con_R[NCOMP_TOTAL_PLUS_MAG], Pri_L[NCOMP_TOTAL_PLUS_MAG], Pri_R[NCOMP_TOTAL_PLUS_MAG];
for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++)
{
Con_L[v] = L_In[v];
Con_R[v] = R_In[v];
}
Hydro_Rotate3D( Con_L, XYZ, true, IdxBx );
Hydro_Rotate3D( Con_R, XYZ, true, IdxBx );
const real Bx = Con_L[IdxBx];
const real ByL = Con_L[IdxBy];
const real BzL = Con_L[IdxBz];
const real ByR = Con_R[IdxBy];
const real BzR = Con_R[IdxBz];
const real _Bx = ONE / Bx;
const real Bx2 = SQR( Bx );
const real _Bx2 = SQR( _Bx );
# ifdef GAMER_DEBUG
if ( Con_L[IdxBx] != Con_R[IdxBx] )
printf( "ERROR : BxL (%24.17e) != BxR (%24.17e) for XYZ %d at file <%s>, line <%d>, function <%s>!!\n",
Con_L[IdxBx], Con_R[IdxBx], XYZ, __FILE__, __LINE__, __FUNCTION__ );
# endif
Hydro_Con2Pri( Con_L, Pri_L, MinPres, FracPassive_No, NULL_INT, NULL, JeansMinPres_No, NULL_REAL,
EoS_DensEint2Pres, NULL, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL );
Hydro_Con2Pri( Con_R, Pri_R, MinPres, FracPassive_No, NULL_INT, NULL, JeansMinPres_No, NULL_REAL,
EoS_DensEint2Pres, NULL, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL );
real tmp_1, tmp_2, crit, crit_Bx;
real _RhoL, _RhoR;
real sqrt_RhoLst, sqrt_RhoRst;
real PT_L, PT_R, PT_st;
real BtL2, BtR2, B2L_d2, B2R_d2;
real a2 , Cf2 ,Cax2, Cat2, Ca2_plus_a2, Ca2_min_a2, Cf2_min_Cs2;
real Cf_L, Cf_R;
real Sd_L, Sd_R, Sdm_L, Sdm_R, SdL_SdmL, SdR_SdmR;
real VBdot_Lst, VBdot_Rst;
real Speed[5] = { ZERO, ZERO, ZERO, ZERO, ZERO };
real Con_Lst[NCOMP_TOTAL_PLUS_MAG], Con_Ldst[NCOMP_TOTAL_PLUS_MAG];
real Con_Rdst[NCOMP_TOTAL_PLUS_MAG], Con_Rst[NCOMP_TOTAL_PLUS_MAG];
real _Rho_Lst, Vy_Lst, Vz_Lst;
real _Rho_Rst, Vy_Rst, Vz_Rst;
real Flux_L[NCOMP_TOTAL_PLUS_MAG], Flux_R[NCOMP_TOTAL_PLUS_MAG];
_RhoL = ONE/Con_L[0];
_RhoR = ONE/Con_R[0];
BtL2 = SQR( ByL ) + SQR( BzL );
BtR2 = SQR( ByR ) + SQR( BzR );
B2L_d2 = _TWO*( Bx2 + BtL2 );
B2R_d2 = _TWO*( Bx2 + BtR2 );
PT_L = Pri_L[4] + B2L_d2;
PT_R = Pri_R[4] + B2R_d2;
a2 = EoS_DensPres2CSqr( Con_L[0], Pri_L[4], Con_L+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table );
Cax2 = Bx2*_RhoL;
Cat2 = BtL2*_RhoL;
Ca2_plus_a2 = Cat2 + Cax2 + a2;
Ca2_min_a2 = Cat2 + Cax2 - a2;
Cf2_min_Cs2 = SQRT( SQR(Ca2_min_a2) + (real)4.0*a2*Cat2 );
if ( Cat2 == ZERO )
{
if ( Cax2 >= a2 ) Cf2 = Cax2;
else Cf2 = a2;
}
else
{
if ( Cax2 == ZERO ) Cf2 = a2 + Cat2;
else Cf2 = _TWO*( Ca2_plus_a2 + Cf2_min_Cs2 );
}
Cf_L = SQRT( Cf2 ); // Cf2 is positive definite using the above formula
a2 = EoS_DensPres2CSqr( Con_R[0], Pri_R[4], Con_R+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table );
Cax2 = Bx2*_RhoR;
Cat2 = BtR2*_RhoR;
Ca2_plus_a2 = Cat2 + Cax2 + a2;
Ca2_min_a2 = Cat2 + Cax2 - a2;
Cf2_min_Cs2 = SQRT( SQR(Ca2_min_a2) + (real)4.0*a2*Cat2 );
if ( Cat2 == ZERO )
{
if ( Cax2 >= a2 ) Cf2 = Cax2;
else Cf2 = a2;
}
else
{
if ( Cax2 == ZERO ) Cf2 = a2 + Cat2;
else Cf2 = _TWO*( Ca2_plus_a2 + Cf2_min_Cs2 );
}
Cf_R = SQRT( Cf2 ); // Cf2 is positive definite using the above formula
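// Note (added): both blocks above evaluate the fast magnetosonic speed
//   Cf^2 = 0.5 * [ (a^2 + Ca^2) + sqrt( (a^2 + Ca^2)^2 - 4*a^2*Cax^2 ) ],
// with Ca^2 = (Bx^2 + Bt^2)/rho and Cax^2 = Bx^2/rho; the Cat2 == 0 and
// Cax2 == 0 branches are the exact degenerate limits of this expression.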
// estimate the maximum wave-speed using the min/max left and right eigenvalues
# if ( HLLD_WAVESPEED == HLL_WAVESPEED_DAVIS )
Speed[0] = FMIN( Pri_L[1]-Cf_L, Pri_R[1]-Cf_R );
Speed[4] = FMAX( Pri_L[1]+Cf_L, Pri_R[1]+Cf_R );
# else
# error : ERROR : unsupported HLLD_WAVESPEED !!
# endif
Hydro_Con2Flux( 0, Flux_L, Con_L, MinPres, NULL, NULL, NULL, NULL, Pri_L+4 );
Hydro_Con2Flux( 0, Flux_R, Con_R, MinPres, NULL, NULL, NULL, NULL, Pri_R+4 );
// return the upwind fluxes if flow is supersonic
if ( Speed[0] >= ZERO )
{
for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) Flux_Out[v] = Flux_L[v];
Hydro_Rotate3D( Flux_Out, XYZ, false, IdxBx );
return;
}
if ( Speed[4] <= ZERO )
{
for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) Flux_Out[v] = Flux_R[v];
Hydro_Rotate3D( Flux_Out, XYZ, false, IdxBx );
return;
}
Sd_L = Speed[0] - Pri_L[1];
Sd_R = Speed[4] - Pri_R[1];
tmp_1 = Sd_L*Pri_L[0];
tmp_2 = Sd_R*Pri_R[0];
Speed[2] = ( tmp_2*Pri_R[1] - tmp_1*Pri_L[1] - PT_R + PT_L ) / ( tmp_2 - tmp_1);
Sdm_L = Speed[0] - Speed[2];
Sdm_R = Speed[4] - Speed[2];
SdL_SdmL = Sd_L / Sdm_L;
SdR_SdmR = Sd_R / Sdm_R;
Con_Lst[0] = Con_L[0]*SdL_SdmL;
Con_Rst[0] = Con_R[0]*SdR_SdmR;
sqrt_RhoLst = SQRT( Con_Lst[0] );
sqrt_RhoRst = SQRT( Con_Rst[0] );
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(Con_Lst[0]) )
printf( "ERROR : invalid Con_Lst[0] (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Con_Lst[0], __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Con_Rst[0]) )
printf( "ERROR : invalid Con_Rst[0] (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Con_Rst[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
tmp_1 = FABS( Bx );
Speed[1] = Speed[2] - tmp_1/sqrt_RhoLst;
Speed[3] = Speed[2] + tmp_1/sqrt_RhoRst;
PT_st = PT_L + Pri_L[0]*Sd_L*( Sd_L - Sdm_L );
Con_Lst[ 1] = Con_Lst[0]*Speed[2];
Con_Lst[IdxBx] = Bx;
tmp_1 = FMIN( B2L_d2 , B2R_d2 );
if ( tmp_1 == ZERO ) crit_Bx = ZERO;
else crit_Bx = FABS( _TWO*Bx2/tmp_1 );
if ( crit_Bx < MaxErr2 ) crit = Con_L[0]*Sd_L*Sdm_L;
else crit = Con_L[0]*Sd_L*Sdm_L*_Bx2 - ONE;
if ( FABS(crit) < MAX_ERROR )
{
Con_Lst[2] = Con_Lst[0]*Pri_L[2];
Con_Lst[3] = Con_Lst[0]*Pri_L[3];
Con_Lst[IdxBy] = ByL;
Con_Lst[IdxBz] = BzL;
}
else
{
if ( crit_Bx < MaxErr2 )
{
Con_Lst[ 2] = Con_Lst[0]*Pri_L[2];
Con_Lst[ 3] = Con_Lst[0]*Pri_L[3];
Con_Lst[IdxBy] = ByL*SdL_SdmL;
Con_Lst[IdxBz] = BzL*SdL_SdmL;
}
else
{
tmp_1 = ONE/crit;
tmp_2 = ( Sd_L - Sdm_L )*_Bx*tmp_1;
Con_Lst[ 2] = Con_Lst[0]*( Pri_L[2] - Pri_L[IdxBy]*tmp_2 );
Con_Lst[ 3] = Con_Lst[0]*( Pri_L[3] - Pri_L[IdxBz]*tmp_2 );
tmp_2 = ( Con_L[0]*Sd_L*Sd_L - Bx2 )*_Bx2*tmp_1;
Con_Lst[IdxBy] = ByL*tmp_2;
Con_Lst[IdxBz] = BzL*tmp_2;
}
} // if ( FABS(crit) < MAX_ERROR ) ... else ...
VBdot_Lst = ( Con_Lst[1]*Con_Lst[IdxBx] + Con_Lst[2]*Con_Lst[IdxBy] + Con_Lst[3]*Con_Lst[IdxBz] ) / Con_Lst[0];
Con_Lst[ 4] = ( Sd_L*Con_L[4] - PT_L*Pri_L[1] + PT_st*Speed[2] +
Bx*( Pri_L[1]*Pri_L[IdxBx] + Pri_L[2]*Pri_L[IdxBy] + Pri_L[3]*Pri_L[IdxBz] - VBdot_Lst ) ) / Sdm_L;
_Rho_Lst = (real)1.0/Con_Lst[0];
Vy_Lst = _Rho_Lst*Con_Lst[2];
Vz_Lst = _Rho_Lst*Con_Lst[3];
Con_Rst[ 1] = Con_Rst[0]*Speed[2];
Con_Rst[IdxBx] = Bx;
if ( crit_Bx < MaxErr2 ) crit = Con_R[0]*Sd_R*Sdm_R;
else crit = Con_R[0]*Sd_R*Sdm_R*_Bx2 - ONE;
if ( FABS(crit) < MAX_ERROR )
{
Con_Rst[ 2] = Con_Rst[0]*Pri_R[2];
Con_Rst[ 3] = Con_Rst[0]*Pri_R[3];
Con_Rst[IdxBy] = ByR;
Con_Rst[IdxBz] = BzR;
}
else
{
if ( crit_Bx < MaxErr2 )
{
Con_Rst[ 2] = Con_Rst[0]*Pri_R[2];
Con_Rst[ 3] = Con_Rst[0]*Pri_R[3];
Con_Rst[IdxBy] = ByR*SdR_SdmR;
Con_Rst[IdxBz] = BzR*SdR_SdmR;
}
else
{
tmp_1 = ONE/crit;
tmp_2 = ( Sd_R - Sdm_R )*_Bx*tmp_1;
Con_Rst[ 2] = Con_Rst[0]*( Pri_R[2] - Pri_R[IdxBy]*tmp_2 );
Con_Rst[ 3] = Con_Rst[0]*( Pri_R[3] - Pri_R[IdxBz]*tmp_2 );
tmp_2 = ( Con_R[0]*Sd_R*Sd_R - Bx2 )*_Bx2*tmp_1;
Con_Rst[IdxBy] = ByR*tmp_2;
Con_Rst[IdxBz] = BzR*tmp_2;
}
} // if ( FABS(crit) < MAX_ERROR ) ... else ...
VBdot_Rst = ( Con_Rst[1]*Con_Rst[IdxBx] + Con_Rst[2]*Con_Rst[IdxBy] + Con_Rst[3]*Con_Rst[IdxBz] ) / Con_Rst[0];
Con_Rst[4] = ( Sd_R*Con_R[4] - PT_R*Pri_R[1] + PT_st*Speed[2] +
Bx*( Pri_R[1]*Pri_R[IdxBx] + Pri_R[2]*Pri_R[IdxBy] + Pri_R[3]*Pri_R[IdxBz] - VBdot_Rst ) ) / Sdm_R;
_Rho_Rst = (real)1.0/Con_Rst[0];
Vy_Rst = _Rho_Rst*Con_Rst[2];
Vz_Rst = _Rho_Rst*Con_Rst[3];
if ( crit_Bx < MaxErr2 )
{
for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++)
{
Con_Ldst[v] = Con_Lst[v];
Con_Rdst[v] = Con_Rst[v];
}
}
else
{
const real invsumd = ONE/( sqrt_RhoLst + sqrt_RhoRst );
const real Bxsig = SIGN( Bx );
Con_Ldst[0] = Con_Lst[0];
Con_Rdst[0] = Con_Rst[0];
Con_Ldst[1] = Con_Lst[1];
Con_Rdst[1] = Con_Rst[1];
tmp_1 = invsumd*( sqrt_RhoLst*Vy_Lst + sqrt_RhoRst*Vy_Rst + Bxsig*( Con_Rst[IdxBy] - Con_Lst[IdxBy] ) );
Con_Ldst[2] = Con_Ldst[0]*tmp_1;
Con_Rdst[2] = Con_Rdst[0]*tmp_1;
tmp_1 = invsumd*( sqrt_RhoLst*Vz_Lst + sqrt_RhoRst*Vz_Rst + Bxsig*( Con_Rst[IdxBz] - Con_Lst[IdxBz] ) );
Con_Ldst[3] = Con_Ldst[0]*tmp_1;
Con_Rdst[3] = Con_Rdst[0]*tmp_1;
tmp_1 = invsumd*( sqrt_RhoLst*Con_Rst[IdxBy] + sqrt_RhoRst*Con_Lst[IdxBy] +
Bxsig*sqrt_RhoLst*sqrt_RhoRst*( Vy_Rst - Vy_Lst ) );
Con_Ldst[IdxBy] = tmp_1;
Con_Rdst[IdxBy] = tmp_1;
tmp_1 = invsumd*( sqrt_RhoLst*Con_Rst[IdxBz] + sqrt_RhoRst*Con_Lst[IdxBz] +
Bxsig*sqrt_RhoLst*sqrt_RhoRst*( Vz_Rst - Vz_Lst ) );
Con_Ldst[IdxBz] = tmp_1;
Con_Rdst[IdxBz] = tmp_1;
tmp_1 = Speed[2]*Bx + ( Con_Ldst[2]*Con_Ldst[IdxBy] + Con_Ldst[3]*Con_Ldst[IdxBz] ) / Con_Ldst[0];
Con_Ldst[4] = Con_Lst[4] - sqrt_RhoLst*Bxsig*( VBdot_Lst - tmp_1 );
Con_Rdst[4] = Con_Rst[4] + sqrt_RhoRst*Bxsig*( VBdot_Rst - tmp_1 );
} // if ( crit_Bx < MaxErr2 ) ... else ...
// evaluate the HLLD fluxes
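// (added) Speed[0..4] = (S_L, S_L*, S_M, S_R*, S_R); the supersonic cases
// S_L >= 0 and S_R <= 0 already returned F_L / F_R above, so the fan below picks
//   S_L* >= 0         : F_L*  = F_L + S_L ( U_L* - U_L )
//   S_M  >= 0 > S_L*  : F_L** = F_L - S_L U_L - (S_L* - S_L) U_L* + S_L* U_L**
//   S_R* >  0 > S_M   : F_R** (mirror of the previous case with the right states)
//   otherwise         : F_R*  = F_R + S_R ( U_R* - U_R )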
if ( Speed[1] >= ZERO )
{
for (int v=0; v<NCOMP_FLUID; v++)
Flux_Out[ v] = Flux_L[v] + Speed[0]*( Con_Lst[v] - Con_L[v] );
Flux_Out[IdxBx] = ZERO;
Flux_Out[IdxBy] = Flux_L[IdxBy] + Speed[0]*(Con_Lst[IdxBy] - ByL);
Flux_Out[IdxBz] = Flux_L[IdxBz] + Speed[0]*(Con_Lst[IdxBz] - BzL);
}
else if ( Speed[2] >= ZERO )
{
tmp_1 = Speed[1] - Speed[0];
for (int v=0; v<NCOMP_FLUID; v++)
Flux_Out[ v] = Flux_L[v] - Speed[0]*Con_L[v] - tmp_1*Con_Lst[v] + Speed[1]*Con_Ldst[v];
Flux_Out[IdxBx] = ZERO;
Flux_Out[IdxBy] = Flux_L[IdxBy] - Speed[0]*ByL - tmp_1*Con_Lst[IdxBy] + Speed[1]*Con_Ldst[IdxBy];
Flux_Out[IdxBz] = Flux_L[IdxBz] - Speed[0]*BzL - tmp_1*Con_Lst[IdxBz] + Speed[1]*Con_Ldst[IdxBz];
}
else if ( Speed[3] > ZERO )
{
tmp_1 = Speed[3] - Speed[4];
for (int v=0; v<NCOMP_FLUID; v++)
Flux_Out[ v] = Flux_R[v] - Speed[4]*Con_R[v] - tmp_1*Con_Rst[v] + Speed[3]*Con_Rdst[v];
Flux_Out[IdxBx] = ZERO;
Flux_Out[IdxBy] = Flux_R[IdxBy] - Speed[4]*ByR - tmp_1*Con_Rst[IdxBy] + Speed[3]*Con_Rdst[IdxBy];
Flux_Out[IdxBz] = Flux_R[IdxBz] - Speed[4]*BzR - tmp_1*Con_Rst[IdxBz] + Speed[3]*Con_Rdst[IdxBz];
}
else
{
for (int v=0; v<NCOMP_FLUID; v++)
Flux_Out[ v] = Flux_R[v] + Speed[4]*( Con_Rst[v] - Con_R[v] );
Flux_Out[IdxBx] = ZERO;
Flux_Out[IdxBy] = Flux_R[IdxBy] + Speed[4]*( Con_Rst[IdxBy] - ByR );
Flux_Out[IdxBz] = Flux_R[IdxBz] + Speed[4]*( Con_Rst[IdxBz] - BzR );
} // if ( Speed[x] > ZERO ) ... else ...
// evaluate the fluxes for passive scalars
# if ( NCOMP_PASSIVE > 0 )
if ( Flux_Out[FLUX_DENS] >= ZERO )
{
const real vx = Flux_Out[FLUX_DENS]*_RhoL;
for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = Con_L[v]*vx;
}
else
{
const real vx = Flux_Out[FLUX_DENS]*_RhoR;
for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = Con_R[v]*vx;
}
# endif
// restore the correct order
Hydro_Rotate3D( Flux_Out, XYZ, false, IdxBx );
} // FUNCTION : Hydro_RiemannSolver_HLLD
#endif // #if ( MODEL == HYDRO && defined MHD )
#endif // #ifndef __CUFLU_RIEMANNSOLVER_HLLD__
|
the_stack
|
#include <iostream>
#include "viennacl.hpp"
#include "viennacl_private.hpp"
//include basic scalar and vector types of ViennaCL
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
// include the generic inner product functions of ViennaCL
#include "viennacl/linalg/inner_prod.hpp"
// include the generic norm functions of ViennaCL
#include "viennacl/linalg/norm_1.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/norm_inf.hpp"
#ifdef VIENNACL_WITH_OPENCL
// IxAMAX
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLiSamax(ViennaCLBackend backend, ViennaCLInt n,
ViennaCLInt *index,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*index = static_cast<ViennaCLInt>(viennacl::linalg::index_norm_inf(v1));
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLiDamax(ViennaCLBackend backend, ViennaCLInt n,
ViennaCLInt *index,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*index = static_cast<ViennaCLInt>(viennacl::linalg::index_norm_inf(v1));
return ViennaCLSuccess;
}
// xASUM
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLSasum(ViennaCLBackend backend, ViennaCLInt n,
float *alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*alpha = viennacl::linalg::norm_1(v1);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDasum(ViennaCLBackend backend, ViennaCLInt n,
double *alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*alpha = viennacl::linalg::norm_1(v1);
return ViennaCLSuccess;
}
// xAXPY
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLSaxpy(ViennaCLBackend backend, ViennaCLInt n,
float alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<float> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
v2 += alpha * v1;
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDaxpy(ViennaCLBackend backend, ViennaCLInt n,
double alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<double> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
v2 += alpha * v1;
return ViennaCLSuccess;
}
// xCOPY
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLScopy(ViennaCLBackend backend, ViennaCLInt n,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<float> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
v2 = v1;
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDcopy(ViennaCLBackend backend, ViennaCLInt n,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<double> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
v2 = v1;
return ViennaCLSuccess;
}
// xDOT
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLSdot(ViennaCLBackend backend, ViennaCLInt n,
float *alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<float> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*alpha = viennacl::linalg::inner_prod(v1, v2);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDdot(ViennaCLBackend backend, ViennaCLInt n,
double *alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<double> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*alpha = viennacl::linalg::inner_prod(v1, v2);
return ViennaCLSuccess;
}
// xNRM2
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLSnrm2(ViennaCLBackend backend, ViennaCLInt n,
float *alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*alpha = viennacl::linalg::norm_2(v1);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDnrm2(ViennaCLBackend backend, ViennaCLInt n,
double *alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
*alpha = viennacl::linalg::norm_2(v1);
return ViennaCLSuccess;
}
// xROT
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLSrot(ViennaCLBackend backend, ViennaCLInt n,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy,
float c, float s)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<float> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::linalg::plane_rotation(v1, v2, c, s);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDrot(ViennaCLBackend backend, ViennaCLInt n,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy,
double c, double s)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<double> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::linalg::plane_rotation(v1, v2, c, s);
return ViennaCLSuccess;
}
// xSCAL
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLSscal(ViennaCLBackend backend, ViennaCLInt n,
float alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
v1 *= alpha;
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDscal(ViennaCLBackend backend, ViennaCLInt n,
double alpha,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
v1 *= alpha;
return ViennaCLSuccess;
}
// xSWAP
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLSswap(ViennaCLBackend backend, ViennaCLInt n,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<float> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::swap(v1, v2);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLOpenCLDswap(ViennaCLBackend backend, ViennaCLInt n,
cl_mem x, ViennaCLInt offx, ViennaCLInt incx,
cl_mem y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, size_type(n), size_type(offx), difference_type(incx), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::vector_base<double> v2(y, size_type(n), size_type(offy), difference_type(incy), viennacl::ocl::get_context(backend->opencl_backend.context_id));
viennacl::swap(v1, v2);
return ViennaCLSuccess;
}
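// Illustrative usage sketch (not part of the original file): it assumes `backend` was obtained
// from the libviennacl backend-creation API and that `x` and `y` are valid cl_mem buffers holding
// at least `n` floats on that backend's OpenCL context. The helper name is hypothetical; it only
// demonstrates the offset/stride calling convention of the wrappers defined above.
static ViennaCLStatus example_saxpy_then_nrm2(ViennaCLBackend backend, ViennaCLInt n,
                                              cl_mem x, cl_mem y, float *result)
{
  // y <- 2.0f * x + y on contiguous data (offset 0, stride 1)
  ViennaCLStatus status = ViennaCLOpenCLSaxpy(backend, n, 2.0f, x, 0, 1, y, 0, 1);
  if (status != ViennaCLSuccess)
    return status;
  // *result <- ||y||_2
  return ViennaCLOpenCLSnrm2(backend, n, result, y, 0, 1);
}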
#endif
#include <layers/fully_connected_layer.hpp>
#include <linalg/matrix_vector_op.cuh>
#include <linalg/reduce.cuh>
#include <utils.cuh>
#include <utils.hpp>
#include <vector>
namespace HugeCTR {
namespace {
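// Bias-add helpers: in the row-major case each CUDA block handles one of the m rows and adds the
// full bias vector of length n to it; in the column-major case each block handles one of the n
// columns and adds that column's single bias value to all m entries.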
void __global__ add_bias_kernel_row(float* data, const float* bias, const int m, const int n) {
int offset = blockIdx.x * n;
for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
data[offset + tid] += bias[tid];
}
}
void __global__ add_bias_kernel_col(float* data, const float* bias, const int m, const int n) {
int offset = blockIdx.x * m;
float b = bias[blockIdx.x];
for (int tid = threadIdx.x; tid < m; tid += blockDim.x) {
data[offset + tid] += b;
}
}
void add_bias(float* data, const float* bias, const int m, const int n, bool row_major,
cudaStream_t stream) {
if (row_major) {
dim3 grid(m);
dim3 block(min(n, 1024));
add_bias_kernel_row<<<grid, block, 0, stream>>>(data, bias, m, n);
} else {
dim3 grid(n);
dim3 block(min(m, 1024));
add_bias_kernel_col<<<grid, block, 0, stream>>>(data, bias, m, n);
}
#ifndef NDEBUG
cudaDeviceSynchronize();
HCTR_LIB_THROW(cudaGetLastError());
#endif
}
} // namespace
FullyConnectedLayer<float>::FullyConnectedLayer(
const std::shared_ptr<BufferBlock2<float>>& weight_buff,
const std::shared_ptr<BufferBlock2<float>>& wgrad_buff, const Tensor2<float>& in_tensor,
const Tensor2<float>& out_tensor, const std::shared_ptr<GPUResource>& gpu_resource,
bool use_mixed_precision, bool enable_tf32_compute,
std::vector<Initializer_t> initializer_types)
: Layer(gpu_resource, initializer_types),
use_mixed_precision_(use_mixed_precision),
enable_tf32_compute_(enable_tf32_compute) {
try {
// check the in_tensor and out_tensor
const auto& in_tensor_dim = in_tensor.get_dimensions();
const auto& out_tensor_dim = out_tensor.get_dimensions();
// 1. two dim?
if (in_tensor_dim.size() != 2 || out_tensor_dim.size() != 2) {
HCTR_OWN_THROW(Error_t::WrongInput, "input or output tensor doesn't has two dimensions");
}
// 2. dim match?
size_t m = in_tensor_dim[0];
size_t n = out_tensor_dim[1];
size_t k = in_tensor_dim[1];
size_t m_ck = out_tensor_dim[0];
if (m != m_ck) {
HCTR_OWN_THROW(Error_t::WrongInput, "size of input / output tensor doesn't match");
}
std::vector<size_t> weight_dim = {k, n};
std::vector<size_t> bias_dim = {1, n};
{
Tensor2<float> tensor;
weight_buff->reserve(weight_dim, &tensor);
weights_.push_back(tensor);
}
{
Tensor2<float> tensor;
weight_buff->reserve(bias_dim, &tensor);
weights_.push_back(tensor);
}
{
Tensor2<float> tensor;
wgrad_buff->reserve(weight_dim, &tensor);
wgrad_.push_back(tensor);
}
{
Tensor2<float> tensor;
wgrad_buff->reserve(bias_dim, &tensor);
wgrad_.push_back(tensor);
}
in_tensors_.push_back(in_tensor);
out_tensors_.push_back(out_tensor);
// Where should we create this cuBLAS handle?
} catch (const std::runtime_error& rt_err) {
HCTR_LOG_S(ERROR, WORLD) << rt_err.what() << std::endl;
throw;
}
}
void FullyConnectedLayer<float>::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
Tensor2<float>& in_tensor = get_in_tensors(is_train)[0];
Tensor2<float>& out_tensor = out_tensors_[0];
float* weight = weights_[0].get_ptr();
float* bias = weights_[1].get_ptr();
float* in = in_tensor.get_ptr();
float* out = out_tensor.get_ptr();
const auto& in_tensor_dim = in_tensor.get_dimensions();
const auto& out_tensor_dim = out_tensor.get_dimensions();
int m, n, k;
m = in_tensor_dim[0];
n = out_tensor_dim[1];
k = in_tensor_dim[1];
float alpha = 1.0f, beta = 0.0f;
cublasComputeType_t compute_type =
enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
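// out (m x n, row-major) = in (m x k) * weight (k x n). cuBLAS is column-major, so the row-major
// matrices are passed as their column-major transposes and the product is computed as
// out^T (n x m) = weight^T (n x k) * in^T (k x m), i.e. dims (n, m, k) with no transposes.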
HCTR_LIB_THROW(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
&alpha, weight, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta, out,
CUDA_R_32F, n, compute_type, falgo_));
add_bias(out, bias, m, n, true, get_gpu().get_stream());
// PROFILE_RECORD("TopMLP.fprop.stop", get_gpu().get_stream());
}
void FullyConnectedLayer<float>::bprop() {
CudaDeviceContext context(get_device_id());
Tensor2<float>& in_tensor = get_in_tensors(true)[0];
Tensor2<float>& out_tensor = out_tensors_[0];
float* wgrad = wgrad_[0].get_ptr();
float* bias_grad = wgrad_[1].get_ptr();
float* weight = weights_[0].get_ptr();
float* in = in_tensor.get_ptr();
float* out = out_tensor.get_ptr();
const auto& in_tensor_dim = in_tensor.get_dimensions();
const auto& out_tensor_dim = out_tensor.get_dimensions();
int m, n, k;
m = in_tensor_dim[0];
n = out_tensor_dim[1];
k = in_tensor_dim[1];
float alpha = 1.0f, beta_w = 1.0f, beta_x = 0.0f;
// PROFILE_RECORD("TopMLP.bprop.start", get_gpu().get_stream());
cublasComputeType_t compute_type =
enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
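// Using the same column-major-transpose convention as fprop:
//   wgrad (k x n)   = in^T (k x m) * out_grad (m x n), accumulated into wgrad since beta_w = 1
//   in_grad (m x k) = out_grad (m x n) * weight^T (n x k), overwriting the input buffer (beta_x = 0)
//   bias_grad (n)   = column-wise sum of out_grad over the batch, via the reduce call below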
// gradient with respect to W
HCTR_LIB_THROW(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m,
&alpha, out, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta_w, wgrad,
CUDA_R_32F, n, compute_type, balgo_W_));
// gradient with respect to Xn
HCTR_LIB_THROW(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n,
&alpha, weight, CUDA_R_32F, n, out, CUDA_R_32F, n, &beta_x, in,
CUDA_R_32F, k, compute_type, balgo_Xn_));
MLCommon::LinAlg::reduce(bias_grad, out, m, n, float(0), false, true, get_gpu().get_stream(),
true);
}
void FullyConnectedLayer<float>::search_algorithm() {
// Set to the CUDA device where this layer assigned to
CudaDeviceContext context(get_device_id());
const int repeat_num = 100;
// Device Tensors to be used
Tensor2<float>& in_tensor = get_in_tensors(true)[0];
Tensor2<float>& out_tensor = out_tensors_[0];
float* weight = weights_[0].get_ptr();
float* in = in_tensor.get_ptr();
float* out = out_tensor.get_ptr();
float* wgrad = wgrad_[0].get_ptr();
// Tensor dim
const auto& in_tensor_dim = in_tensor.get_dimensions();
const auto& out_tensor_dim = out_tensor.get_dimensions();
int m, n, k;
m = in_tensor_dim[0];
n = out_tensor_dim[1];
k = in_tensor_dim[1];
// Record time for each algorithm
float shortestTime = 100000000.0;
float time;
cudaEvent_t start, stop;
HCTR_LIB_THROW(cudaEventCreate(&start));
HCTR_LIB_THROW(cudaEventCreate(&stop));
// cublas ret status
cublasStatus_t status;
// Start, end for search
int startAlgo, endAlgo;
if (use_mixed_precision_) {
startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
} else {
startAlgo = (int)CUBLAS_GEMM_DEFAULT;
endAlgo = (int)CUBLAS_GEMM_ALGO23;
}
cublasComputeType_t compute_type =
enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
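// The three loops below time each candidate cublasGemmAlgo_t over repeat_num iterations for the
// fprop, bprop_W, and bprop_Xn GEMMs respectively, skip algorithms cuBLAS rejects, and keep the
// fastest supported one in falgo_, balgo_W_, and balgo_Xn_.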
// Search all the algorithm for fprop
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
float alpha = 1.0f, beta = 0.0f;
// Record start event
HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream()));
for (int i = 0; i < repeat_num; ++i) {
status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
&alpha, weight, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta, out,
CUDA_R_32F, n, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo));
}
HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream()));
HCTR_LIB_THROW(cudaEventSynchronize(stop));
HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop));
// Avg time (ms) of this algorithm for the fprop GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the fprop configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// HCTR_LOG(INFO, WORLD, "The algorithms %d is not supported for fprop, skipped.\n",
// testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
falgo_ = static_cast<cublasGemmAlgo_t>(testAlgo);
}
}
// Reset shortestTime
shortestTime = 100000000.0;
// Search all the algorithm for bprop_W
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
float alpha = 1.0f, beta_w = 1.0f;
// Record start event
HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream()));
for (int i = 0; i < repeat_num; ++i) {
status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m,
&alpha, out, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta_w, wgrad,
CUDA_R_32F, n, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo));
}
HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream()));
HCTR_LIB_THROW(cudaEventSynchronize(stop));
HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop));
// Avg time (ms) of this algorithm for the bprop_W GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the bprop_W configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// HCTR_LOG(INFO, WORLD, "The algorithm %d is not supported for bprop_W, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_W_ = static_cast<cublasGemmAlgo_t>(testAlgo);
}
}
// Reset shortestTime
shortestTime = 100000000.0;
// Search all the algorithm for bprop_Xn
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
float alpha = 1.0f, beta_x = 0.0f;
// Record start event
HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream()));
for (int i = 0; i < repeat_num; ++i) {
status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n,
&alpha, weight, CUDA_R_32F, n, out, CUDA_R_32F, n, &beta_x, in,
CUDA_R_32F, k, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo));
}
HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream()));
HCTR_LIB_THROW(cudaEventSynchronize(stop));
HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop));
// Avg time (ms) of this algorithm for the bprop_Xn GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the bprop_Xn configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// HCTR_LOG(INFO, WORLD, "The algorithm %d is not supported for bprop_Xn, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_Xn_ = static_cast<cublasGemmAlgo_t>(testAlgo);
}
}
// Print selection information
// HCTR_LOG(INFO, WORLD, "The algorithm selection for fprop, bprop_W and bprop_Xn are: %d, %d and %d.\n",
//          (int)falgo_, (int)balgo_W_, (int)balgo_Xn_);
// Output msg
// HCTR_LOG(INFO, ROOT, "The fully-connected layer has finished choosing the algorithm for cublas Gemm.\n");
// Clean-up
HCTR_LIB_THROW(cudaEventDestroy(start));
HCTR_LIB_THROW(cudaEventDestroy(stop));
}
std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_uniform_initializer(
const int index) {
const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
const Tensor2<float>& out_tensor = out_tensors_[0];
float bottom_dim = in_tensor.get_dimensions()[1];
float top_dim = out_tensor.get_dimensions()[1];
float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim);
return std::make_unique<UniformDataSimulator>(-1 * limit, limit);
}
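// For example (illustrative numbers only): a hypothetical 128 -> 64 layer would get
// limit = 1/(128+64) ~ 0.0052 for the weight (index 0) and 1/64 ~ 0.0156 for the bias (index 1).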
std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_xavier_uniform_initializer(
const int index) {
const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
const Tensor2<float>& out_tensor = out_tensors_[0];
float bottom_dim = in_tensor.get_dimensions()[1];
float top_dim = out_tensor.get_dimensions()[1];
return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Uniform,
0 == index ? bottom_dim : 0, top_dim);
}
std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_xavier_norm_initializer(
const int index) {
const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
const Tensor2<float>& out_tensor = out_tensors_[0];
float bottom_dim = in_tensor.get_dimensions()[1];
float top_dim = out_tensor.get_dimensions()[1];
return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Norm,
0 == index ? bottom_dim : 0, top_dim);
}
std::unique_ptr<DataSimulator> FullyConnectedLayer<float>::get_default_initializer(
const int index) {
const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
const Tensor2<float>& out_tensor = out_tensors_[0];
float bottom_dim = in_tensor.get_dimensions()[1];
float top_dim = out_tensor.get_dimensions()[1];
std::unique_ptr<DataSimulator> simu(nullptr);
if (0 == index) {
simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Norm, bottom_dim, top_dim));
} else if (1 == index) {
float stddev = sqrt(1.f / top_dim);
simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev));
} else {
HCTR_OWN_THROW(Error_t::OutOfBound, "index != {0, 1}.");
}
return simu;
}
template class FullyConnectedLayer<float>;
} // namespace HugeCTR