Summary
I'm porting a simple raytracing application based on the Scratchapixel version to a bunch of GPU libraries. I successfully ported it to CUDA using both the runtime API and the driver API, but it throws a Segmentation fault (core dumped)
when I try to use the PTX compiled at runtime with NVRTC.
If I uncomment the #include <math.h>
directive at the beginning of the kernel file (see below), it still works with NVCC (the generated PTX is exactly the same) but fails to compile with NVRTC.
I want to know how I can make NVRTC behave just like NVCC (is it even possible?), or at least understand the reason behind these issues.
Detailed description
File kernel.cu (kernel source):
//#include <math.h>

#define MAX_RAY_DEPTH 5

template<typename T>
class Vec3
{
public:
    T x, y, z;
    __device__ Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
    __device__ Vec3(T xx) : x(xx), y(xx), z(xx) {}
    __device__ Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
    __device__ Vec3& normalize()
    {
        T nor2 = length2();
        if (nor2 > 0) {
            T invNor = 1 / sqrt(nor2);
            x *= invNor, y *= invNor, z *= invNor;
        }
        return *this;
    }
    __device__ Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
    __device__ Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
    __device__ T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
    __device__ Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
    __device__ Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
    __device__ Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
    __device__ Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
    __device__ Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
    __device__ T length2() const { return x * x + y * y + z * z; }
    __device__ T length() const { return sqrt(length2()); }
};

typedef Vec3<float> Vec3f;
typedef Vec3<bool> Vec3b;

class Sphere
{
public:
    const char* id;
    Vec3f center;                      /// position of the sphere
    float radius, radius2;             /// sphere radius and radius^2
    Vec3f surfaceColor, emissionColor; /// surface color and emission (light)
    float transparency, reflection;    /// surface transparency and reflectivity
    int animation_frame;
    Vec3b animation_position_rand;
    Vec3f animation_position;
    Sphere(
        const char* id,
        const Vec3f &c,
        const float &r,
        const Vec3f &sc,
        const float &refl = 0,
        const float &transp = 0,
        const Vec3f &ec = 0) :
        id(id), center(c), radius(r), radius2(r * r), surfaceColor(sc),
        emissionColor(ec), transparency(transp), reflection(refl)
    {
        animation_frame = 0;
    }
    //[comment]
    // Compute a ray-sphere intersection using the geometric solution
    //[/comment]
    __device__ bool intersect(const Vec3f &rayorig, const Vec3f &raydir, float &t0, float &t1) const
    {
        Vec3f l = center - rayorig;
        float tca = l.dot(raydir);
        if (tca < 0) return false;
        float d2 = l.dot(l) - tca * tca;
        if (d2 > radius2) return false;
        float thc = sqrt(radius2 - d2);
        t0 = tca - thc;
        t1 = tca + thc;
        return true;
    }
};

__device__ float mix(const float &a, const float &b, const float &mixval)
{
    return b * mixval + a * (1 - mixval);
}

__device__ Vec3f trace(
    const Vec3f &rayorig,
    const Vec3f &raydir,
    const Sphere *spheres,
    const unsigned int spheres_size,
    const int &depth)
{
    float tnear = INFINITY;
    const Sphere* sphere = NULL;
    // find intersection of this ray with the sphere in the scene
    for (unsigned i = 0; i < spheres_size; ++i) {
        float t0 = INFINITY, t1 = INFINITY;
        if (spheres[i].intersect(rayorig, raydir, t0, t1)) {
            if (t0 < 0) t0 = t1;
            if (t0 < tnear) {
                tnear = t0;
                sphere = &spheres[i];
            }
        }
    }
    // if there's no intersection return black or background color
    if (!sphere) return Vec3f(2);
    Vec3f surfaceColor = 0; // color of the ray/surfaceof the object intersected by the ray
    Vec3f phit = rayorig + raydir * tnear; // point of intersection
    Vec3f nhit = phit - sphere->center; // normal at the intersection point
    nhit.normalize(); // normalize normal direction
    // If the normal and the view direction are not opposite to each other
    // reverse the normal direction. That also means we are inside the sphere so set
    // the inside bool to true. Finally reverse the sign of IdotN which we want
    // positive.
    float bias = 1e-4; // add some bias to the point from which we will be tracing
    bool inside = false;
    if (raydir.dot(nhit) > 0) nhit = -nhit, inside = true;
    if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
        float facingratio = -raydir.dot(nhit);
        // change the mix value to tweak the effect
        float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1);
        // compute reflection direction (not need to normalize because all vectors
        // are already normalized)
        Vec3f refldir = raydir - nhit * 2 * raydir.dot(nhit);
        refldir.normalize();
        Vec3f reflection = trace(phit + nhit * bias, refldir, spheres, spheres_size, depth + 1);
        Vec3f refraction = 0;
        // if the sphere is also transparent compute refraction ray (transmission)
        if (sphere->transparency) {
            float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface?
            float cosi = -nhit.dot(raydir);
            float k = 1 - eta * eta * (1 - cosi * cosi);
            Vec3f refrdir = raydir * eta + nhit * (eta * cosi - sqrt(k));
            refrdir.normalize();
            refraction = trace(phit - nhit * bias, refrdir, spheres, spheres_size, depth + 1);
        }
        // the result is a mix of reflection and refraction (if the sphere is transparent)
        surfaceColor = (
            reflection * fresneleffect +
            refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
    }
    else {
        // it's a diffuse object, no need to raytrace any further
        for (unsigned i = 0; i < spheres_size; ++i) {
            if (spheres[i].emissionColor.x > 0) {
                // this is a light
                Vec3f transmission = 1;
                Vec3f lightDirection = spheres[i].center - phit;
                lightDirection.normalize();
                for (unsigned j = 0; j < spheres_size; ++j) {
                    if (i != j) {
                        float t0, t1;
                        if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) {
                            transmission = 0;
                            break;
                        }
                    }
                }
                surfaceColor += sphere->surfaceColor * transmission *
                    max(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor;
            }
        }
    }
    return surfaceColor + sphere->emissionColor;
}

extern "C" __global__
void raytrace_kernel(unsigned int width, unsigned int height, Vec3f *image, Sphere *spheres, unsigned int spheres_size, float invWidth, float invHeight, float aspectratio, float angle) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y < height && x < width) {
        float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
        float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
        Vec3f raydir(xx, yy, -1);
        raydir.normalize();
        image[y*width+x] = trace(Vec3f(0), raydir, spheres, spheres_size, 0);
    }
}
I can successfully compile it with nvcc --ptx kernel.cu -o kernel.ptx
(full PTX here) and use that PTX with the driver API via cuModuleLoadDataEx
using the following snippet. It works as expected, and it keeps working even if I uncomment the #include <math.h>
line (in fact, the generated PTX is exactly the same).
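For the NVCC path, the ptxSource used in the snippet below is just the contents of kernel.ptx read from disk; my actual file-handling code is omitted here, so treat this as a placeholder sketch:
// Placeholder: read the generated PTX into a string so it can be passed to cuModuleLoadDataEx
#include <fstream>
#include <iterator>
#include <string>
std::ifstream ptxFile("kernel.ptx");
std::string ptxStr((std::istreambuf_iterator<char>(ptxFile)),
                   std::istreambuf_iterator<char>());
const char* ptxSource = ptxStr.c_str();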
CudaSafeCall( cuInit(0) );
CUdevice device;
CudaSafeCall( cuDeviceGet(&device, 0) );
CUcontext context;
CudaSafeCall( cuCtxCreate(&context, 0, device) );
unsigned int error_buffer_size = 1024;
std::vector<CUjit_option> options;
std::vector<void*> values;
char* error_log = new char[error_buffer_size];
options.push_back(CU_JIT_ERROR_LOG_BUFFER); //Pointer to a buffer in which to print any log messages that reflect errors
values.push_back(error_log);
options.push_back(CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES); //Log buffer size in bytes. Log messages will be capped at this size (including null terminator)
values.push_back(&error_buffer_size);
options.push_back(CU_JIT_TARGET_FROM_CUCONTEXT); //Determines the target based on the current attached context (default)
values.push_back(0); //No option value required for CU_JIT_TARGET_FROM_CUCONTEXT
CUmodule module;
CUresult status = cuModuleLoadDataEx(&module, ptxSource, options.size(), options.data(), values.data());
if (error_log && error_log[0]) { // https://stackoverflow.com/a/7970669/3136474
    std::cout << "Compiler error: " << error_log << std::endl;
}
CudaSafeCall( status );
However, whenever I try to compile this exact kernel using NVRTC (full PTX here), it compiles successfully but gives me a Segmentation fault (core dumped)
on the call to cuModuleLoadDataEx
(when trying to use the resulting PTX).
If I uncomment the #include <math.h>
line, it fails at the nvrtcCompileProgram
call with the following output:
nvrtcSafeBuild() failed at cuda_raytracer_nvrtc_api.cpp:221 : NVRTC_ERROR_COMPILATION
Build log:
/usr/include/bits/mathcalls.h(177): error: linkage specification is incompatible with previous "isinf"
__nv_nvrtc_builtin_header.h(126689): here
/usr/include/bits/mathcalls.h(211): error: linkage specification is incompatible with previous "isnan"
__nv_nvrtc_builtin_header.h(126686): here
2 errors detected in the compilation of "kernel.cu".
The code I'm using to compile it with NVRTC is:
nvrtcProgram prog;
NvrtcSafeCall( nvrtcCreateProgram(&prog, kernelSource, "kernel.cu", 0, NULL, NULL) );
// https://docs.nvidia.com/cuda/nvrtc/index.html#group__options
std::vector<const char*> compilationOpts;
compilationOpts.push_back("--device-as-default-execution-space");
// NvrtcSafeBuild is a macro which automatically prints nvrtcGetProgramLog if the compilation fails
NvrtcSafeBuild( nvrtcCompileProgram(prog, compilationOpts.size(), compilationOpts.data()), prog );
size_t ptxSize;
NvrtcSafeCall( nvrtcGetPTXSize(prog, &ptxSize) );
char* ptxSource = new char[ptxSize];
NvrtcSafeCall( nvrtcGetPTX(prog, ptxSource) );
NvrtcSafeCall( nvrtcDestroyProgram(&prog) );
Then I simply load the ptxSource
using the previous snippet (note: that loading code is the same for both the driver API version and the NVRTC version).
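For context, once cuModuleLoadDataEx succeeds the module is consumed in the usual driver API way. A minimal sketch follows; the device buffers d_image / d_spheres and the launch configuration are placeholders, not my exact code, and width, height, etc. are the host-side values matching the kernel parameters above:
CUfunction kernel;
CudaSafeCall( cuModuleGetFunction(&kernel, module, "raytrace_kernel") );
// Placeholder device buffers and a simple 16x16 block decomposition of the image
CUdeviceptr d_image, d_spheres;
unsigned int blockX = 16, blockY = 16;
unsigned int gridX = (width + blockX - 1) / blockX;
unsigned int gridY = (height + blockY - 1) / blockY;
void* args[] = { &width, &height, &d_image, &d_spheres, &spheres_size,
                 &invWidth, &invHeight, &aspectratio, &angle };
CudaSafeCall( cuLaunchKernel(kernel, gridX, gridY, 1, blockX, blockY, 1,
                             0, NULL, args, NULL) );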
Additional things that I've noticed/tried so far
- The PTX generated by NVCC and the one generated by NVRTC are quite different, but I'm unable to understand them well enough to identify possible problems.
- Tried to specify the exact GPU architecture (in my case, CC 6.1) to the compiler; no difference (see the options sketch after this list).
- Tried to disable all compiler optimizations (options --ftz=false --prec-sqrt=true --prec-div=true --fmad=false in nvrtcCompileProgram). The PTX file got bigger, but it still segfaults.
- Tried to add --std=c++11 or --std=c++14 to the NVRTC compiler options. With either of them NVRTC generates an almost empty (4-line) PTX but issues no warning or error until I try to use it.
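For reference, a sketch of how those options can be passed to nvrtcCompileProgram, reusing prog and the NvrtcSafeBuild macro from the NVRTC snippet above; this is illustrative, not my exact code, and the combination of options varied between attempts:
// Architecture pinned to CC 6.1 and optimizations disabled (one of the combinations tried)
std::vector<const char*> compilationOpts;
compilationOpts.push_back("--gpu-architecture=compute_61"); // target the GTX 1050 Ti explicitly
compilationOpts.push_back("--ftz=false");                   // keep denormals
compilationOpts.push_back("--prec-sqrt=true");              // IEEE-compliant sqrt
compilationOpts.push_back("--prec-div=true");               // IEEE-compliant division
compilationOpts.push_back("--fmad=false");                  // no fused multiply-add contraction
NvrtcSafeBuild( nvrtcCompileProgram(prog, compilationOpts.size(), compilationOpts.data()), prog );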
Environment
- OS: Ubuntu 18.04.4 LTS 64-bit
- nvcc --version: Cuda compilation tools, release 10.1, V10.1.168. Built on Wed_Apr_24_19:10:27_PDT_2019
- gcc --version: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
- Hardware: Intel i7-7700HQ, GeForce GTX 1050 Ti
Edit (one day after the original post)
I forgot to add my environment. See previous section.
"Also can you compile the nvrtc output with ptxas?" – @talonmies' comment
The nvcc-generated PTX compiles with a warning:
$ ptxas -o /tmp/temp_ptxas_output.o kernel.ptx
ptxas warning : Stack size for entry function 'raytrace_kernel' cannot be statically determined
This is due to the kernel function being recursive (more on that) and can be safely ignored.
The nvrtc-generated PTX does not compile and issues the error:
$ ptxas -o /tmp/temp_ptxas_output.o nvrtc_kernel.ptx
ptxas fatal : Unresolved extern function '_Z5powiffi'
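(Side note, added here for clarity: demangling that symbol shows it refers to the pow(float, int) overload.)
$ echo _Z5powiffi | c++filt
powif(float, int)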
Based on this question I added __device__ to the Sphere class constructor and removed the --device-as-default-execution-space compiler option.
NVRTC now generates a slightly different PTX, but the same error persists.
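For clarity, with that change the Sphere constructor in the kernel source reads (same body as above, only the annotation added):
__device__ Sphere(
    const char* id,
    const Vec3f &c,
    const float &r,
    const Vec3f &sc,
    const float &refl = 0,
    const float &transp = 0,
    const Vec3f &ec = 0) :
    id(id), center(c), radius(r), radius2(r * r), surfaceColor(sc),
    emissionColor(ec), transparency(transp), reflection(refl)
{
    animation_frame = 0;
}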
Compiling with the #include <math.h> line uncommented now generates a lot of "A function without execution space annotations is considered a host function, and host functions are not allowed in JIT mode." warnings in addition to the previous errors.
If I try to apply the accepted solution of that question, I get a bunch of syntax errors and it does not compile. NVCC still works flawlessly.
Just found the culprit by the ancient comment-and-test method: the error goes away if I remove the pow call used to calculate the Fresnel effect inside the trace method. For now, I've just replaced pow(var, 3) with var*var*var.
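Concretely, the fresneleffect line in trace becomes something like this (a sketch of the workaround; base is just a local name used for illustration):
float base = 1 - facingratio;                          // the value previously passed to pow(x, 3)
float fresneleffect = mix(base * base * base, 1, 0.1);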
I created a MVCE and filed a bug report with NVIDIA: https://developer.nvidia.com/nvidia_bug/2917596.
Liam Zhang answered it and pointed me to the problem: if I change the values.push_back(&error_buffer_size); call in my loading snippet to values.push_back((void*)error_buffer_size);, then cuModuleLoadDataEx reports the same error as the ptxas compilation of the generated PTX: