
Code Examples


The code for this example is also included as a project in the Windows dependencies.

Host Example

Initializing the library and creating a scene is done like this:

	const int width = 1024, height = 1024;
	const float fov = 90;
	const int NPasses = 10;

	InitializeCuda4Tracer(data_path);
	SimpleFileManager fManager;
	Sensor camera = CreateAggregate<Sensor>(PerspectiveSensor(width, height, fov));
	//A SceneInitData object specifies the maximum number of meshes/triangles to allocate sufficient storage on the GPU
	DynamicScene scene(&camera, SceneInitData::CreateForScene(10, 10, 1000), &fManager);

Objects can be loaded and positioned like this:

	{
		auto ground = scene.CreateNode(data_path + "plane2.obj");
		//sets the object to world matrix for this node
		scene.SetNodeTransform(float4x4::Scale(Vec3f(10)), ground);
		//enables separate materials for each node of this mesh
		scene.instanciateNodeMaterials(ground);
		auto mat = scene.getMaterials(ground)(0);
		auto tex = CreateAggregate<Texture>(ImageTexture(TextureMapping2D(), "cobblestone.jpg", Spectrum(1.0f)));
		//set the reflectance distribution (= material type) and a displacement map
		mat->bsdf = CreateAggregate<BSDFALL>(diffuse(tex));
		mat->SetHeightMap("cobblestone_disp.jpg");
		mat->enableParallaxOcclusion = true;
		mat->HeightScale = 0.025f;
		//invalidating the reference lets the library know to copy the object onto the GPU
		mat.Invalidate();
	}

	{
		auto window = scene.CreateNode(data_path + "plane2.obj");
		//compose the transform from a rotation followed by a translation
		scene.SetNodeTransform(float4x4::Translate(1, 6, 0) % float4x4::RotateZ(PI / 2), window);
		scene.instanciateNodeMaterials(window);
		auto mat = scene.getMaterials(window)(0);
		auto tex1 = CreateAggregate<Texture>(ConstantTexture(Spectrum(0.0f)));
		auto tex2 = CreateAggregate<Texture>(ImageTexture(TextureMapping2D(), "WindowTexture.jpg", Spectrum(1.0f)));
		//a dielectric is defined by its index of refraction and its reflectance and transmittance; here no light is reflected
		mat->bsdf = CreateAggregate<BSDFALL>(dielectric(1.0f, tex1, tex2));
		mat.Invalidate();
	}
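
The same pattern also works for untextured materials; here is a minimal sketch combining the ConstantTexture and diffuse BSDF shown above (the mesh file name is a placeholder):

	{
		auto box = scene.CreateNode(data_path + "box.obj"); //placeholder mesh
		scene.instanciateNodeMaterials(box);
		auto mat = scene.getMaterials(box)(0);
		//a constant texture yields a uniform color instead of an image lookup
		auto tex = CreateAggregate<Texture>(ConstantTexture(Spectrum(0.5f)));
		mat->bsdf = CreateAggregate<BSDFALL>(diffuse(tex));
		mat.Invalidate();
	}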

To finish the scene setup, add a light and a participating volume, and position the camera:

	auto light = scene.CreateLight(SpotLight(Vec3f(5, 10, 0), Vec3f(0, 5, 0), Spectrum(1000), 10, 10));

	camera.SetToWorld(Vec3f(-5.107796f, 5.408825f, -5.528843f), Vec3f(0.3678481f, -0.3674385f, 0.8542113f));

	//an isotropic phase function scatters incoming radiance uniformly over the unit sphere of directions
	auto pFunc = CreateAggregate<PhaseFunction>(IsotropicPhaseFunction());
	//the volume is in [0, 1]³; a transformation positions and scales it
	//here the unit cube is mapped to [-10, 1] x [0, 7] x [-10, 10]
	auto volToWorld = float4x4::Translate(-10,0,-10) % float4x4::Scale(11,7,20);
	//a homogeneous volume has constant absorption and scattering coefficients and here emits no light
	scene.CreateVolume(HomogeneousVolumeDensity(pFunc, volToWorld, 0.005f, 0.05f, 0));

	//an environment map can be scaled by a constant factor
	scene.setEnvironementMap(Spectrum(1), "envmap.exr");

Lastly, a tracer needs to be constructed for rendering images:

	//this constructor allocates sufficient storage for pixel values on host and device.
	//other constructors can use OpenGL or D3D textures for real-time rendering
	Image outImage(width, height);

	//here one of the library's tracers is used; a custom tracer (see the device example below) works the same way
	PhotonTracer tracer;
	//initialize the tracer object with the output size and scene
	tracer.Resize(width, height);
	tracer.InitializeScene(&scene);
	//construct/update the scene BVH and copy data to the device
	scene.UpdateScene();
	//do the actual rendering
	for (int i = 0; i < NPasses; i++)
		tracer.DoPass(&outImage, !i); //the second argument is true only on the first pass, starting a new rendering

	//apply a simple box filter to the image
	applyImagePipeline(tracer, outImage, CreateAggregate<Filter>(BoxFilter()));

	//write the resulting image to a file with an exif comment
	outImage.WriteDisplayImage("result.jpg", "");
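
If intermediate results are desired, the same calls can be rearranged into a progressive loop that writes a preview after every pass; a minimal sketch using only the functions shown above, assuming the image pipeline may be applied repeatedly:

	for (int i = 0; i < NPasses; i++)
	{
		tracer.DoPass(&outImage, !i);
		//filter and write the current accumulation state after each pass
		applyImagePipeline(tracer, outImage, CreateAggregate<Filter>(BoxFilter()));
		outImage.WriteDisplayImage("result_preview.jpg", "");
	}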

For completeness, here is the SimpleFileManager referenced above. The library compiles meshes into a simple binary format which holds BVH instances for faster loading times; this file manager simply stores the compiled files in a subdirectory.

const std::string data_path = "Data/";
class SimpleFileManager : public IFileManager
{
public:
	virtual std::string getCompiledMeshPath(const std::string& name)
	{
		return data_path + "Compiled/" + name;
	}
	virtual std::string getTexturePath(const std::string& name)
	{
		return data_path + name;
	}
	virtual std::string getCompiledTexturePath(const std::string& name)
	{
		return data_path + "Compiled/" + name;
	}
};
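
The compiled files can only be written if the Compiled subdirectory exists; whether the library creates it itself is not documented here. A minimal sketch of a hypothetical helper, assuming a C++17 compiler with std::filesystem:

#include <filesystem>

//ensure the directory for compiled meshes/textures exists (hypothetical helper)
void EnsureCompiledDirectory()
{
	std::filesystem::create_directories(data_path + "Compiled/");
}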

Device Example

Note: this is no longer possible due to changes in CUDA 8.0. Sadly it is currently not possible to implement custom tracers in separate projects on Windows. This is only a slight inconvenience, because they now have to be implemented in the library itself.

As discussed, a simple tracer only needs to override RenderBlock, like this:

class ExampleTracer : public Tracer<true, true>
{
public:
	ExampleTracer() {}
protected:
	void DebugInternal(Image* I, const Vec2i& p);
	virtual void RenderBlock(Image* I, int x, int y, int blockW, int blockH);
};

Implement the following in a *.cu file to use CUDA:

CUDA_FUNC_IN Spectrum ComputePixel(const NormalizedT<Ray>& r, Sampler& rng)
{
	//trace a ray through the scene; this is the actual heavyweight operation
	auto prim_res = traceRay(r);
	if (!prim_res.hasHit())
		return 0.0f;

	//stores info about the intersection, like normal, pixel derivatives and uv coordinates
	BSDFSamplingRecord bRec;
	//computes the intersection data and does normal/height (parallax occlusion mapping) sampling
	prim_res.getBsdfSample(r, bRec, ETransportMode::ERadiance);

	const int NSamples = 4;
	float acc_dist = 0, acc_weight = 0;
	for (int i = 0; i < NSamples; i++)
	{
		//sample an outgoing direction (returns f/pdf as in mitsuba; the weight is not needed here)
		prim_res.getMat().bsdf.sample(bRec, rng.randomFloat2());
		auto res = traceRay(Ray(bRec.dg.P, bRec.getOutgoing()));
		if (res.hasHit())
		{
			//accumulate the occlusion distance relative to the scene bounds
			acc_dist += res.m_fDist / g_SceneData.m_sBox.Size().length();
			acc_weight += 1;
		}
	}
	//avoid a division by zero in case no secondary ray hit anything
	return acc_weight != 0 ? acc_dist / acc_weight * 0.5f : 0.0f;
}

CUDA_GLOBAL void pathKernel(unsigned int w, unsigned int h, unsigned int xoff, unsigned int yoff, Image img)
{
	//for tracers using the block sampler this gives the pixel position of the current CUDA thread
	Vec2i pixel = TracerBase::getPixelPos(xoff, yoff);
	//get an rng object for random numbers on the device
	auto rng = g_SamplerData(TracerBase::getPixelIndex(xoff, yoff, w, h));
	if (pixel.x < w && pixel.y < h)
	{
		NormalizedT<Ray> r;
		//the position of the pixel to sample a ray for
		auto pixelPos = Vec2f(pixel.x, pixel.y) + rng.randomFloat2();
		//the last parameter is used for sampling an outgoing direction, if necessary
		Spectrum imp = g_SceneData.sampleSensorRay(r, pixelPos, rng.randomFloat2());
		Spectrum col = imp * ComputePixel(r, rng);
		//adds the result to the output image
		img.AddSample(pixel.x, pixel.y, col);
	}
}

void ExampleTracer::RenderBlock(Image* I, int x, int y, int blockW, int blockH)
{
	pathKernel<<<BLOCK_SAMPLER_LAUNCH_CONFIG>>>(w, h, x, y, *I);
}

Splitting the code into multiple functions like this makes it possible to debug the computation for a single pixel on the host:

void ExampleTracer::DebugInternal(Image* I, const Vec2i& pixel)
{
	//nearly all functions can be used on the host exactly as on the device
	auto rng = g_SamplerData(pixel.y * I->getWidth() + pixel.x);
	NormalizedT<Ray> r;
	auto pixelPos = Vec2f((float)pixel.x, (float)pixel.y) + rng.randomFloat2();
	Spectrum imp = g_SceneData.sampleSensorRay(r, pixelPos, rng.randomFloat2());
	//step into this call with a debugger to inspect the per-pixel computation
	ComputePixel(r, rng);
}
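
On the host side the custom tracer is then used exactly like the PhotonTracer in the host example above:

	ExampleTracer tracer;
	tracer.Resize(width, height);
	tracer.InitializeScene(&scene);
	scene.UpdateScene();
	for (int i = 0; i < NPasses; i++)
		tracer.DoPass(&outImage, !i);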