Skip to content

Commit

Permalink
refactor based on (9344570)
Browse files Browse the repository at this point in the history
  • Loading branch information
wilsonCernWq committed Dec 18, 2023
1 parent 81584a2 commit 312bc66
Show file tree
Hide file tree
Showing 40 changed files with 1,167 additions and 3,250 deletions.
3 changes: 1 addition & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ if(OVR_BUILD_MODULE_NNVOLUME)
endif()

# The two projects use different target names ...
if(TARGET rendercommon)
if((NOT TARGET util) AND (TARGET rendercommon))
add_library(util ALIAS rendercommon)
endif()

Expand All @@ -60,7 +60,6 @@ if(OVR_BUILD_MODULE_NNVOLUME)
set(MACROCELL_SIZE_MIP 4)
set(ENABLE_IN_SHADER ON)
set(ENABLE_OUT_OF_CORE ON)
set(ENABLE_OPTIX ON)
set(ENABLE_OPENGL ON)

add_subdirectory(core)
Expand Down
200 changes: 129 additions & 71 deletions api.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -185,10 +185,10 @@ int vnrSimpleVolumeGetNumberOfTimeSteps(vnrVolume self)

// neural volume

vnrVolume vnrCreateNeuralVolume(const json& config, vnrVolume groundtruth, bool online_macrocell_construction)
vnrVolume vnrCreateNeuralVolume(const json& config, vnrVolume groundtruth, bool online_macrocell_construction, size_t batchsize)
{
auto& source = castSimpleVolume(groundtruth)->source;
auto ret = std::make_shared<NeuralVolumeContext>();
auto ret = std::make_shared<NeuralVolumeContext>(batchsize);
ret->dims = groundtruth->dims;
ret->type = groundtruth->type;
ret->range = groundtruth->range;
Expand All @@ -202,9 +202,9 @@ vnrVolume vnrCreateNeuralVolume(const json& config, vnrVolume groundtruth, bool
return ret;
}

vnrVolume vnrCreateNeuralVolume(const json& config, vnr::vec3i dims)
vnrVolume vnrCreateNeuralVolume(const json& config, vnr::vec3i dims, size_t batchsize)
{
auto ret = std::make_shared<NeuralVolumeContext>();
auto ret = std::make_shared<NeuralVolumeContext>(batchsize);
ret->dims = dims;
ret->type = vnr::VALUE_TYPE_FLOAT;
ret->range = range1f(0, 1);
Expand All @@ -218,7 +218,7 @@ vnrVolume vnrCreateNeuralVolume(const json& config, vnr::vec3i dims)
return ret;
}

vnrVolume vnrCreateNeuralVolume(const json& params)
vnrVolume vnrCreateNeuralVolume(const json& params, size_t batchsize)
{
vec3i dims;
if (params.contains("volume")) {
Expand All @@ -229,7 +229,7 @@ vnrVolume vnrCreateNeuralVolume(const json& params)
else {
throw std::runtime_error("expecting a model config with volume dims tag");
}
auto ret = vnrCreateNeuralVolume(params["model"], dims);
auto ret = vnrCreateNeuralVolume(params["model"], dims, batchsize);
vnrNeuralVolumeSetParams(ret, params);
return ret;
}
Expand Down Expand Up @@ -344,6 +344,18 @@ int vnrNeuralVolumeGetNumberOfBlobs(vnrVolume self)
return nv->neural.get_num_blobs();
}

int vnrNeuralVolumeGetNBytesMultilayerPerceptron(vnrVolume self)
{
  // Report the size (in bytes) of the neural volume's MLP component.
  const auto volume = castNeuralVolume(self);
  return volume->neural.get_mlp_size();
}

int vnrNeuralVolumeGetNBytesEncoding(vnrVolume self)
{
  // Report the size (in bytes) of the neural volume's encoding component.
  const auto volume = castNeuralVolume(self);
  return volume->neural.get_enc_size();
}

// general

void vnrVolumeSetClippingBox(vnrVolume self, vnr::vec3f lower, vnr::vec3f upper)
Expand Down Expand Up @@ -438,71 +450,79 @@ const vnr::range1f& vnrTransferFunctionGetValueRange(vnrTransferFunction tfn)
return tfn->range;
}


// ------------------------------------------------------------------
//
// ------------------------------------------------------------------

vnrRenderer vnrCreateRenderer(vnrVolume v)
{
auto self = std::make_shared<RendererContext>();
auto self = std::make_shared<RenderContext>();
self->volume = v;

auto& renderer = self->renderer;
self->framebuffer.create();
self->framebuffer_stream = self->framebuffer.current_stream();
self->render.stream = self->framebuffer_stream;
if (self->volume->isNetwork()) {
auto& neural = std::dynamic_pointer_cast<NeuralVolumeContext>(self->volume)->neural;

// std::cout << "INF MC " << neural.get_macrocell_value_range() << std::endl;
renderer.set_scene(neural.texture(),
neural.get_data_type(),
neural.get_data_dims(),
neural.get_data_value_range(),
neural.get_data_transform(),
neural.get_macrocell_dims(),
neural.get_macrocell_spacings(),
neural.get_macrocell_value_range(),
neural.get_macrocell_max_opacity(),
&neural);
auto& source = std::dynamic_pointer_cast<NeuralVolumeContext>(self->volume)->neural;
self->render.init(
source.get_data_transform(),
source.get_data_type(),
source.get_data_dims(),
source.get_data_value_range(),
source.get_macrocell_dims(),
source.get_macrocell_spacings(),
source.get_macrocell_value_range(),
source.get_macrocell_max_opacity()
);
}
else {
auto& source = std::dynamic_pointer_cast<SimpleVolumeContext>(self->volume)->source;

// std::cout << "REF MC " << source.get_macrocell_value_range() << std::endl;
renderer.set_scene(source.texture(),
source.get_data_type(),
source.get_data_dims(),
source.get_data_value_range(),
source.get_data_transform(),
source.get_macrocell_dims(),
source.get_macrocell_spacings(),
source.get_macrocell_value_range(),
source.get_macrocell_max_opacity());
}

renderer.set_scene_clipbox(self->volume->clipbox);
renderer.set_rendering_mode(5);
renderer.init();
self->render.init(
source.get_data_transform(),
source.get_data_type(),
source.get_data_dims(),
source.get_data_value_range(),
source.get_macrocell_dims(),
source.get_macrocell_spacings(),
source.get_macrocell_value_range(),
source.get_macrocell_max_opacity()
);
}
self->framebuffer_reset = true;
return self;
}

void vnrRendererSetMode(vnrRenderer self, int mode)
{
self->renderer.set_rendering_mode(mode);
self->rendering_mode = mode;
if (mode < 4) {
std::string name = "???";
switch ((vnrRenderMode)mode) {
case VNR_OPTIX_NO_SHADING: name = "VNR_OPTIX_NO_SHADING"; break;
case VNR_OPTIX_GRADIENT_SHADING: name = "VNR_OPTIX_GRADIENT_SHADING"; break;
case VNR_OPTIX_FULL_SHADOW: name = "VNR_OPTIX_FULL_SHADOW"; break;
case VNR_OPTIX_SINGLE_SHADE_HEURISTIC: name = "VNR_OPTIX_SINGLE_SHADE_HEURISTIC"; break;
}
std::cerr << "Error: OptiX-based rendering mode (" << mode << ":" << name << ") is deprecated" << std::endl;
}
self->framebuffer_reset = true;
}

void vnrRendererSetDenoiser(vnrRenderer self, bool flag)
{
self->renderer.set_denoiser(flag);
;
}

void vnrRendererSetVolumeSamplingRate(vnrRenderer self, float rate)
{
self->renderer.set_volume_sampling_rate(rate);
self->sampling_rate = rate;
self->framebuffer_reset = true;
}

void vnrRendererSetVolumeDensityScale(vnrRenderer self, float value)
{
self->renderer.set_volume_density_scale(value);
self->density_scale = value;
self->framebuffer_reset = true;
}

// ------------------------------------------------------------------
Expand All @@ -511,71 +531,115 @@ void vnrRendererSetVolumeDensityScale(vnrRenderer self, float value)

void vnrRendererSetTransferFunction(vnrRenderer self, vnrTransferFunction _tfn)
{
range1f original_data_range;
auto& tfn = *_tfn;
if (self->volume->isNetwork()) {
auto nv = std::dynamic_pointer_cast<NeuralVolumeContext>(self->volume);
nv->neural.set_transfer_function(tfn.color, tfn.alpha, tfn.range);
auto& source = std::dynamic_pointer_cast<NeuralVolumeContext>(self->volume)->neural;
source.set_transfer_function(tfn.color, tfn.alpha, tfn.range);
original_data_range = source.get_data_value_range();
}
else {
auto sv = std::dynamic_pointer_cast<SimpleVolumeContext>(self->volume);
sv->source.set_transfer_function(tfn.color, tfn.alpha, tfn.range);
auto& source = std::dynamic_pointer_cast<SimpleVolumeContext>(self->volume)->source;
source.set_transfer_function(tfn.color, tfn.alpha, tfn.range);
original_data_range = source.get_data_value_range();
}

self->renderer.set_transfer_function(tfn.color, tfn.alpha, tfn.range);
// self->renderer.set_transfer_function(tfn.color, tfn.alpha, tfn.range);
self->tfn.update(tfn, original_data_range, self->framebuffer_stream);
self->framebuffer_reset = true;
}

void vnrRendererSetCamera(vnrRenderer self, vnrCamera cam)
{
self->renderer.set_camera(*cam);
self->camera = *cam;
self->framebuffer_reset = true;
}

void vnrRendererSetFramebufferSize(vnrRenderer self, vec2i fbsize)
{
self->renderer.resize(fbsize);
self->framebuffer.resize(fbsize);
self->framebuffer_size = fbsize;
self->framebuffer_reset = true;
}

vnr::vec4f *vnrRendererMapFrame(vnrRenderer self)
{
vec4f *pixels = nullptr;
self->renderer.mapframe(&pixels);
CUDA_CHECK(cudaStreamSynchronize(self->framebuffer_stream));
vec4f *pixels = self->framebuffer.host_pointer();

self->framebuffer.safe_swap();
self->framebuffer_stream = self->framebuffer.current_stream();
self->render.stream = self->framebuffer_stream;

return pixels;
}

void vnrRendererResetAccumulation(vnrRenderer self)
{
self->renderer.reset_frame();
self->framebuffer_reset = true;
}

void vnrRender(vnrRenderer self)
{
self->renderer.render();
if (self->framebuffer_reset) {
self->render.update(
self->rendering_mode,
self->tfn.tfn,
self->sampling_rate,
self->density_scale,
self->volume->clipbox.lower,
self->volume->clipbox.upper,
self->camera,
self->framebuffer_size
);
self->framebuffer_reset = false;
}

if (self->volume->isNetwork()) {
auto& source = std::dynamic_pointer_cast<NeuralVolumeContext>(self->volume)->neural;
self->render.render(self->framebuffer.device_pointer(), &source, source.texture());
}
else {
auto& source = std::dynamic_pointer_cast<SimpleVolumeContext>(self->volume)->source;
self->render.render(self->framebuffer.device_pointer(), nullptr, source.texture());
}

self->framebuffer.download_async();
}


// ------------------------------------------------------------------
//
// ------------------------------------------------------------------

void vnrMemoryQuery(size_t* used_by_engine, size_t* used_by_tcnn, unsigned long long* used_total)
void vnrResetMaxMemory()
{
if (used_by_engine) *used_by_engine = util::total_n_bytes_allocated();
if (used_by_tcnn) *used_by_tcnn = NeuralVolume::total_n_bytes_allocated_by_tcnn();
if (used_total) util::getUsedGPUMemory(used_total);
util::max_nbytes_allocated() = 0;
}

void vnrMemoryQuery(size_t* used_by_self, size_t* used_by_tcnn, size_t* used_peak, size_t* used_total)
{
  // Query GPU memory statistics. Every output pointer is optional:
  // passing nullptr skips that particular query.
  if (used_by_self != nullptr) {
    *used_by_self = util::tot_nbytes_allocated();
  }
  if (used_by_tcnn != nullptr) {
    *used_by_tcnn = NeuralVolume::tot_nbytes_allocated_by_tcnn();
  }
  if (used_peak != nullptr) {
    // Peak usage combines the engine's high-water mark with tiny-cuda-nn's.
    *used_peak = util::max_nbytes_allocated() + NeuralVolume::max_nbytes_allocated_by_tcnn();
  }
  if (used_total != nullptr) {
    // getUsedGPUMemory reports via unsigned long long; convert to size_t for the caller.
    unsigned long long total_bytes;
    util::getUsedGPUMemory(&total_bytes);
    *used_total = static_cast<size_t>(total_bytes);
  }
}

void vnrMemoryQueryPrint(const char* str)
{
size_t used_by_engine;
size_t used_by_self;
size_t used_by_tcnn;
unsigned long long used_total;
vnrMemoryQuery(&used_by_engine, &used_by_tcnn);
util::getUsedGPUMemory(&used_total);

printf("%s: total used %s, engine %s, tcnn %s, unknown %s\n", str,
size_t used_peak;
size_t used_total;
vnrMemoryQuery(&used_by_self, &used_by_tcnn, &used_peak, &used_total);
printf("%s: total used %s, self %s, tcnn %s, unknown %s, peak %s\n", str,
util::prettyBytes(used_total).c_str(),
util::prettyBytes(used_by_engine).c_str(),
util::prettyBytes(used_by_self).c_str(),
util::prettyBytes(used_by_tcnn).c_str(),
util::prettyBytes(used_total - used_by_engine - used_by_tcnn).c_str()
util::prettyBytes(used_total - used_by_self - used_by_tcnn).c_str(),
util::prettyBytes(used_peak).c_str()
);
}

Expand All @@ -601,12 +665,6 @@ void vnrCompilationStatus(const char* str)
printf(" fV-SRN: disabled\n");
#endif

#ifdef ENABLE_OPTIX
printf(" optix renderer: enabled\n");
#else
printf(" optix renderer: disabled\n");
#endif

#ifdef ENABLE_IN_SHADER
printf(" in-shader renderer: enabled\n");
#else
Expand Down
Loading

0 comments on commit 312bc66

Please sign in to comment.