diff --git a/parsers/opts.py b/parsers/opts.py
index dd37b6f..b03066a 100644
--- a/parsers/opts.py
+++ b/parsers/opts.py
@@ -28,7 +28,7 @@ def get_options(delayed_parse = False):
parser.add_argument("--name", default = "complex.xml", help = "Scene file name with extension", type = str)
parser.add_argument("--arch", default = 'cuda', choices=['cpu', 'gpu', 'vulkan', 'cuda'], help = "Backend-architecture")
parser.add_argument("--save_iter", default = -1, type = int, help = "Iteration to save check-point")
- parser.add_argument("--type", default = 'vpt', choices=['vpt', 'pt', 'bdpt'], help = "Algorithm to be used")
+ parser.add_argument("--type", default = 'vpt', choices=['vpt', 'pt', 'bdpt', 'ao'], help = "Algorithm to be used")
parser.add_argument("-p", "--profile", default = False, action = "store_true", help = "Whether to profile the program")
parser.add_argument("--no_gui", default = False, action = "store_true", help = "Whether to display GUI")
diff --git a/render.py b/render.py
index 16f35ff..4ba911f 100644
--- a/render.py
+++ b/render.py
@@ -11,6 +11,7 @@
from tqdm import tqdm
from renderer.bdpt import BDPT
+from renderer.ssao import SSAORenderer
from renderer.vpt import VolumeRenderer
from renderer.vanilla_renderer import Renderer
from tracer.path_tracer import PathTracer
@@ -29,8 +30,8 @@
CONSOLE = Console(width = 128)
-rdr_mapping = {"pt": Renderer, "vpt": VolumeRenderer, "bdpt": BDPT}
-name_mapping = {"pt": "", "vpt": "Volumetric ", "bdpt": "Bidirectional "}
+rdr_mapping = {"pt": Renderer, "vpt": VolumeRenderer, "bdpt": BDPT, "ao": SSAORenderer}
+name_mapping = {"pt": "", "vpt": "Volumetric ", "bdpt": "Bidirectional ", "ao": "SSAO "}
def export_transient_profile(
rdr: BDPT, sample_cnt: int, out_path: str, out_name: str, out_ext: str,
diff --git a/renderer/ssao.py b/renderer/ssao.py
index 94d5998..63d5c6d 100644
--- a/renderer/ssao.py
+++ b/renderer/ssao.py
@@ -3,6 +3,7 @@
@date: 2024-4-7
"""
import taichi as ti
+import taichi.math as tm
from taichi.math import vec3
from typing import List
@@ -17,6 +18,11 @@
from rich.console import Console
CONSOLE = Console(width = 128)
+@ti.func
+def smooth_step(edge0: float, edge1: float, x: float) -> float:
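+ """ GLSL-style Hermite smoothstep: 0 below edge0, 1 above edge1, smooth cubic in between """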
+ t = tm.clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0)
+ return t * t * (3.0 - 2.0 * t)
+
@ti.data_oriented
class SSAORenderer(PathTracer):
"""
@@ -30,9 +36,12 @@ def __init__(self,
self.smp_hemisphere = prop.get('smp_hemisphere', 32)
self.depth_samples = prop.get('depth_samples', 64)
self.sample_extent = prop.get('sample_extent', 0.1) # float
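+ # inverse camera rotation (world -> camera) and world-space viewing direction, used when splatting samples back onto the depth map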
+ self.inv_cam_r = self.cam_r.inverse()
+ self.cam_normal = (self.cam_r @ vec3([0, 0, 1])).normalized()
CONSOLE.log(f"Rendering depth map: {self.depth_samples} sample(s) per pixel.")
self.get_depth_map()
CONSOLE.log(f"Depth map rendering completed.")
+ CONSOLE.log(f"SSAO statistics: Sample per hemisphere: {self.smp_hemisphere} | Sample extent: {self.sample_extent:.2f}")
@ti.kernel
def get_depth_map(self):
@@ -52,17 +61,52 @@ def get_depth_map(self):
if num_valid_hits:
self.color[i, j][2] /= num_valid_hits
+
+ @ti.func
+ def rasterize_pinhole(self, local_ray_x: float, local_ray_y: float):
+ """ For path with only one camera vertex, ray should be re-rasterized to the film
+ ray_d is pointing into the camera, therefore should be negated
+ """
+ valid_raster = False
+
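+ # scale film-plane coordinates by the focal length (divide by inv_focal) and offset by half the film resolution to get pixel indices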
+ pi = int(self.half_w + 1.0 - local_ray_x / self.inv_focal)
+ pj = int(self.half_h + 1.0 + local_ray_y / self.inv_focal)
+ if pi >= self.start_x and pj >= self.start_y and pi < self.end_x and pj < self.end_y: # cropping is considered
+ valid_raster = True
+ return pj, pi, valid_raster
+
+ @ti.func
+ def splat_camera(self, ray_d: vec3):
+ """ Rasterize pos onto the image plane and query the depth map
+ """
+ test_depth = 0.0
+ if tm.dot(ray_d, self.cam_normal) > 0.:
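+ # transform the direction into camera space and perspective-divide onto the z = 1 film plane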
+ local_ray = self.inv_cam_r @ ray_d
+ z = local_ray[2]
+ if z > 0.0:
+ local_ray /= z
+ p_row, p_col, is_valid = self.rasterize_pinhole(local_ray[0], local_ray[1])
+ if is_valid:
+ test_depth = self.color[p_col, p_row][2]
+ return test_depth
@ti.func
- def get_sample_depth(self, it: ti.template(), pos: vec3):
+ def normal_sample_occluded(self, it: ti.template(), pos: vec3):
""" Get samples around normal
and return the screen space depth
"""
- local_dir = uniform_hemisphere()
- normal_sample, _ = delocalize_rotate(it.n_s, local_dir)
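+ # uniformly sample a direction on the hemisphere around the shading normal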
+ local_dir, _pdf = uniform_hemisphere()
+ normal_sample, _r = delocalize_rotate(it.n_s, local_dir)
position = pos + normal_sample * self.sample_extent
- depth = (position - self.cam_t).norm()
- return depth
+ # rasterize the sampled position back onto the camera image plane
+ ray_d = position - self.cam_t
+ ray_d /= ray_d.norm()
+ ## Online querying (alternative): trace a ray toward the sample instead of reading the depth map
+ # it = self.ray_intersect(ray_d, self.cam_t)
+ # queried_depth = it.min_depth
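+ # small depth bias (1e-3) reduces self-occlusion when the sample projects back onto its own surface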
+ queried_depth = self.splat_camera(ray_d) + 1e-3
+ depth = (position - self.cam_t).norm()
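+ # occluded if the sample lies behind the surface stored in the depth map; the smoothstep range check fades out occluders that are far away in depth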
+ return ti.select(depth >= queried_depth, 1.0, 0.0) * smooth_step(0.0, 1.0, self.sample_extent / ti.abs(queried_depth - depth))
@ti.kernel
def render(self, _t_start: int, _t_end: int, _s_start: int, _s_end: int, _a: int, _b: int):
@@ -71,21 +115,18 @@ def render(self, _t_start: int, _t_end: int, _s_start: int, _s_end: int, _a: int
for i, j in self.pixels:
in_crop_range = i >= self.start_x and i < self.end_x and j >= self.start_y and j < self.end_y
+ color_vec = ZERO_V3
if not self.do_crop or in_crop_range:
- min_depth = self.color[i, j][2] + 1e-5
ray_d = self.pix2ray(i, j)
- ray_o = self.cam_t
- it = self.ray_intersect(ray_d, ray_o)
- if it.is_ray_not_hit(): break
- # AO sampling: the hemisphere
- pos = ray_o + ray_d * it.min_depth
- num_un_occluded = 0.0
- for _ in range(self.smp_hemisphere):
- depth = self.get_sample_depth(it, pos)
- num_un_occluded += float(depth < min_depth) # depth
- self.color[i, j][0] += num_un_occluded / self.smp_hemisphere
- color_vec = ZERO_V3
- color_vec.fill(self.color[i, j][2] / self.cnt[None])
+ it = self.ray_intersect(ray_d, self.cam_t)
+ if not it.is_ray_not_hit():
+ # AO sampling: the hemisphere
+ pos = self.cam_t + ray_d * it.min_depth
+ occlusion_factor = 0.0
+ for _ in range(self.smp_hemisphere):
+ occlusion_factor += self.normal_sample_occluded(it, pos)
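+ # ambient term = 1 - average occlusion; accumulated over frames and normalized by cnt below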
+ self.color[i, j][0] += 1.0 - occlusion_factor / self.smp_hemisphere
+ color_vec.fill(self.color[i, j][0] / self.cnt[None])
self.pixels[i, j] = color_vec
def summary(self):
diff --git a/scenes/cbox/cbox.xml b/scenes/cbox/cbox.xml
index 6006a0a..c5b45e6 100755
--- a/scenes/cbox/cbox.xml
+++ b/scenes/cbox/cbox.xml
@@ -26,6 +26,10 @@
+
+
+
+