forked from ShenhanQian/GaussianAvatars
-
Notifications
You must be signed in to change notification settings - Fork 0
/
render.py
147 lines (127 loc) · 6.46 KB
/
render.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
import torch
from torch.utils.data import DataLoader
from scene import Scene
import os
from tqdm import tqdm
from os import makedirs
import concurrent.futures
import multiprocessing
from pathlib import Path
from tqdm import tqdm
from PIL import Image
import numpy as np
from gaussian_renderer import render
from utils.general_utils import safe_state
from argparse import ArgumentParser
from arguments import ModelParams, PipelineParams, get_combined_args
from gaussian_renderer import GaussianModel, FlameGaussianModel
from mesh_renderer import NVDiffRenderer
mesh_renderer = NVDiffRenderer()
def write_data(path2data):
    """Write a batch of rendered outputs to disk.

    Args:
        path2data: dict mapping an output ``pathlib.Path`` to the data to
            write. Supported suffixes: ``.png``/``.jpg`` (CHW float tensor
            in [0, 1]), ``.obj``/``.txt`` (string), ``.npz`` (dict of arrays).

    Raises:
        NotImplementedError: for any unsupported file suffix.
    """
    for path, data in path2data.items():
        # mkdir with exist_ok=True is a no-op when the directory exists,
        # so no separate existence check is needed.
        path.parent.mkdir(parents=True, exist_ok=True)
        if path.suffix in [".png", ".jpg"]:
            # CHW float in [0, 1] -> HWC uint8, with +0.5 for round-to-nearest.
            data = data.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
            Image.fromarray(data).save(path)
        elif path.suffix in [".obj", ".txt"]:
            # Both suffixes hold plain text; one branch serves them both.
            with open(path, "w") as f:
                f.write(data)
        elif path.suffix in [".npz"]:
            np.savez(path, **data)
        else:
            raise NotImplementedError(f"Unknown file type: {path.suffix}")
def render_set(dataset : ModelParams, name, iteration, views, gaussians, pipeline, background, render_mesh):
    """Render every view of one split, save images to disk, and encode videos.

    Args:
        dataset: model parameters (provides ``model_path`` and ``select_camera_id``).
        name: split name used as the output sub-directory ("train", "val", ...).
        iteration: checkpoint iteration embedded in the output folder name.
        views: camera/frame dataset to render.
        gaussians: GaussianModel or FlameGaussianModel to render.
        pipeline: pipeline parameters forwarded to the Gaussian renderer.
        background: background color tensor.
        render_mesh: when True, additionally rasterize the mesh overlay.
    """
    if dataset.select_camera_id != -1:
        name = f"{name}_{dataset.select_camera_id}"
    iter_path = Path(dataset.model_path) / name / f"ours_{iteration}"
    render_path = iter_path / "renders"
    gts_path = iter_path / "gt"
    if render_mesh:
        # No mkdir needed: write_data creates missing parent directories.
        render_mesh_path = iter_path / "renders_mesh"
    render_path.mkdir(parents=True, exist_ok=True)
    gts_path.mkdir(parents=True, exist_ok=True)

    views_loader = DataLoader(views, batch_size=None, shuffle=False, num_workers=8)
    max_threads = multiprocessing.cpu_count()
    print('Max threads: ', max_threads)
    worker_args = []
    for idx, view in enumerate(tqdm(views_loader, desc="Rendering progress")):
        # A mesh-bound model must be posed to the current frame's timestep.
        if gaussians.binding is not None:
            gaussians.select_mesh_by_timestep(view.timestep)
        rendering = render(view, gaussians, pipeline, background)["render"]
        gt = view.original_image[0:3, :, :]
        if render_mesh:
            out_dict = mesh_renderer.render_from_camera(gaussians.verts, gaussians.faces, view)
            rgba_mesh = out_dict['rgba'].squeeze(0).permute(2, 0, 1)  # (H, W, C) -> (C, H, W)
            rgb_mesh = rgba_mesh[:3, :, :]
            alpha_mesh = rgba_mesh[3:, :, :]
            # Alpha-blend a semi-transparent mesh over the ground-truth image.
            mesh_opacity = 0.5
            rendering_mesh = rgb_mesh * alpha_mesh * mesh_opacity + gt.to(rgb_mesh) * (alpha_mesh * (1 - mesh_opacity) + (1 - alpha_mesh))

        # render_path/gts_path are already Path objects; no re-wrapping needed.
        path2data = {}
        path2data[render_path / f'{idx:05d}.png'] = rendering
        path2data[gts_path / f'{idx:05d}.png'] = gt
        if render_mesh:
            path2data[render_mesh_path / f'{idx:05d}.png'] = rendering_mesh
        worker_args.append([path2data])
        # Flush pending writes in parallel once a full batch has accumulated,
        # or when the last view has been rendered.
        if len(worker_args) == max_threads or idx == len(views_loader) - 1:
            with concurrent.futures.ThreadPoolExecutor(max_threads) as executor:
                futures = [executor.submit(write_data, *args) for args in worker_args]
                concurrent.futures.wait(futures)
            worker_args = []

    # Best-effort video encoding; requires ffmpeg on PATH. Note os.system does
    # not raise on a non-zero exit code, so failures are only printed by ffmpeg.
    try:
        os.system(f"ffmpeg -y -framerate 25 -f image2 -pattern_type glob -i '{render_path}/*.png' -pix_fmt yuv420p {iter_path}/renders.mp4")
        os.system(f"ffmpeg -y -framerate 25 -f image2 -pattern_type glob -i '{gts_path}/*.png' -pix_fmt yuv420p {iter_path}/gt.mp4")
        if render_mesh:
            os.system(f"ffmpeg -y -framerate 25 -f image2 -pattern_type glob -i '{render_mesh_path}/*.png' -pix_fmt yuv420p {iter_path}/renders_mesh.mp4")
    except Exception as e:
        print(e)
def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_val : bool, skip_test : bool, render_mesh: bool):
    """Load a trained scene and render the requested camera splits."""
    with torch.no_grad():
        # A mesh-bound avatar needs the FLAME-aware model; otherwise plain 3DGS.
        if dataset.bind_to_mesh:
            gaussians = FlameGaussianModel(dataset.sh_degree)
        else:
            gaussians = GaussianModel(dataset.sh_degree)
        scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)

        background = torch.tensor(
            [1, 1, 1] if dataset.white_background else [0, 0, 0],
            dtype=torch.float32, device="cuda")

        if dataset.target_path != "":
            # when loading from a target path, test cameras are merged into the train cameras
            target_name = os.path.basename(os.path.normpath(dataset.target_path))
            render_set(dataset, f'{target_name}', scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, render_mesh)
            return

        # Standard splits: render each one unless its skip flag is set.
        for split_name, skip, get_cameras in (
            ("train", skip_train, scene.getTrainCameras),
            ("val", skip_val, scene.getValCameras),
            ("test", skip_test, scene.getTestCameras),
        ):
            if not skip:
                render_set(dataset, split_name, scene.loaded_iter, get_cameras(), gaussians, pipeline, background, render_mesh)
if __name__ == "__main__":
    # Set up command line argument parser
    parser = ArgumentParser(description="Testing script parameters")
    # sentinel=True lets get_combined_args tell CLI-provided values apart from
    # defaults when merging with the saved training config.
    model = ModelParams(parser, sentinel=True)
    pipeline = PipelineParams(parser)
    # -1 selects the latest saved iteration.
    parser.add_argument("--iteration", default=-1, type=int)
    parser.add_argument("--skip_train", action="store_true")
    parser.add_argument("--skip_val", action="store_true")
    parser.add_argument("--skip_test", action="store_true")
    parser.add_argument("--quiet", action="store_true")
    parser.add_argument("--render_mesh", action="store_true")
    # Merge CLI args with the cfg_args stored alongside the trained model.
    args = get_combined_args(parser)
    print("Rendering " + args.model_path)
    # Initialize system state (RNG)
    safe_state(args.quiet)
    render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_val, args.skip_test, args.render_mesh)