embedders
Applies frequency (positional) embedding to the 3D position and the view direction.
import torch
import torch.nn as nn


class FreqEmbedder(nn.Module):
    def __init__(self, in_dim=3, multi_res=10, use_log_bands=True, include_input=True):
        super().__init__()
        self.in_dim = in_dim
        self.num_freqs = multi_res
        self.max_freq_log2 = multi_res
        self.use_log_bands = use_log_bands
        self.periodic_fns = [torch.sin, torch.cos]
        self.include_input = include_input
        self.embed_fns = None
        self.out_dim = None
        self.num_embed_fns = None
        self.create_embedding_fn()

    def create_embedding_fn(self):
        self.embed_fns = []
        # 10 * 2 * 3 = 60
        self.out_dim = self.num_freqs * len(self.periodic_fns) * self.in_dim
        if self.include_input:
            # Keep the raw input as part of the encoding.
            self.embed_fns.append(lambda x: x)
            self.out_dim += self.in_dim  # 63
        if self.use_log_bands:
            # Frequencies spaced logarithmically: 2^0, ..., 2^max_freq_log2
            freq_bands = 2. ** torch.linspace(0., self.max_freq_log2, steps=self.num_freqs)
        else:
            # Frequencies spaced linearly between 2^0 and 2^max_freq_log2
            freq_bands = torch.linspace(2.**0, 2.**self.max_freq_log2, steps=self.num_freqs)
        for freq in freq_bands:
            for p_fn in self.periodic_fns:
                self.embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
        self.num_embed_fns = len(self.embed_fns)

    def forward(self, x):
        """
        x: [..., in_dim], xyz or view direction.
        embedding: [..., out_dim], corresponding frequency encoding.
        """
        embed_lst = [embed_fn(x) for embed_fn in self.embed_fns]
        # [[x, sin(x), cos(x), sin(2x), cos(2x), ..., sin(512x), cos(512x)]]
        embedding = torch.cat(embed_lst, dim=-1)
        return embedding
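A quick usage sketch (the multi_res values and tensor shapes below are illustrative assumptions, not taken from a specific config): positions encoded with multi_res=10 give 3 + 3*2*10 = 63 dims, view directions with multi_res=4 give 3 + 3*2*4 = 27 dims.

    # Illustrative usage; batch/sample sizes are arbitrary for the example.
    pos_embedder = FreqEmbedder(in_dim=3, multi_res=10)   # out_dim = 63
    view_embedder = FreqEmbedder(in_dim=3, multi_res=4)   # out_dim = 27

    xyz = torch.rand(1024, 64, 3)    # [bs, n_sample, 3] sampled points along rays
    dirs = torch.rand(1024, 3)       # [bs, 3] view directions

    pos_enc = pos_embedder(xyz)      # [1024, 64, 63]
    view_enc = view_embedder(dirs)   # [1024, 27]
    print(pos_embedder.out_dim, view_embedder.out_dim)  # 63 27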
NeRFBackbone
After embedding, the position and view direction become feature vectors that are fed into the NeRFBackbone network, which outputs sigma (density) and color.
import torch
import torch.nn as nn
import torch.nn.functional as F


class NeRFBackbone(nn.Module):
    def __init__(self, pos_dim=3, cond_dim=64, view_dim=3, hid_dim=128,
                 num_density_linears=8, num_color_linears=3, skip_layer_indices=[4]):
        super().__init__()
        self.pos_dim = pos_dim
        self.cond_dim = cond_dim
        self.view_dim = view_dim
        self.hid_dim = hid_dim
        self.out_dim = 4  # rgb + sigma
        self.num_density_linears = num_density_linears
        self.num_color_linears = num_color_linears
        self.skip_layer_indices = skip_layer_indices

        # Density branch: MLP over [pos, cond]; the layers listed in
        # skip_layer_indices receive the input again via a skip connection.
        density_input_dim = pos_dim + cond_dim
        self.density_linears = nn.ModuleList(
            [nn.Linear(density_input_dim, hid_dim)] +
            [nn.Linear(hid_dim, hid_dim) if i not in self.skip_layer_indices
             else nn.Linear(hid_dim + density_input_dim, hid_dim)
             for i in range(num_density_linears - 1)])
        self.density_out_linear = nn.Linear(hid_dim, 1)

        # Color branch: MLP over [density features, view direction].
        color_input_dim = view_dim + hid_dim
        self.color_linears = nn.ModuleList(
            [nn.Linear(color_input_dim, hid_dim // 2)] +
            [nn.Linear(hid_dim // 2, hid_dim // 2) for _ in range(num_color_linears - 1)])
        self.color_out_linear = nn.Linear(hid_dim // 2, 3)

    def forward(self, pos, cond, view):
        """
        pos: [bs, n_sample, pos_dim], encoding of position.
        cond: [cond_dim,] or [bs, cond_dim], condition features.
        view: [bs, view_dim], encoding of view direction.
        """
        bs, n_sample, _ = pos.shape
        # Broadcast the condition vector to every sampled point.
        if cond.dim() == 1:  # [cond_dim]
            cond = cond.squeeze()[None, None, :].expand([bs, n_sample, self.cond_dim])
        elif cond.dim() == 2:  # [bs, cond_dim]
            cond = cond[:, None, :].expand([bs, n_sample, self.cond_dim])
        view = view[:, None, :].expand([bs, n_sample, self.view_dim])

        density_linear_input = torch.cat([pos, cond], dim=-1)
        h = density_linear_input
        for i in range(len(self.density_linears)):
            h = self.density_linears[i](h)
            h = F.relu(h)
            if i in self.skip_layer_indices:
                h = torch.cat([density_linear_input, h], -1)
        sigma = self.density_out_linear(h)

        h = torch.cat([h, view], -1)
        for i in range(len(self.color_linears)):
            h = self.color_linears[i](h)
            h = F.relu(h)
        rgb = self.color_out_linear(h)

        outputs = torch.cat([rgb, sigma], -1)
        return outputs
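A minimal end-to-end sketch chaining the embedders and the backbone. The 64-dim cond vector, batch/sample sizes, and the final sigmoid/ReLU activations are assumptions for illustration (the activations follow the common NeRF raw-to-output convention; they are not part of the module above):

    # Hypothetical forward pass; pos_dim/view_dim come from the embedders' out_dim.
    pos_embedder = FreqEmbedder(in_dim=3, multi_res=10)   # out_dim = 63
    view_embedder = FreqEmbedder(in_dim=3, multi_res=4)   # out_dim = 27

    model = NeRFBackbone(pos_dim=pos_embedder.out_dim,
                         cond_dim=64,
                         view_dim=view_embedder.out_dim,
                         hid_dim=128)

    bs, n_sample = 1024, 64
    xyz = torch.rand(bs, n_sample, 3)    # sampled points along rays
    dirs = torch.rand(bs, 3)             # per-ray view directions
    cond = torch.rand(64)                # assumed conditioning vector, e.g. a per-frame latent

    outputs = model(pos_embedder(xyz), cond, view_embedder(dirs))   # [bs, n_sample, 4]
    rgb = torch.sigmoid(outputs[..., :3])   # colors squashed to [0, 1]
    sigma = F.relu(outputs[..., 3:])        # non-negative densities

The rgb and sigma tensors would then go into the volume-rendering step to composite per-ray colors.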