Cleaned up the code and created a Camera class to control the view.
class Camera
{
public:
    Camera(Vec3f cameraPos, Vec3f target, Vec3f up) : cameraPos_(cameraPos), target_(target), up_(up) {}
    Matrix getView();
    Matrix getProjection();
private:
    Vec3f cameraPos_;
    Vec3f target_;
    Vec3f up_;
};
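The two member functions aren't shown above; here is a minimal sketch of what they can look like, following the lookat() and projection setup from the tinyrenderer lessons (the Matrix/Vec3f API from the course geometry.h is assumed, so treat this as an illustration rather than my exact code):

Matrix Camera::getView() {
    // Build an orthonormal camera basis and move the target to the origin,
    // the same idea as the tutorial's lookat().
    Vec3f z = (cameraPos_ - target_).normalize();
    Vec3f x = cross(up_, z).normalize();
    Vec3f y = cross(z, x).normalize();
    Matrix view = Matrix::identity();
    for (int i = 0; i < 3; i++) {
        view[0][i] = x[i];
        view[1][i] = y[i];
        view[2][i] = z[i];
        view[i][3] = -target_[i];
    }
    return view;
}

Matrix Camera::getProjection() {
    // Tutorial-style perspective: the only non-trivial entry is -1/c,
    // where c is the camera-to-target distance.
    Matrix projection = Matrix::identity();
    projection[3][2] = -1.f / (cameraPos_ - target_).norm();
    return projection;
}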
And a dedicated Rasterizer class that does the rendering.
class Rasterizer
{
public:
    Rasterizer(Matrix viewport, int width, int height);
    void setModelView(Matrix modelView) { modelView_ = modelView; }
    void setProjection(Matrix projection) { projection_ = projection; }
    void draw(Model* model, IShader& shader, TGAImage& image);
    Matrix getProjection() { return projection_; }
    Matrix getModelView() { return modelView_; }
private:
    void triangle(Vec4f* pts, IShader& shader, TGAImage& image);
    Matrix viewport_;
    Matrix modelView_;
    Matrix projection_;
    std::vector<float> zBuffer_;
    int width_;
    int height_;
};
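The constructor body isn't shown either; a sketch of what it might do, assuming it just stores the viewport matrix and preallocates the z-buffer cleared to "infinitely far" (requires <limits>):

Rasterizer::Rasterizer(Matrix viewport, int width, int height)
    : viewport_(viewport),
      modelView_(Matrix::identity()),
      projection_(Matrix::identity()),
      zBuffer_(width * height, -std::numeric_limits<float>::max()),  // depth buffer starts at -infinity
      width_(width),
      height_(height)
{
}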
And a shader interface that handles vertex shading and fragment shading separately.
struct IShader
{
    virtual ~IShader() {}
    // the vertex stage returns homogeneous clip coordinates, so it has to return Vec4f
    virtual Vec4f vertex(int iface, int vertIdx) = 0;
    virtual bool fragment(Vec3f bar, TGAColor& color) = 0;
};
The vertex shader transforms the model's triangle vertices.
The fragment shader loops over every point inside the triangle and interpolates its attributes.
void Rasterizer::draw(Model* model, IShader& shader, TGAImage& image) {
    for (int i = 0; i < model->faceSize(); i++) {
        Vec4f screencoords[3];
        for (int j = 0; j < 3; j++) {
            screencoords[j] = viewport_ * shader.vertex(i, j);
        }
        triangle(screencoords, shader, image);
    }
}

void Rasterizer::triangle(Vec4f* pts, IShader& shader, TGAImage& image) {
    //...
    for (int x = minX; x < maxX; x++) {
        for (int y = minY; y < maxY; y++) {
            //...
            bool discard = shader.fragment(Vec3f(alpha, beta, gamma), color);
            //...
        }
    }
}
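For context, roughly how these pieces get wired together in main(). The globals model, rasterizer and light_dir are assumed here because the shaders below refer to them; buildViewport() and the model path are just stand-ins for whatever is actually used, so this is a sketch rather than my exact main():

Model* model = nullptr;
Rasterizer* rasterizer = nullptr;
Vec3f light_dir = Vec3f(1, 1, 1).normalize();

int main() {
    const int width = 800, height = 800;
    model = new Model("obj/african_head.obj");          // example model path

    Camera camera(Vec3f(1, 1, 3), Vec3f(0, 0, 0), Vec3f(0, 1, 0));
    rasterizer = new Rasterizer(buildViewport(width / 8, height / 8, width * 3 / 4, height * 3 / 4),
                                width, height);
    rasterizer->setModelView(camera.getView());
    rasterizer->setProjection(camera.getProjection());

    TGAImage image(width, height, TGAImage::RGB);
    PhongTexShader shader;                               // any of the shaders below works here
    rasterizer->draw(model, shader, image);
    image.flip_vertically();                             // TGA origin is bottom-left in the course code
    image.write_tga_file("output.tga");

    delete model;
    delete rasterizer;
    return 0;
}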
Broken faces appeared. This kind of artifact is basically a precision error in the data; most likely updating to the new geometry.h changed where some values are treated as floats versus ints and introduced new rounding errors.
Adjusted it.
Tried the tutorial's fragment shaders and also adjusted the light direction.
flat shading
struct FlatShader : public IShader
{
    Vec3f vert[3];
    virtual Vec4f vertex(int iface, int vertIdx) {
        vert[vertIdx] = model->vert(model->face(iface)[vertIdx]);
        return rasterizer->getProjection() * rasterizer->getModelView() * embed<4>(vert[vertIdx]);
    }
    virtual bool fragment(Vec3f barycentricCoordinates, TGAColor& color) {
        Vec3f n = cross((vert[1] - vert[0]), (vert[2] - vert[0])).normalize();
        float I = std::max(0.f, n * light_dir);
        color = TGAColor(255, 255, 255) * I;
        return false;
    }
};
Phong shading
struct PhongShader : public IShader
{
    Vec3f n[3];
    virtual Vec4f vertex(int iface, int vertIdx) {
        n[vertIdx] = model->nverts(model->nface(iface)[vertIdx]);
        Vec3f v = model->vert(model->face(iface)[vertIdx]);
        return rasterizer->getProjection() * rasterizer->getModelView() * embed<4>(v);
    }
    virtual bool fragment(Vec3f barycentricCoordinates, TGAColor& color) {
        Vec3f normal = n[0] * barycentricCoordinates.x + n[1] * barycentricCoordinates.y + n[2] * barycentricCoordinates.z;
        float I = std::max(0.f, normal * light_dir);
        color = TGAColor(255, 255, 255) * I;
        return false;
    }
};
Gouraud shading with texture
struct GouraudTexShader : public IShader
{
    Vec3f intensity;
    mat<2, 3, float> uv;
    virtual Vec4f vertex(int iface, int vertIdx) {
        Vec3f n = model->nverts(model->nface(iface)[vertIdx]);
        intensity[vertIdx] = std::max(0.f, n * light_dir);
        Vec3f v = model->vert(model->face(iface)[vertIdx]);
        uv.set_col(vertIdx, model->tverts(model->tface(iface)[vertIdx]));
        return rasterizer->getProjection() * rasterizer->getModelView() * embed<4>(v);
    }
    virtual bool fragment(Vec3f barycentricCoordinates, TGAColor& color) {
        float I = intensity * barycentricCoordinates;
        Vec2f texcoords = uv * barycentricCoordinates;
        color = model->getDiffuseColor(texcoords.x, texcoords.y) * I;
        return false;
    }
};
Flat shading with texture
struct FlatTexShader : public IShader
{
    Vec3f vert[3];
    mat<2, 3, float> uv;
    virtual Vec4f vertex(int iface, int vertIdx) {
        vert[vertIdx] = model->vert(model->face(iface)[vertIdx]);
        uv.set_col(vertIdx, model->tverts(model->tface(iface)[vertIdx]));
        return rasterizer->getProjection() * rasterizer->getModelView() * embed<4>(vert[vertIdx]);
    }
    virtual bool fragment(Vec3f barycentricCoordinates, TGAColor& color) {
        Vec3f n = cross((vert[1] - vert[0]), (vert[2] - vert[0])).normalize();
        float I = std::max(0.f, n * light_dir);
        Vec2f texcoords = uv * barycentricCoordinates;
        color = model->getDiffuseColor(texcoords.x, texcoords.y) * I;
        return false;
    }
};
Phong shading with texture
struct PhongTexShader : public IShader
{
    Vec3f n[3];
    mat<2, 3, float> uv;
    virtual Vec4f vertex(int iface, int vertIdx) {
        n[vertIdx] = model->nverts(model->nface(iface)[vertIdx]);
        Vec3f v = model->vert(model->face(iface)[vertIdx]);
        uv.set_col(vertIdx, model->tverts(model->tface(iface)[vertIdx]));
        return rasterizer->getProjection() * rasterizer->getModelView() * embed<4>(v);
    }
    virtual bool fragment(Vec3f barycentricCoordinates, TGAColor& color) {
        Vec3f normal = n[0] * barycentricCoordinates.x + n[1] * barycentricCoordinates.y + n[2] * barycentricCoordinates.z;
        float I = std::max(0.f, normal * light_dir);
        Vec2f texcoords = uv * barycentricCoordinates;
        color = model->getDiffuseColor(texcoords.x, texcoords.y) * I;
        return false;
    }
};
Shader using a normal map
struct normalTexShader : public IShader {
    mat<2, 3, float> uv;
    virtual Vec4f vertex(int iface, int vertIdx) {
        Vec3f v = model->vert(model->face(iface)[vertIdx]);
        uv.set_col(vertIdx, model->tverts(model->tface(iface)[vertIdx]));
        return rasterizer->getProjection() * rasterizer->getModelView() * embed<4>(v);
    }
    virtual bool fragment(Vec3f barycentricCoordinates, TGAColor& color) {
        Vec2f texcoords = uv * barycentricCoordinates;
        Vec3f normal = model->getNormal(texcoords.x, texcoords.y);
        float I = std::max(0.f, normal * light_dir);
        color = model->getDiffuseColor(texcoords.x, texcoords.y) * I;
        return false;
    }
};
The output image differs somewhat from the output of the tutorial's source code.
Following the material at the end of Lesson 5: suppose the model data contains a point (x, y, z, 1), and the vector associated with that point is (A, B, C, 0).
Originally that vector is perpendicular to the surface at the point, so their product is zero.
If the model's vertices have since gone through some matrix transformations, the normals recorded in the normal map may no longer be perpendicular to the transformed surface. The normals we fetch therefore also need a matrix transformation, so that at shading time each normal is still perpendicular to its corresponding surface.
As in the figure, the bracket on the right corresponds to the vertex transformation, so the bracket on the left is the transformation the normal vector needs.
As the figure shows, the inverse transpose of M is exactly that transformation.
When the transformation M consists only of uniform scaling, rotation and translation, M and its inverse transpose give the same normal (up to normalization); but because the transformation here usually includes a perspective projection, they are generally not equal.
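Restating the argument the figures illustrate (my own transcription of the Lesson 5 derivation, not the figures themselves): since the product of the normal and the point is zero, inserting $M^{-1}M$ between them keeps it zero:

$$
(A\;B\;C\;0)\,(x\;y\;z\;1)^{T} = 0
\;\Longrightarrow\;
\big((A\;B\;C\;0)\,M^{-1}\big)\,\big(M\,(x\;y\;z\;1)^{T}\big) = 0 .
$$

The right-hand bracket $M\,(x\;y\;z\;1)^{T}$ is the transformed vertex, so the left-hand bracket $(A\;B\;C\;0)\,M^{-1}$ is the transformed normal; written as a column vector that is $(M^{-1})^{T}(A\;B\;C\;0)^{T}$, which is exactly what the MVPT matrix applies in the shader below.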
struct normalTexShader : public IShader {
    mat<2, 3, float> uv;
    Matrix MVPT = (rasterizer->getProjection() * rasterizer->getModelView()).invert_transpose();
    //.... inside fragment(), after the normal has been fetched from the normal map:
        Vec3f n = proj<3>(MVPT * embed<4>(normal)).normalize();
        float I = std::max(0.f, n * light_dir);
    //...
    }
};
There is still a discrepancy with the official images. Comparing the code, I found two differences between my version and the tutorial source.
1. In the tutorial, the light direction is relative to the model: the light direction also goes through the ModelView and Projection transforms.
shader.uniform_M = Projection * ModelView;
Vec3f l = proj<3>(uniform_M * embed<4>(light_dir)).normalize();
My light direction stays an absolute (world-space) direction.
Both are logically correct; it just depends on the specific requirements.
The other difference is how the normal is read from the normal map: in the tutorial source, the RGB channels correspond to z, y, x.
Vec3f Model::normal(Vec2f uvf) {
    Vec2i uv(uvf[0]*normalmap_.get_width(), uvf[1]*normalmap_.get_height());
    TGAColor c = normalmap_.get(uv[0], uv[1]);
    Vec3f res;
    for (int i=0; i<3; i++)
        res[2-i] = (float)c[i]/255.f*2.f - 1.f;
    return res;
}
In my version, RGB maps directly to x, y, z.
Vec3f Model::getNormal(float x, float y) {
    TGAColor n = normalTex_.get(x * normalTex_.get_width(), y * normalTex_.get_height());
    Vec3f res;
    for (int i = 0; i < 3; i++) {
        res[i] = n[i] / 255.f * 2 - 1.f;
    }
    return res;
}
To check whether the subsequent output is correct, I switched to the tutorial's handling here for now.
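For reference, this is roughly what my getNormal looks like after adopting the tutorial's channel order (the same function as above, only the destination index flipped):

Vec3f Model::getNormal(float x, float y) {
    TGAColor n = normalTex_.get(x * normalTex_.get_width(), y * normalTex_.get_height());
    Vec3f res;
    for (int i = 0; i < 3; i++) {
        res[2 - i] = n[i] / 255.f * 2.f - 1.f;   // RGB -> z, y, x, matching the tutorial
    }
    return res;
}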
Specular reflection
Vec3f r = (n * (n * l * 2.f) - l).normalize();
float spec = pow(std::max(r.z, 0.0f), model->getSpecularColor(texcoords.x, texcoords.y));
float diff = std::max(0.f, n * l);
TGAColor c = model->getDiffuseColor(texcoords.x, texcoords.y);
color = c;
for (int i = 0; i < 3; i++) color[i] = std::min<float>(5 + c[i] * (diff + .6 * spec), 255);
The tutorial is rather vague here; it never fully explains the light intensities and reflection coefficients that appear in the Phong lighting formula.
In the standard formula each term is a color coefficient times a light intensity; the tutorial simply hard-codes these values: 5 for the ambient term, 1 for the diffuse term, and 0.6 for the specular term. The two cosine factors in the formula are the diff and spec computed above, and the specular map stores the exponent p of the specular term.
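Written out per channel, my reading of the last line of the code above is roughly:

$$
\mathrm{color}_i = \min\!\big(5 + c_i\,(k_d\,(\mathbf{n}\cdot\mathbf{l}) + k_s\,(\mathbf{r}\cdot\mathbf{v})^{p}),\;255\big),
\qquad k_d = 1,\;\; k_s = 0.6,
$$

where $p$ comes from the specular map, and because the viewer looks along the z axis in this space, $\mathbf{r}\cdot\mathbf{v}$ collapses to the r.z used in the code.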
Follow-along practice code repository for this project: