//
// Created by Keuin on 2022/4/12.
//

#ifndef RT_VIEWPORT_H
#define RT_VIEWPORT_H

#include "timer.h"
#include "bitmap.h"
#include "ray.h"
#include "vec.h"
#include "hitlist.h"
#include "tracelog.h"
#include <cstdlib>
#include <memory>
#include <limits>
#include <vector>
#include <iostream>
#include <cstdint>
#include <random>
#include <cmath>
#include <cassert>

// bias context, used for placing sub-pixels
class bias_ctx {
    bool enabled; // a plain flag (instead of a virtual call) selects between the biased and unbiased variants
    std::mt19937_64 mt;
    std::uniform_real_distribution<double> uni{0.0, 1.0};

public:
    bias_ctx() : enabled(false) {}

    explicit bias_ctx(uint64_t seed) : enabled(true), mt(seed) {}

    void operator()(double &bx, double &by) {
        if (enabled) {
            bx = uni(mt);
            by = uni(mt);
        } else {
            bx = 0.0;
            by = 0.0;
        }
    }
};
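
// Usage sketch for bias_ctx (illustrative only; the seed values are arbitrary):
//
//   bias_ctx no_bias{};       // disabled: always yields (0.0, 0.0), i.e. sample pixel centers
//   bias_ctx jitter{12345u};  // seeded: uniform sub-pixel offsets in [0.0, 1.0)
//   double bx, by;
//   jitter(bx, by);           // fetch the jitter for the next sample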

// TODO rename to camera
// Single-sample viewport which supports biased (sub-pixel) sampling
// U: pixel color type, V: vector component (position) type
template<typename U, typename V>
class basic_viewport {
    vec3<V> cxyz; // coordinate of the focus point
    vec3<V> screen_center;
//    double pitch; // TODO implement
//    double yaw; // TODO implement
    uint32_t image_width; // how many pixels every row has
    uint32_t image_height; // how many pixels every column has
    V screen_hw; // half screen width, derived from the FOV (or given directly)
    V screen_hh; // half screen height, derived from the half width and the image aspect ratio
//    double fov_h; // horizontal FOV, determined if screen_width or screen_height is known
//    double focus_length; // distance between the focus point and the image screen
    hitlist &world;
    vec3<V> vup{0, 1, 0}; // "up" vector, fixes the camera's roll about the view axis
public:

    basic_viewport(const vec3<V> &cxyz, const vec3<V> &screen_center,
                   uint32_t image_width, uint32_t image_height,
                   double fov_h /* horizontal FOV, in radians */, hitlist &world) :
            cxyz{cxyz}, screen_center{screen_center}, image_width{image_width}, image_height{image_height},
            screen_hw{(cxyz - screen_center).norm() * tan(fov_h / 2.0)},
            screen_hh{screen_hw * ((double) image_height / image_width)},
            world{world} {}

    basic_viewport(const vec3<V> &cxyz, const vec3<V> &screen_center,
                   uint32_t image_width, uint32_t image_height,
                   double screen_hw, double screen_hh,
                   hitlist &world) :
            cxyz{cxyz}, screen_center{screen_center}, image_width{image_width}, image_height{image_height},
            screen_hw{screen_hw},
            screen_hh{screen_hh},
            world{world} {
        // the given screen size must match the image's aspect ratio
        assert(std::abs(1.0 * image_width / image_height - 1.0 * screen_hw / screen_hh) < 1e-8);
    }

    /**
     * Generate the image seen from the viewpoint.
     * @param diffuse_seed seed for the per-call diffuse-reflection RNG
     * @param bias sub-pixel bias generator; yields offsets in [0.0, 1.0) on each axis
     * @return the rendered image
     */
    bitmap<U> render(uint64_t diffuse_seed, bias_ctx &bias
            /* thread-specific state is passed per call, so one instance can serve many threads */) const {
        // The implementation keeps all mutable state on the local stack,
        // which keeps the class immutable and thread-safe.
        bitmap<U> image{image_width, image_height};
        random_uv_gen_3d ruvg{diffuse_seed};
        V bx, by;
        const auto r = screen_center - cxyz;
        const int img_hw = image_width / 2, img_hh = image_height / 2;
        // the screen plane is spanned by an orthogonal basis (u, v) centered at screen_center,
        // where u := x' points right along the screen and v := y' points up along the screen
        const auto u = cross(r, vup).unit_vec() * screen_hw, v = cross(u, r).unit_vec() * screen_hh;
        assert(std::abs(dot(u, v)) < 1e-8); // u and v are orthogonal
        assert(std::abs(dot(u, r)) < 1e-8); // u is orthogonal to the view direction
        // iterate over every pixel on the image
        for (int j = -img_hh; j < img_hh; ++j) { // axis y, transformation is needed
            for (int i = -img_hw; i < img_hw; ++i) { // axis x
                bias(bx, by); // get a random bias (bx, by) for sub-pixel sampling
                assert(0 <= bx);
                assert(0 <= by);
                assert(bx < 1.0);
                assert(by < 1.0);
                const auto off_u = (1.0 * i + bx) / img_hw;
                const auto off_v = (1.0 * j + by) / img_hh;
                const auto off = off_u * u + off_v * v; // offset on screen plane
                const auto dir = r + off; // direction vector from camera to current pixel on screen
                ray3d ray{cxyz, dir}; // from camera to pixel (on the viewport)
                const auto pixel = world.color<U>(ray, ruvg);
                const auto x_ = i + img_hw, y_ = j + img_hh; // map centered coordinates back to image indices
                image.set(x_, y_, pixel);

#ifdef LOG_TRACE
                const auto ret = pixel;
                const auto ret8b = pixel8b::from(ret);
                const auto ret8bg2 = pixel8b::from(ret.gamma2());
                TRACELOG(" ^  apply to pixel: (%d, %d), color: [%f, %f, %f] (8bit: [%d, %d, %d], 8bit-gamma2: [%d, %d, %d])\n",
                         x_, y_,
                         (double) ret.r, (double) ret.g, (double) ret.b,
                         ret8b.r, ret8b.g, ret8b.b,
                         ret8bg2.r, ret8bg2.g, ret8bg2.b);
#endif
            }
        }
        return image;
    }
};
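
// Usage sketch (illustrative only; `hitlist` and `pixel8b` come from hitlist.h
// and bitmap.h, and the scene setup plus the choice of pixel type are assumptions):
//
//   hitlist world; // populate with objects...
//   basic_viewport<pixel8b, double> vp{
//           vec3<double>{0, 0, 0},  // camera focus point
//           vec3<double>{0, 0, -1}, // screen center
//           640, 480,               // image size, in pixels
//           M_PI / 2,               // horizontal FOV: 90 degrees, in radians
//           world};
//   bias_ctx bias{42};              // seeded sub-pixel jitter for anti-aliasing
//   auto image = vp.render(123456789u, bias);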

#endif //RT_VIEWPORT_H