defocus-modules/lib/raytracer.c

#include <defocus/defocus.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include "pcg/pcg_basic.h"
/* ************* *
 *               *
 *    Vectors    *
 *               *
 * ************* */
static df_v3 normalize(df_v3 v)
{
float l = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
return (df_v3){v.x / l, v.y / l, v.z / l};
}
static float dot(df_v3 a, df_v3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static float v2_len(df_v2 v) { return sqrtf(v.x * v.x + v.y * v.y); }
/* ********************* *
 *                       *
 *    Image functions    *
 *                       *
 * ********************* */
#define MAX_IMAGES 1024
struct
{
struct
{
stbi_uc *pixels;
int w;
int h;
} images[MAX_IMAGES];
unsigned int image_count;
} _image_table;
DF_API df_image_handle df_load_image(const char *path, int *w, int *h)
{
    /* Slot 0 is reserved for the invalid handle 0, so the table holds at most
     * MAX_IMAGES - 1 images; a handle equal to MAX_IMAGES would index out of bounds. */
    if (_image_table.image_count + 1 >= MAX_IMAGES)
        return 0;
int width, height, c;
stbi_uc *pixels = stbi_load(path, &width, &height, &c, 4);
if (!pixels) {
return 0;
}
df_image_handle handle = (_image_table.image_count + 1);
_image_table.image_count++;
_image_table.images[handle].pixels = pixels;
_image_table.images[handle].w = width;
_image_table.images[handle].h = height;
if (w) *w = width;
if (h) *h = height;
return handle;
}
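/* Usage sketch (illustrative; "picture.png" is a placeholder path): handles are
 * 1-based so that 0 can signal failure.
 *
 *     int w, h;
 *     df_image_handle img = df_load_image("picture.png", &w, &h);
 *     if (!img) {
 *         // failed to load, or the image table is full
 *     }
 *
 * Nothing in this file releases the pixel data again; the table lives for the
 * whole process. */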
/* *************************** *
 *                             *
 *    Builtin camera models    *
 *                             *
 * *************************** */
DF_API df_pinhole_camera_data df_create_pinhole_camera_data(int image_width, int image_height, float focal_length)
{
float aspect_ratio = (float)image_width / (float)image_height;
float viewport_height = 2.f;
float viewport_width = aspect_ratio * viewport_height;
/* Simple perspective projection.
* The lens is placed at (0, 0, 0)
*/
float lower_left_x = -viewport_width / 2.f;
float lower_left_y = -viewport_height / 2.f;
float lower_left_z = -focal_length;
return (df_pinhole_camera_data){
.viewport_size = {.x = viewport_width, .y = viewport_height},
.lower_left = { .x = lower_left_x, .y = lower_left_y, .z = lower_left_z },
};
}
DF_API df_ray df_pinhole_camera(float u, float v, unsigned int sample_idx, void *camera_data)
{
DF_UNUSED(sample_idx);
df_pinhole_camera_data *data = camera_data;
df_v3 origin = {0.f, 0.f, 0.f};
df_v3 target = {
data->lower_left.x + u * data->viewport_size.x,
data->lower_left.y + v * data->viewport_size.y,
data->lower_left.z
};
return (df_ray){
.origin = origin,
.dir = target,
};
}
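/* Worked example (the numbers follow directly from the code above): for a
 * 640x480 image with focal_length 1 the viewport is (8/3 x 2) at z = -1, so
 *
 *     df_pinhole_camera_data cam = df_create_pinhole_camera_data(640, 480, 1.f);
 *     df_ray r = df_pinhole_camera(0.5f, 0.5f, 0, &cam);
 *
 * yields r.origin == (0, 0, 0) and r.dir == (0, 0, -1), the ray through the
 * viewport center. df_trace_rays() below maps pixel (x, y) to
 * u = x / (width - 1), v = y / (height - 1). */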
static df_v2 random_point_on_disk(unsigned int i)
{
    /* Crude per-sample decorrelation: burn i values from the shared PCG stream
     * before drawing the actual sample. */
    for (unsigned int j = 0; j < i; ++j)
        pcg32_random();
    /* Rejection sampling: draw points in [-1, 1]^2 until one falls inside the
     * unit disk (acceptance probability pi/4, i.e. ~1.27 draws on average). */
    while (1) {
        df_v2 p = {.x = 2.f * (float)pcg32_boundedrand(1024) / 1023.f - 1.f,
                   .y = 2.f * (float)pcg32_boundedrand(1024) / 1023.f - 1.f};
        if (v2_len(p) <= 1.f)
            return p;
    }
}
DF_API df_thin_lense_camera_data df_create_thin_lense_camera_data(int image_width,
int image_height,
float aperture,
float focal_distance)
{
float aspect_ratio = (float)image_width / (float)image_height;
float viewport_height = 2.f;
float viewport_width = aspect_ratio * viewport_height;
/* Simple perspective projection.
* The lens is placed at (0, 0, 0)
*/
float lower_left_x = -viewport_width / 2.f;
float lower_left_y = -viewport_height / 2.f;
float lower_left_z = -focal_distance;
float lens_radius = aperture / 2.f;
return (df_thin_lense_camera_data){
.lens_radius = lens_radius,
.lower_left = {
.x = lower_left_x,
.y = lower_left_y,
.z = lower_left_z,
},
.viewport_size = {
.x = viewport_width,
.y = viewport_height
}
};
}
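/* Worked example: df_create_thin_lense_camera_data(640, 480, 0.2f, 1.f) gives a
 * lens_radius of 0.1, a viewport of (8/3 x 2) and places the focal plane at
 * z = -1. A larger aperture means a larger lens disk and therefore stronger
 * defocus blur away from the focal plane. */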
DF_API df_ray df_thin_lense_camera(float u, float v, unsigned int sample_idx, void *camera_data)
{
    df_thin_lense_camera_data *data = camera_data;
    /* Sample the lens aperture; the offset must not depend on the pixel
     * coordinates (u, v), otherwise the blur strength varies across the image. */
    df_v2 lensp = random_point_on_disk(sample_idx);
    lensp.x *= data->lens_radius;
    lensp.y *= data->lens_radius;
    df_v3 origin = {lensp.x, lensp.y, 0.f};
    /* The target lies on the focal plane at z = -focal_distance. All samples of a
     * pixel converge there, so geometry on that plane stays sharp while everything
     * in front of or behind it is blurred. */
    df_v3 target = {data->lower_left.x + u * data->viewport_size.x,
                    data->lower_left.y + v * data->viewport_size.y,
                    data->lower_left.z};
    return (df_ray){
        .origin = origin,
        .dir = {target.x - origin.x, target.y - origin.y, target.z - origin.z},
    };
}
/* ********************************* *
 *                                   *
 *    Intersection test functions    *
 *                                   *
 * ********************************* */
typedef struct
{
float ray_t;
float img_u;
float img_v;
df_image_handle image;
} df_hit;
/* Returns the largest near-intersection t over all spheres, or -1.f if no sphere is hit. */
static float sphere_test(float ray_origin_x,
float ray_origin_y,
float ray_origin_z,
float ray_dx,
float ray_dy,
float ray_dz,
const df_sphere *spheres,
unsigned int sphere_count)
{
float result = -1.f;
for (unsigned int i = 0; i < sphere_count; ++i) {
float delta_x = ray_origin_x - spheres[i].center_x;
float delta_y = ray_origin_y - spheres[i].center_y;
float delta_z = ray_origin_z - spheres[i].center_z;
float a = ray_dx * ray_dx + ray_dy * ray_dy + ray_dz * ray_dz;
float b = (delta_x * ray_dx + delta_y * ray_dy + delta_z * ray_dz);
float c = (delta_x * delta_x + delta_y * delta_y + delta_z * delta_z) - (spheres[i].radius * spheres[i].radius);
float discriminant = b * b - a * c;
        /* Half-b form: b omits the usual factor 2, so the near root is
         * t = (-b - sqrtf(discriminant)) / a. */
        if (discriminant > 0) {
            float t = (-b - sqrtf(discriminant)) / a;
#if 0
df_hit_record hit;
hit.at.x = ray_origin_x + t * ray_dx;
hit.at.y = ray_origin_y + t * ray_dy;
hit.at.z = ray_origin_z + t * ray_dz;
hit.ray_t = t;
df_v3 outward_normal = {
hit.at.x - spheres[i].center_x,
hit.at.y - spheres[i].center_y,
hit.at.z - spheres[i].center_z
};
set_face_normal(&hit, (df_v3){ray_dx, ray_dy, ray_dz}, outward_normal);
#endif
if (t > result)
result = t;
}
}
return result;
}
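/* Derivation of the half-b form used above: a ray point o + t*d lies on the
 * sphere with center c and radius r when |o + t*d - c|^2 = r^2. With
 * delta = o - c this expands to
 *
 *     (d.d) * t^2 + 2 * (delta.d) * t + (delta.delta - r^2) = 0
 *
 * so with a = d.d, b = delta.d (half the usual linear coefficient) and
 * c = delta.delta - r^2 the roots are t = (-b +/- sqrt(b^2 - a*c)) / a,
 * which is why the near root divides by a rather than 2*a. */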
static df_hit plane_test(df_v3 ray_origin,
df_v3 ray_d,
const df_plane *planes,
unsigned int plane_count)
{
df_hit result = {
.ray_t = -1.f,
};
for (unsigned int i = 0; i < plane_count; ++i) {
        float denom = (ray_d.x * planes[i].normal_x + ray_d.y * planes[i].normal_y + ray_d.z * planes[i].normal_z);
        if (denom > DF_EPSF32 || denom < -DF_EPSF32) {
            float delta_x = planes[i].base_x - ray_origin.x;
            float delta_y = planes[i].base_y - ray_origin.y;
            float delta_z = planes[i].base_z - ray_origin.z;
            float num = delta_x * planes[i].normal_x + delta_y * planes[i].normal_y + delta_z * planes[i].normal_z;
            float t = num / denom;
if (t > result.ray_t) {
result.ray_t = t;
/* Project point on plane to image coordinate system */
float px = ray_origin.x + t * ray_d.x;
float py = ray_origin.y + t * ray_d.y;
float pz = ray_origin.z + t * ray_d.z;
float img_p3_x = px - planes[i].img_p0_x;
float img_p3_y = py - planes[i].img_p0_y;
float img_p3_z = pz - planes[i].img_p0_z;
float w = planes[i].img_w;
float h = planes[i].img_h;
result.img_u =
img_p3_x * planes[i].img_ax0_x + img_p3_y * planes[i].img_ax0_y + img_p3_z * planes[i].img_ax0_z;
result.img_v =
img_p3_x * planes[i].img_ax1_x + img_p3_y * planes[i].img_ax1_y + img_p3_z * planes[i].img_ax1_z;
result.img_u /= w;
result.img_v /= h;
result.image = planes[i].image;
}
}
        /* Otherwise the ray is parallel to the plane (either contained in it or disjoint); such planes are simply skipped. */
}
return result;
}
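/* For reference: a ray point o + t*d lies on the plane through `base` with
 * normal n when (o + t*d - base) . n == 0, which gives
 *
 *     t = ((base - o) . n) / (d . n)
 *
 * and the epsilon test above skips the parallel case d . n == 0. The image
 * coordinates are the projections of (p - img_p0) onto the two image axes,
 * divided by the quad extents img_w and img_h; this presumably assumes unit
 * length axes spanning the textured rectangle. */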
/* Ray packets to support more than one ray (=sample) per pixel */
typedef struct
{
unsigned int samples_per_pixel;
unsigned int ray_count;
df_v3 *ray_origins; /* (0, 0, 0) is the center of the lens */
df_v3 *ray_deltas; /* not necessarily normalized! */
df_v2i *ray_pixels; /* pixel coordinates in the output image */
} df_ray_packet;
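/* The three arrays are carved out of one allocation in df_trace_rays():
 *
 *     [ origins: ray_count * df_v3 | deltas: ray_count * df_v3 | pixels: ray_count * df_v2i ]
 *
 * so a single free() releases the whole packet, and the struct-of-arrays layout
 * keeps each attribute contiguous in memory. */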
DF_API int df_trace_rays(df_trace_rays_settings settings,
const df_sphere *spheres,
unsigned int sphere_count,
const df_plane *planes,
unsigned int plane_count,
uint8_t **result)
{
    /* Spheres are currently ignored; sphere_test() is not wired up in this path yet. */
    DF_UNUSED(spheres);
    DF_UNUSED(sphere_count);
    int image_width = settings.image_width;
    int image_height = settings.image_height;
unsigned int samples_per_pixel = (unsigned int)settings.samples_per_pixel;
if (samples_per_pixel == 0)
samples_per_pixel = 1;
df_camera_fn camera = settings.camera;
if (!camera)
return 0;
void *camera_data = settings.camera_data;
uint8_t *pixels = malloc(image_width * image_height * 3);
if (!pixels)
return 0;
memset(pixels, 0, image_width * image_height * 3);
df_ray_packet packet;
packet.samples_per_pixel = samples_per_pixel;
packet.ray_count = image_width * image_height * samples_per_pixel;
void *packet_mem = malloc(packet.ray_count * (2 * sizeof(df_v3) + sizeof(df_v2i)));
if (!packet_mem) {
free(pixels);
return 0;
}
packet.ray_origins = (df_v3 *)packet_mem;
packet.ray_deltas = packet.ray_origins + packet.ray_count;
packet.ray_pixels = (df_v2i *)(packet.ray_deltas + packet.ray_count);
/* FIXME(Kevin): Replace with dynamic (time() ?) initializer */
pcg32_srandom(1523789, 901842398023);
/* Generate the rays */
unsigned int ray_idx = 0;
for (int y = 0; y < image_height; ++y) {
/* TODO(Kevin): SIMD */
uint8_t *row = pixels + y * image_width * 3;
for (int x = 0; x < image_width; ++x) {
float u = (float)x / (float)(image_width - 1);
float v = (float)y / (float)(image_height - 1);
for (unsigned int samplei = 0; samplei < samples_per_pixel; ++samplei) {
packet.ray_pixels[ray_idx] = (df_v2i){x, y};
df_ray ray = camera(u, v, samplei, camera_data);
packet.ray_origins[ray_idx] = ray.origin;
packet.ray_deltas[ray_idx] = ray.dir;
++ray_idx;
}
#if 0
/* Target = Delta because origin is (0, 0, 0) */
df_v3 origin = {0.f, 0.f, 0.f};
df_v3 target = {lower_left_x + u * viewport_width, lower_left_y + v * viewport_height, lower_left_z};
/* Adjust for lens effect */
if (settings.lens_radius > 0.f) {
df_v2 lensp = concentric_sample_disk((df_v2){u, v});
lensp.x *= settings.lens_radius;
lensp.y *= settings.lens_radius;
float ft = -focal_length / target.z;
df_v3 focusp = {target.x * ft, target.y * ft, target.z * ft};
origin.x = lensp.x;
origin.y = lensp.y;
origin.z = 0.f;
target = (df_v3){focusp.x - lensp.x, focusp.y - lensp.y, focusp.z};
}
/* Raycast against all planes */
df_hit plane_hit = plane_test(origin, target, planes, plane_count);
#if 0
float hits[] = {sphere_hit_t, plane_hit.ray_t};
float hit_t = df_max_f32(hits, DF_ARRAY_COUNT(hits));
if (hit_t >= 0) {
df_v3 hit_p = {target_x * hit_t, target_y * hit_t, target_z * hit_t};
hit_p.z -= -1.f;
df_v3 normal = normalize(hit_p);
row[x * 3 + 0] = (uint8_t)((.5f * (normal.x + 1.f)) * 255);
row[x * 3 + 1] = (uint8_t)((.5f * (normal.y + 1.f)) * 255);
row[x * 3 + 2] = (uint8_t)((.5f * (normal.z + 1.f)) * 255);
}
#endif
if (plane_hit.ray_t >= 0) {
float img_u = plane_hit.img_u;
float img_v = plane_hit.img_v;
// Temporary, handle arbitrary scale (esp. resulting from different orientation)
float view_aspect = viewport_width / viewport_height;
if (img_u >= 0 && img_v >= 0 && img_u <= 1.f && img_v <= 1.f) {
int pixelx = (int)floorf(img_u * (_image_table.images[plane_hit.image].w - 1));
int pixely = (int)floorf(img_v * (_image_table.images[plane_hit.image].h - 1));
stbi_uc *pixel = _image_table.images[plane_hit.image].pixels +
4 * (pixely * _image_table.images[plane_hit.image].w + pixelx);
row[x * 3 + 0] = pixel[0];
row[x * 3 + 1] = pixel[1];
row[x * 3 + 2] = pixel[2];
}
}
else {
/* Gradient background color */
float len = sqrtf(target.x * target.x + target.y * target.y + target.z * target.z);
float t = .5f * (target.y / len + 1.f);
float r = (1.f - t) + t * 0.5f;
float g = (1.f - t) + t * 0.7f;
float b = (1.f - t) + t * 1.0f;
row[x * 3 + 0] = (uint8_t)(r * 255);
row[x * 3 + 1] = (uint8_t)(g * 255);
row[x * 3 + 2] = (uint8_t)(b * 255);
}
#endif
}
}
/* Trace the rays */
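    /* Samples are accumulated directly into the 8-bit output, so the per-sample
     * integer division truncates (up to samples_per_pixel - 1 units of brightness
     * can be lost per channel); an intermediate float or 32-bit accumulation
     * buffer would avoid that. Rays that miss every plane leave their pixel at
     * the cleared value (black). */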
for (unsigned int i = 0; i < packet.ray_count; ++i) {
df_v2i pixelp = packet.ray_pixels[i];
uint8_t *row = pixels + pixelp.y * image_width * 3;
df_hit plane_hit = plane_test(packet.ray_origins[i], packet.ray_deltas[i], planes, plane_count);
if (plane_hit.ray_t >= 0) {
float img_u = plane_hit.img_u;
float img_v = plane_hit.img_v;
if (img_u >= 0 && img_v >= 0 && img_u <= 1.f && img_v <= 1.f) {
int pixelx = (int)floorf(img_u * (_image_table.images[plane_hit.image].w - 1));
int pixely = (int)floorf(img_v * (_image_table.images[plane_hit.image].h - 1));
stbi_uc *pixel = _image_table.images[plane_hit.image].pixels +
4 * (pixely * _image_table.images[plane_hit.image].w + pixelx);
row[pixelp.x * 3 + 0] += pixel[0] / samples_per_pixel;
row[pixelp.x * 3 + 1] += pixel[1] / samples_per_pixel;
row[pixelp.x * 3 + 2] += pixel[2] / samples_per_pixel;
}
}
}
free(packet_mem);
*result = pixels;
return 1;
}
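/* End-to-end usage sketch. The plane values below are illustrative and follow
 * the way plane_test() reads the fields (unit image axes, quad extents in world
 * units); "picture.png" is a placeholder path and error handling is omitted.
 *
 *     int img_w, img_h;
 *     df_image_handle img = df_load_image("picture.png", &img_w, &img_h);
 *
 *     df_plane plane = {0};
 *     plane.normal_x = 0.f;  plane.normal_y = 0.f;  plane.normal_z = 1.f;
 *     plane.base_x = 0.f;    plane.base_y = 0.f;    plane.base_z = -2.f;
 *     plane.img_p0_x = -1.f; plane.img_p0_y = -1.f; plane.img_p0_z = -2.f;
 *     plane.img_ax0_x = 1.f; plane.img_ax0_y = 0.f; plane.img_ax0_z = 0.f;
 *     plane.img_ax1_x = 0.f; plane.img_ax1_y = 1.f; plane.img_ax1_z = 0.f;
 *     plane.img_w = 2.f;     plane.img_h = 2.f;
 *     plane.image = img;
 *
 *     df_thin_lense_camera_data cam = df_create_thin_lense_camera_data(640, 480, 0.2f, 1.f);
 *     df_trace_rays_settings settings = {0};
 *     settings.image_width = 640;
 *     settings.image_height = 480;
 *     settings.samples_per_pixel = 16;
 *     settings.camera = df_thin_lense_camera;
 *     settings.camera_data = &cam;
 *
 *     uint8_t *pixels = NULL;
 *     if (df_trace_rays(settings, NULL, 0, &plane, 1, &pixels)) {
 *         // ... write or display the 640 * 480 * 3 byte RGB buffer ...
 *         free(pixels); // allocated with malloc() above
 *     }
 */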