path: root/src/main.rs
use std::path::PathBuf;

use bpaf::{construct, positional, OptionParser, Parser};
use demosaic::Demosaic;
use image::buffer::ConvertBuffer;

mod demosaic;
mod pipeline;

#[derive(Clone, Debug)]
struct Args {
    paths: Vec<PathBuf>
}

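/// Builds the command-line interface: one or more raw image files as positional arguments.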
fn args() -> OptionParser<Args> {
    let paths = positional("FILE").some("must process at least one image");

    construct!(Args { paths })
        .to_options()
        .descr("Intuitive raw photo processing engine")
}

fn main() {
    // Parse arguments
    let args = args().fallback_to_usage().run();

    // Initialize GPU
    let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor::default());
    let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions::default()))
        .expect("no compatible GPU adapter found");

    let downlevel_caps = adapter.get_downlevel_capabilities();
    if !downlevel_caps.flags.contains(wgpu::DownlevelFlags::COMPUTE_SHADERS) {
        panic!("GPU does not support compute");
    }

    let (gpu, queue) = pollster::block_on(adapter.request_device(&wgpu::DeviceDescriptor {
        label: None,
        required_features: wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
        required_limits: wgpu::Limits::default(),
        memory_hints: wgpu::MemoryHints::MemoryUsage,
    }, None)).expect("failed to create GPU device");

    // Process images
    for path in &args.paths {
        let image = rawloader::decode_file(path)
            .unwrap_or_else(|err| panic!("failed to decode {}: {err:?}", path.display()));
        dbg!(&image.cfa);

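        // Build the processing pipeline; demosaicing runs on the GPU using the LMMSE algorithm.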
        let pipeline = pipeline::Resources {
            demosaic: Box::new(demosaic::Lmmse::new(&gpu, &queue))
        };
        let demosaiced = pipeline.demosaic.demosaic(&gpu, &queue, &image);

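        // Read the demosaiced texture back to the CPU: 4 channels of f32, i.e. 16 bytes per pixel.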
        let readback_buf = gpu.create_buffer(&wgpu::BufferDescriptor {
            label: None,
            size: 4 * 4 * image.width as u64 * image.height as u64,
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let mut encoder = gpu.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            label: None,
        });
        encoder.copy_texture_to_buffer(
            wgpu::TexelCopyTextureInfo {
                texture: &demosaiced,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            wgpu::TexelCopyBufferInfo {
                buffer: &readback_buf,
                layout: wgpu::TexelCopyBufferLayout {
                    offset: 0,
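                    // wgpu requires bytes_per_row to be a multiple of COPY_BYTES_PER_ROW_ALIGNMENT
                    // (256 bytes), so this copy assumes the image width is a multiple of 16 pixels.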
                    bytes_per_row: Some(4 * 4 * image.width as u32),
                    rows_per_image: Some(image.height as u32),
                },
            },
            wgpu::Extent3d {
                width: image.width as u32,
                height: image.height as u32,
                depth_or_array_layers: 1,
            }
        );
        queue.submit([encoder.finish()]);

        // Map the readback buffer; poll(Maintain::Wait) blocks until the map_async callback has run.
        let readback_slice = readback_buf.slice(..);
        readback_slice.map_async(wgpu::MapMode::Read, |result| result.unwrap());
        gpu.poll(wgpu::Maintain::Wait);
        {
            let readback_data = readback_slice.get_mapped_range();
            let result_image = image::ImageBuffer::<image::Rgba<f32>, _>::from_raw(
                image.width as u32,
                image.height as u32,
                bytemuck::cast_slice(&readback_data),
            )
            .unwrap();
            let rgb16: image::ImageBuffer<image::Rgb<u16>, Vec<u16>> = result_image.convert();
            rgb16.save_with_format("out.png", image::ImageFormat::Png).unwrap();
        }
        readback_buf.unmap();
    }
}