Initial commit

This commit is contained in:
Dawid Pietrykowski 2025-03-30 01:10:53 +01:00
commit 71c190bbc1
12 changed files with 6090 additions and 0 deletions

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/target
test_images

4283
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

33
Cargo.toml Normal file
View File

@ -0,0 +1,33 @@
[package]
name = "imflow"
version = "0.1.0"
edition = "2024"
[dependencies]
egui = "0.31.1"
egui-wgpu = { version = "0.31.1",features = ["winit"] }
egui-winit = "0.31.1"
winit = "0.30.9"
pollster = "0.4.0"
clap = { version = "4.5.34", features = ["derive"] }
image = "0.25.6"
zune-image = {version = "0.4.15", features = ["all"]}
libheif-rs = "1.1.0"
jpegxl-rs = "0.11.2"
itertools = "0.12"
rexiv2 = "0.10.0"
threadpool = "1.8.1"
bytemuck = "1.22.0"
[profile.release]
opt-level = 3
[dev-dependencies]
criterion = "0.3"
[[bench]]
name = "image_load"
harness = false

21
LICENSE Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Dawid Pietrykowski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

294
benches/image_load.rs Normal file
View File

@ -0,0 +1,294 @@
#![allow(unused)]
use std::any::Any;
use std::fs::{File, read};
use std::io::{BufReader, Cursor};
use std::iter;
use std::ops::Deref;
use std::path::PathBuf;
use std::time::Duration;
use criterion::{AxisScale, BenchmarkId, PlotConfiguration};
use criterion::{Criterion, black_box, criterion_group, criterion_main};
use image::codecs::jpeg::JpegDecoder;
use image::metadata::Orientation;
use image::{DynamicImage, ImageResult, RgbaImage};
use imflow::image::{
ImflowImageBuffer, get_orientation, get_rating, image_to_rgba_buffer, load_available_images,
load_image, load_thumbnail_exif, load_thumbnail_full,
};
use jpegxl_rs::Endianness;
use jpegxl_rs::decode::{Data, PixelFormat, Pixels};
use jpegxl_rs::decoder_builder;
use zune_image::codecs::jpeg::JpegDecoder as ZuneJpegDecoder;
use zune_image::codecs::qoi::zune_core::colorspace::ColorSpace;
use zune_image::codecs::qoi::zune_core::options::DecoderOptions;
/// Directory scanned for benchmark input images.
const PATH: &str = "test_images";
/// Create a new decoder that decodes from the stream ```r```
// pub fn new(r: R) -> ImageResult<JpegDecoder<R>> {
// let mut input = Vec::new();
// let mut r = r;
// r.read_to_end(&mut input)?;
// let options = DecoderOptions::default()
// .set_strict_mode(false)
// .set_max_width(usize::MAX)
// .set_max_height(usize::MAX);
// let mut decoder = ZuneJpegDecoder::new_with_options(input.as_slice(), options);
// decoder.decode_headers().map_err(ImageError::from_jpeg)?;
// // now that we've decoded the headers we can `.unwrap()`
// // all these functions that only fail if called before decoding the headers
// let (width, height) = decoder.dimensions().unwrap();
// // JPEG can only express dimensions up to 65535x65535, so this conversion cannot fail
// let width: u16 = width.try_into().unwrap();
// let height: u16 = height.try_into().unwrap();
// let orig_color_space = decoder.get_output_colorspace().unwrap();
// // Limits are disabled by default in the constructor for all decoders
// let limits = Limits::no_limits();
// Ok(JpegDecoder {
// input,
// orig_color_space,
// width,
// height,
// limits,
// orientation: None,
// phantom: PhantomData,
// })
// }
// pub fn full_load_benchmark(c: &mut Criterion) {
// let mut group = c.benchmark_group("image_decode");
// group
// .sample_size(10)
// .measurement_time(Duration::from_millis(500))
// .warm_up_time(Duration::from_millis(200));
// let images = load_available_images(PATH.into());
// for image in images.iter() {
// let image_name = image.to_str().unwrap();
// group.bench_with_input(format!("{}/zune", image_name), image, |b, image| {
// b.iter(|| load_image_argb(image.clone().into()));
// });
// group.bench_with_input(format!("{}/image-rs", image_name), image, |b, image| {
// b.iter(|| load_image_argb_imagers(image.clone().into()));
// });
// }
// group.finish();
// }
/// Decode a JPEG with zune-jpeg straight into an RGBA `ImflowImageBuffer`.
///
/// Panics if the file cannot be read or is not a decodable JPEG (acceptable
/// for a benchmark harness).
fn load_a(path: &PathBuf) -> ImflowImageBuffer {
    let file = read(path.clone()).unwrap();
    let mut decoder = ZuneJpegDecoder::new(&file);
    let options = DecoderOptions::new_fast().jpeg_set_out_colorspace(ColorSpace::RGBA);
    decoder.set_options(options);
    decoder.decode_headers().unwrap();
    let info = decoder.info().unwrap();
    let width = info.width as usize;
    let height = info.height as usize;
    let mut buffer: Vec<u8> = vec![0; width * height * 4];
    decoder.decode_into(buffer.as_mut_slice()).unwrap();
    // Pack the RGBA bytes into u32 pixels. The previous in-place
    // `Vec::from_raw_parts` reinterpretation was undefined behaviour: the
    // allocation was created for `u8` (align 1) but would be deallocated with
    // a `u32` (align 4) layout, and the element capacity did not necessarily
    // match the original allocation.
    let buffer_u32: Vec<u32> = buffer
        .chunks_exact(4)
        .map(|px| u32::from_ne_bytes([px[0], px[1], px[2], px[3]]))
        .collect();
    let rating = get_rating(path);
    ImflowImageBuffer {
        width,
        height,
        rgba_buffer: buffer_u32,
        rating,
    }
}
/// Decode a JPEG with zune-jpeg, apply the EXIF orientation via image-rs,
/// and return an RGBA `ImflowImageBuffer`.
///
/// Panics on unreadable files or undecodable JPEG data (benchmark harness).
fn load_b(path: &PathBuf) -> ImflowImageBuffer {
    let file = read(path.clone()).unwrap();
    let mut decoder = ZuneJpegDecoder::new(&file);
    let options = DecoderOptions::new_fast().jpeg_set_out_colorspace(ColorSpace::RGBA);
    decoder.set_options(options);
    decoder.decode_headers().unwrap();
    let info = decoder.info().unwrap();
    let width = info.width as usize;
    let height = info.height as usize;
    let mut buffer: Vec<u8> = vec![0; width * height * 4];
    decoder.decode_into(buffer.as_mut_slice()).unwrap();
    let image = RgbaImage::from_raw(width as u32, height as u32, buffer).unwrap();
    let orientation = Orientation::from_exif(get_orientation(path)).unwrap();
    let mut dynamic_image = DynamicImage::from(image);
    dynamic_image.apply_orientation(orientation);
    // Take the dimensions AFTER applying the orientation: a 90/270-degree
    // rotation swaps width and height, which the previous code ignored.
    let width = dynamic_image.width() as usize;
    let height = dynamic_image.height() as usize;
    let rating = get_rating(path);
    // `into_raw()` hands back the backing Vec<u8>; convert to u32 pixels
    // safely instead of the old `Vec::from_raw_parts` reinterpretation, which
    // was UB (u8 allocation freed with u32 alignment, capacity set to len/4
    // regardless of the real allocation size).
    let bytes = dynamic_image.to_rgba8().into_raw();
    let buffer_u32: Vec<u32> = bytes
        .chunks_exact(4)
        .map(|px| u32::from_ne_bytes([px[0], px[1], px[2], px[3]]))
        .collect();
    ImflowImageBuffer {
        width,
        height,
        rgba_buffer: buffer_u32,
        rating,
    }
}
/// Decode a JPEG XL file on the current thread (no parallel runner).
///
/// Baseline for the single- vs multi-threaded comparison in
/// `jxl_multithreading_benchmark`.
fn load_jxl_single(path: &PathBuf) -> (jpegxl_rs::decode::Metadata, Vec<u8>) {
    let file = read(path).unwrap();
    // The original constructed a `ThreadsRunner` here but never attached it;
    // the unused value has been removed.
    let decoder = decoder_builder()
        .pixel_format(PixelFormat {
            num_channels: 4,
            endianness: Endianness::Big,
            align: 8,
        })
        .build()
        .unwrap();
    decoder.decode_with::<u8>(&file).unwrap()
}
/// Decode a JPEG XL file using jpegxl's multithreaded parallel runner.
fn load_jxl_multi(path: &PathBuf) -> (jpegxl_rs::decode::Metadata, Vec<u8>) {
    use jpegxl_rs::ThreadsRunner;
    let bytes = read(path).unwrap();
    let runner = ThreadsRunner::default();
    let format = PixelFormat {
        num_channels: 4,
        endianness: Endianness::Big,
        align: 8,
    };
    let decoder = decoder_builder()
        .parallel_runner(&runner)
        .pixel_format(format)
        .build()
        .unwrap();
    decoder.decode_with::<u8>(&bytes).unwrap()
}
// fn load_b(path: &PathBuf) -> ImflowImageBuffer {
// println!("path: {:?}", path);
// // let file = read(path.clone()).unwrap();
// let file = BufReader::new(File::open(path).unwrap());
// let decoder = image::ImageReader::new(file).unwrap();
// let options = DecoderOptions::new_fast().jpeg_set_out_colorspace(ColorSpace::RGBA);
// decoder.set_options(options);
// let image = reader
// .with_guessed_format()
// .unwrap()
// .decode()
// .unwrap();
// let width = image.width() as usize;
// let height = image.height() as usize;
// // let buffer = image_to_rgba_buffer(image);
// let im = RgbaImage::from_raw(width, height, image.as_rgba8()).unwrap();
// let rating = get_rating(path.into());
// ImflowImageBuffer {
// width,
// height,
// rgba_buffer: buffer,
// rating,
// }
// }
pub fn thumbnail_load_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("thumbnail");
group
.sample_size(10)
.measurement_time(Duration::from_millis(500))
.warm_up_time(Duration::from_millis(200));
let images = load_available_images(PATH.into());
group.bench_function("exif", |b| {
for image in images.iter().take(10) {
b.iter(|| load_thumbnail_exif(image));
}
});
group.bench_function("full", |b| {
for image in images.iter().take(10) {
b.iter(|| load_thumbnail_full(image));
}
});
group.finish();
}
pub fn file_load_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("image_load");
group
.sample_size(10)
.measurement_time(Duration::from_millis(500))
.warm_up_time(Duration::from_millis(200));
let images = load_available_images(PATH.into());
group.bench_function("zune_jpeg", |b| {
for image in images.iter().take(10) {
b.iter(|| load_a(image));
}
});
group.bench_function("image_rs", |b| {
for image in images.iter().take(10) {
b.iter(|| load_b(image));
}
});
group.finish();
}
pub fn jxl_multithreading_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("jxl_multithreading");
group
.sample_size(10)
.measurement_time(Duration::from_millis(500))
.warm_up_time(Duration::from_millis(200));
let images = load_available_images("./test_images/jxl".into());
group.bench_function("single", |b| {
for image in images.iter().take(10) {
b.iter(|| load_jxl_single(image));
}
});
group.bench_function("multi", |b| {
for image in images.iter().take(10) {
b.iter(|| load_jxl_multi(image));
}
});
group.finish();
}
// Only the JXL multithreading comparison is registered; swap in one of the
// commented `criterion_group!` lines to run the other suites.
// criterion_group!(benches, thumbnail_load_benchmark);
// criterion_group!(benches, file_load_benchmark);
criterion_group!(benches, jxl_multithreading_benchmark);
criterion_main!(benches);

737
src/app.rs Normal file
View File

@ -0,0 +1,737 @@
use crate::egui_tools::EguiRenderer;
use egui::{Event, Key, PointerButton};
use egui_wgpu::wgpu::SurfaceError;
use egui_wgpu::{ScreenDescriptor, wgpu};
use imflow::store::ImageStore;
use std::path::PathBuf;
use std::process::exit;
use std::sync::Arc;
use wgpu::util::DeviceExt;
use wgpu::{PipelineCompilationOptions, SurfaceConfiguration};
use winit::application::ApplicationHandler;
use winit::dpi::{LogicalSize, PhysicalSize};
use winit::event::WindowEvent;
use winit::event_loop::ActiveEventLoop;
use winit::platform::x11::WindowAttributesExtX11;
use winit::window::{Window, WindowId};
/// GPU uniform block holding the image transform; uploaded via
/// `transform_buffer` and read by `shader.wgsl`. `#[repr(C)]` plus the
/// trailing padding keep the layout stable for `bytemuck::cast_slice`.
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Transforms {
    transform: [f32; 16], // 4x4 matrix (see `create_transform_matrix`)
    width: u32,           // current image width in pixels
    height: u32,          // current image height in pixels
    _padding1: u32,       // pad the struct to a 16-byte multiple
    _padding2: u32,
}
/// CPU-side pan/zoom state plus the current image dimensions; converted into
/// a `Transforms` uniform on every `update_transform` call.
pub(crate) struct TransformData {
    pan_x: f32, // horizontal pan offset in clip-space units
    pan_y: f32, // vertical pan offset in clip-space units
    zoom: f32,  // user zoom level, clamped to [1, 20] in `pan_zoom`
    width: u32,
    height: u32,
}
/// Build the flat 4x4 transform for the given pan/zoom state, pre-multiplied
/// by the aspect-ratio correction factors `scale_x` / `scale_y`.
fn create_transform_matrix(data: &TransformData, scale_x: f32, scale_y: f32) -> [f32; 16] {
    const ZOOM_MULTIPLIER: f32 = 3.0;
    // Cubic response curve: small zoom steps feel linear, large ones accelerate.
    let zoom = data.zoom.powf(ZOOM_MULTIPLIER);
    let mut matrix = [0.0_f32; 16];
    matrix[0] = zoom * scale_x; // x scale
    matrix[5] = zoom * scale_y; // y scale
    matrix[10] = 1.0;           // z passthrough
    matrix[12] = data.pan_x;    // x translation
    matrix[13] = data.pan_y;    // y translation
    matrix[15] = 1.0;           // homogeneous w
    matrix
}
/// Create the image texture, its bind group, the fullscreen-quad render
/// pipeline, and the transform uniform buffer.
///
/// Returns `(texture, bind_group, render_pipeline, transform_buffer)`.
/// `width`/`height` size the texture once up front; images are later copied
/// into it with `Queue::write_texture` (see `App::update_texture`).
fn setup_texture(
    device: &wgpu::Device,
    surface_config: SurfaceConfiguration,
    width: u32,
    height: u32,
) -> (
    wgpu::Texture,
    wgpu::BindGroup,
    wgpu::RenderPipeline,
    wgpu::Buffer,
) {
    // RGBA8 sRGB sampled texture, written from the CPU (COPY_DST).
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Image texture"),
        size: wgpu::Extent3d {
            width,
            height,
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb,
        usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
        view_formats: &[],
    });
    let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default());
    // Linear filtering with clamped edges for smooth zooming.
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        address_mode_u: wgpu::AddressMode::ClampToEdge,
        address_mode_v: wgpu::AddressMode::ClampToEdge,
        address_mode_w: wgpu::AddressMode::ClampToEdge,
        mag_filter: wgpu::FilterMode::Linear,
        min_filter: wgpu::FilterMode::Linear,
        mipmap_filter: wgpu::FilterMode::Linear,
        ..Default::default()
    });
    // Layout: binding 0 = texture, 1 = sampler, 2 = transform uniform.
    let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some("Texture Bind Group Layout"),
        entries: &[
            wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::FRAGMENT,
                ty: wgpu::BindingType::Texture {
                    sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    view_dimension: wgpu::TextureViewDimension::D2,
                    multisampled: false,
                },
                count: None,
            },
            wgpu::BindGroupLayoutEntry {
                binding: 1,
                visibility: wgpu::ShaderStages::FRAGMENT,
                ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                count: None,
            },
            wgpu::BindGroupLayoutEntry {
                binding: 2,
                // Visible to all stages: the vertex shader applies the matrix,
                // the fragment shader may read width/height.
                visibility: wgpu::ShaderStages::all(),
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            },
        ],
    });
    // Uniform buffer refreshed each frame via `Queue::write_buffer` (COPY_DST).
    let transform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Transform Uniform Buffer"),
        size: std::mem::size_of::<Transforms>() as u64,
        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });
    // Create bind group with your texture
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Texture Bind Group"),
        layout: &bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&texture_view),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(&sampler),
            },
            wgpu::BindGroupEntry {
                binding: 2,
                resource: transform_buffer.as_entire_binding(),
            },
        ],
    });
    // Vertex = 3 position floats + 2 UV floats, interleaved.
    let vertex_buffer_layout = wgpu::VertexBufferLayout {
        array_stride: 5 * std::mem::size_of::<f32>() as wgpu::BufferAddress,
        step_mode: wgpu::VertexStepMode::Vertex,
        attributes: &[
            // Position
            wgpu::VertexAttribute {
                offset: 0,
                shader_location: 0,
                format: wgpu::VertexFormat::Float32x3,
            },
            // UV
            wgpu::VertexAttribute {
                offset: 3 * std::mem::size_of::<f32>() as wgpu::BufferAddress,
                shader_location: 1,
                format: wgpu::VertexFormat::Float32x2,
            },
        ],
    };
    // Shader source is compiled into the binary at build time.
    let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: Some("Texture Shader"),
        source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(include_str!("shader.wgsl"))),
    });
    let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Texture Render Pipeline"),
        layout: Some(
            &device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Texture Pipeline Layout"),
                bind_group_layouts: &[&bind_group_layout],
                push_constant_ranges: &[],
            }),
        ),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: Some("vs_main"),
            buffers: &[vertex_buffer_layout],
            compilation_options: PipelineCompilationOptions::default(),
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: Some("fs_main"),
            targets: &[Some(wgpu::ColorTargetState {
                format: surface_config.format,
                // Opaque image quad: overwrite whatever is in the target.
                blend: Some(wgpu::BlendState::REPLACE),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            compilation_options: PipelineCompilationOptions::default(),
        }),
        primitive: wgpu::PrimitiveState {
            topology: wgpu::PrimitiveTopology::TriangleList,
            ..Default::default()
        },
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview: None,
        cache: None,
    });
    (texture, bind_group, render_pipeline, transform_buffer)
}
/// All GPU and application resources created once a window exists; built in
/// `AppState::new` and owned by `App::state`.
pub struct AppState {
    pub device: wgpu::Device,
    pub queue: wgpu::Queue,
    pub surface_config: wgpu::SurfaceConfiguration,
    pub surface: wgpu::Surface<'static>,
    // Extra UI scale applied on top of the window's scale factor.
    pub scale_factor: f32,
    pub egui_renderer: EguiRenderer,
    // Image loading/rating backend (see `imflow::store`).
    pub store: ImageStore,
    // Shared texture that every displayed image is uploaded into.
    pub image_texture: wgpu::Texture,
    pub bind_group: wgpu::BindGroup,
    pub render_pipeline: wgpu::RenderPipeline,
    // Uniform buffer backing the `Transforms` struct.
    pub transform_buffer: wgpu::Buffer,
    pub transform_data: TransformData,
}
impl AppState {
    /// Create the wgpu device/queue, configure the surface, and build the
    /// egui renderer, image store, and fullscreen-quad texture pipeline.
    async fn new(
        instance: &wgpu::Instance,
        surface: wgpu::Surface<'static>,
        window: &Window,
        width: u32,
        height: u32,
        path: PathBuf,
    ) -> Self {
        let power_pref = wgpu::PowerPreference::default();
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: power_pref,
                force_fallback_adapter: false,
                compatible_surface: Some(&surface),
            })
            .await
            .expect("Failed to find an appropriate adapter");
        let features = wgpu::Features::empty();
        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: None,
                    required_features: features,
                    required_limits: Default::default(),
                    memory_hints: Default::default(),
                },
                None,
            )
            .await
            .expect("Failed to create device");
        let swapchain_capabilities = surface.get_capabilities(&adapter);
        // Hard requirement on BGRA8 sRGB; panics on backends that don't offer it.
        let selected_format = wgpu::TextureFormat::Bgra8UnormSrgb;
        let swapchain_format = swapchain_capabilities
            .formats
            .iter()
            .find(|d| **d == selected_format)
            .expect("failed to select proper surface texture format!");
        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: *swapchain_format,
            width,
            height,
            present_mode: wgpu::PresentMode::AutoVsync,
            // NOTE(review): 0 is an unusual frame-latency request (wgpu docs
            // describe 1-3 as typical) — confirm this is intended.
            desired_maximum_frame_latency: 0,
            alpha_mode: swapchain_capabilities.alpha_modes[0],
            view_formats: vec![],
        };
        surface.configure(&device, &surface_config);
        let egui_renderer = EguiRenderer::new(&device, surface_config.format, None, 1, window);
        let scale_factor = 1.0;
        let store = ImageStore::new(path);
        // The texture is allocated once at 8192x8192 and reused; each image is
        // written into its top-left corner (see `App::update_texture`).
        let (image_texture, bind_group, render_pipeline, transform_buffer) =
            // setup_texture(&device, surface_config.clone(), 6000, 4000);
            setup_texture(&device, surface_config.clone(), 8192, 8192);
        // Placeholder dimensions; overwritten on the first texture upload.
        let transform_data = TransformData {
            pan_x: 0.0,
            pan_y: 0.0,
            zoom: 1.0,
            width: 10000,
            height: 10000,
        };
        Self {
            device,
            queue,
            surface,
            surface_config,
            egui_renderer,
            scale_factor,
            store,
            image_texture,
            bind_group,
            render_pipeline,
            transform_buffer,
            transform_data,
        }
    }
    /// Reconfigure the surface after a window resize.
    fn resize_surface(&mut self, width: u32, height: u32) {
        self.surface_config.width = width;
        self.surface_config.height = height;
        self.surface.configure(&self.device, &self.surface_config);
    }
}
pub struct App {
instance: wgpu::Instance,
state: Option<AppState>,
window: Option<Arc<Window>>,
path: PathBuf,
}
impl App {
pub fn new(path: PathBuf) -> Self {
let instance = egui_wgpu::wgpu::Instance::new(&wgpu::InstanceDescriptor::default());
Self {
instance,
state: None,
window: None,
path,
}
}
async fn set_window(&mut self, window: Window) {
let window = Arc::new(window);
let initial_height = 1200;
let initial_width = (initial_height as f32 * 1.5) as u32;
let _ = window.request_inner_size(PhysicalSize::new(initial_width, initial_height));
let surface = self
.instance
.create_surface(window.clone())
.expect("Failed to create surface!");
let state = AppState::new(
&self.instance,
surface,
&window,
initial_width,
initial_width,
self.path.clone(),
)
.await;
self.window.get_or_insert(window);
self.state.get_or_insert(state);
self.pan_zoom(0.0, 0.0, 0.0);
self.update_texture();
}
fn handle_resized(&mut self, width: u32, height: u32) {
if width > 0 && height > 0 {
self.state.as_mut().unwrap().resize_surface(width, height);
}
self.pan_zoom(0.0, 0.0, 0.0);
}
    /// Upload the current image (full-resolution if already decoded, else its
    /// thumbnail) into the shared GPU texture and refresh the transform.
    pub fn update_texture(&mut self) {
        let state = self.state.as_mut().unwrap();
        state.store.check_loaded_images();
        // Prefer the full image; fall back to the thumbnail while it loads.
        let imbuf = if let Some(full) = state.store.get_current_image() {
            full
        } else {
            state.store.get_thumbnail()
        };
        let width = imbuf.width as u32;
        let height = imbuf.height as u32;
        // View the u32 pixel buffer as raw bytes for the upload.
        // SAFETY: the slice borrows `imbuf.rgba_buffer`, which outlives this
        // call; a u32 -> u8 reinterpretation is always aligned and the length
        // covers exactly len * 4 bytes.
        let buffer_u8 = unsafe {
            std::slice::from_raw_parts(
                imbuf.rgba_buffer.as_ptr() as *const u8,
                imbuf.rgba_buffer.len() * 4,
            )
        };
        state.transform_data.width = width;
        state.transform_data.height = height;
        // Write into the top-left corner of the preallocated 8192x8192 texture.
        state.queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: &state.image_texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            &buffer_u8,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(4 * width), // 4 bytes per RGBA pixel
                rows_per_image: Some(height),
            },
            wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
        );
        // Re-send the transform so the new image dimensions take effect.
        self.pan_zoom(0.0, 0.0, 0.0);
    }
fn update_transform(&mut self) {
let state = self.state.as_mut().unwrap();
let image_aspect_ratio =
(state.transform_data.width as f32) / (state.transform_data.height as f32);
let window_size = self.window.as_ref().unwrap().inner_size();
let window_aspect_ratio = window_size.width as f32 / window_size.height as f32;
let mut scale_x = 1.0;
let mut scale_y = 1.0;
if window_aspect_ratio > image_aspect_ratio {
scale_x = image_aspect_ratio / window_aspect_ratio;
} else {
scale_y = window_aspect_ratio / image_aspect_ratio;
}
let transform = create_transform_matrix(&state.transform_data, scale_x, scale_y);
state.queue.write_buffer(
&state.transform_buffer,
0,
bytemuck::cast_slice(&[Transforms {
transform,
width: state.transform_data.width,
height: state.transform_data.height,
_padding1: 0,
_padding2: 0,
}]),
);
}
pub fn reset_transform(&mut self) {
let state = self.state.as_mut().unwrap();
state.transform_data.zoom = 1.0;
state.transform_data.pan_x = 0.0;
state.transform_data.pan_y = 0.0;
self.update_transform();
}
pub fn pan_zoom(&mut self, zoom_delta: f32, pan_x: f32, pan_y: f32) {
let state = self.state.as_mut().unwrap();
state.transform_data.zoom = (state.transform_data.zoom + zoom_delta).clamp(1.0, 20.0);
state.transform_data.pan_x += pan_x;
state.transform_data.pan_y += pan_y;
self.update_transform();
}
    /// Render one frame: clear, draw the image quad, then draw the egui
    /// rating overlay, and present.
    fn handle_redraw(&mut self) {
        // Attempt to handle minimizing window
        if let Some(window) = self.window.as_ref() {
            if let Some(min) = window.is_minimized() {
                if min {
                    println!("Window is minimized");
                    return;
                }
            }
        }
        let state = self.state.as_mut().unwrap();
        let screen_descriptor = ScreenDescriptor {
            size_in_pixels: [state.surface_config.width, state.surface_config.height],
            pixels_per_point: self.window.as_ref().unwrap().scale_factor() as f32
                * state.scale_factor,
        };
        let surface_texture = state.surface.get_current_texture();
        let surface_texture = match surface_texture {
            Err(SurfaceError::Outdated) => {
                // Ignoring outdated to allow resizing and minimization
                println!("wgpu surface outdated");
                return;
            }
            Err(SurfaceError::Timeout) => {
                println!("wgpu surface timeout");
                return;
            }
            Err(_) => {
                // Any other surface error is fatal: this `expect` panics, so
                // the `return` below is never reached.
                surface_texture.expect("Failed to acquire next swap chain texture");
                return;
            }
            Ok(surface_texture) => surface_texture,
        };
        let surface_view = surface_texture
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = state
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        // Clear buffer with black
        {
            let _ = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &surface_view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color {
                            r: 0.0,
                            g: 0.0,
                            b: 0.0,
                            a: 1.0,
                        }),
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            });
        }
        // Second pass: draw the textured quad on top of the cleared frame.
        {
            #[repr(C)]
            #[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
            struct Vertex {
                position: [f32; 3],
                tex_coords: [f32; 2],
            }
            // Quad (two triangles)
            let vertices = [
                // Position (x, y, z), Texture coords (u, v)
                Vertex {
                    position: [-1.0, -1.0, 0.0],
                    tex_coords: [0.0, 1.0],
                }, // bottom left
                Vertex {
                    position: [-1.0, 1.0, 0.0],
                    tex_coords: [0.0, 0.0],
                }, // top left
                Vertex {
                    position: [1.0, -1.0, 0.0],
                    tex_coords: [1.0, 1.0],
                }, // bottom right
                Vertex {
                    position: [1.0, 1.0, 0.0],
                    tex_coords: [1.0, 0.0],
                }, // top right
            ];
            let indices: [u16; 6] = [0, 1, 2, 2, 1, 3];
            // NOTE(review): vertex/index buffers are re-created every frame for
            // a static quad; they could be built once at startup.
            let vertex_buffer =
                state
                    .device
                    .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                        label: Some("Vertex Buffer"),
                        contents: bytemuck::cast_slice(&vertices),
                        usage: wgpu::BufferUsages::VERTEX,
                    });
            let index_buffer = state
                .device
                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: Some("Index Buffer"),
                    contents: bytemuck::cast_slice(&indices),
                    usage: wgpu::BufferUsages::INDEX,
                });
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Texture Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &surface_view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        // Load (not clear): keep the black background pass.
                        load: wgpu::LoadOp::Load,
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            });
            render_pass.set_pipeline(&state.render_pipeline);
            render_pass.set_bind_group(0, &state.bind_group, &[]);
            // Bind the vertex buffer
            render_pass.set_vertex_buffer(0, vertex_buffer.slice(..));
            // Draw using the index buffer
            render_pass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint16);
            render_pass.draw_indexed(0..6, 0, 0..1);
        }
        let rating = state.store.get_current_rating();
        let path = state.store.current_image_path.clone();
        let filename = path.path.file_name().unwrap();
        let window = self.window.as_ref().unwrap();
        // Third pass: egui overlay showing the rating and filename.
        {
            state.egui_renderer.begin_frame(window);
            egui::Window::new("Rating")
                .collapsible(false)
                .resizable(false)
                .default_width(5.0)
                .show(state.egui_renderer.context(), |ui| {
                    ui.vertical_centered(|ui| {
                        ui.label(
                            egui::RichText::new(format!("{:.1}", rating))
                                .size(42.0)
                                .strong(),
                        );
                        ui.label(
                            egui::RichText::new(format!("{}", filename.to_str().unwrap()))
                                .size(10.0)
                                .strong(),
                        );
                    });
                });
            state.egui_renderer.end_frame_and_draw(
                &state.device,
                &state.queue,
                &mut encoder,
                window,
                &surface_view,
                screen_descriptor,
            );
        }
        state.queue.submit(Some(encoder.finish()));
        surface_texture.present();
    }
}
impl ApplicationHandler for App {
fn resumed(&mut self, event_loop: &ActiveEventLoop) {
let attributes = Window::default_attributes()
.with_base_size(LogicalSize::new(2000, 4000))
.with_resizable(true);
let window = event_loop.create_window(attributes).unwrap();
pollster::block_on(self.set_window(window));
}
    /// Main event dispatch: forward events to egui, then handle close/redraw/
    /// resize and translate keyboard/mouse input into navigation, rating, and
    /// pan/zoom actions.
    fn window_event(&mut self, event_loop: &ActiveEventLoop, _: WindowId, event: WindowEvent) {
        // let egui render to process the event first
        // NOTE(review): this unwraps `state`, so an event arriving before
        // `resumed` completes would panic — confirm winit's ordering guarantees.
        self.state
            .as_mut()
            .unwrap()
            .egui_renderer
            .handle_input(self.window.as_ref().unwrap(), &event);
        match event {
            WindowEvent::CloseRequested => {
                println!("The close button was pressed; stopping");
                event_loop.exit();
            }
            WindowEvent::RedrawRequested => {
                self.handle_redraw();
                // Pull the frame's input back out of egui and act on it.
                let (events, _keys_down, pointer) = self
                    .state
                    .as_ref()
                    .unwrap()
                    .egui_renderer
                    .context()
                    .input(|i| (i.events.clone(), i.keys_down.clone(), i.pointer.clone()));
                events.iter().for_each(|e| {
                    if let Event::Key { key, pressed, .. } = e {
                        // Act on key-down only; ignore releases.
                        if !*pressed {
                            return;
                        }
                        match *key {
                            // Left/Right: previous/next image.
                            Key::ArrowLeft => {
                                self.state.as_mut().unwrap().store.next_image(-1);
                                self.update_texture();
                            }
                            Key::ArrowRight => {
                                self.state.as_mut().unwrap().store.next_image(1);
                                self.update_texture();
                            }
                            // Up/Down: bump the current rating.
                            Key::ArrowUp => {
                                let rating =
                                    self.state.as_mut().unwrap().store.get_current_rating();
                                self.state.as_mut().unwrap().store.set_rating(rating + 1);
                            }
                            Key::ArrowDown => {
                                let rating =
                                    self.state.as_mut().unwrap().store.get_current_rating();
                                self.state.as_mut().unwrap().store.set_rating(rating - 1);
                            }
                            // Backtick/digits: set an absolute rating 0-5.
                            Key::Backtick => self.state.as_mut().unwrap().store.set_rating(0),
                            Key::Num0 => self.state.as_mut().unwrap().store.set_rating(0),
                            Key::Num1 => self.state.as_mut().unwrap().store.set_rating(1),
                            Key::Num2 => self.state.as_mut().unwrap().store.set_rating(2),
                            Key::Num3 => self.state.as_mut().unwrap().store.set_rating(3),
                            Key::Num4 => self.state.as_mut().unwrap().store.set_rating(4),
                            Key::Num5 => self.state.as_mut().unwrap().store.set_rating(5),
                            Key::Escape => exit(0),
                            _ => {}
                        }
                    } else if let Event::MouseWheel { delta, .. } = e {
                        // Scroll wheel zooms.
                        self.pan_zoom(delta.y * 0.2, 0.0, 0.0);
                    } else if let Event::PointerButton {
                        button, pressed, ..
                    } = e
                    {
                        // Right click resets pan/zoom.
                        if *pressed && *button == PointerButton::Secondary {
                            self.reset_transform();
                        }
                    }
                });
                // Left-drag pans (y inverted: screen-down is clip-space-up).
                if pointer.primary_down() && pointer.is_moving() {
                    self.pan_zoom(0.0, pointer.delta().x * 0.001, pointer.delta().y * -0.001);
                }
                // Continuous redraw loop.
                self.window.as_ref().unwrap().request_redraw();
            }
            WindowEvent::Resized(new_size) => {
                self.handle_resized(new_size.width, new_size.height);
            }
            _ => (),
        }
    }
}

118
src/egui_tools.rs Normal file
View File

@ -0,0 +1,118 @@
use egui::Context;
use egui_wgpu::wgpu::{CommandEncoder, Device, Queue, StoreOp, TextureFormat, TextureView};
use egui_wgpu::{Renderer, ScreenDescriptor, wgpu};
use egui_winit::State;
use winit::event::WindowEvent;
use winit::window::Window;
/// Thin wrapper tying egui's winit state and wgpu renderer together; drive it
/// with `begin_frame` ... `end_frame_and_draw` once per rendered frame.
pub struct EguiRenderer {
    state: State,       // egui <-> winit input/platform glue (owns the Context)
    renderer: Renderer, // egui <-> wgpu mesh/texture renderer
    frame_started: bool, // guards against draw-without-begin misuse
}
impl EguiRenderer {
    /// The egui context (owned by the winit state object).
    pub fn context(&self) -> &Context {
        self.state.egui_ctx()
    }
    /// Build the egui platform state and wgpu renderer for `window`.
    pub fn new(
        device: &Device,
        output_color_format: TextureFormat,
        output_depth_format: Option<TextureFormat>,
        msaa_samples: u32,
        window: &Window,
    ) -> EguiRenderer {
        let egui_context = Context::default();
        let egui_state = egui_winit::State::new(
            egui_context,
            egui::viewport::ViewportId::ROOT,
            &window,
            Some(window.scale_factor() as f32),
            None,
            Some(2 * 1024), // default dimension is 2048
        );
        let egui_renderer = Renderer::new(
            device,
            output_color_format,
            output_depth_format,
            msaa_samples,
            true,
        );
        EguiRenderer {
            state: egui_state,
            renderer: egui_renderer,
            frame_started: false,
        }
    }
    /// Forward a winit event to egui; the result (whether egui consumed it)
    /// is deliberately ignored.
    pub fn handle_input(&mut self, window: &Window, event: &WindowEvent) {
        let _ = self.state.on_window_event(window, event);
    }
    /// Set egui's pixels-per-point (UI scale).
    pub fn ppp(&mut self, v: f32) {
        self.context().set_pixels_per_point(v);
    }
    /// Start an egui frame; must be paired with `end_frame_and_draw`.
    pub fn begin_frame(&mut self, window: &Window) {
        let raw_input = self.state.take_egui_input(window);
        self.state.egui_ctx().begin_pass(raw_input);
        self.frame_started = true;
    }
    /// Finish the egui frame, tessellate its shapes, upload buffers/textures,
    /// and record a render pass drawing the UI into `window_surface_view`.
    pub fn end_frame_and_draw(
        &mut self,
        device: &Device,
        queue: &Queue,
        encoder: &mut CommandEncoder,
        window: &Window,
        window_surface_view: &TextureView,
        screen_descriptor: ScreenDescriptor,
    ) {
        if !self.frame_started {
            panic!("begin_frame must be called before end_frame_and_draw can be called!");
        }
        self.ppp(screen_descriptor.pixels_per_point);
        let full_output = self.state.egui_ctx().end_pass();
        self.state
            .handle_platform_output(window, full_output.platform_output);
        let tris = self
            .state
            .egui_ctx()
            .tessellate(full_output.shapes, self.state.egui_ctx().pixels_per_point());
        // Upload any textures egui created/changed this frame before drawing.
        for (id, image_delta) in &full_output.textures_delta.set {
            self.renderer
                .update_texture(device, queue, *id, image_delta);
        }
        self.renderer
            .update_buffers(device, queue, encoder, &tris, &screen_descriptor);
        let rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: window_surface_view,
                resolve_target: None,
                ops: egui_wgpu::wgpu::Operations {
                    // Load: composite the UI over whatever was already drawn.
                    load: egui_wgpu::wgpu::LoadOp::Load,
                    store: StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            label: Some("egui main render pass"),
            occlusion_query_set: None,
        });
        self.renderer
            .render(&mut rpass.forget_lifetime(), &tris, &screen_descriptor);
        // Free textures egui no longer needs (after the pass is recorded).
        for x in &full_output.textures_delta.free {
            self.renderer.free_texture(x)
        }
        self.frame_started = false;
    }
}

351
src/image.rs Normal file
View File

@ -0,0 +1,351 @@
use image::DynamicImage;
use image::RgbaImage;
use image::imageops::FilterType;
use image::metadata::Orientation;
use itertools::Itertools;
use jpegxl_rs::Endianness;
use jpegxl_rs::decode::PixelFormat;
use jpegxl_rs::decoder_builder;
use libheif_rs::{HeifContext, LibHeif, RgbChroma};
use rexiv2::Metadata;
use zune_image::codecs::jpeg::JpegDecoder;
use zune_image::codecs::qoi::zune_core::colorspace::ColorSpace;
use zune_image::codecs::qoi::zune_core::options::DecoderOptions;
use std::fs;
use std::fs::File;
use std::fs::read;
use std::io::BufReader;
use std::io::Cursor;
use std::mem;
use std::path::PathBuf;
use std::time::Instant;
/// Image container formats the viewer can decode (see `get_format` for the
/// extension mapping and `load_image` for the per-format decoders).
#[derive(Clone, Eq, Hash, PartialEq, PartialOrd)]
pub enum ImageFormat {
    Jpg,
    Jxl,
    Heif,
}
/// A discovered image file: its path plus the format inferred from the
/// extension.
#[derive(Clone, Eq, Hash, PartialEq, PartialOrd)]
pub struct ImageData {
    pub path: PathBuf,
    pub format: ImageFormat,
}
/// A fully decoded image ready for GPU upload.
pub struct ImflowImageBuffer {
    pub width: usize,
    pub height: usize,
    // One u32 per pixel, 4 bytes each (see the RGBA decode paths).
    pub rgba_buffer: Vec<u32>,
    // XMP rating read from the file's metadata (`Xmp.xmp.Rating`).
    pub rating: i32,
}
/// Read the XMP rating (`Xmp.xmp.Rating`) from the image's metadata.
///
/// Panics if the file's metadata cannot be read at all.
pub fn get_rating(image: &ImageData) -> i32 {
    let meta = Metadata::new_from_path(&image.path).unwrap_or_else(|e| panic!("{:?}", e));
    meta.get_tag_numeric("Xmp.xmp.Rating")
}
/// Returns the raw EXIF orientation value (1–8) stored in the file.
///
/// # Panics
/// Panics if the file's metadata cannot be read.
pub fn get_orientation(image: &ImageData) -> u8 {
    Metadata::new_from_path(&image.path)
        .map(|meta| meta.get_orientation() as u8)
        .unwrap_or_else(|e| panic!("{:?}", e))
}
/// Swaps `width` and `height` for orientations that transpose the image
/// (90°/270° rotations, with or without a horizontal flip).
fn swap_wh<T>(width: T, height: T, orientation: Orientation) -> (T, T) {
    match orientation {
        Orientation::Rotate90
        | Orientation::Rotate270
        | Orientation::Rotate90FlipH
        | Orientation::Rotate270FlipH => (height, width),
        _ => (width, height),
    }
}
/// Determines the image format from the file extension.
///
/// Returns `None` for directories, for files without an extension (the
/// previous implementation panicked on those via `extension().unwrap()`),
/// and for unsupported extensions.
fn get_format(path: &PathBuf) -> Option<ImageFormat> {
    if !path.is_file() {
        return None;
    }
    // `?` propagates both "no extension" and "non-UTF-8 extension" as None.
    let os_str = path.extension()?.to_ascii_lowercase();
    match os_str.to_str()? {
        "heic" | "heif" => Some(ImageFormat::Heif),
        "jpg" | "jpeg" => Some(ImageFormat::Jpg),
        "jxl" => Some(ImageFormat::Jxl),
        _ => None,
    }
}
/// Decodes the full-resolution image, dispatching on its detected format.
///
/// The returned buffer holds RGBA pixels packed one per `u32` in native
/// byte order.
pub fn load_image(image: &ImageData) -> ImflowImageBuffer {
    let total_start = Instant::now();
    match image.format {
        ImageFormat::Heif => {
            let img = load_heif(image, false);
            println!("Total HEIF loading time: {:?}", total_start.elapsed());
            img
        }
        ImageFormat::Jxl => {
            let rating = get_rating(image);
            let file = read(image.path.clone()).unwrap();
            use jpegxl_rs::ThreadsRunner;
            let runner = ThreadsRunner::default();
            let decoder = decoder_builder()
                .parallel_runner(&runner)
                .pixel_format(PixelFormat {
                    num_channels: 4,
                    endianness: Endianness::Big,
                    align: 8,
                })
                .build()
                .unwrap();
            let (metadata, buffer) = decoder.decode_with::<u8>(&file).unwrap();
            let width = metadata.width as usize;
            let height = metadata.height as usize;
            // Safe packing: the former `Vec::from_raw_parts` reinterpretation
            // of a `u8` allocation as `Vec<u32>` was undefined behavior
            // (alignment and allocator-layout mismatch on deallocation).
            let rgba_buffer = pack_rgba_bytes(&buffer);
            println!("Total JXL loading time: {:?}", total_start.elapsed());
            ImflowImageBuffer {
                width,
                height,
                rgba_buffer,
                rating,
            }
        }
        ImageFormat::Jpg => {
            let rating = get_rating(image);
            let options = DecoderOptions::new_fast().jpeg_set_out_colorspace(ColorSpace::RGBA);
            let file = read(image.path.clone()).unwrap();
            let mut decoder = JpegDecoder::new(&file);
            decoder.set_options(options);
            decoder.decode_headers().unwrap();
            let info = decoder.info().unwrap();
            let width = info.width as usize;
            let height = info.height as usize;
            let mut buffer: Vec<u8> = vec![0; width * height * 4];
            decoder.decode_into(buffer.as_mut_slice()).unwrap();

            // Apply the EXIF orientation to the decoded pixels.
            // TODO: Optimize rotation
            let orientation_start = Instant::now();
            let orientation =
                Orientation::from_exif(get_orientation(image)).unwrap_or(Orientation::NoTransforms);
            let image = RgbaImage::from_raw(width as u32, height as u32, buffer).unwrap();
            let mut dynamic_image = DynamicImage::from(image);
            dynamic_image.apply_orientation(orientation);
            // Rotations by 90/270 degrees exchange the output dimensions.
            let (width, height) = swap_wh(width, height, orientation);
            let orientation_time = orientation_start.elapsed();

            // Safe packing (see note in the JXL branch); consuming the image
            // avoids the borrow + `mem::forget` dance of the old code.
            let rgba_buffer = pack_rgba_bytes(&dynamic_image.into_rgba8().into_raw());
            println!("Orientation time: {:?}", orientation_time);
            println!("Total loading time: {:?}", total_start.elapsed());
            ImflowImageBuffer {
                width,
                height,
                rgba_buffer,
                rating,
            }
        }
    }
}

/// Packs interleaved RGBA bytes into native-endian `u32` pixels without
/// reinterpreting the allocation (see `Vec::from_raw_parts` safety docs).
fn pack_rgba_bytes(bytes: &[u8]) -> Vec<u32> {
    bytes
        .chunks_exact(4)
        .map(|px| u32::from_ne_bytes(px.try_into().unwrap()))
        .collect()
}
pub fn image_to_rgba_buffer(img: DynamicImage) -> Vec<u32> {
let flat = img.to_rgba8();
let mut buffer = flat.to_vec();
let vec = unsafe {
Vec::from_raw_parts(
buffer.as_mut_ptr() as *mut u32,
buffer.len() / 4,
buffer.len() / 4,
)
};
mem::forget(buffer);
vec
}
/// Lists the supported images in `dir`, sorted by path.
///
/// # Panics
/// Panics if the directory cannot be read or an entry cannot be inspected.
pub fn load_available_images(dir: PathBuf) -> Vec<ImageData> {
    fs::read_dir(dir)
        .unwrap()
        // `DirEntry::path` already returns an owned PathBuf; the former
        // `.to_path_buf()` was a redundant copy.
        .map(|entry| entry.unwrap().path())
        .sorted()
        // `Option::map` replaces the manual `if let Some / else None`.
        .filter_map(|path| get_format(&path).map(|format| ImageData { path, format }))
        .collect()
}
/// Returns the bytes of the first embedded preview/thumbnail, if any.
///
/// Metadata read failures and files without previews both yield `None`.
pub fn get_embedded_thumbnail(image: &ImageData) -> Option<Vec<u8>> {
    let meta = Metadata::new_from_path(&image.path).ok()?;
    let previews = meta.get_preview_images()?;
    // Only the first preview is relevant; the original `for` loop returned
    // on its first iteration anyway (clippy::never_loop).
    previews.into_iter().next().map(|p| p.get_data().unwrap())
}
/// Loads a small preview for the image: HEIF files get a scaled decode,
/// everything else first tries the embedded EXIF thumbnail and falls back
/// to decoding and downscaling the full file.
pub fn load_thumbnail(path: &ImageData) -> ImflowImageBuffer {
    if path.format == ImageFormat::Heif {
        return load_heif(path, true);
    }
    load_thumbnail_exif(path).unwrap_or_else(|| load_thumbnail_full(path))
}
/// Builds a thumbnail buffer from the image's embedded preview, or `None`
/// when the file carries no embedded thumbnail.
///
/// # Panics
/// Panics if the embedded preview bytes cannot be decoded.
pub fn load_thumbnail_exif(path: &ImageData) -> Option<ImflowImageBuffer> {
    let thumbnail = get_embedded_thumbnail(path)?;
    let image = image::ImageReader::new(Cursor::new(thumbnail))
        .with_guessed_format()
        .unwrap()
        .decode()
        .unwrap();
    let width: usize = image.width() as usize;
    let height: usize = image.height() as usize;
    // Safe packing: the former `Vec::from_raw_parts` reinterpretation of a
    // `u8` allocation as `Vec<u32>` was UB; this also drops the redundant
    // `to_vec()` copy of the raw bytes.
    let rgba_buffer = image
        .into_rgba8()
        .into_raw()
        .chunks_exact(4)
        .map(|px| u32::from_ne_bytes(px.try_into().unwrap()))
        .collect();
    // `path` is already `&ImageData`; the old `path.into()` was a no-op.
    let rating = get_rating(path);
    Some(ImflowImageBuffer {
        width,
        height,
        rgba_buffer,
        rating,
    })
}
/// Decodes the whole file and downscales it to fit 640x480 as a thumbnail.
///
/// # Panics
/// Panics if the file cannot be opened or decoded.
pub fn load_thumbnail_full(path: &ImageData) -> ImflowImageBuffer {
    // Borrow the path; the previous `path.path.clone()` copied the PathBuf.
    let file = BufReader::new(File::open(&path.path).unwrap());
    let image = image::ImageReader::new(file)
        .with_guessed_format()
        .unwrap()
        .decode()
        .unwrap()
        // Nearest-neighbour keeps thumbnail generation cheap.
        .resize(640, 480, FilterType::Nearest);
    let width = image.width() as usize;
    let height = image.height() as usize;
    let buffer = image_to_rgba_buffer(image);
    // `path` is already `&ImageData`; the old `path.into()` was a no-op.
    let rating = get_rating(path);
    ImflowImageBuffer {
        width,
        height,
        rgba_buffer: buffer,
        rating,
    }
}
/// Decodes a HEIF/HEIC file into an RGBA buffer; when `resize` is set the
/// image is scaled to 640x480 (thumbnail path).
///
/// # Panics
/// Panics if the file cannot be read or decoded, or if the decoder does not
/// produce the requested interleaved RGBA layout.
pub fn load_heif(path: &ImageData, resize: bool) -> ImflowImageBuffer {
    let lib_heif = LibHeif::new();
    let ctx = HeifContext::read_from_file(path.path.to_str().unwrap()).unwrap();
    let handle = ctx.primary_image_handle().unwrap();
    let mut image = lib_heif
        .decode(&handle, libheif_rs::ColorSpace::Rgb(RgbChroma::Rgba), None)
        .unwrap();
    assert_eq!(
        image.color_space(),
        Some(libheif_rs::ColorSpace::Rgb(RgbChroma::Rgba)),
    );
    if resize {
        image = image.scale(640, 480, None).unwrap();
        assert_eq!(image.width(), 640);
        assert_eq!(image.height(), 480);
    }
    let width = image.width() as usize;
    let height = image.height() as usize;
    let rating = get_rating(path);
    let planes = image.planes();
    let interleaved_plane = planes.interleaved.unwrap();
    assert!(!interleaved_plane.data.is_empty());
    assert!(interleaved_plane.stride > 0);
    // NOTE(review): this assumes the plane is tightly packed
    // (stride == width * 4); rows with padding would be misread — confirm
    // against libheif's plane layout guarantees.
    //
    // Safe packing: the former `slice::from_raw_parts(.. as *const u32, ..)`
    // on a byte buffer was UB whenever the data was not 4-byte aligned.
    let rgba_buffer = interleaved_plane
        .data
        .chunks_exact(4)
        .map(|px| u32::from_ne_bytes(px.try_into().unwrap()))
        .collect();
    ImflowImageBuffer {
        width,
        height,
        rgba_buffer,
        rating,
    }
}

2
src/lib.rs Normal file
View File

@ -0,0 +1,2 @@
/// Image discovery, decoding, and thumbnail loading.
pub mod image;
/// In-memory image cache with background preloading.
pub mod store;

32
src/main.rs Normal file
View File

@ -0,0 +1,32 @@
use clap::Parser;
use std::path::PathBuf;
mod app;
mod egui_tools;
use winit::event_loop::{ControlFlow, EventLoop};
/// Entry point: parses CLI arguments and runs the winit event loop on the
/// given image directory.
fn main() {
    let args = Args::parse();
    // `unwrap_or_else` defers building the default `PathBuf` until it is
    // actually needed (clippy::or_fun_call).
    let path = args.path.unwrap_or_else(|| "./test_images".into());
    #[cfg(not(target_arch = "wasm32"))]
    {
        pollster::block_on(run(path));
    }
}
/// Creates the event loop and drives the application until exit.
async fn run(path: PathBuf) {
    let event_loop = EventLoop::new().unwrap();
    // Poll continuously so background work is picked up without user input.
    event_loop.set_control_flow(ControlFlow::Poll);
    let mut app = app::App::new(path);
    event_loop.run_app(&mut app).expect("Failed to run app");
}
// Command-line arguments, parsed by clap.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // Optional directory of images to browse; main() falls back to
    // "./test_images". (Regular comment on purpose: a `///` doc comment here
    // would become the clap help text and change CLI output.)
    path: Option<PathBuf>,
}

36
src/shader.wgsl Normal file
View File

@ -0,0 +1,36 @@
// Per-draw uniforms: the vertex transform plus a width/height pair used by
// fs_main to rescale UVs. NOTE(review): width/height presumably describe the
// image extent within a possibly larger texture — confirm against the
// uniform-upload code on the app side.
struct Transforms {
    transform: mat4x4<f32>,
    width: u32,
    height: u32
};
@group(0) @binding(2) var<uniform> transforms: Transforms;
// Per-vertex attributes supplied by the vertex buffer.
struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) uv: vec2<f32>,
};
// Values interpolated from the vertex stage to the fragment stage.
struct VertexOutput {
    @builtin(position) position: vec4<f32>,
    @location(0) uv: vec2<f32>,
};
// Transforms each vertex into clip space and passes the UV through unchanged.
@vertex
fn vs_main(in: VertexInput) -> VertexOutput {
    var out: VertexOutput;
    out.position = transforms.transform * vec4<f32>(in.position, 1.0);
    out.uv = in.uv;
    return out;
}
@group(0) @binding(0) var texture: texture_2d<f32>;
@group(0) @binding(1) var texture_sampler: sampler;
// Samples the image. UVs are rescaled by (uniform size / texture size) so
// only part of the texture is sampled. NOTE(review): this assumes the image
// occupies the top-left region of the texture — confirm against the texture
// upload code.
@fragment
fn fs_main(@location(0) uv: vec2<f32>) -> @location(0) vec4<f32> {
    let texture_size = vec2<f32>(f32(transforms.width), f32(transforms.height));
    let out_dim = vec2<f32>(textureDimensions(texture));
    let scale = texture_size / out_dim;
    let pixel = uv * scale;
    return textureSample(texture, texture_sampler, pixel);
}

181
src/store.rs Normal file
View File

@ -0,0 +1,181 @@
use crate::image::{ImageData, load_thumbnail};
use crate::image::{ImflowImageBuffer, load_available_images, load_image};
use rexiv2::Metadata;
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::mpsc;
use std::time::Instant;
use threadpool::ThreadPool;
// How many images ahead of the current one to decode in the background.
const PRELOAD_NEXT_IMAGE_N: usize = 16;
/// Caches decoded images and thumbnails and coordinates background loading
/// via a thread pool and an mpsc channel.
pub struct ImageStore {
    // Index of the current image within `available_images`.
    pub(crate) current_image_id: usize,
    // Fully decoded images, keyed by image descriptor.
    pub(crate) loaded_images: HashMap<ImageData, ImflowImageBuffer>,
    // Decoded thumbnails, keyed by image descriptor.
    pub(crate) loaded_images_thumbnails: HashMap<ImageData, ImflowImageBuffer>,
    // All supported images found in the directory, sorted by path.
    pub(crate) available_images: Vec<ImageData>,
    // Descriptor of the image currently being displayed.
    pub current_image_path: ImageData,
    // Worker pool that executes background decodes.
    pub(crate) pool: ThreadPool,
    // Receiving side for finished background loads.
    pub(crate) loader_rx: mpsc::Receiver<(ImageData, ImflowImageBuffer)>,
    // Sender cloned into each worker task.
    pub(crate) loader_tx: mpsc::Sender<(ImageData, ImflowImageBuffer)>,
    // Images whose loads are in flight, to suppress duplicate requests.
    pub(crate) currently_loading: HashSet<ImageData>,
}
impl ImageStore {
    /// Builds a store for `path`: scans the directory, eagerly decodes every
    /// thumbnail, loads the first full image synchronously, and queues
    /// background loads for the next `PRELOAD_NEXT_IMAGE_N` images.
    pub fn new(path: PathBuf) -> Self {
        let available_images = load_available_images(path);
        let current_image_path = available_images[0].clone();
        let (loader_tx, loader_rx) = mpsc::channel();
        let pool = ThreadPool::new(32);

        // Decode all thumbnails up front so navigation is instant.
        let total_start = Instant::now();
        let to_load = available_images.len();
        let mut loaded_thumbnails: HashMap<ImageData, ImflowImageBuffer> = HashMap::new();
        for (loaded, image) in available_images.iter().enumerate() {
            let buf = load_thumbnail(image);
            loaded_thumbnails.insert(image.clone(), buf);
            println!("{}/{}", loaded + 1, to_load);
        }
        println!(
            "all thumbnails load time: {:?} for {}",
            total_start.elapsed(),
            loaded_thumbnails.len()
        );

        // Load the first image synchronously so there is something to show.
        // (No redundant `&path.clone()` — the descriptor is moved into the
        // map after the load.)
        let mut loaded_images: HashMap<ImageData, ImflowImageBuffer> = HashMap::new();
        let first = available_images[0].clone();
        let image = load_image(&first);
        loaded_images.insert(first, image);

        let mut state = Self {
            current_image_id: 0,
            loaded_images,
            available_images,
            current_image_path,
            pool,
            loader_rx,
            loader_tx,
            currently_loading: HashSet::new(),
            loaded_images_thumbnails: loaded_thumbnails,
        };
        state.preload_next_images(PRELOAD_NEXT_IMAGE_N);
        state
    }

    /// Writes `rating` to the current image's XMP metadata on disk and
    /// mirrors it into any cached buffers.
    ///
    /// # Panics
    /// Panics if the metadata cannot be read or written.
    pub fn set_rating(&mut self, rating: i32) {
        match Metadata::new_from_path(&self.current_image_path.path) {
            Ok(meta) => {
                meta.set_tag_numeric("Xmp.xmp.Rating", rating).unwrap();
                meta.save_to_file(&self.current_image_path.path).unwrap();
            }
            Err(e) => panic!("{:?}", e),
        }
        // Keep cached buffers consistent with the metadata on disk. The key
        // is borrowed directly; the former `&key.clone()` allocated a copy
        // just to borrow it (clippy::redundant_clone).
        if let Some(full) = self.loaded_images.get_mut(&self.current_image_path) {
            full.rating = rating;
        }
        if let Some(thumbnail) = self
            .loaded_images_thumbnails
            .get_mut(&self.current_image_path)
        {
            thumbnail.rating = rating;
        }
    }

    /// Rating of the current image, read from the full image if loaded,
    /// otherwise from its thumbnail.
    pub fn get_current_rating(&self) -> i32 {
        match self.get_current_image() {
            Some(full) => full.rating,
            // TODO: this assumes the thumbnail has already been loaded.
            None => self
                .loaded_images_thumbnails
                .get(&self.current_image_path)
                .unwrap()
                .rating,
        }
    }

    /// Queues background loads for the next `n` images starting at the
    /// current position. Only the requested window is cloned — the previous
    /// code cloned the entire `available_images` vector.
    pub fn preload_next_images(&mut self, n: usize) {
        let to_preload: Vec<ImageData> = self
            .available_images
            .iter()
            .skip(self.current_image_id)
            .take(n)
            .cloned()
            .collect();
        for image in to_preload {
            self.request_load(image);
        }
    }

    /// Submits a background decode for `path` unless it is already loaded
    /// or already in flight.
    pub fn request_load(&mut self, path: ImageData) {
        if self.loaded_images.contains_key(&path) || self.currently_loading.contains(&path) {
            return;
        }
        self.currently_loading.insert(path.clone());
        let tx = self.loader_tx.clone();
        self.pool.execute(move || {
            let image = load_image(&path);
            // The receiver may be gone during shutdown; ignore send errors.
            let _ = tx.send((path, image));
        });
    }

    /// Drains the loader channel, moving finished decodes into the cache.
    pub fn check_loaded_images(&mut self) {
        while let Ok((path, image)) = self.loader_rx.try_recv() {
            self.loaded_images.insert(path.clone(), image);
            self.currently_loading.remove(&path);
        }
    }

    /// Moves the cursor by `change` (may be negative), clamped to the valid
    /// range, and schedules loads for the new neighborhood.
    pub fn next_image(&mut self, change: i32) {
        let last = self.available_images.len() as i32 - 1;
        self.current_image_id = (self.current_image_id as i32 + change).clamp(0, last) as usize;
        let new_path = self.available_images[self.current_image_id].clone();
        if !self.loaded_images.contains_key(&new_path) {
            self.request_load(new_path.clone());
        }
        self.current_image_path = new_path;
        self.preload_next_images(PRELOAD_NEXT_IMAGE_N);
    }

    /// Full-size buffer for the current image, if it has finished loading.
    pub fn get_current_image(&self) -> Option<&ImflowImageBuffer> {
        self.loaded_images.get(&self.current_image_path)
    }

    /// Full-size buffer for an arbitrary image, if loaded.
    pub fn get_image(&self, path: &ImageData) -> Option<&ImflowImageBuffer> {
        self.loaded_images.get(path)
    }

    /// Thumbnail for the current image, decoding it on demand and caching
    /// the result. (Single insert-then-borrow instead of the previous
    /// contains/get/get triple lookup.)
    pub fn get_thumbnail(&mut self) -> &ImflowImageBuffer {
        if !self
            .loaded_images_thumbnails
            .contains_key(&self.current_image_path)
        {
            let buf = load_thumbnail(&self.current_image_path);
            self.loaded_images_thumbnails
                .insert(self.current_image_path.clone(), buf);
        }
        self.loaded_images_thumbnails
            .get(&self.current_image_path)
            .unwrap()
    }
}