Bump MSRV to 1.80 #91

Merged · 1 commit · Jan 11, 2025

8 changes: 4 additions & 4 deletions .github/workflows/rust.yml
```diff
@@ -14,14 +14,14 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        rust: ["1.67.1", nightly, beta, stable]
+        rust: ["1.80.1", nightly, beta, stable]
     steps:
       - uses: actions/checkout@v4
 
       - uses: dtolnay/rust-toolchain@nightly
-        if: ${{ matrix.rust == '1.67.1' }}
+        if: ${{ matrix.rust == '1.80.1' }}
       - name: Generate Cargo.lock with minimal-version dependencies
-        if: ${{ matrix.rust == '1.67.1' }}
+        if: ${{ matrix.rust == '1.80.1' }}
         run: cargo -Zminimal-versions generate-lockfile
 
       - uses: dtolnay/rust-toolchain@v1
@@ -34,7 +34,7 @@ jobs:
       - name: build
         run: cargo build -v
       - name: test
-        if: ${{ matrix.rust != '1.67.1' }}
+        if: ${{ matrix.rust != '1.80.1' }}
         run: cargo test -v && cargo doc -v
       - name: bench
         if: ${{ matrix.rust == 'nightly' }}
```

2 changes: 1 addition & 1 deletion Cargo.toml
```diff
@@ -3,7 +3,7 @@ name = "image-webp"
 version = "0.2.0"
 edition = "2021"
 license = "MIT OR Apache-2.0"
-rust-version = "1.67.1"
+rust-version = "1.80.1"
 
 description = "WebP encoding and decoding in pure Rust"
 homepage = "https://github.com/image-rs/image-webp"
```

27 changes: 10 additions & 17 deletions src/decoder.rs
```diff
@@ -2,7 +2,7 @@ use byteorder_lite::{LittleEndian, ReadBytesExt};
 use quick_error::quick_error;
 
 use std::collections::HashMap;
-use std::io::{self, BufRead, BufReader, Cursor, Read, Seek};
+use std::io::{self, BufRead, Cursor, Read, Seek};
 use std::num::NonZeroU16;
 use std::ops::Range;
 
@@ -385,15 +385,8 @@ impl<R: BufRead + Seek> WebPDecoder<R> {
         let max_position = position + riff_size.saturating_sub(12);
         self.r.seek(io::SeekFrom::Start(position))?;
 
-        // Resist denial of service attacks by using a BufReader. In most images there
-        // should be a very small number of chunks. However, nothing prevents a malicious
-        // image from having an extremely large number of "unknown" chunks. Issuing
-        // millions of reads and seeks against the underlying reader might be very
-        // expensive.
-        let mut reader = BufReader::with_capacity(64 << 10, &mut self.r);
-
         while position < max_position {
-            match read_chunk_header(&mut reader) {
+            match read_chunk_header(&mut self.r) {
                 Ok((chunk, chunk_size, chunk_size_rounded)) => {
                     let range = position + 8..position + 8 + chunk_size;
                     position += 8 + chunk_size_rounded;
@@ -408,8 +401,8 @@ impl<R: BufRead + Seek> WebPDecoder<R> {
                             return Err(DecodingError::InvalidChunkSize);
                         }
 
-                        self.r.seek_relative(12)?;
-                        let duration = reader.read_u32::<LittleEndian>()? & 0xffffff;
+                        self.r.seek_relative(12)?;
+                        let duration = self.r.read_u32::<LittleEndian>()? & 0xffffff;
                         self.loop_duration =
                             self.loop_duration.wrapping_add(u64::from(duration));
 
@@ -419,19 +412,19 @@ impl<R: BufRead + Seek> WebPDecoder<R> {
                         // and the spec says that lossless images SHOULD NOT contain ALPH
                         // chunks, so we treat both as indicators of lossy images.
                         if !self.is_lossy {
-                            let (subchunk, ..) = read_chunk_header(&mut reader)?;
+                            let (subchunk, ..) = read_chunk_header(&mut self.r)?;
                             if let WebPRiffChunk::VP8 | WebPRiffChunk::ALPH = subchunk {
                                 self.is_lossy = true;
                             }
-                            reader.seek_relative(chunk_size_rounded as i64 - 24)?;
+                            self.r.seek_relative(chunk_size_rounded as i64 - 24)?;
                         } else {
-                            reader.seek_relative(chunk_size_rounded as i64 - 16)?;
+                            self.r.seek_relative(chunk_size_rounded as i64 - 16)?;
                         }
 
                         continue;
                     }
 
-                    reader.seek_relative(chunk_size_rounded as i64)?;
+                    self.r.seek_relative(chunk_size_rounded as i64)?;
                 }
                 Err(DecodingError::IoError(e))
                     if e.kind() == io::ErrorKind::UnexpectedEof =>
@@ -885,13 +878,13 @@ pub(crate) fn range_reader<R: BufRead + Seek>(
     Ok(r.take(range.end - range.start))
 }
 
-pub(crate) fn read_fourcc<R: Read>(mut r: R) -> Result<WebPRiffChunk, DecodingError> {
+pub(crate) fn read_fourcc<R: BufRead>(mut r: R) -> Result<WebPRiffChunk, DecodingError> {
     let mut chunk_fourcc = [0; 4];
     r.read_exact(&mut chunk_fourcc)?;
    Ok(WebPRiffChunk::from_fourcc(chunk_fourcc))
 }
 
-pub(crate) fn read_chunk_header<R: Read>(
+pub(crate) fn read_chunk_header<R: BufRead>(
     mut r: R,
 ) -> Result<(WebPRiffChunk, u64, u64), DecodingError> {
     let chunk = read_fourcc(&mut r)?;
```

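For context on the decoder changes: the internal `BufReader` can go away because the decoder is already generic over `R: BufRead + Seek`, so the caller's reader is buffered, and `Seek::seek_relative` became a stable trait method in Rust 1.80, letting the relative seeks go straight to `self.r` without discarding that buffer. The many small reads and seeks the deleted comment worried about therefore still land in the caller's buffer. Below is a standalone sketch of the same pattern; it is my own illustration, not the crate's API, and `skip_chunk` is a hypothetical helper.

```rust
use std::io::{BufRead, Cursor, Read, Seek};

// Hypothetical helper with the same bounds the decoder uses: read a RIFF-style
// chunk header, then skip the payload with Seek::seek_relative (stable in 1.80).
fn skip_chunk<R: BufRead + Seek>(mut r: R) -> std::io::Result<[u8; 4]> {
    let mut fourcc = [0u8; 4];
    r.read_exact(&mut fourcc)?;
    let mut size = [0u8; 4];
    r.read_exact(&mut size)?;
    let size = i64::from(u32::from_le_bytes(size));
    // Skip the payload, rounded up to an even length as RIFF requires.
    r.seek_relative(size + (size & 1))?;
    Ok(fourcc)
}

fn main() -> std::io::Result<()> {
    // "ABCD" chunk with a 2-byte payload, followed by the next fourcc.
    let data = b"ABCD\x02\x00\x00\x00xxEFGH";
    let mut cur = Cursor::new(&data[..]);
    assert_eq!(&skip_chunk(&mut cur)?, b"ABCD");
    let mut next = [0u8; 4];
    cur.read_exact(&mut next)?;
    assert_eq!(&next, b"EFGH");
    Ok(())
}
```

The `Read` to `BufRead` tightening on `read_fourcc` and `read_chunk_header` fits the same picture: presumably these helpers should now only ever be handed a buffered reader, since no wrapper is added for them.
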
2 changes: 1 addition & 1 deletion src/encoder.rs
```diff
@@ -286,7 +286,7 @@ fn write_huffman_tree<W: Write>(
 
 const fn length_to_symbol(len: u16) -> (u16, u8) {
     let len = len - 1;
-    let highest_bit = 15 - len.leading_zeros() as u16; // TODO: use ilog2 once MSRV >= 1.67
+    let highest_bit = len.ilog2() as u16;
     let second_highest_bit = (len >> (highest_bit - 1)) & 1;
     let extra_bits = highest_bit - 1;
     let symbol = 2 * highest_bit + second_highest_bit;
```

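A quick equivalence check (my own, not part of the PR): `u16::ilog2`, stable since Rust 1.67, returns the index of the highest set bit, which is exactly what the old `15 - leading_zeros()` expression computed.

```rust
fn main() {
    // Both formulations assume a non-zero argument, so check 1..=u16::MAX.
    for len in 1u16..=u16::MAX {
        let old = 15 - len.leading_zeros() as u16;
        let new = len.ilog2() as u16;
        assert_eq!(old, new, "mismatch at {len}");
    }
    println!("ilog2 matches 15 - leading_zeros for all non-zero u16 values");
}
```
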
13 changes: 1 addition & 12 deletions src/lossless_transform.rs
```diff
@@ -386,17 +386,6 @@ pub(crate) fn apply_color_indexing_transform(
     table_size: u16,
     table_data: &[u8],
 ) {
-    // TODO: Replace with built-in div_ceil when MSRV is 1.73+
-    const fn div_ceil(a: u16, b: u16) -> u16 {
-        let d = a / b;
-        let r = a % b;
-        if r > 0 && b > 0 {
-            d + 1
-        } else {
-            d
-        }
-    }
-
     if table_size > 16 {
         let mut table = table_data.chunks_exact(4).collect::<Vec<_>>();
         table.resize(256, &[0; 4]);
@@ -434,7 +423,7 @@ pub(crate) fn apply_color_indexing_transform(
         let table = table.chunks_exact(4 << width_bits).collect::<Vec<_>>();
 
         let entry_size = 4 << width_bits;
-        let index_image_width = div_ceil(width, 1 << width_bits) as usize;
+        let index_image_width = width.div_ceil(1 << width_bits) as usize;
         let final_entry_size = width as usize * 4 - entry_size * (index_image_width - 1);
 
         for y in (0..height as usize).rev() {
```

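Likewise, a small check (mine, not from the PR) that the standard `u16::div_ceil`, stable since Rust 1.73, agrees with the local helper the PR removes.

```rust
// The helper deleted from apply_color_indexing_transform, reproduced verbatim.
const fn old_div_ceil(a: u16, b: u16) -> u16 {
    let d = a / b;
    let r = a % b;
    if r > 0 && b > 0 {
        d + 1
    } else {
        d
    }
}

fn main() {
    // The divisor is a small power of two (width_bits is at most 3 in the
    // WebP lossless color-indexing transform); compare against std's div_ceil.
    for width in 0u16..=4096 {
        for width_bits in 0u16..=3 {
            let divisor = 1u16 << width_bits;
            assert_eq!(old_div_ceil(width, divisor), width.div_ceil(divisor));
        }
    }
    println!("u16::div_ceil matches the removed helper on the tested range");
}
```
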