chore(contrib): upgrade to 2024 edition

Part of #114
This edition change doesn't affect these crates except for formatting.

Change-Id: I441c967b6803f1f839379c2ec13dee3f0958fb05
Reviewed-on: https://cl.snix.dev/c/snix/+/30593
Autosubmit: Bence Nemes <nemes.bence1@gmail.com>
Reviewed-by: Ilan Joselevich <personal@ilanjoselevich.com>
Reviewed-by: Florian Klink <flokli@flokli.de>
Tested-by: besadii
This commit is contained in:
Starnick4444 2025-07-01 08:28:28 +02:00 committed by clbot
parent d829261a09
commit f242bc6df8
27 changed files with 36 additions and 30 deletions

View file

@@ -21,9 +21,9 @@
//! write any output.
use bytes::Bytes;
use futures::{stream::FuturesUnordered, Stream, TryStreamExt};
use futures::{Stream, TryStreamExt, stream::FuturesUnordered};
use rusoto_core::ByteStream;
use rusoto_s3::{GetObjectRequest, PutObjectRequest, S3Client, S3};
use rusoto_s3::{GetObjectRequest, PutObjectRequest, S3, S3Client};
use serde::Deserialize;
use std::{io::Write, mem, ops::Range, ptr};
use tokio::{
@@ -34,7 +34,7 @@ use tokio::{
/// Fetch a group of keys, streaming concatenated chunks as they arrive from S3.
/// `keys` must be a slice from the job file. Any network error at all fails the
/// entire batch, and there is no rate limiting.
fn fetch(keys: &[[u8; 32]]) -> impl Stream<Item = io::Result<Bytes>> {
fn fetch(keys: &[[u8; 32]]) -> impl Stream<Item = io::Result<Bytes>> + use<> {
// S3 supports only HTTP/1.1, but we can ease the pain somewhat by using
// HTTP pipelining. It terminates the TCP connection after receiving 100
// requests, so we chunk the keys up accordingly, and make one connection