chore(contrib): upgrade to 2024 edition
Part of #114. This edition change doesn't affect these crates except for formatting. Change-Id: I441c967b6803f1f839379c2ec13dee3f0958fb05 Reviewed-on: https://cl.snix.dev/c/snix/+/30593 Autosubmit: Bence Nemes <nemes.bence1@gmail.com> Reviewed-by: Ilan Joselevich <personal@ilanjoselevich.com> Reviewed-by: Florian Klink <flokli@flokli.de> Tested-by: besadii
This commit is contained in:
parent
d829261a09
commit
f242bc6df8
27 changed files with 36 additions and 30 deletions
|
|
@ -4348,7 +4348,7 @@ rec {
|
|||
"turbofetch" = rec {
|
||||
crateName = "turbofetch";
|
||||
version = "0.1.0";
|
||||
edition = "2021";
|
||||
edition = "2024";
|
||||
crateBin = [
|
||||
{
|
||||
name = "turbofetch";
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
[package]
|
||||
name = "turbofetch"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
edition = "2024"
|
||||
|
||||
# We don't join the //snix workspace, as this is fairly cache.nixos.org-specific.
|
||||
[workspace]
|
||||
|
|
|
|||
1
contrib/turbofetch/rustfmt.toml
Normal file
1
contrib/turbofetch/rustfmt.toml
Normal file
|
|
@ -0,0 +1 @@
|
|||
edition = "2024"
|
||||
|
|
@ -21,9 +21,9 @@
|
|||
//! write any output.
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::{stream::FuturesUnordered, Stream, TryStreamExt};
|
||||
use futures::{Stream, TryStreamExt, stream::FuturesUnordered};
|
||||
use rusoto_core::ByteStream;
|
||||
use rusoto_s3::{GetObjectRequest, PutObjectRequest, S3Client, S3};
|
||||
use rusoto_s3::{GetObjectRequest, PutObjectRequest, S3, S3Client};
|
||||
use serde::Deserialize;
|
||||
use std::{io::Write, mem, ops::Range, ptr};
|
||||
use tokio::{
|
||||
|
|
@ -34,7 +34,7 @@ use tokio::{
|
|||
/// Fetch a group of keys, streaming concatenated chunks as they arrive from S3.
|
||||
/// `keys` must be a slice from the job file. Any network error at all fails the
|
||||
/// entire batch, and there is no rate limiting.
|
||||
fn fetch(keys: &[[u8; 32]]) -> impl Stream<Item = io::Result<Bytes>> {
|
||||
fn fetch(keys: &[[u8; 32]]) -> impl Stream<Item = io::Result<Bytes>> + use<> {
|
||||
// S3 supports only HTTP/1.1, but we can ease the pain somewhat by using
|
||||
// HTTP pipelining. It terminates the TCP connection after receiving 100
|
||||
// requests, so we chunk the keys up accordingly, and make one connection
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue