refactor(tvix/store): move blob splitting into a BlobWriter struct
This moves the chunking-as-we-receive logic that so far only lived in
grpc_blobservice_wrapper.rs into a generic BlobWriter.

Change-Id: Ief7d1bda3c6280129f7139de3f6c4174be2ca6ea
Reviewed-on: https://cl.tvl.fyi/c/depot/+/8154
Tested-by: BuildkiteCI
Reviewed-by: raitobezarius <tvl@lahfa.xyz>
parent d8ab140d25
commit 0baaabc43e

4 changed files with 172 additions and 76 deletions
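Only the call-site changes in grpc_blobservice_wrapper.rs are shown below; the BlobWriter itself lands in one of the other changed files. Judging purely from its call sites in this excerpt (BlobWriter::new(&self.chunk_service), write_all via the newly imported std::io::Write, and finalize() returning the blob digest plus the chunk list), a minimal sketch could look like this. The ChunkService::put signature and the flattened BlobMeta below are illustrative stand-ins, not the actual tvix-store types:

use std::io::{self, Write};

// Stand-in for the real ChunkService trait (signature assumed for this
// sketch): stores a chunk and returns its digest.
pub trait ChunkService {
    fn put(&self, data: Vec<u8>) -> io::Result<Vec<u8>>;
}

// Simplified stand-in for the BlobMeta protobuf message.
#[derive(Debug, Default)]
pub struct BlobMeta {
    pub chunks: Vec<(Vec<u8>, u32)>, // (chunk digest, chunk size)
}

// Wraps a ChunkService, hashes everything written to it, and (in the real
// implementation) cuts the buffered data into content-defined chunks.
pub struct BlobWriter<'a, CS: ChunkService> {
    chunk_service: &'a CS,
    blob_hasher: blake3::Hasher,
    blob_meta: BlobMeta,
    buf: Vec<u8>,
}

impl<'a, CS: ChunkService> BlobWriter<'a, CS> {
    pub fn new(chunk_service: &'a CS) -> Self {
        BlobWriter {
            chunk_service,
            blob_hasher: blake3::Hasher::new(),
            blob_meta: BlobMeta::default(),
            buf: Vec::new(),
        }
    }

    // Upload whatever is still buffered as the final chunk, then return
    // the digest of the whole blob together with the list of chunks.
    pub fn finalize(mut self) -> io::Result<(Vec<u8>, BlobMeta)> {
        if !self.buf.is_empty() {
            let size = self.buf.len() as u32;
            let digest = self.chunk_service.put(std::mem::take(&mut self.buf))?;
            self.blob_meta.chunks.push((digest, size));
        }
        Ok((self.blob_hasher.finalize().as_bytes().to_vec(), self.blob_meta))
    }
}

impl<CS: ChunkService> Write for BlobWriter<'_, CS> {
    fn write(&mut self, input: &[u8]) -> io::Result<usize> {
        // The removed code switched to blake3's multi-threaded update_rayon
        // for inputs larger than 128 KiB; plain update() keeps this simple.
        self.blob_hasher.update(input);
        self.buf.extend_from_slice(input);
        // The real writer would now run the fastcdc cut loop over `buf` and
        // upload every finished chunk, keeping only the tail buffered.
        Ok(input.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

Implementing std::io::Write lets the gRPC handler treat blob ingestion as plain write_all calls followed by a single finalize, which is exactly the shape of the new code in the second hunk below.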
grpc_blobservice_wrapper.rs:

@@ -1,9 +1,10 @@
 use crate::{blobservice::BlobService, chunkservice::ChunkService, Error};
 use data_encoding::BASE64;
+use std::io::Write;
 use tokio::{sync::mpsc::channel, task};
 use tokio_stream::wrappers::ReceiverStream;
 use tonic::{async_trait, Request, Response, Status, Streaming};
-use tracing::{debug, instrument};
+use tracing::{debug, error, instrument, warn};
 
 pub struct GRPCBlobServiceWrapper<BS: BlobService, CS: ChunkService> {
     blob_service: BS,
@@ -164,86 +165,20 @@ impl<
     ) -> Result<Response<super::PutBlobResponse>, Status> {
         let mut req_inner = request.into_inner();
 
-        // initialize a blake3 hasher calculating the hash of the whole blob.
-        let mut blob_hasher = blake3::Hasher::new();
+        let mut blob_writer = crate::BlobWriter::new(&self.chunk_service);
 
-        // start a BlobMeta, which we'll fill while looping over the chunks
-        let mut blob_meta = super::BlobMeta::default();
-
-        // is filled with bytes received from the client.
-        let mut buf: Vec<u8> = vec![];
-
-        // This reads data from the client, chunks it up using fastcdc,
-        // uploads all chunks to the [ChunkService], and fills a
-        // [super::BlobMeta] linking to these chunks.
+        // receive data from the client, and keep writing it to the blob writer.
         while let Some(blob_chunk) = req_inner.message().await? {
-            // calculate blob hash, and use rayon if data is > 128KiB.
-            if blob_chunk.data.len() > 128 * 1024 {
-                blob_hasher.update_rayon(&blob_chunk.data);
-            } else {
-                blob_hasher.update(&blob_chunk.data);
-            }
-
-            // extend buf with the newly received data
-            buf.append(&mut blob_chunk.data.clone());
-
-            // TODO: play with chunking sizes
-            let chunker_avg_size = 64 * 1024;
-            let chunker_min_size = chunker_avg_size / 4;
-            let chunker_max_size = chunker_avg_size * 4;
-
-            // initialize a chunker with the current buffer
-            let chunker = fastcdc::v2020::FastCDC::new(
-                &buf,
-                chunker_min_size,
-                chunker_avg_size,
-                chunker_max_size,
-            );
-
-            // ask the chunker for cutting points in the buffer.
-            let mut start_pos = 0 as usize;
-            buf = loop {
-                // ask the chunker for the next cutting point.
-                let (_fp, end_pos) = chunker.cut(start_pos, buf.len() - start_pos);
-
-                // whenever the last cut point is pointing to the end of the buffer,
-                // keep that chunk left in there.
-                // We don't know if the chunker decided to cut here simply because it was
-                // at the end of the buffer, or if it would also cut if there
-                // were more data.
-                //
-                // Split off all previous chunks and keep this chunk data in the buffer.
-                if end_pos == buf.len() {
-                    break buf.split_off(start_pos);
-                }
-
-                // Upload that chunk to the chunk service and record it in BlobMeta.
-                // TODO: make upload_chunk async and upload concurrently?
-                let chunk_data = &buf[start_pos..end_pos];
-                let chunk_digest =
-                    Self::upload_chunk(self.chunk_service.clone(), chunk_data.to_vec())?;
-
-                blob_meta.chunks.push(super::blob_meta::ChunkMeta {
-                    digest: chunk_digest,
-                    size: chunk_data.len() as u32,
-                });
-
-                // move start_pos over the processed chunk.
-                start_pos = end_pos;
+            if let Err(e) = blob_writer.write_all(&blob_chunk.data) {
+                error!(e=%e,"unable to write blob data");
+                return Err(Status::internal("unable to write blob data"));
             }
         }
 
-        // Also upload the last chunk (what's left in `buf`) to the chunk
-        // service and record it in BlobMeta.
-        let buf_len = buf.len() as u32;
-        let chunk_digest = Self::upload_chunk(self.chunk_service.clone(), buf)?;
-
-        blob_meta.chunks.push(super::blob_meta::ChunkMeta {
-            digest: chunk_digest,
-            size: buf_len,
-        });
-
-        let blob_digest = blob_hasher.finalize().as_bytes().to_vec();
+        // run finalize
+        let (blob_digest, blob_meta) = blob_writer
+            .finalize()
+            .map_err(|_| Status::internal("unable to finalize blob"))?;
 
         // check if we have the received blob in the [BlobService] already.
         let resp = self.blob_service.stat(&super::StatBlobRequest {
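The subtlest part of the removed code above is the cut loop: a cut point that lands exactly at the end of the buffer cannot be trusted, since the chunker may have cut there only because it ran out of data, so that tail has to stay buffered until more bytes arrive. Here is the same pattern in isolation as a self-contained sketch against the fastcdc v2020 API used above; the function name and the 64 KiB average chunk size are arbitrary choices for the demo:

// Cut `buf` with FastCDC, returning all finished chunks plus the tail that
// must be carried over into the next round of incoming data.
fn emit_complete_chunks(mut buf: Vec<u8>) -> (Vec<Vec<u8>>, Vec<u8>) {
    let avg_size = 64 * 1024;
    let chunker = fastcdc::v2020::FastCDC::new(&buf, avg_size / 4, avg_size, avg_size * 4);

    let mut chunks = Vec::new();
    let mut start_pos = 0;
    let tail = loop {
        // ask the chunker for the next cut point.
        let (_fp, end_pos) = chunker.cut(start_pos, buf.len() - start_pos);

        // A cut at the very end of the buffer may only mean "no more data",
        // so keep that piece buffered instead of emitting it as a chunk.
        if end_pos == buf.len() {
            break buf.split_off(start_pos);
        }

        chunks.push(buf[start_pos..end_pos].to_vec());
        start_pos = end_pos;
    };
    (chunks, tail)
}

Feeding the returned tail plus the next batch of received bytes back into such a loop produces chunk boundaries that do not depend on how the upload happened to be packetized, which is what keeps the chunk digests reproducible.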