chore(tvix/store/protos): drop unused fields for now
This wasn't removed yet, and no code is using/populating it so far. It's
confusing, let's update it to the state of things now, and re-introduce it
once we get there.

Change-Id: I68f5ba17a8eee604d8ccd82749da7c8be094cb99
Reviewed-on: https://cl.tvl.fyi/c/depot/+/9351
Reviewed-by: flokli <flokli@flokli.de>
Tested-by: BuildkiteCI
parent bf2fe88a5c
commit ca25acf11b

5 changed files with 51 additions and 218 deletions
@@ -7,12 +7,10 @@ package tvix.store.v1;
 option go_package = "code.tvl.fyi/tvix/store/protos;storev1";
 
 service BlobService {
-    // Stat exposes metadata about a given blob,
+    // In the future, Stat will expose more metadata about a given blob,
     // such as more granular chunking, baos.
-    // It implicitly allows checking for existence too, as asking this for a
-    // non-existing Blob will return a Status::not_found grpc error.
-    // If there's no more granular chunking available, the response will simply
-    // contain a single chunk.
+    // For now, it's only used to check for the existence of a blob, as asking
+    // this for a non-existing Blob will return a Status::not_found gRPC error.
     rpc Stat(StatBlobRequest) returns (BlobMeta);
 
     // Read returns a stream of BlobChunk, which is just a stream of bytes with
@@ -20,7 +18,6 @@ service BlobService {
     //
     // The server may decide on whatever chunking it may seem fit as a size for
     // the individual BlobChunk sent in the response stream.
-    //
     rpc Read(ReadBlobRequest) returns (stream BlobChunk);
 
     // Put uploads a Blob, by reading a stream of bytes.
@@ -33,34 +30,9 @@ service BlobService {
 message StatBlobRequest {
     // The blake3 digest of the blob requested
     bytes digest = 1;
-
-    // Whether to include the chunks field
-    bool include_chunks = 2;
-    // Whether to include the inline_bao field, containing an (outboard) bao.
-    // The [bao](https://github.com/oconnor663/bao/blob/master/docs/spec.md)
-    // can be used to validate chunks end up hashing to the same root digest.
-    // These only really matter when only downloading parts of a blob. Some
-    // caution needs to be applied when validating chunks - the bao works with
-    // 1K leaf nodes, which might not align with the chunk sizes - this might
-    // imply a neighboring chunk might need to be (partially) fetched to
-    // validate the hash.
-    bool include_bao = 3;
 }
 
-// BlobMeta provides more granular chunking information for the requested blob,
-// and baos.
 message BlobMeta {
-    // This provides a list of chunks.
-    // Concatenating their contents would produce a blob with the digest that
-    // was specified in the request.
-    repeated ChunkMeta chunks = 1;
-
-    message ChunkMeta {
-        bytes digest = 1;
-        uint32 size = 2;
-    }
-
-    bytes inline_bao = 2;
 }
 
 message ReadBlobRequest {
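
The first hunk narrows Stat down to an existence check: a known blob returns a (now empty) BlobMeta, an unknown one returns a NOT_FOUND gRPC status. As a minimal usage sketch only (not part of this change): the snippet below assumes Go stubs generated from this file into the declared go_package ("code.tvl.fyi/tvix/store/protos;storev1") with standard protoc-gen-go-grpc naming, and uses a placeholder server address and digest.

// Hypothetical client sketch: treat a successful Stat as "blob exists" and a
// NOT_FOUND status as "blob is missing"; anything else is a real error.
package main

import (
    "context"
    "log"

    storev1 "code.tvl.fyi/tvix/store/protos"

    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/credentials/insecure"
    "google.golang.org/grpc/status"
)

func blobExists(ctx context.Context, client storev1.BlobServiceClient, digest []byte) (bool, error) {
    _, err := client.Stat(ctx, &storev1.StatBlobRequest{Digest: digest})
    if err == nil {
        return true, nil // the server knows this blob
    }
    if status.Code(err) == codes.NotFound {
        return false, nil // per the Stat comment, a missing blob yields NOT_FOUND
    }
    return false, err
}

func main() {
    // Placeholder connection details; a real deployment would differ.
    conn, err := grpc.Dial("localhost:8000", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := storev1.NewBlobServiceClient(conn)
    exists, err := blobExists(context.Background(), client, make([]byte, 32)) // placeholder blake3 digest
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("blob exists: %v", exists)
}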
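
The second hunk only tightens the Read comment, but it still carries the key property for clients: the server is free to pick whatever chunk sizes it streams back, so the receiver simply concatenates chunks until the stream ends. A sketch under the same assumptions as above (generated storev1 stubs; the BlobChunk payload field is assumed to be named data, which is not visible in this diff), plus the standard library io package:

// readBlob fetches a whole blob by concatenating the streamed chunks; the
// chunk boundaries chosen by the server carry no meaning for the client.
func readBlob(ctx context.Context, client storev1.BlobServiceClient, digest []byte) ([]byte, error) {
    stream, err := client.Read(ctx, &storev1.ReadBlobRequest{Digest: digest})
    if err != nil {
        return nil, err
    }
    var blob []byte
    for {
        chunk, err := stream.Recv()
        if err == io.EOF {
            return blob, nil // stream finished: the blob has been fully received
        }
        if err != nil {
            return nil, err
        }
        blob = append(blob, chunk.GetData()...)
    }
}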