refactor(castore): drop Clone + Send + Sync requirements on BS, DS
We can now use async closures for this.

Change-Id: Iccbe86998726be139e81749745c37eb9f475693c
Reviewed-on: https://cl.snix.dev/c/snix/+/30546
Autosubmit: Florian Klink <flokli@flokli.de>
Tested-by: besadii
Reviewed-by: Connor Brewster <cbrewster@hey.com>
This commit is contained in:
parent
97f215aef2
commit
33a02267c2
1 changed file with 15 additions and 21 deletions
|
|
@ -121,8 +121,8 @@ pub struct SnixStoreFs<BS, DS, RN> {
|
||||||
|
|
||||||
impl<BS, DS, RN> SnixStoreFs<BS, DS, RN>
|
impl<BS, DS, RN> SnixStoreFs<BS, DS, RN>
|
||||||
where
|
where
|
||||||
BS: BlobService + Clone + Send,
|
BS: BlobService,
|
||||||
DS: DirectoryService + Clone + Send + 'static,
|
DS: DirectoryService,
|
||||||
RN: RootNodes + Clone + 'static,
|
RN: RootNodes + Clone + 'static,
|
||||||
{
|
{
|
||||||
pub fn new(
|
pub fn new(
|
||||||
|
|
@ -183,11 +183,7 @@ where
|
||||||
InodeData::Directory(DirectoryInodeData::Sparse(ref parent_digest, _)) => {
|
InodeData::Directory(DirectoryInodeData::Sparse(ref parent_digest, _)) => {
|
||||||
let directory = self
|
let directory = self
|
||||||
.tokio_handle
|
.tokio_handle
|
||||||
.block_on({
|
.block_on(async { self.directory_service.get(parent_digest).await })?
|
||||||
let directory_service = self.directory_service.clone();
|
|
||||||
let parent_digest = parent_digest.to_owned();
|
|
||||||
async move { directory_service.get(&parent_digest).await }
|
|
||||||
})?
|
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
warn!(directory.digest=%parent_digest, "directory not found");
|
warn!(directory.digest=%parent_digest, "directory not found");
|
||||||
// If the Directory can't be found, this is a hole, bail out.
|
// If the Directory can't be found, this is a hole, bail out.
|
||||||
|
|
@ -257,11 +253,10 @@ where
|
||||||
}
|
}
|
||||||
|
|
||||||
// We don't have it yet, look it up in [self.root_nodes].
|
// We don't have it yet, look it up in [self.root_nodes].
|
||||||
match self.tokio_handle.block_on({
|
match self
|
||||||
let root_nodes_provider = self.root_nodes_provider.clone();
|
.tokio_handle
|
||||||
let name = name.clone();
|
.block_on(async move { self.root_nodes_provider.get_by_basename(name).await })
|
||||||
async move { root_nodes_provider.get_by_basename(&name).await }
|
{
|
||||||
}) {
|
|
||||||
// if there was an error looking up the root node, propagate up an IO error.
|
// if there was an error looking up the root node, propagate up an IO error.
|
||||||
Err(_e) => Err(io::Error::from_raw_os_error(libc::EIO)),
|
Err(_e) => Err(io::Error::from_raw_os_error(libc::EIO)),
|
||||||
// the root node doesn't exist, so the file doesn't exist.
|
// the root node doesn't exist, so the file doesn't exist.
|
||||||
|
|
@ -303,8 +298,8 @@ const XATTR_NAME_BLOB_DIGEST: &[u8] = b"user.snix.castore.blob.digest";
|
||||||
#[cfg(all(feature = "virtiofs", target_os = "linux"))]
|
#[cfg(all(feature = "virtiofs", target_os = "linux"))]
|
||||||
impl<BS, DS, RN> fuse_backend_rs::api::filesystem::Layer for SnixStoreFs<BS, DS, RN>
|
impl<BS, DS, RN> fuse_backend_rs::api::filesystem::Layer for SnixStoreFs<BS, DS, RN>
|
||||||
where
|
where
|
||||||
BS: BlobService + Clone + Send + 'static,
|
BS: BlobService,
|
||||||
DS: DirectoryService + Send + Clone + 'static,
|
DS: DirectoryService,
|
||||||
RN: RootNodes + Clone + 'static,
|
RN: RootNodes + Clone + 'static,
|
||||||
{
|
{
|
||||||
fn root_inode(&self) -> Self::Inode {
|
fn root_inode(&self) -> Self::Inode {
|
||||||
|
|
@ -314,8 +309,8 @@ where
|
||||||
|
|
||||||
impl<BS, DS, RN> FileSystem for SnixStoreFs<BS, DS, RN>
|
impl<BS, DS, RN> FileSystem for SnixStoreFs<BS, DS, RN>
|
||||||
where
|
where
|
||||||
BS: BlobService + Clone + Send + 'static,
|
BS: BlobService,
|
||||||
DS: DirectoryService + Send + Clone + 'static,
|
DS: DirectoryService,
|
||||||
RN: RootNodes + Clone + 'static,
|
RN: RootNodes + Clone + 'static,
|
||||||
{
|
{
|
||||||
type Handle = u64;
|
type Handle = u64;
|
||||||
|
|
@ -691,11 +686,10 @@ where
|
||||||
InodeData::Regular(ref blob_digest, _blob_size, _) => {
|
InodeData::Regular(ref blob_digest, _blob_size, _) => {
|
||||||
Span::current().record("blob.digest", blob_digest.to_string());
|
Span::current().record("blob.digest", blob_digest.to_string());
|
||||||
|
|
||||||
match self.tokio_handle.block_on({
|
match self
|
||||||
let blob_service = self.blob_service.clone();
|
.tokio_handle
|
||||||
let blob_digest = blob_digest.clone();
|
.block_on(async { self.blob_service.open_read(blob_digest).await })
|
||||||
async move { blob_service.open_read(&blob_digest).await }
|
{
|
||||||
}) {
|
|
||||||
Ok(None) => {
|
Ok(None) => {
|
||||||
warn!("blob not found");
|
warn!("blob not found");
|
||||||
Err(io::Error::from_raw_os_error(libc::EIO))
|
Err(io::Error::from_raw_os_error(libc::EIO))
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue