
Comparing changes

This is a direct comparison between two commits made in this repository.

base repository: dragonflyoss/nydus
base: d099826ee082498bf0eb9f394c31a60c968b0c7f

head repository: dragonflyoss/nydus
compare: bb6ff6e8e1c215c853d1252181118c764fac0d53
1 change: 1 addition & 0 deletions smoke/tests/external/backend/backend.go
@@ -66,6 +66,7 @@ type Result struct {
Backend Backend
}

// SplitChunks splits the total size into chunks with the specified size.
func SplitChunks(totalSize, chunkSize int64, objectIndex uint32) []Chunk {
chunks := []Chunk{}
if chunkSize <= 0 {
2 changes: 0 additions & 2 deletions smoke/tests/external/backend/backend_test.go
@@ -10,8 +10,6 @@ import (

func TestLayout(t *testing.T) {
require.Equal(t, fmt.Sprintf("%d", 4096), fmt.Sprintf("%d", unsafe.Sizeof(Header{})))

require.Equal(t, fmt.Sprintf("%d", 256), fmt.Sprintf("%d", unsafe.Sizeof(ChunkMeta{})))

require.Equal(t, fmt.Sprintf("%d", 256), fmt.Sprintf("%d", unsafe.Sizeof(ObjectMeta{})))
}
4 changes: 4 additions & 0 deletions smoke/tests/external/external.go
@@ -24,11 +24,15 @@ type Attribute struct {
BackendIndex uint32
}

// Handler is the interface for backend handler.
type Handler interface {
// Detect checks if the handler can handle the given directory.
Detect(ctx context.Context, root string) (bool, error)
// Handle handles the directory and returns the backend information.
Handle(ctx context.Context, root string) (*backend.Result, error)
}

// Handle handles the directory and generates the backend meta and attributes.
func Handle(ctx context.Context, opts Options) error {
handlers := []Handler{
local.NewHandler(),
13 changes: 11 additions & 2 deletions smoke/tests/external_layer_test.go
@@ -6,6 +6,7 @@ package tests

import (
"context"
"os"
"path/filepath"
"testing"

@@ -56,10 +57,12 @@ func (n *ExternalLayerTestSuite) testMakeLayers(ctx tool.Context, t *testing.T)

// Prepare .nydusattributes file
attributesPath := filepath.Join(ctx.Env.WorkDir, ".nydusattributes")
backendMetaPath := filepath.Join(ctx.Env.CacheDir, ".backend.meta")
backendConfigPath := filepath.Join(ctx.Env.CacheDir, ".backend.json")
err := external.Handle(context.Background(), external.Options{
Dir: lowerLayerSourceDir,
MetaOutput: filepath.Join(ctx.Env.CacheDir, "backend.meta"),
BackendOutput: filepath.Join(ctx.Env.CacheDir, "backend.json"),
MetaOutput: backendMetaPath,
BackendOutput: backendConfigPath,
AttributesOutput: attributesPath,
})
require.NoError(t, err)
@@ -74,6 +77,12 @@ func (n *ExternalLayerTestSuite) testMakeLayers(ctx tool.Context, t *testing.T)
}
lowerBlobDigest, lowerExternalBlobDigest := lowerLayer.PackWithAttributes(t, packOption, ctx.Env.BlobDir, lowerLayerSourceDir)

err = os.Rename(backendMetaPath, filepath.Join(ctx.Env.CacheDir, lowerExternalBlobDigest.Hex()+".backend.meta"))
require.NoError(t, err)

err = os.Rename(backendConfigPath, filepath.Join(ctx.Env.CacheDir, lowerExternalBlobDigest.Hex()+".backend.json"))
require.NoError(t, err)

// Make upper layer
upperLayer := texture.MakeUpperLayer(t, filepath.Join(ctx.Env.WorkDir, "source-upper"))
upperBlobDigest := upperLayer.Pack(t, packOption, ctx.Env.BlobDir)
19 changes: 19 additions & 0 deletions storage/src/backend/external/mod.rs
@@ -25,6 +25,20 @@ struct BackendConfig {
config: HashMap<String, String>,
}

pub struct NoopBackend {}

impl NoopBackend {
pub fn new() -> Self {
Self {}
}
}

impl ExternalBlobReader for NoopBackend {
fn read(&self, _buf: &mut [u8], _chunks: &[&dyn BlobChunkInfo]) -> Result<usize> {
unimplemented!();
}
}

pub struct ExternalBackendFactory {}

impl ExternalBackendFactory {
@@ -47,4 +61,9 @@ impl ExternalBackendFactory {
}
}
}

pub fn default() -> Arc<dyn ExternalBlobReader> {
let backend = NoopBackend::new();
Arc::new(backend) as Arc<dyn ExternalBlobReader>
}
}
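
Note: NoopBackend is a null-object placeholder, so a cache entry for a non-external blob can still hold an Arc<dyn ExternalBlobReader> without wrapping it in an Option; actually calling read() on it is a logic error. A minimal self-contained sketch of the pattern (the trait below is a simplified stand-in for the crate's ExternalBlobReader, which additionally takes chunk descriptors):

use std::io::Result;
use std::sync::Arc;

// Simplified stand-in for the crate's ExternalBlobReader trait.
trait ExternalBlobReader: Send + Sync {
    fn read(&self, buf: &mut [u8]) -> Result<usize>;
}

// Null-object reader: every entry can carry a reader; reading through it
// is never expected for ordinary (non-external) blobs.
struct NoopBackend;

impl ExternalBlobReader for NoopBackend {
    fn read(&self, _buf: &mut [u8]) -> Result<usize> {
        unimplemented!("read() called on a non-external blob");
    }
}

fn default_reader() -> Arc<dyn ExternalBlobReader> {
    Arc::new(NoopBackend)
}

fn main() {
    // The placeholder is constructed but never read for ordinary blobs.
    let _reader = default_reader();
}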
24 changes: 8 additions & 16 deletions storage/src/cache/cachedfile.rs
@@ -563,6 +563,10 @@ impl BlobCache for FileCacheEntry {
self.is_zran
}

fn is_external(&self) -> bool {
self.blob_info.has_feature(BlobFeatures::EXTERNAL)
}

fn need_validation(&self) -> bool {
self.need_validation
}
@@ -705,14 +709,8 @@ impl BlobCache for FileCacheEntry {
}

let (blob_offset, _blob_end, blob_size) = self.get_blob_range(&pending[start..=end])?;
let external = self.blob_info.has_feature(BlobFeatures::EXTERNAL);
match self.read_chunks_from_backend(
blob_offset,
blob_size,
&pending[start..=end],
true,
external,
) {
match self.read_chunks_from_backend(blob_offset, blob_size, &pending[start..=end], true)
{
Ok(mut bufs) => {
total_size += blob_size;
if self.is_raw_data {
@@ -916,13 +914,11 @@ impl FileCacheEntry {
chunks[0].blob_index()
);

let external = self.blob_info.has_feature(BlobFeatures::EXTERNAL);
match self.read_chunks_from_backend(
blob_offset,
blob_size,
&chunks[start_idx..=end_idx],
prefetch,
external,
) {
Ok(mut bufs) => {
if self.is_raw_data {
@@ -987,8 +983,7 @@ impl FileCacheEntry {
Ok(false) => {
info!("retry for timeout chunk, {}", chunk.id());
let mut buf = alloc_buf(chunk.uncompressed_size() as usize);
let external = self.blob_info.has_feature(BlobFeatures::EXTERNAL);
self.read_chunk_from_backend(chunk.as_ref(), &mut buf, external)
self.read_chunk_from_backend(chunk.as_ref(), &mut buf)
.map_err(|e| {
self.update_chunk_pending_status(chunk.as_ref(), false);
eio!(format!("read_raw_chunk failed, {:?}", e))
@@ -1249,14 +1244,12 @@ impl FileCacheEntry {
region = &region_hold;
}

let external = self.blob_info.has_feature(BlobFeatures::EXTERNAL);
let bufs = self
.read_chunks_from_backend(
region.blob_address,
region.blob_len as usize,
&region.chunks,
false,
external,
)
.map_err(|e| {
for c in &region.chunks {
@@ -1345,9 +1338,8 @@ impl FileCacheEntry {
);
&d
} else {
let external = self.blob_info.has_feature(BlobFeatures::EXTERNAL);
let c = self
.read_chunk_from_backend(chunk.as_ref(), d.mut_slice(), external)
.read_chunk_from_backend(chunk.as_ref(), d.mut_slice())
.map_err(|e| {
self.chunk_map.clear_pending(chunk.as_ref());
e
9 changes: 6 additions & 3 deletions storage/src/cache/dummycache.rs
@@ -84,6 +84,10 @@ impl BlobCache for DummyCache {
self.is_legacy_stargz
}

fn is_external(&self) -> bool {
self.blob_info.has_feature(BlobFeatures::EXTERNAL)
}

fn need_validation(&self) -> bool {
self.need_validation
}
@@ -135,14 +139,13 @@ impl BlobCache for DummyCache {
let bios_len = bios.len();
let offset = bios[0].offset;
let d_size = bios[0].chunkinfo.uncompressed_size() as usize;
let external = self.blob_info.has_feature(BlobFeatures::EXTERNAL);
// Use the destination buffer to receive the uncompressed data if possible.
if bufs.len() == 1 && bios_len == 1 && offset == 0 && bufs[0].len() >= d_size {
if !bios[0].user_io {
return Ok(0);
}
let buf = unsafe { std::slice::from_raw_parts_mut(bufs[0].as_ptr(), d_size) };
self.read_chunk_from_backend(&bios[0].chunkinfo, buf, external)?;
self.read_chunk_from_backend(&bios[0].chunkinfo, buf)?;
return Ok(buf.len());
}

@@ -151,7 +154,7 @@ impl BlobCache for DummyCache {
for bio in bios.iter() {
if bio.user_io {
let mut d = alloc_buf(bio.chunkinfo.uncompressed_size() as usize);
self.read_chunk_from_backend(&bio.chunkinfo, d.as_mut_slice(), external)?;
self.read_chunk_from_backend(&bio.chunkinfo, d.as_mut_slice())?;
buffer_holder.push(d);
// Even a merged IO can hardly reach u32::MAX. So this is safe
user_size += bio.size;
22 changes: 15 additions & 7 deletions storage/src/cache/filecache/mod.rs
@@ -23,6 +23,9 @@ use crate::cache::state::{
};
use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr};
use crate::cache::{BlobCache, BlobCacheMgr};
use crate::cache::{
EXTERNAL_BLOB_BACKEND_CONFIG_FILE_SUFFIX, EXTERNAL_BLOB_BACKEND_META_FILE_SUFFIX,
};
use crate::device::{BlobFeatures, BlobInfo};

pub const BLOB_RAW_FILE_SUFFIX: &str = ".blob.raw";
@@ -199,13 +202,18 @@ impl FileCacheEntry {
.backend
.get_reader(&blob_id)
.map_err(|e| eio!(format!("failed to get reader for blob {}, {}", blob_id, e)))?;
let external_reader = ExternalBackendFactory::new(
PathBuf::new()
.join(&mgr.work_dir)
// .join(blob_info.blob_id().as_str())
.join("backend.meta"),
PathBuf::new().join(&mgr.work_dir).join("backend.json"),
)?;
let external_reader = if blob_info.is_external() {
ExternalBackendFactory::new(
PathBuf::new()
.join(&mgr.work_dir)
.join(blob_info.blob_id() + EXTERNAL_BLOB_BACKEND_META_FILE_SUFFIX),
PathBuf::new()
.join(&mgr.work_dir)
.join(blob_info.blob_id() + EXTERNAL_BLOB_BACKEND_CONFIG_FILE_SUFFIX),
)?
} else {
ExternalBackendFactory::default()
};
let blob_meta_reader = if is_separate_meta {
mgr.backend.get_reader(&blob_meta_id).map_err(|e| {
eio!(format!(
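
Note: with the new suffix constants, the backend meta and config files are resolved per blob (<blob_id>.backend.meta / <blob_id>.backend.json) under the cache work directory instead of the previously hard-coded backend.meta / backend.json. A rough sketch of the naming scheme, using an illustrative work directory and blob id:

use std::path::PathBuf;

const EXTERNAL_BLOB_BACKEND_META_FILE_SUFFIX: &str = ".backend.meta";
const EXTERNAL_BLOB_BACKEND_CONFIG_FILE_SUFFIX: &str = ".backend.json";

// Per-blob naming: each external blob's backend metadata and config are
// looked up by blob id inside the cache work directory.
fn external_backend_paths(work_dir: &str, blob_id: &str) -> (PathBuf, PathBuf) {
    let meta = PathBuf::from(work_dir)
        .join(format!("{}{}", blob_id, EXTERNAL_BLOB_BACKEND_META_FILE_SUFFIX));
    let config = PathBuf::from(work_dir)
        .join(format!("{}{}", blob_id, EXTERNAL_BLOB_BACKEND_CONFIG_FILE_SUFFIX));
    (meta, config)
}

fn main() {
    // Illustrative values only; the real paths come from the cache manager.
    let (meta, config) = external_backend_paths("/var/lib/nydus/cache", "abc123");
    println!("{}\n{}", meta.display(), config.display());
}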
26 changes: 17 additions & 9 deletions storage/src/cache/fscache/mod.rs
@@ -16,14 +16,16 @@ use tokio::runtime::Runtime;

use crate::backend::{external::ExternalBackendFactory, BlobBackend};
use crate::cache::cachedfile::{FileCacheEntry, FileCacheMeta};
use crate::cache::filecache::BLOB_DATA_FILE_SUFFIX;
use crate::cache::state::{BlobStateMap, IndexedChunkMap, RangeMap};
use crate::cache::worker::{AsyncPrefetchConfig, AsyncWorkerMgr};
use crate::cache::{BlobCache, BlobCacheMgr};
use crate::cache::{
EXTERNAL_BLOB_BACKEND_CONFIG_FILE_SUFFIX, EXTERNAL_BLOB_BACKEND_META_FILE_SUFFIX,
};
use crate::device::{BlobFeatures, BlobInfo, BlobObject};
use crate::factory::BLOB_FACTORY;

use crate::cache::filecache::BLOB_DATA_FILE_SUFFIX;

const FSCACHE_BLOBS_CHECK_NUM: u8 = 1;

/// An implementation of [BlobCacheMgr](../trait.BlobCacheMgr.html) to improve performance by
@@ -229,13 +231,19 @@ impl FileCacheEntry {
.backend
.get_reader(&blob_id)
.map_err(|e| eio!(format!("failed to get reader for blob {}, {}", blob_id, e)))?;
let external_reader = ExternalBackendFactory::new(
PathBuf::new()
.join(&mgr.work_dir)
// .join(blob_info.blob_id().as_str())
.join("backend.meta"),
PathBuf::new().join(&mgr.work_dir).join("backend.json"),
)?;
let external_reader = if blob_info.is_external() {
ExternalBackendFactory::new(
PathBuf::new()
.join(&mgr.work_dir)
.join(blob_info.blob_id() + EXTERNAL_BLOB_BACKEND_META_FILE_SUFFIX),
PathBuf::new()
.join(&mgr.work_dir)
.join(blob_info.blob_id() + EXTERNAL_BLOB_BACKEND_CONFIG_FILE_SUFFIX),
)?
} else {
ExternalBackendFactory::default()
};

let blob_meta_reader = if is_separate_meta {
mgr.backend.get_reader(&blob_meta_id).map_err(|e| {
eio!(format!(
12 changes: 8 additions & 4 deletions storage/src/cache/mod.rs
@@ -54,6 +54,9 @@ pub use fscache::FsCacheMgr;
/// Timeout in milli-seconds to retrieve blob data from backend storage.
pub const SINGLE_INFLIGHT_WAIT_TIMEOUT: u64 = 2000;

pub const EXTERNAL_BLOB_BACKEND_META_FILE_SUFFIX: &str = ".backend.meta";
pub const EXTERNAL_BLOB_BACKEND_CONFIG_FILE_SUFFIX: &str = ".backend.json";

struct BlobIoMergeState<'a, F: FnMut(BlobIoRange)> {
cb: F,
// size of compressed data
@@ -193,6 +196,9 @@ pub trait BlobCache: Send + Sync {
false
}

/// Check whether the blob is external.
fn is_external(&self) -> bool;

/// Check whether need to validate the data chunk by digest value.
fn need_validation(&self) -> bool;

@@ -256,7 +262,6 @@ pub trait BlobCache: Send + Sync {
blob_size: usize,
chunks: &'b [Arc<dyn BlobChunkInfo>],
prefetch: bool,
external: bool,
) -> Result<ChunkDecompressState<'a, 'b>>
where
Self: Sized,
@@ -268,7 +273,7 @@ pub trait BlobCache: Send + Sync {
.iter()
.map(|v| v.as_ref())
.collect::<Vec<&dyn BlobChunkInfo>>();
let nr_read = if external {
let nr_read = if self.is_external() {
self.external_reader()
.read(c_buf.as_mut_slice(), chunks.as_slice())?
} else {
@@ -303,7 +308,6 @@ pub trait BlobCache: Send + Sync {
&self,
chunk: &dyn BlobChunkInfo,
buffer: &mut [u8],
external: bool,
) -> Result<Option<Vec<u8>>> {
let start = Instant::now();
let offset = chunk.compressed_offset();
@@ -312,7 +316,7 @@ pub trait BlobCache: Send + Sync {
if self.is_zran() || self.is_batch() {
return Err(enosys!("read_chunk_from_backend"));
} else if !chunk.is_compressed() && !chunk.is_encrypted() {
let size = if external {
let size = if self.is_external() {
let chunks: &[&dyn BlobChunkInfo] = std::slice::from_ref(&chunk);
self.external_reader().read(buffer, chunks)?
} else {
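
Note: this change moves the external-or-not decision into the BlobCache trait itself, so callers no longer thread an external: bool flag through read_chunk_from_backend / read_chunks_from_backend. A simplified, self-contained sketch of the dispatch, with toy reader types standing in for the real backend and external readers:

use std::io::Result;

trait Reader {
    fn read(&self, buf: &mut [u8]) -> Result<usize>;
}

struct BackendReader;
struct ExternalReader;

impl Reader for BackendReader {
    fn read(&self, buf: &mut [u8]) -> Result<usize> {
        Ok(buf.len()) // pretend: fetched from the registry backend
    }
}

impl Reader for ExternalReader {
    fn read(&self, buf: &mut [u8]) -> Result<usize> {
        Ok(buf.len()) // pretend: fetched from external storage
    }
}

trait BlobCache {
    fn is_external(&self) -> bool;
    fn reader(&self) -> &dyn Reader;
    fn external_reader(&self) -> &dyn Reader;

    // The cache decides which reader to use, so callers stop passing a flag.
    fn read_chunk_from_backend(&self, buf: &mut [u8]) -> Result<usize> {
        if self.is_external() {
            self.external_reader().read(buf)
        } else {
            self.reader().read(buf)
        }
    }
}

struct Entry {
    external: bool,
}

impl BlobCache for Entry {
    fn is_external(&self) -> bool {
        self.external
    }
    fn reader(&self) -> &dyn Reader {
        &BackendReader
    }
    fn external_reader(&self) -> &dyn Reader {
        &ExternalReader
    }
}

fn main() {
    let mut buf = [0u8; 16];
    let n = Entry { external: true }.read_chunk_from_backend(&mut buf).unwrap();
    assert_eq!(n, buf.len());
}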
4 changes: 4 additions & 0 deletions storage/src/device.rs
@@ -446,6 +446,10 @@ impl BlobInfo {
self.blob_features.bits() & features.bits() == features.bits()
}

pub fn is_external(&self) -> bool {
self.has_feature(BlobFeatures::EXTERNAL)
}

/// Generate feature flags according to blob configuration.
fn compute_features(&mut self) {
if self.chunk_count == 0 {
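
Note: BlobInfo::is_external is just a feature-bit test on the blob feature mask. A minimal sketch of the check, assuming EXTERNAL occupies a single hypothetical bit (the real code uses the bitflags-based BlobFeatures type):

// Hypothetical bit position; the real value lives in BlobFeatures.
const EXTERNAL: u32 = 1 << 8;

struct BlobInfo {
    blob_features: u32,
}

impl BlobInfo {
    fn has_feature(&self, features: u32) -> bool {
        self.blob_features & features == features
    }

    fn is_external(&self) -> bool {
        self.has_feature(EXTERNAL)
    }
}

fn main() {
    let info = BlobInfo { blob_features: EXTERNAL };
    assert!(info.is_external());
}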