From dcfcde12bfe5deba0f36e309779e479e943ead74 Mon Sep 17 00:00:00 2001 From: daiyongxuan <2423226609@qq.com> Date: Fri, 25 Oct 2024 03:55:35 +0000 Subject: [PATCH 1/3] feat: ADD generate prefetch blob --- .github/workflows/convert.yml | 2 +- .github/workflows/release.yml | 4 +- .github/workflows/smoke.yml | 21 +- Cargo.lock | 32 +- Makefile | 31 +- README.md | 1 - api/src/config.rs | 14 +- builder/Cargo.toml | 3 + builder/src/chunkdict_generator.rs | 317 ++++++++++- builder/src/compact.rs | 36 +- builder/src/core/blob.rs | 4 +- builder/src/core/bootstrap.rs | 12 +- builder/src/core/context.rs | 7 + builder/src/core/layout.rs | 6 +- builder/src/core/overlay.rs | 10 + builder/src/core/prefetch.rs | 30 +- builder/src/core/tree.rs | 53 +- builder/src/core/v5.rs | 10 +- builder/src/core/v6.rs | 9 +- builder/src/directory.rs | 6 +- builder/src/lib.rs | 3 +- builder/src/merge.rs | 2 +- builder/src/stargz.rs | 10 +- builder/src/tarball.rs | 6 +- contrib/ctr-remote/.gitignore | 1 - contrib/ctr-remote/.golangci.yml | 21 - contrib/ctr-remote/Makefile | 29 - contrib/ctr-remote/cmd/main.go | 67 --- contrib/ctr-remote/commands/rpull.go | 103 ---- contrib/ctr-remote/go.mod | 84 --- contrib/ctr-remote/go.sum | 358 ------------- contrib/nydus-overlayfs/.golangci.yml | 5 +- contrib/nydusify/.golangci.yml | 5 +- contrib/nydusify/cmd/nydusify.go | 17 +- contrib/nydusify/cmd/nydusify_test.go | 193 ++++--- contrib/nydusify/go.mod | 4 +- contrib/nydusify/go.sum | 4 +- contrib/nydusify/pkg/build/builder.go | 13 +- contrib/nydusify/pkg/checker/checker.go | 1 + .../nydusify/pkg/checker/rule/filesystem.go | 26 +- contrib/nydusify/pkg/checker/tool/nydusd.go | 4 +- contrib/nydusify/pkg/compactor/compactor.go | 29 +- .../nydusify/pkg/converter/provider/ported.go | 120 +++++ .../pkg/converter/provider/provider.go | 34 ++ contrib/nydusify/pkg/copier/copier.go | 102 +++- contrib/nydusify/pkg/utils/backend.go | 57 ++ contrib/nydusify/pkg/viewer/viewer.go | 20 +- deny.toml | 1 + 
docs/containerd-env-setup.md | 2 +- docs/nydus-fscache.md | 19 +- docs/nydus-image.md | 23 + docs/nydusd.md | 8 +- docs/nydusify.md | 44 +- go.work | 1 - misc/performance/nydusd_config.json | 2 +- misc/performance/snapshotter_config.toml | 2 +- misc/prepare.sh | 16 +- misc/takeover/snapshotter_config.toml | 132 ----- rafs/src/metadata/direct_v6.rs | 3 + rafs/src/metadata/inode.rs | 5 +- rafs/src/metadata/layout/v6.rs | 4 +- rust-toolchain.toml | 2 +- smoke/.golangci.yml | 5 +- smoke/Makefile | 1 + smoke/tests/api_test.go | 66 ++- smoke/tests/benchmark_test.go | 26 +- smoke/tests/blobcache_test.go | 4 + smoke/tests/commit_test.go | 12 +- smoke/tests/hot_upgrade_test.go | 156 ++++++ smoke/tests/image_test.go | 39 +- smoke/tests/main_test.go | 12 +- smoke/tests/native_layer_test.go | 40 +- smoke/tests/overlay_fs_test.go | 1 + smoke/tests/performance_test.go | 4 +- smoke/tests/takeover_test.go | 12 +- smoke/tests/tool/container.go | 24 +- smoke/tests/tool/iterator.go | 31 +- smoke/tests/tool/layer.go | 3 + smoke/tests/tool/nydusd.go | 503 +++++++++--------- smoke/tests/tool/snapshotter.go | 2 +- smoke/tests/tool/test/suite.go | 4 +- smoke/tests/tool/util.go | 2 +- smoke/tests/tool/verify.go | 71 --- src/bin/nydus-image/deduplicate.rs | 26 +- src/bin/nydus-image/inspect.rs | 2 +- src/bin/nydus-image/main.rs | 320 +++++++++-- src/bin/nydus-image/optimize.rs | 0 src/bin/nydus-image/prefetch.rs | 27 + src/bin/nydus-image/stat.rs | 2 +- src/bin/nydus-image/validator.rs | 2 +- src/bin/nydusctl/main.rs | 5 +- storage/src/backend/connection.rs | 159 ++++-- storage/src/cache/worker.rs | 4 +- storage/src/device.rs | 35 ++ storage/src/factory.rs | 66 ++- storage/src/meta/mod.rs | 39 +- storage/src/meta/toc.rs | 1 + tests/bats/Makefile | 1 - tests/bats/compile_ctr_remote.bats | 14 - tests/bats/run_container_with_rafs.bats | 2 +- tests/bats/run_container_with_zran.bats | 2 +- utils/src/digest.rs | 2 +- 102 files changed, 2201 insertions(+), 1716 deletions(-) delete mode 100644 
contrib/ctr-remote/.gitignore delete mode 100644 contrib/ctr-remote/.golangci.yml delete mode 100644 contrib/ctr-remote/Makefile delete mode 100644 contrib/ctr-remote/cmd/main.go delete mode 100644 contrib/ctr-remote/commands/rpull.go delete mode 100644 contrib/ctr-remote/go.mod delete mode 100644 contrib/ctr-remote/go.sum create mode 100644 contrib/nydusify/pkg/utils/backend.go delete mode 100644 misc/takeover/snapshotter_config.toml create mode 100644 smoke/tests/hot_upgrade_test.go delete mode 100644 smoke/tests/tool/verify.go create mode 100644 src/bin/nydus-image/optimize.rs create mode 100644 src/bin/nydus-image/prefetch.rs delete mode 100644 tests/bats/compile_ctr_remote.bats diff --git a/.github/workflows/convert.yml b/.github/workflows/convert.yml index 9e2497bc810..92b326b611e 100644 --- a/.github/workflows/convert.yml +++ b/.github/workflows/convert.yml @@ -26,7 +26,7 @@ jobs: cache-dependency-path: "**/*.sum" - name: Build Contrib run: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sudo sh -s -- -b /usr/local/bin v1.54.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sudo sh -s -- -b /usr/local/bin v1.61.0 make -e DOCKER=false nydusify-release - name: Upload Nydusify uses: actions/upload-artifact@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 178e7ee7040..2f1bfb7b553 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -48,7 +48,7 @@ jobs: configs nydus-macos: - runs-on: macos-11 + runs-on: macos-13 strategy: matrix: arch: [amd64, arm64] @@ -100,7 +100,6 @@ jobs: - name: build contrib go components run: | make -e GOARCH=${{ matrix.arch }} contrib-release - sudo mv contrib/ctr-remote/bin/ctr-remote . sudo mv contrib/nydusify/cmd/nydusify . sudo mv contrib/nydus-overlayfs/bin/nydus-overlayfs . 
- name: store-artifacts @@ -108,7 +107,6 @@ jobs: with: name: nydus-artifacts-linux-${{ matrix.arch }}-contrib path: | - ctr-remote nydusify nydus-overlayfs containerd-nydus-grpc diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml index 5214582de3f..61657cc1c2e 100644 --- a/.github/workflows/smoke.yml +++ b/.github/workflows/smoke.yml @@ -45,7 +45,6 @@ jobs: matrix: include: - path: contrib/nydusify - - path: contrib/ctr-remote - path: contrib/nydus-overlayfs steps: - name: Checkout @@ -56,11 +55,11 @@ jobs: go-version-file: 'go.work' cache: false - name: Lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: - version: v1.56 + version: v1.61 working-directory: ${{ matrix.path }} - args: --timeout=30m --issues-exit-code=0 + args: --timeout=10m --verbose nydus-build: runs-on: ubuntu-latest @@ -95,7 +94,7 @@ jobs: nydusd nydusd-build-macos: - runs-on: macos-11 + runs-on: macos-13 strategy: matrix: arch: [amd64, arm64] @@ -180,7 +179,7 @@ jobs: export NYDUS_NYDUSIFY_$version_export=/usr/bin/nydus-$version/nydusify done - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sudo sh -s -- -b /usr/bin v1.54.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sudo sh -s -- -b /usr/bin v1.61.0 sudo -E make smoke-only nydus-unit-test: @@ -266,15 +265,7 @@ jobs: with: name: contrib-test-coverage-artifact - name: Upload coverage to Codecov - if: ${{ github.event_name == 'pull_request' }} - uses: codecov/codecov-action@v4.0.0 - with: - files: ./codecov.json,./coverage.txt - verbose: true - fail_ci_if_error: true - - name: Upload coverage to Codecov - if: ${{ (github.event_name == 'push'|| github.event_name == 'schedule') }} - uses: codecov/codecov-action@v4.0.0 + uses: codecov/codecov-action@v4 with: files: ./codecov.json,./coverage.txt token: ${{ secrets.CODECOV_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index a5ea303314b..adb81d21a78 100644 
--- a/Cargo.lock +++ b/Cargo.lock @@ -1310,12 +1310,15 @@ dependencies = [ "nydus-rafs", "nydus-storage", "nydus-utils", + "rand", "serde", "serde_json", "sha2", "tar", + "tempfile", "vmm-sys-util", "xattr", + "zstd 0.12.4", ] [[package]] @@ -1500,7 +1503,7 @@ dependencies = [ "thiserror", "tokio", "vmm-sys-util", - "zstd", + "zstd 0.11.2+zstd.1.5.2", ] [[package]] @@ -2780,7 +2783,16 @@ version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ - "zstd-safe", + "zstd-safe 5.0.2+zstd.1.5.2", +] + +[[package]] +name = "zstd" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" +dependencies = [ + "zstd-safe 6.0.6", ] [[package]] @@ -2793,12 +2805,22 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "zstd-safe" +version = "6.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" +dependencies = [ + "libc", + "zstd-sys", +] + [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", - "libc", + "pkg-config", ] diff --git a/Makefile b/Makefile index 87bd8628a89..f36d18ffaa9 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,6 @@ endif endif RUST_TARGET_STATIC ?= $(STATIC_TARGET) -CTR-REMOTE_PATH = contrib/ctr-remote NYDUSIFY_PATH = contrib/nydusify NYDUS-OVERLAYFS_PATH = contrib/nydus-overlayfs @@ -138,23 +137,18 @@ smoke-takeover: smoke: release smoke-only -contrib-build: nydusify ctr-remote nydus-overlayfs +contrib-build: nydusify nydus-overlayfs 
-contrib-release: nydusify-release ctr-remote-release \ - nydus-overlayfs-release +contrib-release: nydusify-release nydus-overlayfs-release -contrib-test: nydusify-test ctr-remote-test \ - nydus-overlayfs-test +contrib-test: nydusify-test nydus-overlayfs-test -contrib-lint: nydusify-lint ctr-remote-lint \ - nydus-overlayfs-lint +contrib-lint: nydusify-lint nydus-overlayfs-lint -contrib-clean: nydusify-clean ctr-remote-clean \ - nydus-overlayfs-clean +contrib-clean: nydusify-clean nydus-overlayfs-clean contrib-install: @sudo mkdir -m 755 -p $(INSTALL_DIR_PREFIX) - @sudo install -m 755 contrib/ctr-remote/bin/ctr-remote $(INSTALL_DIR_PREFIX)/ctr-remote @sudo install -m 755 contrib/nydus-overlayfs/bin/nydus-overlayfs $(INSTALL_DIR_PREFIX)/nydus-overlayfs @sudo install -m 755 contrib/nydusify/cmd/nydusify $(INSTALL_DIR_PREFIX)/nydusify @@ -173,21 +167,6 @@ nydusify-clean: nydusify-lint: $(call build_golang,${NYDUSIFY_PATH},make lint) -ctr-remote: - $(call build_golang,${CTR-REMOTE_PATH},make) - -ctr-remote-release: - $(call build_golang,${CTR-REMOTE_PATH},make release) - -ctr-remote-test: - $(call build_golang,${CTR-REMOTE_PATH},make test) - -ctr-remote-clean: - $(call build_golang,${CTR-REMOTE_PATH},make clean) - -ctr-remote-lint: - $(call build_golang,${CTR-REMOTE_PATH},make lint) - nydus-overlayfs: $(call build_golang,${NYDUS-OVERLAYFS_PATH},make) diff --git a/README.md b/README.md index 3b54a8702a1..d694cca8da5 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,6 @@ The following Benchmarking results demonstrate that Nydus images significantly o | [nydus-image](https://github.com/dragonflyoss/nydus/blob/master/docs/nydus-image.md) | Convert a single layer of OCI format container image into a nydus format container image generating meta part file and data part file respectively | | [nydusify](https://github.com/dragonflyoss/nydus/blob/master/docs/nydusify.md) | It pulls OCI image down and unpack it, invokes `nydus-image create` to convert image and then pushes the 
converted image back to registry and data storage | | [nydusctl](https://github.com/dragonflyoss/nydus/blob/master/docs/nydus-image.md) | Nydusd CLI client (`nydus-image inspect`), query daemon's working status/metrics and configure it | -| [ctr-remote](https://github.com/dragonflyoss/nydus/tree/master/contrib/ctr-remote) | An enhanced `containerd` CLI tool enable nydus support with `containerd` ctr | | [nydus-docker-graphdriver](https://github.com/nydusaccelerator/docker-nydus-graphdriver) | [Experimental] Works as a `docker` remote graph driver to control how images and containers are stored and managed | | [nydus-overlayfs](https://github.com/dragonflyoss/nydus/tree/master/contrib/nydus-overlayfs) | `Containerd` mount helper to invoke overlayfs mount with tweaking mount options a bit. So nydus prerequisites can be passed to vm-based runtime | | [nydus-backend-proxy](./contrib/nydus-backend-proxy/README.md) | A simple HTTP server to serve local directory as a blob backend for nydusd | diff --git a/api/src/config.rs b/api/src/config.rs index b2f93bca75e..f9a69239fc2 100644 --- a/api/src/config.rs +++ b/api/src/config.rs @@ -907,6 +907,9 @@ pub struct ProxyConfig { /// Replace URL to http to request source registry with proxy, and allow fallback to https if the proxy is unhealthy. #[serde(default)] pub use_http: bool, + /// Elapsed time to pause proxy health check when the request is inactive, in seconds. + #[serde(default = "default_check_pause_elapsed")] + pub check_pause_elapsed: u64, } impl Default for ProxyConfig { @@ -917,6 +920,7 @@ impl Default for ProxyConfig { fallback: true, check_interval: 5, use_http: false, + check_pause_elapsed: 300, } } } @@ -938,6 +942,9 @@ pub struct MirrorConfig { /// Maximum number of failures before marking a mirror as unusable. #[serde(default = "default_failure_limit")] pub failure_limit: u8, + /// Elapsed time to pause mirror health check when the request is inactive, in seconds. 
+ #[serde(default = "default_check_pause_elapsed")] + pub health_check_pause_elapsed: u64, } impl Default for MirrorConfig { @@ -948,6 +955,7 @@ impl Default for MirrorConfig { health_check_interval: 5, failure_limit: 5, ping_url: String::new(), + health_check_pause_elapsed: 300, } } } @@ -1191,6 +1199,10 @@ fn default_check_interval() -> u64 { 5 } +fn default_check_pause_elapsed() -> u64 { + 300 +} + fn default_failure_limit() -> u8 { 5 } @@ -2115,7 +2127,7 @@ mod tests { "type": "blobcache", "compressed": true, "config": { - "work_dir": "/var/lib/containerd-nydus/cache", + "work_dir": "/var/lib/containerd/io.containerd.snapshotter.v1.nydus/cache", "disable_indexed_map": false } } diff --git a/builder/Cargo.toml b/builder/Cargo.toml index fa76a36f947..a1f95b3dab4 100644 --- a/builder/Cargo.toml +++ b/builder/Cargo.toml @@ -22,6 +22,9 @@ sha2 = "0.10.2" tar = "0.4.40" vmm-sys-util = "0.11.0" xattr = "1.0.1" +rand = "0.8.5" +zstd = "0.12" +tempfile = "3.2" nydus-api = { version = "0.3", path = "../api" } nydus-rafs = { version = "0.3", path = "../rafs" } diff --git a/builder/src/chunkdict_generator.rs b/builder/src/chunkdict_generator.rs index 4f7ab105d2b..f8dc8a16786 100644 --- a/builder/src/chunkdict_generator.rs +++ b/builder/src/chunkdict_generator.rs @@ -16,20 +16,36 @@ use super::core::node::{ChunkSource, NodeInfo}; use super::{BlobManager, Bootstrap, BootstrapManager, BuildContext, BuildOutput, Tree}; +use crate::core::blob::Blob; use crate::core::node::Node; -use crate::NodeChunk; -use anyhow::Result; +use crate::OsString; +use crate::Path; +use crate::TreeNode; +use crate::{ArtifactWriter, BlobContext, NodeChunk}; +use anyhow::{Ok, Result}; use nydus_rafs::metadata::chunk::ChunkWrapper; use nydus_rafs::metadata::inode::InodeWrapper; -use nydus_rafs::metadata::layout::RafsXAttrs; +use nydus_rafs::metadata::layout::v6::RafsV6BlobTable; +use nydus_rafs::metadata::layout::{RafsBlobTable, RafsXAttrs}; +use nydus_storage::device::{BlobFeatures, BlobInfo}; +use 
nydus_storage::meta::BatchContextGenerator; use nydus_storage::meta::BlobChunkInfoV1Ondisk; +use nydus_utils::compress; use nydus_utils::compress::Algorithm; use nydus_utils::digest::RafsDigest; -use std::ffi::OsString; +use sha2::digest::Update; + +use crate::finalize_blob; +use crate::Artifact; +use core::panic; +use std::fs::File; +use std::io::Read; +use std::io::Seek; use std::mem::size_of; use std::path::PathBuf; use std::str::FromStr; use std::sync::Arc; +use std::u32; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct ChunkdictChunkInfo { @@ -53,9 +69,91 @@ pub struct ChunkdictBlobInfo { pub blob_meta_ci_offset: u64, } +// TODO(daiyongxuan): implement Read Trait for BlobNodeReader +#[derive(Debug)] +#[allow(dead_code)] +pub struct BlobNodeReader { + blob: Arc, + start: u64, + end: u64, + position: u64, +} + +impl BlobNodeReader { + pub fn new(blob: Arc, start: u64, end: u64) -> Result { + let mut reader = BlobNodeReader { + blob, + start, + end, + position: start, + }; + reader.blob.seek(std::io::SeekFrom::Start(start))?; + Ok(reader) + } +} + +impl Read for BlobNodeReader { + fn read(&mut self, buf: &mut [u8]) -> Result { + // EOF + if self.position > self.end { + return std::io::Result::Ok(0); + } + let max_read = (self.end - self.position) as usize; + let to_read = std::cmp::min(buf.len(), max_read); + let bytes_read = self.blob.read(&mut buf[..to_read])?; + self.position += bytes_read as u64; + std::io::Result::Ok(bytes_read) + } +} + /// Struct to generate chunkdict RAFS bootstrap. 
pub struct Generator {} +#[allow(dead_code)] +struct BlobIdAndCompressor { + pub blob_id: String, + pub compressor: compress::Algorithm, +} + +struct PrefetchBlobState { + blob_info: BlobInfo, + blob_ctx: BlobContext, + blob_writer: Box, + chunk_count: u32, +} + +impl PrefetchBlobState { + fn new(ctx: &BuildContext, blob_layer_num: u32, blobs_dir_path: &Path) -> Result { + let mut blob_info = BlobInfo::new( + blob_layer_num, + String::from("Prefetch-blob"), + 0, + 0, + ctx.chunk_size, + u32::MAX, + BlobFeatures::ALIGNED + | BlobFeatures::INLINED_CHUNK_DIGEST + | BlobFeatures::HAS_TAR_HEADER + | BlobFeatures::HAS_TOC + | BlobFeatures::CAP_TAR_TOC, + ); + blob_info.set_compressor(Algorithm::Zstd); + let mut blob_ctx = BlobContext::from(ctx, &blob_info, ChunkSource::Build)?; + blob_ctx.chunk_count = 0; + blob_ctx.blob_meta_info_enabled = true; + let blob_writer = ArtifactWriter::new(crate::ArtifactStorage::SingleFile( + blobs_dir_path.join("Prefetch-blob"), + )) + .map(|writer| Box::new(writer) as Box)?; + Ok(Self { + blob_info, + blob_ctx, + blob_writer, + chunk_count: 0, + }) + } +} + impl Generator { // Generate chunkdict RAFS bootstrap. pub fn generate( @@ -90,10 +188,215 @@ impl Generator { BuildOutput::new(blob_mgr, &bootstrap_mgr.bootstrap_storage) } + /// Generate a new bootstrap for prefetch. 
+ pub fn generate_prefetch( + tree: &mut Tree, + ctx: &mut BuildContext, + bootstrap_mgr: &mut BootstrapManager, + blobtable: &mut RafsV6BlobTable, + blobs_dir_path: PathBuf, + prefetch_nodes: Vec, + ) -> Result<()> { + // create a new blob for prefetch layer + let blob_layer_num = blobtable.entries.len(); + + let mut blob_state = + PrefetchBlobState::new(&ctx, blob_layer_num as u32, &blobs_dir_path).unwrap(); + let mut batch = BatchContextGenerator::new(4096).unwrap(); + for node in &prefetch_nodes { + Self::process_prefetch_node( + tree, + &node, + &mut blob_state, + &mut batch, + blobtable, + &blobs_dir_path, + ); + } + + { + let prefetch_blob_ctx = &blob_state.blob_ctx; + let prefetch_blob_info = &mut blob_state.blob_info; + + Self::finalize_blobinfo_meta_data( + prefetch_blob_info, + blob_state.chunk_count as usize, + prefetch_blob_ctx.current_compressed_offset as usize, + prefetch_blob_ctx.current_uncompressed_offset as usize, + ); + } + + Self::finalize_blob(ctx, blobtable, &mut blob_state); + + debug!("prefetch blob id: {}", ctx.blob_id); + + Self::build_and_dump_bootstrap(tree, ctx, bootstrap_mgr, blobtable)?; + Ok(()) + } + + fn build_and_dump_bootstrap( + tree: &mut Tree, + ctx: &mut BuildContext, + bootstrap_mgr: &mut BootstrapManager, + blobtable: &mut RafsV6BlobTable, + ) -> Result<()> { + let mut bootstrap_ctx = bootstrap_mgr.create_ctx()?; + let mut bootstrap = Bootstrap::new(tree.clone())?; + + // Build bootstrap + bootstrap.build(ctx, &mut bootstrap_ctx)?; + + // Verify and update prefetch blob + assert!( + blobtable + .entries + .iter() + .filter(|blob| blob.blob_id() == "Prefetch-blob") + .count() + == 1, + "Expected exactly one Prefetch-blob" + ); + + // Rewrite prefetch blob id + blobtable + .entries + .iter_mut() + .filter(|blob| blob.blob_id() == "Prefetch-blob") + .for_each(|blob| { + let mut info = (**blob).clone(); + info.set_blob_id(ctx.blob_id.clone()); + *blob = Arc::new(info); + }); + + // Dump bootstrap + let blob_table_withprefetch 
= RafsBlobTable::V6(blobtable.clone()); + bootstrap.dump( + ctx, + &mut bootstrap_mgr.bootstrap_storage, + &mut bootstrap_ctx, + &blob_table_withprefetch, + )?; + + Ok(()) + } + + fn finalize_blob( + ctx: &mut BuildContext, + blobtable: &mut RafsV6BlobTable, + blob_state: &mut PrefetchBlobState, + ) { + blobtable.entries.push(blob_state.blob_info.clone().into()); + let mut blob_mgr = BlobManager::new(nydus_utils::digest::Algorithm::Blake3); + blob_mgr.add_blob(blob_state.blob_ctx.clone()); + blob_mgr.set_current_blob_index(0); + Blob::finalize_blob_data(&ctx, &mut blob_mgr, blob_state.blob_writer.as_mut()).unwrap(); + if let Some((_, blob_ctx)) = blob_mgr.get_current_blob() { + Blob::dump_meta_data(&ctx, blob_ctx, blob_state.blob_writer.as_mut()).unwrap(); + } else { + panic!(); + } + ctx.blob_id = String::from(""); + blob_mgr.get_current_blob().unwrap().1.blob_id = String::from(""); + finalize_blob(ctx, &mut blob_mgr, blob_state.blob_writer.as_mut()).unwrap(); + } + + fn finalize_blobinfo_meta_data( + blobinfo: &mut BlobInfo, + chunk_count: usize, + compressed_offset: usize, + umcompressed_offset: usize, + ) { + blobinfo.set_meta_ci_offset(0x200 + umcompressed_offset); + blobinfo.set_chunk_count(chunk_count); + blobinfo.set_compressed_size(compressed_offset); + blobinfo.set_uncompressed_size(umcompressed_offset); + } + + fn process_prefetch_node( + tree: &mut Tree, + node: &TreeNode, + prefetch_state: &mut PrefetchBlobState, + batch: &mut BatchContextGenerator, + blobtable: &RafsV6BlobTable, + blobs_dir_path: &Path, + ) { + let tree_node = tree + .get_node_mut(&node.borrow().path()) + .unwrap() + .node + .as_ref(); + let blob_id = { + let child = tree_node.borrow(); + child + .chunks + .first() + .and_then(|chunk| blobtable.entries.get(chunk.inner.blob_index() as usize)) + .map(|entry| entry.blob_id()) + .unwrap() + }; + let blob_file = Arc::new(File::open(blobs_dir_path.join(blob_id)).unwrap()); + { + let mut child = tree_node.borrow_mut(); + child.layer_idx = 
prefetch_state.blob_info.blob_index() as u16; + } + + { + let mut child = tree_node.borrow_mut(); + let chunks: &mut Vec = child.chunks.as_mut(); + let blob_ctx = &mut prefetch_state.blob_ctx; + let blob_info = &mut prefetch_state.blob_info; + let encrypted = blob_ctx.blob_compressor != compress::Algorithm::None; + + for chunk in chunks { + let inner = Arc::make_mut(&mut chunk.inner); + let mut reader = BlobNodeReader::new( + Arc::clone(&blob_file), + inner.compressed_offset(), + inner.compressed_offset() + inner.compressed_size() as u64, + ) + .unwrap(); + let buf = &mut vec![0u8; inner.compressed_size() as usize]; + reader.read_exact(buf).unwrap(); + prefetch_state.blob_writer.write_all(buf).unwrap(); + let info = batch + .generate_chunk_info( + blob_ctx.current_compressed_offset, + blob_ctx.current_uncompressed_offset, + inner.uncompressed_size(), + encrypted, + ) + .unwrap(); + inner.set_blob_index(blob_info.blob_index()); + inner.set_index(prefetch_state.chunk_count); + prefetch_state.chunk_count += 1; + inner.set_compressed_offset(blob_ctx.current_compressed_offset); + inner.set_uncompressed_offset(blob_ctx.current_uncompressed_offset); + let aligned_d_size: u64 = + nydus_utils::try_round_up_4k(inner.uncompressed_size()).unwrap(); + blob_ctx.compressed_blob_size += inner.compressed_size() as u64; + blob_ctx.uncompressed_blob_size += aligned_d_size; + blob_ctx.current_compressed_offset += inner.compressed_size() as u64; + blob_ctx.current_uncompressed_offset += aligned_d_size; + blob_ctx.add_chunk_meta_info(&inner, Some(info)).unwrap(); + blob_ctx.blob_hash.update(&buf); + + blob_info.set_meta_ci_compressed_size( + (blob_info.meta_ci_compressed_size() + + size_of::() as u64) as usize, + ); + + blob_info.set_meta_ci_uncompressed_size( + (blob_info.meta_ci_uncompressed_size() + + size_of::() as u64) as usize, + ); + } + } + } + /// Validate tree. 
fn validate_tree(tree: &Tree) -> Result<()> { let pre = &mut |t: &Tree| -> Result<()> { - let node = t.lock_node(); + let node = t.borrow_mut_node(); debug!("chunkdict tree: "); debug!("inode: {}", node); for chunk in &node.chunks { @@ -208,7 +511,7 @@ impl Generator { node.inode.set_child_count(node.chunks.len() as u32); let child = Tree::new(node); child - .lock_node() + .borrow_mut_node() .v5_set_dir_size(ctx.fs_version, &child.children); Ok(child) } @@ -278,3 +581,5 @@ impl Generator { Ok(()) } } + +// Read the blob, get the chunk, fix dump node chunk function, Blob::dump generate a blob diff --git a/builder/src/compact.rs b/builder/src/compact.rs index 3ff27eeac69..4131b3f25ee 100644 --- a/builder/src/compact.rs +++ b/builder/src/compact.rs @@ -48,22 +48,30 @@ pub struct Config { /// available value: 0-99, 0 means disable /// hint: it's better to disable this option when there are some shared blobs /// for example: build-cache - #[serde(default)] - min_used_ratio: u8, + pub min_used_ratio: u8, /// we compact blobs whose size are less than compact_blob_size - #[serde(default = "default_compact_blob_size")] - compact_blob_size: usize, + pub compact_blob_size: usize, /// size of compacted blobs should not be larger than max_compact_size - #[serde(default = "default_max_compact_size")] - max_compact_size: usize, + pub max_compact_size: usize, /// if number of blobs >= layers_to_compact, do compact /// 0 means always try compact - #[serde(default)] - layers_to_compact: usize, + pub layers_to_compact: usize, /// local blobs dir, may haven't upload to backend yet /// what's more, new blobs will output to this dir /// name of blob file should be equal to blob_id - blobs_dir: String, + pub blobs_dir: String, +} + +impl Default for Config { + fn default() -> Self { + Self { + min_used_ratio: 0, + compact_blob_size: default_compact_blob_size(), + max_compact_size: default_max_compact_size(), + layers_to_compact: 0, + blobs_dir: String::new(), + } + } } #[derive(Debug, 
Clone, Copy, Hash, PartialEq, Eq)] @@ -79,7 +87,7 @@ impl ChunkKey { match c { ChunkWrapper::V5(_) => Self::Digest(*c.id()), ChunkWrapper::V6(_) => Self::Offset(c.blob_index(), c.compressed_offset()), - ChunkWrapper::Ref(_) => unimplemented!("unsupport ChunkWrapper::Ref(c)"), + ChunkWrapper::Ref(_) => Self::Digest(*c.id()), } } } @@ -181,7 +189,6 @@ impl ChunkSet { Blob::dump_meta_data(build_ctx, new_blob_ctx, &mut blob_writer)?; let blob_id = new_blob_ctx.blob_id(); blob_writer.finalize(blob_id)?; - Ok(changed_chunks) } } @@ -304,7 +311,7 @@ impl BlobCompactor { let chunk_dict = self.get_chunk_dict(); let cb = &mut |n: &Tree| -> Result<()> { - let mut node = n.lock_node(); + let mut node = n.borrow_mut_node(); for chunk_idx in 0..node.chunks.len() { let chunk = &mut node.chunks[chunk_idx]; let chunk_key = ChunkKey::from(&chunk.inner); @@ -367,7 +374,7 @@ impl BlobCompactor { fn apply_blob_move(&mut self, from: u32, to: u32) -> Result<()> { if let Some(idx_list) = self.b2nodes.get(&from) { for (n, chunk_idx) in idx_list.iter() { - let mut node = n.lock().unwrap(); + let mut node = n.borrow_mut(); ensure!( node.chunks[*chunk_idx].inner.blob_index() == from, "unexpected blob_index of chunk" @@ -381,7 +388,7 @@ impl BlobCompactor { fn apply_chunk_change(&mut self, c: &(ChunkWrapper, ChunkWrapper)) -> Result<()> { if let Some(chunks) = self.c2nodes.get(&ChunkKey::from(&c.0)) { for (n, chunk_idx) in chunks.iter() { - let mut node = n.lock().unwrap(); + let mut node = n.borrow_mut(); let chunk = &mut node.chunks[*chunk_idx]; let mut chunk_inner = chunk.inner.deref().clone(); apply_chunk_change(&c.1, &mut chunk_inner)?; @@ -790,7 +797,6 @@ mod tests { } #[test] - #[should_panic = "not implemented: unsupport ChunkWrapper::Ref(c)"] fn test_chunk_key_from() { let cw = ChunkWrapper::new(RafsVersion::V5); matches!(ChunkKey::from(&cw), ChunkKey::Digest(_)); diff --git a/builder/src/core/blob.rs b/builder/src/core/blob.rs index cc8e4d56737..a2dbecb47ec 100644 --- 
a/builder/src/core/blob.rs +++ b/builder/src/core/blob.rs @@ -33,7 +33,7 @@ impl Blob { let mut chunk_data_buf = vec![0u8; RAFS_MAX_CHUNK_SIZE as usize]; let (inodes, prefetch_entries) = BlobLayout::layout_blob_simple(&ctx.prefetch)?; for (idx, node) in inodes.iter().enumerate() { - let mut node = node.lock().unwrap(); + let mut node = node.borrow_mut(); let size = node .dump_node_data(ctx, blob_mgr, blob_writer, &mut chunk_data_buf) .context("failed to dump blob chunks")?; @@ -94,7 +94,7 @@ impl Blob { Ok(()) } - fn finalize_blob_data( + pub fn finalize_blob_data( ctx: &BuildContext, blob_mgr: &mut BlobManager, blob_writer: &mut dyn Artifact, diff --git a/builder/src/core/bootstrap.rs b/builder/src/core/bootstrap.rs index 22805bd3c03..ef3eee8cce3 100644 --- a/builder/src/core/bootstrap.rs +++ b/builder/src/core/bootstrap.rs @@ -30,7 +30,7 @@ impl Bootstrap { bootstrap_ctx: &mut BootstrapContext, ) -> Result<()> { // Special handling of the root inode - let mut root_node = self.tree.lock_node(); + let mut root_node = self.tree.borrow_mut_node(); assert!(root_node.is_dir()); let index = bootstrap_ctx.generate_next_ino(); // 0 is reserved and 1 also matches RAFS_V5_ROOT_INODE. 
@@ -50,7 +50,7 @@ impl Bootstrap { Self::build_rafs(ctx, bootstrap_ctx, &mut self.tree)?; if ctx.fs_version.is_v6() { - let root_offset = self.tree.node.lock().unwrap().v6_offset; + let root_offset = self.tree.node.borrow().v6_offset; Self::v6_update_dirents(&self.tree, root_offset); } @@ -90,7 +90,7 @@ impl Bootstrap { tree: &mut Tree, ) -> Result<()> { let parent_node = tree.node.clone(); - let mut parent_node = parent_node.lock().unwrap(); + let mut parent_node = parent_node.borrow_mut(); let parent_ino = parent_node.inode.ino(); let block_size = ctx.v6_block_size(); @@ -113,7 +113,7 @@ impl Bootstrap { let mut dirs: Vec<&mut Tree> = Vec::new(); for child in tree.children.iter_mut() { let child_node = child.node.clone(); - let mut child_node = child_node.lock().unwrap(); + let mut child_node = child_node.borrow_mut(); let index = bootstrap_ctx.generate_next_ino(); child_node.index = index; if ctx.fs_version.is_v5() { @@ -134,11 +134,11 @@ impl Bootstrap { let nlink = indexes.len() as u32 + 1; // Update nlink for previous hardlink inodes for n in indexes.iter() { - n.lock().unwrap().inode.set_nlink(nlink); + n.borrow_mut().inode.set_nlink(nlink); } let (first_ino, first_offset) = { - let first_node = indexes[0].lock().unwrap(); + let first_node = indexes[0].borrow_mut(); (first_node.inode.ino(), first_node.v6_offset) }; // set offset for rafs v6 hardlinks diff --git a/builder/src/core/context.rs b/builder/src/core/context.rs index eb7a77728c8..fcba2166161 100644 --- a/builder/src/core/context.rs +++ b/builder/src/core/context.rs @@ -13,6 +13,7 @@ use std::io::{BufWriter, Cursor, Read, Seek, Write}; use std::mem::size_of; use std::os::unix::fs::FileTypeExt; use std::path::{Display, Path, PathBuf}; +use std::result::Result::Ok; use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::{fmt, fs}; @@ -459,6 +460,7 @@ impl BlobCacheGenerator { } } +#[derive(Clone)] /// BlobContext is used to hold the blob information of a layer during build. 
pub struct BlobContext { /// Blob id (user specified or sha256(blob)). @@ -898,6 +900,11 @@ impl BlobManager { } } + /// Set current blob index + pub fn set_current_blob_index(&mut self, index: usize) { + self.current_blob_index = Some(index as u32) + } + fn new_blob_ctx(ctx: &BuildContext) -> Result { let (cipher_object, cipher_ctx) = match ctx.cipher { crypt::Algorithm::None => (Default::default(), None), diff --git a/builder/src/core/layout.rs b/builder/src/core/layout.rs index 9a3ef83ddbe..c3271229a64 100644 --- a/builder/src/core/layout.rs +++ b/builder/src/core/layout.rs @@ -16,11 +16,11 @@ impl BlobLayout { let (pre, non_pre) = prefetch.get_file_nodes(); let mut inodes: Vec = pre .into_iter() - .filter(|x| Self::should_dump_node(x.lock().unwrap().deref())) + .filter(|x| Self::should_dump_node(x.borrow().deref())) .collect(); let mut non_prefetch_inodes: Vec = non_pre .into_iter() - .filter(|x| Self::should_dump_node(x.lock().unwrap().deref())) + .filter(|x| Self::should_dump_node(x.borrow().deref())) .collect(); let prefetch_entries = inodes.len(); @@ -53,7 +53,7 @@ mod tests { let tree = Tree::new(node1); let mut prefetch = Prefetch::default(); - prefetch.insert(&tree.node, tree.node.lock().unwrap().deref()); + prefetch.insert(&tree.node, tree.node.borrow().deref()); let (inodes, prefetch_entries) = BlobLayout::layout_blob_simple(&prefetch).unwrap(); assert_eq!(inodes.len(), 1); diff --git a/builder/src/core/overlay.rs b/builder/src/core/overlay.rs index 7626ddd7b1b..a64ebe6da04 100644 --- a/builder/src/core/overlay.rs +++ b/builder/src/core/overlay.rs @@ -71,6 +71,16 @@ pub enum WhiteoutSpec { None, } +impl fmt::Display for WhiteoutSpec { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + WhiteoutSpec::Oci => write!(f, "OCI"), + WhiteoutSpec::Overlayfs => write!(f, "Overlayfs"), + WhiteoutSpec::None => write!(f, "None"), + } + } +} + impl Default for WhiteoutSpec { fn default() -> Self { Self::Oci diff --git 
a/builder/src/core/prefetch.rs b/builder/src/core/prefetch.rs index b5695e05686..ec18b1fff7f 100644 --- a/builder/src/core/prefetch.rs +++ b/builder/src/core/prefetch.rs @@ -213,7 +213,7 @@ impl Prefetch { if self.policy == PrefetchPolicy::Fs { let mut prefetch_table = RafsV5PrefetchTable::new(); for i in self.patterns.values().filter_map(|v| v.clone()) { - let node = i.lock().unwrap(); + let node = i.borrow_mut(); assert!(node.inode.ino() < u32::MAX as u64); prefetch_table.add_entry(node.inode.ino() as u32); } @@ -228,7 +228,7 @@ impl Prefetch { if self.policy == PrefetchPolicy::Fs { let mut prefetch_table = RafsV6PrefetchTable::new(); for i in self.patterns.values().filter_map(|v| v.clone()) { - let node = i.lock().unwrap(); + let node = i.borrow_mut(); let ino = node.inode.ino(); debug_assert!(ino > 0); let nid = calculate_nid(node.v6_offset, meta_addr); @@ -270,7 +270,7 @@ mod tests { use super::*; use crate::core::node::NodeInfo; use nydus_rafs::metadata::{inode::InodeWrapper, RafsVersion}; - use std::sync::Mutex; + use std::cell::RefCell; #[test] fn test_generate_pattern() { @@ -332,29 +332,29 @@ mod tests { let mut info1 = info.clone(); info1.target = PathBuf::from("/f"); let node1 = Node::new(inode.clone(), info1, 1); - let node1 = TreeNode::new(Mutex::from(node1)); - prefetch.insert(&node1, &node1.lock().unwrap()); + let node1 = TreeNode::new(RefCell::from(node1)); + prefetch.insert(&node1, &node1.borrow()); let inode2 = inode.clone(); let mut info2 = info.clone(); info2.target = PathBuf::from("/a/b"); let node2 = Node::new(inode2, info2, 1); - let node2 = TreeNode::new(Mutex::from(node2)); - prefetch.insert(&node2, &node2.lock().unwrap()); + let node2 = TreeNode::new(RefCell::from(node2)); + prefetch.insert(&node2, &node2.borrow()); let inode3 = inode.clone(); let mut info3 = info.clone(); info3.target = PathBuf::from("/h/i/j"); let node3 = Node::new(inode3, info3, 1); - let node3 = TreeNode::new(Mutex::from(node3)); - prefetch.insert(&node3, 
&node3.lock().unwrap()); + let node3 = TreeNode::new(RefCell::from(node3)); + prefetch.insert(&node3, &node3.borrow()); let inode4 = inode.clone(); let mut info4 = info.clone(); info4.target = PathBuf::from("/z"); let node4 = Node::new(inode4, info4, 1); - let node4 = TreeNode::new(Mutex::from(node4)); - prefetch.insert(&node4, &node4.lock().unwrap()); + let node4 = TreeNode::new(RefCell::from(node4)); + prefetch.insert(&node4, &node4.borrow()); let inode5 = inode.clone(); inode.set_mode(0o755 | libc::S_IFDIR as u32); @@ -362,8 +362,8 @@ mod tests { let mut info5 = info; info5.target = PathBuf::from("/a/b/d"); let node5 = Node::new(inode5, info5, 1); - let node5 = TreeNode::new(Mutex::from(node5)); - prefetch.insert(&node5, &node5.lock().unwrap()); + let node5 = TreeNode::new(RefCell::from(node5)); + prefetch.insert(&node5, &node5.borrow()); // node1, node2 assert_eq!(prefetch.fs_prefetch_rule_count(), 2); @@ -373,12 +373,12 @@ mod tests { assert_eq!(non_pre.len(), 1); let pre_str: Vec = pre .iter() - .map(|n| n.lock().unwrap().target().to_str().unwrap().to_owned()) + .map(|n| n.borrow().target().to_str().unwrap().to_owned()) .collect(); assert_eq!(pre_str, vec!["/a/b", "/a/b/d", "/f", "/h/i/j"]); let non_pre_str: Vec = non_pre .iter() - .map(|n| n.lock().unwrap().target().to_str().unwrap().to_owned()) + .map(|n| n.borrow().target().to_str().unwrap().to_owned()) .collect(); assert_eq!(non_pre_str, vec!["/z"]); diff --git a/builder/src/core/tree.rs b/builder/src/core/tree.rs index d701c2bbd9f..6545cc3c448 100644 --- a/builder/src/core/tree.rs +++ b/builder/src/core/tree.rs @@ -16,11 +16,12 @@ //! lower tree (MetadataTree). //! - Traverse the merged tree (OverlayTree) to dump bootstrap and data blobs. 
+use std::cell::{RefCell, RefMut}; use std::ffi::OsString; use std::os::unix::ffi::OsStrExt; use std::path::{Path, PathBuf}; use std::rc::Rc; -use std::sync::{Arc, Mutex, MutexGuard}; +use std::sync::Arc; use anyhow::{bail, Result}; use nydus_rafs::metadata::chunk::ChunkWrapper; @@ -35,7 +36,7 @@ use crate::core::overlay::OVERLAYFS_WHITEOUT_OPAQUE; use crate::{BuildContext, ChunkDict}; /// Type alias for tree internal node. -pub type TreeNode = Rc>; +pub type TreeNode = Rc>; /// An in-memory tree structure to maintain information and topology of filesystem nodes. #[derive(Clone)] @@ -53,7 +54,7 @@ impl Tree { pub fn new(node: Node) -> Self { let name = node.name().as_bytes().to_vec(); Tree { - node: Rc::new(Mutex::new(node)), + node: Rc::new(RefCell::new(node)), name, children: Vec::new(), } @@ -82,12 +83,12 @@ impl Tree { /// Set `Node` associated with the tree node. pub fn set_node(&mut self, node: Node) { - self.node = Rc::new(Mutex::new(node)); + self.node.replace(node); } - /// Get mutex guard to access the associated `Node` object. - pub fn lock_node(&self) -> MutexGuard { - self.node.lock().unwrap() + /// Get mutably borrowed value to access the associated `Node` object. + pub fn borrow_mut_node(&self) -> RefMut<'_, Node> { + self.node.as_ref().borrow_mut() } /// Walk all nodes in DFS mode. @@ -133,7 +134,7 @@ impl Tree { let mut dirs = Vec::with_capacity(32); for child in &self.children { cb(child)?; - if child.lock_node().is_dir() { + if child.borrow_mut_node().is_dir() { dirs.push(child); } } @@ -173,13 +174,37 @@ impl Tree { Some(tree) } + /// Get the mutable tree node corresponding to the path. 
+ pub fn get_node_mut(&mut self, path: &Path) -> Option<&mut Tree> { + let target_vec = Node::generate_target_vec(path); + assert!(!target_vec.is_empty()); + let mut tree = self; + + let last_idx = target_vec.len() - 1; + for name in &target_vec[1..last_idx] { + match tree.get_child_idx(name.as_bytes()) { + Some(idx) => tree = &mut tree.children[idx], + None => return None, + } + } + + if let Some(last_name) = target_vec.last() { + match tree.get_child_idx(last_name.as_bytes()) { + Some(idx) => Some(&mut tree.children[idx]), + None => None, + } + } else { + Some(tree) + } + } + /// Merge the upper layer tree into the lower layer tree, applying whiteout rules. pub fn merge_overaly(&mut self, ctx: &BuildContext, upper: Tree) -> Result<()> { assert_eq!(self.name, "/".as_bytes()); assert_eq!(upper.name, "/".as_bytes()); // Handle the root node. - upper.lock_node().overlay = Overlay::UpperModification; + upper.borrow_mut_node().overlay = Overlay::UpperModification; self.node = upper.node.clone(); self.merge_children(ctx, &upper)?; lazy_drop(upper); @@ -191,7 +216,7 @@ impl Tree { // Handle whiteout nodes in the first round, and handle other nodes in the second round. 
let mut modified = Vec::with_capacity(upper.children.len()); for u in upper.children.iter() { - let mut u_node = u.lock_node(); + let mut u_node = u.borrow_mut_node(); match u_node.whiteout_type(ctx.whiteout_spec) { Some(WhiteoutType::OciRemoval) => { if let Some(origin_name) = u_node.origin_name(WhiteoutType::OciRemoval) { @@ -221,7 +246,7 @@ impl Tree { let mut dirs = Vec::new(); for u in modified { - let mut u_node = u.lock_node(); + let mut u_node = u.borrow_mut_node(); if let Some(idx) = self.get_child_idx(&u.name) { u_node.overlay = Overlay::UpperModification; self.children[idx].node = u.node.clone(); @@ -300,7 +325,7 @@ impl<'a> MetadataTreeBuilder<'a> { children.sort_unstable_by(|a, b| a.name.cmp(&b.name)); for child in children.iter_mut() { - let child_node = child.lock_node(); + let child_node = child.borrow_mut_node(); if child_node.is_dir() { let child_ino = child_node.inode.ino(); drop(child_node); @@ -404,7 +429,7 @@ mod tests { .unwrap(); let mut tree = Tree::new(node); assert_eq!(tree.name, tmpfile.as_path().file_name().unwrap().as_bytes()); - let node1 = tree.lock_node(); + let node1 = tree.borrow_mut_node(); drop(node1); let tmpfile = TempFile::new_in(tmpdir.as_path()).unwrap(); @@ -419,7 +444,7 @@ mod tests { ) .unwrap(); tree.set_node(node); - let node2 = tree.lock_node(); + let node2 = tree.borrow_mut_node(); assert_eq!(node2.name(), tmpfile.as_path().file_name().unwrap()); } diff --git a/builder/src/core/v5.rs b/builder/src/core/v5.rs index 78a6b78c212..11d5dc13376 100644 --- a/builder/src/core/v5.rs +++ b/builder/src/core/v5.rs @@ -92,7 +92,7 @@ impl Node { let mut d_size = 0u64; for child in children.iter() { - d_size += child.lock_node().inode.name_size() as u64 + RAFS_V5_VIRTUAL_ENTRY_SIZE; + d_size += child.borrow_mut_node().inode.name_size() as u64 + RAFS_V5_VIRTUAL_ENTRY_SIZE; } if d_size == 0 { self.inode.set_size(4096); @@ -124,13 +124,13 @@ impl Node { impl Bootstrap { /// Calculate inode digest for directory. 
fn v5_digest_node(&self, ctx: &mut BuildContext, tree: &Tree) { - let mut node = tree.lock_node(); + let mut node = tree.borrow_mut_node(); // We have set digest for non-directory inode in the previous dump_blob workflow. if node.is_dir() { let mut inode_hasher = RafsDigest::hasher(ctx.digester); for child in tree.children.iter() { - let child = child.lock_node(); + let child = child.borrow_mut_node(); inode_hasher.digest_update(child.inode.digest().as_ref()); } node.inode.set_digest(inode_hasher.digest_finalize()); @@ -200,7 +200,7 @@ impl Bootstrap { let mut has_xattr = false; self.tree.walk_dfs_pre(&mut |t| { - let node = t.lock_node(); + let node = t.borrow_mut_node(); inode_table.set(node.index, inode_offset)?; // Add inode size inode_offset += node.inode.inode_size() as u32; @@ -253,7 +253,7 @@ impl Bootstrap { timing_tracer!( { self.tree.walk_dfs_pre(&mut |t| { - t.lock_node() + t.borrow_mut_node() .dump_bootstrap_v5(ctx, bootstrap_ctx.writer.as_mut()) .context("failed to dump bootstrap") }) diff --git a/builder/src/core/v6.rs b/builder/src/core/v6.rs index 9dd1091d1e2..faf3d0ab72e 100644 --- a/builder/src/core/v6.rs +++ b/builder/src/core/v6.rs @@ -485,7 +485,6 @@ impl Node { } prev = Some((blob_idx, offset)); } - // Special optimization to enable page cache sharing for EROFS. 
let chunk_size = if is_continuous && inode.size() > ctx.chunk_size as u64 { inode.size().next_power_of_two() @@ -576,7 +575,7 @@ impl BuildContext { impl Bootstrap { pub(crate) fn v6_update_dirents(parent: &Tree, parent_offset: u64) { - let mut node = parent.lock_node(); + let mut node = parent.borrow_mut_node(); let node_offset = node.v6_offset; if !node.is_dir() { return; @@ -596,7 +595,7 @@ impl Bootstrap { let mut dirs: Vec<&Tree> = Vec::new(); for child in parent.children.iter() { - let child_node = child.lock_node(); + let child_node = child.borrow_mut_node(); let entry = ( child_node.v6_offset, OsStr::from_bytes(child.name()).to_owned(), @@ -670,7 +669,7 @@ impl Bootstrap { // When using nid 0 as root nid, // the root directory will not be shown by glibc's getdents/readdir. // Because in some OS, ino == 0 represents corresponding file is deleted. - let root_node_offset = self.tree.lock_node().v6_offset; + let root_node_offset = self.tree.borrow_mut_node().v6_offset; let orig_meta_addr = root_node_offset - EROFS_BLOCK_SIZE_4096; let meta_addr = if blob_table_size > 0 { align_offset( @@ -704,7 +703,7 @@ impl Bootstrap { timing_tracer!( { self.tree.walk_bfs(true, &mut |n| { - n.lock_node().dump_bootstrap_v6( + n.borrow_mut_node().dump_bootstrap_v6( ctx, bootstrap_ctx.writer.as_mut(), orig_meta_addr, diff --git a/builder/src/directory.rs b/builder/src/directory.rs index f934f5111ac..f90091c2b30 100644 --- a/builder/src/directory.rs +++ b/builder/src/directory.rs @@ -34,7 +34,7 @@ impl FilesystemTreeBuilder { layer_idx: u16, ) -> Result> { let mut result = Vec::new(); - let parent = parent.lock().unwrap(); + let parent = parent.borrow(); if !parent.is_dir() { return Ok(result); } @@ -70,7 +70,7 @@ impl FilesystemTreeBuilder { let mut child = Tree::new(child); child.children = self.load_children(ctx, bootstrap_ctx, &child.node, layer_idx)?; child - .lock_node() + .borrow_mut_node() .v5_set_dir_size(ctx.fs_version, &child.children); result.push(child); } @@ -112,7 
+112,7 @@ impl DirectoryBuilder { { tree_builder.load_children(ctx, bootstrap_ctx, &tree.node, layer_idx) }, "load_from_directory" )?; - tree.lock_node() + tree.borrow_mut_node() .v5_set_dir_size(ctx.fs_version, &tree.children); Ok(tree) diff --git a/builder/src/lib.rs b/builder/src/lib.rs index 54f47e264a7..919da5cc893 100644 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -27,6 +27,7 @@ pub use self::chunkdict_generator::ChunkdictBlobInfo; pub use self::chunkdict_generator::ChunkdictChunkInfo; pub use self::chunkdict_generator::Generator; pub use self::compact::BlobCompactor; +pub use self::compact::Config as CompactConfig; pub use self::core::bootstrap::Bootstrap; pub use self::core::chunk_dict::{parse_chunk_dict_arg, ChunkDict, HashChunkDict}; pub use self::core::context::{ @@ -247,8 +248,8 @@ fn finalize_blob( if let Some(blob_cache) = ctx.blob_cache_generator.as_ref() { blob_cache.finalize(&blob_ctx.blob_id)?; } + ctx.blob_id = blob_ctx.blob_id.clone(); } - Ok(()) } diff --git a/builder/src/merge.rs b/builder/src/merge.rs index 25ba20c07e9..4dffef26836 100644 --- a/builder/src/merge.rs +++ b/builder/src/merge.rs @@ -257,7 +257,7 @@ impl Merger { let upper = Tree::from_bootstrap(&rs, &mut ())?; upper.walk_bfs(true, &mut |n| { - let mut node = n.lock_node(); + let mut node = n.borrow_mut_node(); for chunk in &mut node.chunks { let origin_blob_index = chunk.inner.blob_index() as usize; let blob_ctx = blobs[origin_blob_index].as_ref(); diff --git a/builder/src/stargz.rs b/builder/src/stargz.rs index d84a2d214a8..8a97c039410 100644 --- a/builder/src/stargz.rs +++ b/builder/src/stargz.rs @@ -601,7 +601,7 @@ impl StargzBuilder { } } - let mut tmp_node = tmp_tree.lock_node(); + let mut tmp_node = tmp_tree.borrow_mut_node(); if !tmp_node.is_reg() { bail!( "stargz: target {} for hardlink {} is not a regular file", @@ -788,7 +788,7 @@ impl StargzBuilder { bootstrap .tree .walk_bfs(true, &mut |n| { - let mut node = n.lock_node(); + let mut node = 
n.borrow_mut_node(); let node_path = node.path(); if let Some((size, ref mut chunks)) = self.file_chunk_map.get_mut(node_path) { node.inode.set_size(*size); @@ -802,9 +802,9 @@ impl StargzBuilder { for (k, v) in self.hardlink_map.iter() { match bootstrap.tree.get_node(k) { - Some(n) => { - let mut node = n.lock_node(); - let target = v.lock().unwrap(); + Some(t) => { + let mut node = t.borrow_mut_node(); + let target = v.borrow(); node.inode.set_size(target.inode.size()); node.inode.set_child_count(target.inode.child_count()); node.chunks = target.chunks.clone(); diff --git a/builder/src/tarball.rs b/builder/src/tarball.rs index edc996ac553..ca015955bc0 100644 --- a/builder/src/tarball.rs +++ b/builder/src/tarball.rs @@ -349,7 +349,7 @@ impl<'a> TarballTreeBuilder<'a> { } } } - let mut tmp_node = tmp_tree.lock_node(); + let mut tmp_node = tmp_tree.borrow_mut_node(); if !tmp_node.is_reg() { bail!( "tarball: target {} for hardlink {} is not a regular file", @@ -452,7 +452,7 @@ impl<'a> TarballTreeBuilder<'a> { // Tar hardlink header has zero file size and no file data associated, so copy value from // the associated regular file. 
if let Some(t) = hardlink_target { - let n = t.lock_node(); + let n = t.borrow_mut_node(); if n.inode.is_v5() { node.inode.set_digest(n.inode.digest().to_owned()); } @@ -540,7 +540,7 @@ impl<'a> TarballTreeBuilder<'a> { for c in &mut tree.children { Self::set_v5_dir_size(c); } - let mut node = tree.lock_node(); + let mut node = tree.borrow_mut_node(); node.v5_set_dir_size(RafsVersion::V5, &tree.children); } diff --git a/contrib/ctr-remote/.gitignore b/contrib/ctr-remote/.gitignore deleted file mode 100644 index 6dd29b7f8d0..00000000000 --- a/contrib/ctr-remote/.gitignore +++ /dev/null @@ -1 +0,0 @@ -bin/ \ No newline at end of file diff --git a/contrib/ctr-remote/.golangci.yml b/contrib/ctr-remote/.golangci.yml deleted file mode 100644 index 734653d6721..00000000000 --- a/contrib/ctr-remote/.golangci.yml +++ /dev/null @@ -1,21 +0,0 @@ -# https://golangci-lint.run/usage/configuration#config-file - -linters: - enable: - - staticcheck - - unconvert - - gofmt - - goimports - - revive - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -run: - deadline: 4m - skip-dirs: - - misc - diff --git a/contrib/ctr-remote/Makefile b/contrib/ctr-remote/Makefile deleted file mode 100644 index d00b32f6106..00000000000 --- a/contrib/ctr-remote/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -GIT_COMMIT := $(shell git rev-list -1 HEAD) -BUILD_TIME := $(shell date -u +%Y%m%d.%H%M) -PACKAGES ?= $(shell go list ./... 
| grep -v /vendor/) -GOARCH ?= $(shell go env GOARCH) -GOPROXY ?= https://goproxy.io - -ifdef GOPROXY -PROXY := GOPROXY=${GOPROXY} -endif - -.PHONY: all build release test clean - -all: build - -build: - @CGO_ENABLED=0 ${PROXY} GOOS=linux GOARCH=${GOARCH} go build -v -o bin/ctr-remote ./cmd/main.go - -release: - @CGO_ENABLED=0 ${PROXY} GOOS=linux GOARCH=${GOARCH} go build -ldflags '-s -w -extldflags "-static"' -v -o bin/ctr-remote ./cmd/main.go - -test: - go vet $(PACKAGES) - go test -v -cover ${PACKAGES} - -lint: - golangci-lint run - -clean: - rm -f bin/* diff --git a/contrib/ctr-remote/cmd/main.go b/contrib/ctr-remote/cmd/main.go deleted file mode 100644 index 4721e3122a5..00000000000 --- a/contrib/ctr-remote/cmd/main.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package main - -import ( - "fmt" - "os" - - "github.com/containerd/containerd/cmd/ctr/app" - "github.com/containerd/containerd/pkg/seed" //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies - "github.com/dragonflyoss/nydus/contrib/ctr-remote/commands" - "github.com/urfave/cli" -) - -func init() { - // From https://github.com/containerd/containerd/blob/f7f2be732159a411eae46b78bfdb479b133a823b/cmd/ctr/main.go - //nolint:staticcheck // Global math/rand seed is deprecated, but still used by external dependencies - seed.WithTimeAndRand() -} - -func main() { - customCommands := []cli.Command{commands.RpullCommand} - app := app.New() - app.Description = "NOTE: Enhanced for nydus-snapshotter\n" + app.Description - for i := range app.Commands { - if app.Commands[i].Name == "images" { - sc := map[string]cli.Command{} - for _, subcmd := range customCommands { - sc[subcmd.Name] = subcmd - } - - // First, replace duplicated subcommands - for j := range app.Commands[i].Subcommands { - for name, subcmd := range sc { - if name == app.Commands[i].Subcommands[j].Name { - app.Commands[i].Subcommands[j] = subcmd - delete(sc, name) - } - } - } - - // Next, append all new sub commands - for _, subcmd := range sc { - app.Commands[i].Subcommands = append(app.Commands[i].Subcommands, subcmd) - } - break - } - } - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "ctr-remote: %v\n", err) - os.Exit(1) - } -} diff --git a/contrib/ctr-remote/commands/rpull.go b/contrib/ctr-remote/commands/rpull.go deleted file mode 100644 index e9b28604b1d..00000000000 --- a/contrib/ctr-remote/commands/rpull.go +++ /dev/null @@ -1,103 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package commands - -import ( - "context" - "fmt" - - "github.com/containerd/containerd" - "github.com/containerd/containerd/cmd/ctr/commands" - "github.com/containerd/containerd/cmd/ctr/commands/content" - "github.com/containerd/containerd/images" - "github.com/containerd/log" - "github.com/containerd/nydus-snapshotter/pkg/label" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/urfave/cli" -) - -const ( - remoteSnapshotterName = "nydus" -) - -// RpullCommand is a subcommand to pull an image from a registry levaraging nydus snapshotter -var RpullCommand = cli.Command{ - Name: "rpull", - Usage: "pull an image from a registry leveraging nydus-snapshotter", - ArgsUsage: "[flags] ", - Description: `Fetch and prepare an image for use in containerd leveraging nydus-snapshotter. 
-After pulling an image, it should be ready to use the same reference in a run command.`, - Flags: append(commands.RegistryFlags, commands.LabelFlag), - Action: func(context *cli.Context) error { - var ( - ref = context.Args().First() - config = &rPullConfig{} - ) - if ref == "" { - return fmt.Errorf("please provide an image reference to pull") - } - - client, ctx, cancel, err := commands.NewClient(context) - if err != nil { - return err - } - defer cancel() - - ctx, done, err := client.WithLease(ctx) - if err != nil { - return err - } - defer done(ctx) - - fc, err := content.NewFetchConfig(ctx, context) - if err != nil { - return err - } - config.FetchConfig = fc - - return pull(ctx, client, ref, config) - }, -} - -type rPullConfig struct { - *content.FetchConfig -} - -func pull(ctx context.Context, client *containerd.Client, ref string, config *rPullConfig) error { - pCtx := ctx - h := images.HandlerFunc(func(_ context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.MediaType != images.MediaTypeDockerSchema1Manifest { - fmt.Printf("fetching %v... 
%v\n", desc.Digest.String()[:15], desc.MediaType) - } - return nil, nil - }) - - log.G(pCtx).WithField("image", ref).Debug("fetching") - configLabels := commands.LabelArgs(config.Labels) - if _, err := client.Pull(pCtx, ref, []containerd.RemoteOpt{ - containerd.WithPullLabels(configLabels), - containerd.WithResolver(config.Resolver), - containerd.WithImageHandler(h), - containerd.WithPullUnpack, - containerd.WithPullSnapshotter(remoteSnapshotterName), - containerd.WithImageHandlerWrapper(label.AppendLabelsHandlerWrapper(ref)), - }...); err != nil { - return err - } - - return nil -} diff --git a/contrib/ctr-remote/go.mod b/contrib/ctr-remote/go.mod deleted file mode 100644 index 55917c4e660..00000000000 --- a/contrib/ctr-remote/go.mod +++ /dev/null @@ -1,84 +0,0 @@ -module github.com/dragonflyoss/nydus/contrib/ctr-remote - -go 1.21 - -require ( - github.com/containerd/containerd v1.7.11 - github.com/containerd/log v0.1.0 - github.com/containerd/nydus-snapshotter v0.13.4 - github.com/opencontainers/image-spec v1.1.0-rc5 - github.com/urfave/cli v1.22.14 -) - -require ( - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Microsoft/hcsshim v0.11.4 // indirect - github.com/cilium/ebpf v0.10.0 // indirect - github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/cgroups/v3 v3.0.2 // indirect - github.com/containerd/console v1.0.3 // indirect - github.com/containerd/continuity v0.4.3 // indirect - github.com/containerd/fifo v1.1.0 // indirect - github.com/containerd/go-cni v1.1.9 // indirect - github.com/containerd/go-runc v1.0.0 // indirect - github.com/containerd/ttrpc v1.2.2 // indirect - github.com/containerd/typeurl/v2 v2.1.1 // indirect - github.com/containernetworking/cni v1.1.2 // indirect - github.com/containernetworking/plugins v1.2.0 // indirect - 
github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/uuid v1.5.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/intel/goresctrl v0.3.0 // indirect - github.com/klauspost/compress v1.17.4 // indirect - github.com/moby/locker v1.0.1 // indirect - github.com/moby/sys/mountinfo v0.7.1 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/signal v0.7.0 // indirect - github.com/moby/sys/symlink v0.2.0 // indirect - github.com/onsi/gomega v1.27.6 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/runc v1.1.12 // indirect - github.com/opencontainers/runtime-spec v1.1.0 // indirect - github.com/opencontainers/selinux v1.11.0 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/stretchr/objx v0.5.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - 
go.opentelemetry.io/otel/trace v1.21.0 // indirect - go.uber.org/goleak v1.2.1 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.16.1 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apimachinery v0.28.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect -) diff --git a/contrib/ctr-remote/go.sum b/contrib/ctr-remote/go.sum deleted file mode 100644 index 04d4b061e3f..00000000000 --- a/contrib/ctr-remote/go.sum +++ /dev/null @@ -1,358 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= -github.com/Microsoft/hcsshim 
v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.10.0 h1:nk5HPMeoBXtOzbkZBWym+ZWq1GIiHUsBFXxwewXAHLQ= -github.com/cilium/ebpf v0.10.0/go.mod h1:DPiVdY/kT534dgc9ERmvP8mWA+9gvwgKfRvk4nNWnoE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= -github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= -github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= -github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= -github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= -github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= -github.com/containerd/fifo 
v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= -github.com/containerd/go-cni v1.1.9 h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9rU= -github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= -github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/nydus-snapshotter v0.13.4 h1:veTQCgpfRGdPD031dVNGlU+vK/W9vBhZNlMWR9oupiQ= -github.com/containerd/nydus-snapshotter v0.13.4/go.mod h1:y41TM10lXhskfHHvge7kf1VucM4CeWwsCmQ5Q51UJrc= -github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= -github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= -github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= -github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= -github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= -github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= -github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= -github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 
h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c= -github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= -github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= -github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= -github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.9.4 
h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= -github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= -github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib 
v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify 
v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= -github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.uber.org/goleak 
v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/contrib/nydus-overlayfs/.golangci.yml b/contrib/nydus-overlayfs/.golangci.yml index 734653d6721..98c90d53cf4 100644 --- a/contrib/nydus-overlayfs/.golangci.yml +++ b/contrib/nydus-overlayfs/.golangci.yml @@ -8,7 +8,7 @@ linters: - goimports - revive - ineffassign - - vet + - govet - unused - misspell disable: @@ -16,6 +16,3 @@ linters: run: deadline: 4m - skip-dirs: - - misc - diff --git a/contrib/nydusify/.golangci.yml b/contrib/nydusify/.golangci.yml index 734653d6721..c8f194edc22 100644 --- a/contrib/nydusify/.golangci.yml +++ b/contrib/nydusify/.golangci.yml @@ -8,7 +8,7 @@ linters: - goimports - revive - ineffassign - - vet + - govet - unused - misspell disable: @@ -16,6 +16,5 @@ linters: run: 
deadline: 4m - skip-dirs: + issues.exclude-dirs: - misc - diff --git a/contrib/nydusify/cmd/nydusify.go b/contrib/nydusify/cmd/nydusify.go index 590ef133e05..82d597778ea 100644 --- a/contrib/nydusify/cmd/nydusify.go +++ b/contrib/nydusify/cmd/nydusify.go @@ -24,7 +24,6 @@ import ( "github.com/urfave/cli/v2" "github.com/dragonflyoss/nydus/contrib/nydusify/pkg/checker" - "github.com/dragonflyoss/nydus/contrib/nydusify/pkg/checker/rule" "github.com/dragonflyoss/nydus/contrib/nydusify/pkg/chunkdict/generator" "github.com/dragonflyoss/nydus/contrib/nydusify/pkg/committer" "github.com/dragonflyoss/nydus/contrib/nydusify/pkg/converter" @@ -80,7 +79,7 @@ func getBackendConfig(c *cli.Context, prefix string, required bool) (string, str return "", "", nil } - possibleBackendTypes := []string{"oss", "s3"} + possibleBackendTypes := []string{"oss", "s3", "localfs"} if !isPossibleValue(possibleBackendTypes, backendType) { return "", "", fmt.Errorf("--%sbackend-type should be one of %v", prefix, possibleBackendTypes) } @@ -90,7 +89,7 @@ func getBackendConfig(c *cli.Context, prefix string, required bool) (string, str ) if err != nil { return "", "", err - } else if (backendType == "oss" || backendType == "s3") && strings.TrimSpace(backendConfig) == "" { + } else if (backendType == "oss" || backendType == "s3" || backendType == "localfs") && strings.TrimSpace(backendConfig) == "" { return "", "", errors.Errorf("backend configuration is empty, please specify option '--%sbackend-config'", prefix) } @@ -796,7 +795,12 @@ func main() { Usage: "Json configuration file for storage backend", EnvVars: []string{"BACKEND_CONFIG_FILE"}, }, - + &cli.BoolFlag{ + Name: "prefetch", + Value: false, + Usage: "Enable full image data prefetch", + EnvVars: []string{"PREFETCH"}, + }, &cli.StringFlag{ Name: "mount-path", Value: "./image-fs", @@ -836,13 +840,11 @@ func main() { return err } - backendConfigStruct, err := rule.NewRegistryBackendConfig(parsed) + backendConfigStruct, err := 
utils.NewRegistryBackendConfig(parsed, c.Bool("target-insecure")) if err != nil { return errors.Wrap(err, "parse registry backend configuration") } - backendConfigStruct.SkipVerify = c.Bool("target-insecure") - bytes, err := json.Marshal(backendConfigStruct) if err != nil { return errors.Wrap(err, "marshal registry backend configuration") @@ -865,6 +867,7 @@ func main() { BackendType: backendType, BackendConfig: backendConfig, ExpectedArch: arch, + Prefetch: c.Bool("prefetch"), }) if err != nil { return err diff --git a/contrib/nydusify/cmd/nydusify_test.go b/contrib/nydusify/cmd/nydusify_test.go index ff7afe2a4fc..f13fba873f1 100644 --- a/contrib/nydusify/cmd/nydusify_test.go +++ b/contrib/nydusify/cmd/nydusify_test.go @@ -8,6 +8,7 @@ package main import ( "encoding/json" "flag" + "fmt" "os" "testing" @@ -80,54 +81,13 @@ func TestParseBackendConfig(t *testing.T) { } func TestGetBackendConfig(t *testing.T) { - app := &cli.App{ - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "prefixbackend-type", - Value: "", - }, - &cli.StringFlag{ - Name: "prefixbackend-config", - Value: "", - }, - &cli.StringFlag{ - Name: "prefixbackend-config-file", - Value: "", - }, - }, - } - ctx := cli.NewContext(app, nil, nil) - - backendType, backendConfig, err := getBackendConfig(ctx, "prefix", false) - require.NoError(t, err) - require.Empty(t, backendType) - require.Empty(t, backendConfig) - - backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) - require.Error(t, err) - require.Contains(t, err.Error(), "backend type is empty, please specify option") - require.Empty(t, backendType) - require.Empty(t, backendConfig) - - flagSet := flag.NewFlagSet("test1", flag.PanicOnError) - flagSet.String("prefixbackend-type", "errType", "") - ctx = cli.NewContext(app, flagSet, nil) - backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) - require.Error(t, err) - require.Contains(t, err.Error(), "backend-type should be one of") - require.Empty(t, backendType) - 
require.Empty(t, backendConfig) - - flagSet = flag.NewFlagSet("test2", flag.PanicOnError) - flagSet.String("prefixbackend-type", "oss", "") - ctx = cli.NewContext(app, flagSet, nil) - backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) - require.Error(t, err) - require.Contains(t, err.Error(), "backend configuration is empty, please specify option") - require.Empty(t, backendType) - require.Empty(t, backendConfig) - - configJSON := ` + tests := []struct { + backendType string + backendConfig string + }{ + { + backendType: "oss", + backendConfig: ` { "bucket_name": "test", "endpoint": "region.oss.com", @@ -135,45 +95,106 @@ func TestGetBackendConfig(t *testing.T) { "access_key_secret": "testSK", "meta_prefix": "meta", "blob_prefix": "blob" - }` - require.True(t, json.Valid([]byte(configJSON))) - - flagSet = flag.NewFlagSet("test3", flag.PanicOnError) - flagSet.String("prefixbackend-type", "oss", "") - flagSet.String("prefixbackend-config", configJSON, "") - ctx = cli.NewContext(app, flagSet, nil) - backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) - require.NoError(t, err) - require.Equal(t, "oss", backendType) - require.Equal(t, configJSON, backendConfig) - - file, err := os.CreateTemp("", "nydusify-backend-config-test.json") - require.NoError(t, err) - defer os.RemoveAll(file.Name()) - - _, err = file.WriteString(configJSON) - require.NoError(t, err) - file.Sync() - - flagSet = flag.NewFlagSet("test4", flag.PanicOnError) - flagSet.String("prefixbackend-type", "oss", "") - flagSet.String("prefixbackend-config-file", file.Name(), "") - ctx = cli.NewContext(app, flagSet, nil) - backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) - require.NoError(t, err) - require.Equal(t, "oss", backendType) - require.Equal(t, configJSON, backendConfig) + }`, + }, + { + backendType: "localfs", + backendConfig: ` + { + "dir": "/path/to/blobs" + }`, + }, + } - flagSet = flag.NewFlagSet("test5", flag.PanicOnError) - 
flagSet.String("prefixbackend-type", "oss", "") - flagSet.String("prefixbackend-config", configJSON, "") - flagSet.String("prefixbackend-config-file", file.Name(), "") - ctx = cli.NewContext(app, flagSet, nil) - backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) - require.Error(t, err) - require.Contains(t, err.Error(), "--backend-config conflicts with --backend-config-file") - require.Empty(t, backendType) - require.Empty(t, backendConfig) + for _, test := range tests { + t.Run(fmt.Sprintf("backend config %s", test.backendType), func(t *testing.T) { + app := &cli.App{ + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "prefixbackend-type", + Value: "", + }, + &cli.StringFlag{ + Name: "prefixbackend-config", + Value: "", + }, + &cli.StringFlag{ + Name: "prefixbackend-config-file", + Value: "", + }, + }, + } + ctx := cli.NewContext(app, nil, nil) + + backendType, backendConfig, err := getBackendConfig(ctx, "prefix", false) + require.NoError(t, err) + require.Empty(t, backendType) + require.Empty(t, backendConfig) + + backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) + require.Error(t, err) + require.Contains(t, err.Error(), "backend type is empty, please specify option") + require.Empty(t, backendType) + require.Empty(t, backendConfig) + + flagSet := flag.NewFlagSet("test1", flag.PanicOnError) + flagSet.String("prefixbackend-type", "errType", "") + ctx = cli.NewContext(app, flagSet, nil) + backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) + require.Error(t, err) + require.Contains(t, err.Error(), "backend-type should be one of") + require.Empty(t, backendType) + require.Empty(t, backendConfig) + + flagSet = flag.NewFlagSet("test2", flag.PanicOnError) + flagSet.String("prefixbackend-type", test.backendType, "") + ctx = cli.NewContext(app, flagSet, nil) + backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) + require.Error(t, err) + require.Contains(t, err.Error(), "backend configuration 
is empty, please specify option") + require.Empty(t, backendType) + require.Empty(t, backendConfig) + + require.True(t, json.Valid([]byte(test.backendConfig))) + + flagSet = flag.NewFlagSet("test3", flag.PanicOnError) + flagSet.String("prefixbackend-type", test.backendType, "") + flagSet.String("prefixbackend-config", test.backendConfig, "") + ctx = cli.NewContext(app, flagSet, nil) + backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) + require.NoError(t, err) + require.Equal(t, test.backendType, backendType) + require.Equal(t, test.backendConfig, backendConfig) + + file, err := os.CreateTemp("", "nydusify-backend-config-test.json") + require.NoError(t, err) + defer os.RemoveAll(file.Name()) + + _, err = file.WriteString(test.backendConfig) + require.NoError(t, err) + file.Sync() + + flagSet = flag.NewFlagSet("test4", flag.PanicOnError) + flagSet.String("prefixbackend-type", test.backendType, "") + flagSet.String("prefixbackend-config-file", file.Name(), "") + ctx = cli.NewContext(app, flagSet, nil) + backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) + require.NoError(t, err) + require.Equal(t, test.backendType, backendType) + require.Equal(t, test.backendConfig, backendConfig) + + flagSet = flag.NewFlagSet("test5", flag.PanicOnError) + flagSet.String("prefixbackend-type", test.backendType, "") + flagSet.String("prefixbackend-config", test.backendConfig, "") + flagSet.String("prefixbackend-config-file", file.Name(), "") + ctx = cli.NewContext(app, flagSet, nil) + backendType, backendConfig, err = getBackendConfig(ctx, "prefix", true) + require.Error(t, err) + require.Contains(t, err.Error(), "--backend-config conflicts with --backend-config-file") + require.Empty(t, backendType) + require.Empty(t, backendConfig) + }) + } } func TestGetTargetReference(t *testing.T) { diff --git a/contrib/nydusify/go.mod b/contrib/nydusify/go.mod index d73c44c7a21..cfb76bbf287 100644 --- a/contrib/nydusify/go.mod +++ 
b/contrib/nydusify/go.mod @@ -11,6 +11,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 github.com/containerd/containerd v1.7.18 github.com/containerd/continuity v0.4.3 + github.com/containerd/errdefs v0.1.0 github.com/containerd/nydus-snapshotter v0.13.11 github.com/distribution/reference v0.5.0 github.com/docker/cli v26.0.0+incompatible @@ -55,7 +56,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/fifo v1.1.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/stargz-snapshotter v0.15.1 // indirect @@ -65,7 +65,7 @@ require ( github.com/containers/ocicrypt v1.1.10 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v25.0.6+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/fatih/color v1.16.0 // indirect diff --git a/contrib/nydusify/go.sum b/contrib/nydusify/go.sum index 02d10653667..07b67fac1de 100644 --- a/contrib/nydusify/go.sum +++ b/contrib/nydusify/go.sum @@ -88,8 +88,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v26.0.0+incompatible h1:90BKrx1a1HKYpSnnBFR6AgDq/FqkHxwlUyzJVPxD30I= github.com/docker/cli v26.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.6+incompatible 
h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= diff --git a/contrib/nydusify/pkg/build/builder.go b/contrib/nydusify/pkg/build/builder.go index 177c0b9a209..ff899284679 100644 --- a/contrib/nydusify/pkg/build/builder.go +++ b/contrib/nydusify/pkg/build/builder.go @@ -38,7 +38,12 @@ type CompactOption struct { BackendType string BackendConfigPath string OutputJSONPath string - CompactConfigPath string + + MinUsedRatio string + CompactBlobSize string + MaxCompactSize string + LayersToCompact string + BlobsDir string } type GenerateOption struct { @@ -82,7 +87,11 @@ func (builder *Builder) Compact(option CompactOption) error { args := []string{ "compact", "--bootstrap", option.BootstrapPath, - "--config", option.CompactConfigPath, + "--blob-dir", option.BlobsDir, + "--min-used-ratio", option.MinUsedRatio, + "--compact-blob-size", option.CompactBlobSize, + "--max-compact-size", option.MaxCompactSize, + "--layers-to-compact", option.LayersToCompact, "--backend-type", option.BackendType, "--backend-config-file", option.BackendConfigPath, "--log-level", "info", diff --git a/contrib/nydusify/pkg/checker/checker.go b/contrib/nydusify/pkg/checker/checker.go index f8d81a40c11..58dcf92e05e 100644 --- a/contrib/nydusify/pkg/checker/checker.go +++ b/contrib/nydusify/pkg/checker/checker.go @@ -168,6 +168,7 @@ func (checker *Checker) check(ctx context.Context) error { TargetInsecure: checker.TargetInsecure, PlainHTTP: checker.targetParser.Remote.IsWithHTTP(), NydusdConfig: tool.NydusdConfig{ + EnablePrefetch: true, NydusdPath: checker.NydusdPath, BackendType: 
checker.BackendType, BackendConfig: checker.BackendConfig, diff --git a/contrib/nydusify/pkg/checker/rule/filesystem.go b/contrib/nydusify/pkg/checker/rule/filesystem.go index 1bb881b7ce0..898251c971a 100644 --- a/contrib/nydusify/pkg/checker/rule/filesystem.go +++ b/contrib/nydusify/pkg/checker/rule/filesystem.go @@ -6,7 +6,6 @@ package rule import ( "context" - "encoding/base64" "encoding/hex" "encoding/json" "fmt" @@ -16,7 +15,6 @@ import ( "syscall" "github.com/distribution/reference" - dockerconfig "github.com/docker/cli/cli/config" "github.com/dragonflyoss/nydus/contrib/nydusify/pkg/checker/tool" "github.com/dragonflyoss/nydus/contrib/nydusify/pkg/parser" @@ -221,28 +219,6 @@ func (rule *FilesystemRule) mountSourceImage() (*tool.Image, error) { return image, nil } -func NewRegistryBackendConfig(parsed reference.Named) (RegistryBackendConfig, error) { - - backendConfig := RegistryBackendConfig{ - Scheme: "https", - Host: reference.Domain(parsed), - Repo: reference.Path(parsed), - } - - config := dockerconfig.LoadDefaultConfigFile(os.Stderr) - authConfig, err := config.GetAuthConfig(backendConfig.Host) - if err != nil { - return backendConfig, errors.Wrap(err, "get docker registry auth config") - } - var auth string - if authConfig.Username != "" && authConfig.Password != "" { - auth = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", authConfig.Username, authConfig.Password))) - } - backendConfig.Auth = auth - - return backendConfig, nil -} - func (rule *FilesystemRule) mountNydusImage() (*tool.Nydusd, error) { logrus.Infof("Mounting Nydus image to %s", rule.NydusdConfig.MountPath) @@ -263,7 +239,7 @@ func (rule *FilesystemRule) mountNydusImage() (*tool.Nydusd, error) { rule.NydusdConfig.BackendType = "registry" if rule.NydusdConfig.BackendConfig == "" { - backendConfig, err := NewRegistryBackendConfig(parsed) + backendConfig, err := utils.NewRegistryBackendConfig(parsed, rule.TargetInsecure) if err != nil { return nil, errors.Wrap(err, "failed to 
parse backend configuration") } diff --git a/contrib/nydusify/pkg/checker/tool/nydusd.go b/contrib/nydusify/pkg/checker/tool/nydusd.go index ed8c341ae90..2f47dd0c5e1 100644 --- a/contrib/nydusify/pkg/checker/tool/nydusd.go +++ b/contrib/nydusify/pkg/checker/tool/nydusd.go @@ -76,12 +76,10 @@ func makeConfig(conf NydusdConfig) error { if conf.BackendType == "" { conf.BackendType = "localfs" conf.BackendConfig = `{"dir": "/fake"}` - conf.EnablePrefetch = false } else { if conf.BackendConfig == "" { return errors.Errorf("empty backend configuration string") } - conf.EnablePrefetch = true } if err := tpl.Execute(&ret, conf); err != nil { return errors.New("failed to prepare configuration file for Nydusd") @@ -176,7 +174,7 @@ func (nydusd *Nydusd) Mount() error { "--apisock", nydusd.APISockPath, "--log-level", - "error", + "warn", } cmd := exec.Command(nydusd.NydusdPath, args...) diff --git a/contrib/nydusify/pkg/compactor/compactor.go b/contrib/nydusify/pkg/compactor/compactor.go index b833ad7ed0a..81da4b6b7b5 100644 --- a/contrib/nydusify/pkg/compactor/compactor.go +++ b/contrib/nydusify/pkg/compactor/compactor.go @@ -10,18 +10,18 @@ import ( ) var defaultCompactConfig = &CompactConfig{ - MinUsedRatio: 5, - CompactBlobSize: 10485760, - MaxCompactSize: 104857600, - LayersToCompact: 32, + MinUsedRatio: "5", + CompactBlobSize: "10485760", + MaxCompactSize: "104857600", + LayersToCompact: "32", } type CompactConfig struct { - MinUsedRatio int `json:"min_used_ratio"` - CompactBlobSize int `json:"compact_blob_size"` - MaxCompactSize int `json:"max_compact_size"` - LayersToCompact int `json:"layers_to_compact"` - BlobsDir string `json:"blobs_dir,omitempty"` + MinUsedRatio string + CompactBlobSize string + MaxCompactSize string + LayersToCompact string + BlobsDir string } func (cfg *CompactConfig) Dumps(filePath string) error { @@ -81,11 +81,6 @@ func (compactor *Compactor) Compact(bootstrapPath, chunkDict, backendType, backe if err := os.Remove(targetBootstrap); err != nil 
&& !os.IsNotExist(err) { return "", errors.Wrap(err, "failed to delete old bootstrap file") } - // prepare config file - configFilePath := filepath.Join(compactor.workdir, "compact.json") - if err := compactor.cfg.Dumps(configFilePath); err != nil { - return "", errors.Wrap(err, "compact err") - } outputJSONPath := filepath.Join(compactor.workdir, "compact-result.json") if err := os.Remove(outputJSONPath); err != nil && !os.IsNotExist(err) { return "", errors.Wrap(err, "failed to delete old output-json file") @@ -97,7 +92,11 @@ func (compactor *Compactor) Compact(bootstrapPath, chunkDict, backendType, backe BackendType: backendType, BackendConfigPath: backendConfigFile, OutputJSONPath: outputJSONPath, - CompactConfigPath: configFilePath, + MinUsedRatio: compactor.cfg.MinUsedRatio, + CompactBlobSize: compactor.cfg.CompactBlobSize, + MaxCompactSize: compactor.cfg.MaxCompactSize, + LayersToCompact: compactor.cfg.LayersToCompact, + BlobsDir: compactor.cfg.BlobsDir, }) if err != nil { return "", errors.Wrap(err, "failed to run compact command") diff --git a/contrib/nydusify/pkg/converter/provider/ported.go b/contrib/nydusify/pkg/converter/provider/ported.go index 652cf5ff838..5fdb5c5de2e 100644 --- a/contrib/nydusify/pkg/converter/provider/ported.go +++ b/contrib/nydusify/pkg/converter/provider/ported.go @@ -6,15 +6,20 @@ package provider import ( "context" + "encoding/json" "fmt" + "io" "strings" "github.com/containerd/containerd" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/errdefs" + "github.com/opencontainers/go-digest" // nolint:staticcheck "github.com/containerd/containerd/remotes/docker/schema1" @@ -22,6 +27,18 @@ import ( "golang.org/x/sync/semaphore" ) +type importOpts struct { + indexName string + imageRefT 
func(string) string + dgstRefT func(digest.Digest) string + skipDgstRef func(string) bool + platformMatcher platforms.MatchComparer + compress bool + discardLayers bool + skipMissing bool + imageLabels map[string]string +} + // Ported from containerd project, copyright The containerd Authors. // github.com/containerd/containerd/blob/main/pull.go func fetch(ctx context.Context, store content.Store, rCtx *containerd.RemoteContext, ref string, limit int) (images.Image, error) { @@ -177,3 +194,106 @@ func push(ctx context.Context, store content.Store, pushCtx *containerd.RemoteCo return remotes.PushContent(ctx, pusher, desc, store, limiter, pushCtx.PlatformMatcher, wrapper) } + +// Ported from containerd project, copyright The containerd Authors. +// github.com/containerd/containerd/blob/main/import.go +func load(ctx context.Context, reader io.Reader, store content.Store, iopts importOpts) ([]images.Image, error) { + var aio []archive.ImportOpt + if iopts.compress { + aio = append(aio, archive.WithImportCompression()) + } + + index, err := archive.ImportIndex(ctx, store, reader, aio...) + if err != nil { + return nil, err + } + + var imgs []images.Image + + if iopts.indexName != "" { + imgs = append(imgs, images.Image{ + Name: iopts.indexName, + Target: index, + }) + } + var platformMatcher = iopts.platformMatcher + + var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + // Only save images at top level + if desc.Digest != index.Digest { + // Don't set labels on missing content. 
+ children, err := images.Children(ctx, store, desc) + if iopts.skipMissing && errdefs.IsNotFound(err) { + return nil, images.ErrSkipDesc + } + return children, err + } + + var idx ocispec.Index + p, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return nil, err + } + if err := json.Unmarshal(p, &idx); err != nil { + return nil, err + } + + for _, m := range idx.Manifests { + name := imageName(m.Annotations, iopts.imageRefT) + if name != "" { + imgs = append(imgs, images.Image{ + Name: name, + Target: m, + }) + } + if iopts.skipDgstRef != nil { + if iopts.skipDgstRef(name) { + continue + } + } + if iopts.dgstRefT != nil { + ref := iopts.dgstRefT(m.Digest) + if ref != "" { + imgs = append(imgs, images.Image{ + Name: ref, + Target: m, + }) + } + } + } + + return idx.Manifests, nil + } + + handler = images.FilterPlatforms(handler, platformMatcher) + if iopts.discardLayers { + handler = images.SetChildrenMappedLabels(store, handler, images.ChildGCLabelsFilterLayers) + } else { + handler = images.SetChildrenLabels(store, handler) + } + if err := images.WalkNotEmpty(ctx, handler, index); err != nil { + return nil, err + } + + for i := range imgs { + if iopts.imageLabels != nil { + imgs[i].Labels = iopts.imageLabels + } + } + + return imgs, nil +} + +func imageName(annotations map[string]string, ociCleanup func(string) string) string { + name := annotations[images.AnnotationImageName] + if name != "" { + return name + } + name = annotations[ocispec.AnnotationRefName] + if name != "" { + if ociCleanup != nil { + name = ociCleanup(name) + } + } + return name +} diff --git a/contrib/nydusify/pkg/converter/provider/provider.go b/contrib/nydusify/pkg/converter/provider/provider.go index 08a7988dba1..3d4b238dc21 100644 --- a/contrib/nydusify/pkg/converter/provider/provider.go +++ b/contrib/nydusify/pkg/converter/provider/provider.go @@ -7,6 +7,7 @@ package provider import ( "context" "crypto/tls" + "io" "net" "net/http" "os" @@ -17,13 +18,16 @@ import ( 
"github.com/containerd/containerd" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/goharbor/acceleration-service/pkg/cache" accelcontent "github.com/goharbor/acceleration-service/pkg/content" "github.com/goharbor/acceleration-service/pkg/remote" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) var LayerConcurrentLimit = 5 @@ -152,6 +156,36 @@ func (pvd *Provider) Push(ctx context.Context, desc ocispec.Descriptor, ref stri return push(ctx, pvd.store, rc, desc, ref) } +func (pvd *Provider) Import(ctx context.Context, reader io.Reader) (string, error) { + iopts := importOpts{ + dgstRefT: func(dgst digest.Digest) string { + return "nydus" + "@" + dgst.String() + }, + skipDgstRef: func(name string) bool { return name != "" }, + platformMatcher: pvd.platformMC, + } + images, err := load(ctx, reader, pvd.store, iopts) + if err != nil { + return "", err + } + + if len(images) != 1 { + return "", errors.New("incorrect tarball format") + } + image := images[0] + + pvd.mutex.Lock() + defer pvd.mutex.Unlock() + pvd.images[image.Name] = &image.Target + + return image.Name, nil +} + +func (pvd *Provider) Export(ctx context.Context, writer io.Writer, img *ocispec.Descriptor, name string) error { + opts := []archive.ExportOpt{archive.WithManifest(*img, name), archive.WithPlatform(pvd.platformMC)} + return archive.Export(ctx, pvd.store, writer, opts...) 
+} + func (pvd *Provider) Image(_ context.Context, ref string) (*ocispec.Descriptor, error) { pvd.mutex.Lock() defer pvd.mutex.Unlock() diff --git a/contrib/nydusify/pkg/copier/copier.go b/contrib/nydusify/pkg/copier/copier.go index fa3d059035c..b684629558e 100644 --- a/contrib/nydusify/pkg/copier/copier.go +++ b/contrib/nydusify/pkg/copier/copier.go @@ -13,6 +13,7 @@ import ( "path/filepath" "strings" + "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" containerdErrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" @@ -246,6 +247,28 @@ func getPlatform(platform *ocispec.Platform) string { return platforms.Format(*platform) } +// getLocalPath checks if the given reference is a local file path and returns its absolute path. +// +// Parameters: +// - ref: A string which may be a docker reference or a local file path prefixed with "file://". +// +// Returns: +// - isLocalPath: A boolean indicating whether the reference is a local file path. +// - absPath: A string containing the absolute path of the local file, if applicable. +// - err: An error object if any error occurs during the process of getting the absolute path. +func getLocalPath(ref string) (isLocalPath bool, absPath string, err error) { + if !strings.HasPrefix(ref, "file://") { + return false, "", nil + } + path := strings.TrimPrefix(ref, "file://") + absPath, err = filepath.Abs(path) + if err != nil { + return true, "", err + } + return true, absPath, nil +} + +// Copy copies an image from the source to the target. func Copy(ctx context.Context, opt Opt) error { // Containerd image fetch requires a namespace context. 
ctx = namespaces.WithNamespace(ctx, "nydusify") @@ -285,41 +308,86 @@ func Copy(ctx context.Context, opt Opt) error { } defer os.RemoveAll(tmpDir) - sourceNamed, err := docker.ParseDockerRef(opt.Source) + isLocalSource, inputPath, err := getLocalPath(opt.Source) if err != nil { - return errors.Wrap(err, "parse source reference") - } - targetNamed, err := docker.ParseDockerRef(opt.Target) - if err != nil { - return errors.Wrap(err, "parse target reference") + return errors.Wrap(err, "parse source path") } - source := sourceNamed.String() - target := targetNamed.String() + var source string + if isLocalSource { + logrus.Infof("importing source image from %s", inputPath) + + f, err := os.Open(inputPath) + if err != nil { + return err + } + defer f.Close() + + ds, err := compression.DecompressStream(f) + if err != nil { + return err + } + defer ds.Close() - logrus.Infof("pulling source image %s", source) - if err := pvd.Pull(ctx, source); err != nil { - if errdefs.NeedsRetryWithHTTP(err) { - pvd.UsePlainHTTP() - if err := pvd.Pull(ctx, source); err != nil { - return errors.Wrap(err, "try to pull image") + if source, err = pvd.Import(ctx, ds); err != nil { + return errors.Wrap(err, "import source image") + } + logrus.Infof("imported source image %s", source) + } else { + sourceNamed, err := docker.ParseDockerRef(opt.Source) + if err != nil { + return errors.Wrap(err, "parse source reference") + } + source = sourceNamed.String() + + logrus.Infof("pulling source image %s", source) + if err := pvd.Pull(ctx, source); err != nil { + if errdefs.NeedsRetryWithHTTP(err) { + pvd.UsePlainHTTP() + if err := pvd.Pull(ctx, source); err != nil { + return errors.Wrap(err, "try to pull image") + } + } else { + return errors.Wrap(err, "pull source image") } - } else { - return errors.Wrap(err, "pull source image") } + logrus.Infof("pulled source image %s", source) } - logrus.Infof("pulled source image %s", source) sourceImage, err := pvd.Image(ctx, source) if err != nil { return 
errors.Wrap(err, "find image from store") } + isLocalTarget, outputPath, err := getLocalPath(opt.Target) + if err != nil { + return errors.Wrap(err, "parse target path") + } + if isLocalTarget { + logrus.Infof("exporting source image to %s", outputPath) + f, err := os.OpenFile(outputPath, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + if err := pvd.Export(ctx, f, sourceImage, source); err != nil { + return errors.Wrap(err, "export source image to target tar file") + } + logrus.Infof("exported image %s", source) + return nil + } + sourceDescs, err := utils.GetManifests(ctx, pvd.ContentStore(), *sourceImage, platformMC) if err != nil { return errors.Wrap(err, "get image manifests") } targetDescs := make([]ocispec.Descriptor, len(sourceDescs)) + targetNamed, err := docker.ParseDockerRef(opt.Target) + if err != nil { + return errors.Wrap(err, "parse target reference") + } + target := targetNamed.String() + sem := semaphore.NewWeighted(1) eg := errgroup.Group{} for idx := range sourceDescs { diff --git a/contrib/nydusify/pkg/utils/backend.go b/contrib/nydusify/pkg/utils/backend.go new file mode 100644 index 00000000000..c93690a37a7 --- /dev/null +++ b/contrib/nydusify/pkg/utils/backend.go @@ -0,0 +1,57 @@ +package utils + +import ( + "encoding/base64" + "fmt" + "os" + + "github.com/distribution/reference" + dockerconfig "github.com/docker/cli/cli/config" + "github.com/pkg/errors" +) + +type RegistryBackendConfig struct { + Scheme string `json:"scheme"` + Host string `json:"host"` + Repo string `json:"repo"` + Auth string `json:"auth,omitempty"` + SkipVerify bool `json:"skip_verify,omitempty"` + Proxy BackendProxyConfig `json:"proxy"` +} + +type BackendProxyConfig struct { + URL string `json:"url"` + Fallback bool `json:"fallback"` + PingURL string `json:"ping_url"` +} + +func NewRegistryBackendConfig(parsed reference.Named, insecure bool) (RegistryBackendConfig, error) { + proxyURL := os.Getenv("HTTP_PROXY") + if proxyURL == "" { + 
proxyURL = os.Getenv("HTTPS_PROXY") + } + + backendConfig := RegistryBackendConfig{ + Scheme: "https", + Host: reference.Domain(parsed), + Repo: reference.Path(parsed), + SkipVerify: insecure, + Proxy: BackendProxyConfig{ + URL: proxyURL, + Fallback: true, + }, + } + + config := dockerconfig.LoadDefaultConfigFile(os.Stderr) + authConfig, err := config.GetAuthConfig(backendConfig.Host) + if err != nil { + return backendConfig, errors.Wrap(err, "get docker registry auth config") + } + var auth string + if authConfig.Username != "" && authConfig.Password != "" { + auth = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", authConfig.Username, authConfig.Password))) + } + backendConfig.Auth = auth + + return backendConfig, nil +} diff --git a/contrib/nydusify/pkg/viewer/viewer.go b/contrib/nydusify/pkg/viewer/viewer.go index 5f28b394864..2bea0b3f2e4 100644 --- a/contrib/nydusify/pkg/viewer/viewer.go +++ b/contrib/nydusify/pkg/viewer/viewer.go @@ -37,6 +37,7 @@ type Opt struct { BackendConfig string ExpectedArch string FsVersion string + Prefetch bool } // fsViewer provides complete view of file system in nydus image @@ -63,15 +64,16 @@ func New(opt Opt) (*FsViewer, error) { mode := "cached" nydusdConfig := tool.NydusdConfig{ - NydusdPath: opt.NydusdPath, - BackendType: opt.BackendType, - BackendConfig: opt.BackendConfig, - BootstrapPath: filepath.Join(opt.WorkDir, "nydus_bootstrap"), - ConfigPath: filepath.Join(opt.WorkDir, "fs/nydusd_config.json"), - BlobCacheDir: filepath.Join(opt.WorkDir, "fs/nydus_blobs"), - MountPath: opt.MountPath, - APISockPath: filepath.Join(opt.WorkDir, "fs/nydus_api.sock"), - Mode: mode, + EnablePrefetch: opt.Prefetch, + NydusdPath: opt.NydusdPath, + BackendType: opt.BackendType, + BackendConfig: opt.BackendConfig, + BootstrapPath: filepath.Join(opt.WorkDir, "nydus_bootstrap"), + ConfigPath: filepath.Join(opt.WorkDir, "fs/nydusd_config.json"), + BlobCacheDir: filepath.Join(opt.WorkDir, "fs/nydus_blobs"), + MountPath: opt.MountPath, 
+ APISockPath: filepath.Join(opt.WorkDir, "fs/nydus_api.sock"), + Mode: mode, } fsViewer := &FsViewer{ diff --git a/deny.toml b/deny.toml index 48c7f660a6c..00492de0eb0 100644 --- a/deny.toml +++ b/deny.toml @@ -48,6 +48,7 @@ notice = "warn" # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. ignore = [ + { id = "RUSTSEC-2024-0357", reason = "openssl 0.10.55 can't build in riscv64 and ppc64le" }, ] # Threshold for security vulnerabilities, any vulnerability with a CVSS score # lower than the range specified will be ignored. Note that ignored advisories diff --git a/docs/containerd-env-setup.md b/docs/containerd-env-setup.md index 5d6eebcf02c..7c806c47ed5 100644 --- a/docs/containerd-env-setup.md +++ b/docs/containerd-env-setup.md @@ -90,7 +90,7 @@ Please refer to the nydusd [doc](./nydusd.md) to learn more options. Make sure the default nydus snapshotter root directory is clear. ``` -sudo rm -rf /var/lib/containerd-nydus +sudo rm -rf /var/lib/containerd/io.containerd.snapshotter.v1.nydus ``` 3. Start `containerd-nydus-grpc` (nydus snapshotter): diff --git a/docs/nydus-fscache.md b/docs/nydus-fscache.md index 4e3a6252c08..d30a08f235f 100644 --- a/docs/nydus-fscache.md +++ b/docs/nydus-fscache.md @@ -78,7 +78,7 @@ sudo systemctl status cachefilesd sudo lsof /dev/cachefiles ``` -## Get ctr-remote and the fscache-supported nydusd +## Get nerdctl and the fscache-supported nydusd 1. Make sure you have installed _rust 1.52.1_ version and golang. @@ -95,12 +95,7 @@ make release 4. Copy the "nydus-image" binary file compiled in Step 3 into _$PATH_ e.g. /usr/bin with \ ``cp target/release/nydus-image /usr/bin`` -5. Build ctr-remote with - -``` bash -cd contrib/ctr-remote -make -``` +5. Get nerdctl with [nerdctl docs](https://github.com/containerd/nerdctl?tab=readme-ov-file#install). ## Run container with nydus snapshotter @@ -134,7 +129,7 @@ make ``` # make sure the directory exists. 
-mkdir -p /var/lib/containerd-nydus +mkdir -p /var/lib/containerd/io.containerd.snapshotter.v1.nydus ./bin/containerd-nydus-grpc \ --nydusd-config /etc/nydus/nydusd-config.fscache.json \ @@ -172,17 +167,17 @@ For more information on how to configure containerd to use nydus snapshotter ple 6. Restart containerd with `service containerd restart` -7. Run container with [ctr-remote](../contrib/ctr-remote) +7. Run container with nerdctl ``` shell # pull nydus image -contrib/ctr-remote/bin/ctr-remote images rpull docker.io/hsiangkao/ubuntu:20.04-rafs-v6 +nerdctl images pull --snapshotter=nydus docker.io/hsiangkao/ubuntu:20.04-rafs-v6 # run nydus image -ctr run --rm -t --snapshotter=nydus docker.io/hsiangkao/ubuntu:20.04-rafs-v6 ubuntu /bin/bash +nerdctl run --rm -t --snapshotter=nydus docker.io/hsiangkao/ubuntu:20.04-rafs-v6 ubuntu /bin/bash # remove nydus image -ctr images rm docker.io/hsiangkao/ubuntu:20.04-rafs-v6 +nerdctl images rm docker.io/hsiangkao/ubuntu:20.04-rafs-v6 ``` Some RAFS v6 referenced images (in Zstd algorithms): diff --git a/docs/nydus-image.md b/docs/nydus-image.md index 7e7974eeed6..4af677eb433 100644 --- a/docs/nydus-image.md +++ b/docs/nydus-image.md @@ -182,6 +182,29 @@ data blobs: ["9e50ae5ac02b2ef6ffb86075720e49d95d8240eed4717dd8ac9c68cadba00762"] -rw-r--r-- 1 root root 20480 3月 29 17:02 df01f389850b79cd5a6ca6db98495bb457aa0821b0558351c55537551322fb96 ``` +## Unpack Nydus Image +`nydus-image` tool supports to unpack Nydus image to a tar file. 
+```shell +# use --blob to specify RAFS data blob +nydus-image unpack --blob image/blob1 image/bootstrap --output tmp.tar + +# use --blob-dir to specify the directory containing RAFS data blobs +nydus-image unpack --blob-dir=image/ image/bootstrap --output tmp.tar + +# example-oss.config +{ + "endpoint": "region.aliyuncs.com", + "scheme": "https", + "access_key_id": "", + "access_key_secret": "", + "bucket_name": "", + "object_prefix": "image/" +} + +# use backend config file to specify remote storage for RAFS data blobs +nydus-image unpack --backend-type oss --backend-config-file example-oss.config image/bootstrap --output tmp.tar +``` + ## Compact Nydus Image `nydus-image` tool supports to compact Nydus image for 1. reduce number of blobs diff --git a/docs/nydusd.md b/docs/nydusd.md index 242941e4266..bcd51f61231 100644 --- a/docs/nydusd.md +++ b/docs/nydusd.md @@ -324,7 +324,7 @@ The `HttpProxy` backend also supports the `Proxy` and `Mirrors` configurations f ##### Enable Mirrors for Storage Backend (Recommend) -Nydus is deeply integrated with [Dragonfly](https://d7y.io/) P2P mirror mode, please refer the [doc](https://d7y.io/docs/setup/integration/nydus) to learn how configuring Nydus to use Dragonfly. +Nydus is deeply integrated with [Dragonfly](https://d7y.io/) P2P mirror mode, please refer the [doc](https://d7y.io/docs/next/operations/integrations/container-runtime/nydus/) to learn how configuring Nydus to use Dragonfly. Add `device.backend.config.mirrors` field to enable mirrors for storage backend. The mirror can be a P2P distribution server or registry. If the request to mirror server failed, it will fall back to the original registry. Currently, the mirror mode is only tested in the registry backend, and in theory, the OSS backend also supports it. @@ -356,6 +356,9 @@ Currently, the mirror mode is only tested in the registry backend, and in theory "health_check_interval": 5, // Failure counts before disabling this mirror. Use 5 as default if left empty. 
"failure_limit": 5, + // Elapsed time to pause mirror health check when the request is inactive, in seconds. + // Use 300 as default if left empty. + "health_check_pause_elapsed": 300, }, { "host": "http://dragonfly2.io:65001", @@ -393,6 +396,9 @@ Add `device.backend.config.proxy` field to enable HTTP proxy for storage backend "ping_url": "http://p2p-proxy:40901/server/ping", // Interval of P2P proxy health checking, in seconds "check_interval": 5 + // Elapsed time to pause proxy health check when the request is inactive, in seconds. + // Use 300 as default if left empty. + "check_pause_elapsed": 300, }, ... } diff --git a/docs/nydusify.md b/docs/nydusify.md index 0da352b0562..32e2239871b 100644 --- a/docs/nydusify.md +++ b/docs/nydusify.md @@ -68,7 +68,7 @@ cat /path/to/backend-config.json } ``` -Note: the `endpoint` in the s3 `backend-config.json` **should not** contains the scheme prefix. +Note: the `endpoint` in the s3 `backend-config.json` **should not** contain the scheme prefix. ``` shell nydusify convert \ @@ -78,6 +78,22 @@ nydusify convert \ --backend-config-file /path/to/backend-config.json ``` +### localfs + +``` shell +cat /path/to/backend-config.json +{ + "dir": "/path/to/blobs" +} + +nydusify convert \ + --source myregistry/repo:tag \ + --target myregistry/repo:tag-nydus \ + --backend-config-file /path/to/backend-config.json \ +``` + +Note: Image manifest is still published to target registry (`myregistry`). Blob files are published to localfs. + ## Push Nydus Image to storage backend with subcommand pack ### OSS @@ -176,9 +192,9 @@ nydusify check \ ## Mount the nydus image as a filesystem -The nydusify mount command can mount a nydus image stored in the backend as a filesystem. Now the supported backend types include Registry (default backend), s3 and oss. +The nydusify mount command can mount a nydus image stored in the backend as a filesystem. Now the supported backend types include Registry (default backend), s3, oss, and localfs. 
-When using Registy as the backend, you don't need specify the `--backend-type` . +When using Registry as the backend, you don't need to specify the `--backend-type` . ``` shell nydusify mount \ @@ -204,6 +220,26 @@ nydusify copy \ It supports copying OCI v1 or Nydus images, use the options `--all-platforms` / `--platform` to copy the images of specific platforms. +## Export to / Import from local tarball + +All you need is to change the `source` or `target` parameter in `nydusify copy` command to a local file path, which must start with `file://`. + +``` shell +# registry repository --> local tarball +nydusify copy \ + --source myregistry/repo:tag-nydus \ + --target file:///home/user/repo-tag-nydus.tar +``` + +Relative path is also supported. + +``` shell +# local tarball --> registry repository +nydusify copy \ + --source file://./repo-tag-nydus.tar \ + --target myregistry/repo:tag-nydus +``` + ## Commit nydus image from container's changes The nydusify commit command can commit a nydus image from a nydus container, like `nerdctl commit` command.
@@ -232,7 +268,7 @@ See `nydusify convert/check/mount --help` ## Use Nydusify as a package -``` +``` See `contrib/nydusify/examples/converter/main.go` ``` diff --git a/go.work b/go.work index 6f89070aac9..40651a2d2cb 100644 --- a/go.work +++ b/go.work @@ -1,7 +1,6 @@ go 1.21 use ( - ./contrib/ctr-remote ./contrib/nydus-overlayfs ./contrib/nydusify ./smoke diff --git a/misc/performance/nydusd_config.json b/misc/performance/nydusd_config.json index 1f7fd38927b..1facd6d0915 100644 --- a/misc/performance/nydusd_config.json +++ b/misc/performance/nydusd_config.json @@ -14,7 +14,7 @@ "cache": { "type": "blobcache", "config": { - "work_dir": "/var/lib/containerd-nydus/cache" + "work_dir": "/var/lib/containerd/io.containerd.snapshotter.v1.nydus/cache" } } }, diff --git a/misc/performance/snapshotter_config.toml b/misc/performance/snapshotter_config.toml index e8d3c118d9c..bcfefba75cd 100644 --- a/misc/performance/snapshotter_config.toml +++ b/misc/performance/snapshotter_config.toml @@ -1,6 +1,6 @@ version = 1 # Snapshotter's own home directory where it stores and creates necessary resources -root = "/var/lib/containerd-nydus" +root = "/var/lib/containerd/io.containerd.snapshotter.v1.nydus" # The snapshotter's GRPC server socket, containerd will connect to plugin on this socket address = "/run/containerd-nydus/containerd-nydus-grpc.sock" daemon_mode = "dedicated" diff --git a/misc/prepare.sh b/misc/prepare.sh index 80d70e74c4f..d424a770c55 100644 --- a/misc/prepare.sh +++ b/misc/prepare.sh @@ -2,24 +2,24 @@ SNAPSHOTTER_CONFIG="misc/performance/snapshotter_config.toml" if [ "$1" == "takeover_test" ]; then - SNAPSHOTTER_CONFIG="misc/takeover/snapshotter_config.toml" + sed -i 's/recover_policy = "restart"/recover_policy = "failover"/' "$SNAPSHOTTER_CONFIG" fi -readonly SNAPSHOTTER_VERSION=0.13.13 -readonly NERDCTL_VERSION=1.7.6 -readonly CNI_PLUGINS_VERSION=1.5.0 +readonly SNAPSHOTTER_VERSION=`curl -s https://api.github.com/repos/containerd/nydus-snapshotter/releases/latest | 
grep tag_name | cut -f4 -d "\""` +readonly NERDCTL_VERSION=`curl -s https://api.github.com/repos/containerd/nerdctl/releases/latest | grep tag_name | cut -f4 -d "\"" | sed 's/^v//g'` +readonly CNI_PLUGINS_VERSION=`curl -s https://api.github.com/repos/containernetworking/plugins/releases/latest | grep tag_name | cut -f4 -d "\""` # setup nerdctl and nydusd env sudo install -D -m 755 contrib/nydusify/cmd/nydusify /usr/local/bin sudo install -D -m 755 target/release/nydusd target/release/nydus-image /usr/local/bin -wget https://github.com/containerd/nydus-snapshotter/releases/download/v$SNAPSHOTTER_VERSION/nydus-snapshotter-v$SNAPSHOTTER_VERSION-linux-amd64.tar.gz -tar zxvf nydus-snapshotter-v$SNAPSHOTTER_VERSION-linux-amd64.tar.gz +wget https://github.com/containerd/nydus-snapshotter/releases/download/$SNAPSHOTTER_VERSION/nydus-snapshotter-$SNAPSHOTTER_VERSION-linux-amd64.tar.gz +tar zxvf nydus-snapshotter-$SNAPSHOTTER_VERSION-linux-amd64.tar.gz sudo install -D -m 755 bin/containerd-nydus-grpc /usr/local/bin sudo wget https://github.com/containerd/nerdctl/releases/download/v$NERDCTL_VERSION/nerdctl-$NERDCTL_VERSION-linux-amd64.tar.gz sudo tar -xzvf nerdctl-$NERDCTL_VERSION-linux-amd64.tar.gz -C /usr/local/bin sudo mkdir -p /opt/cni/bin -sudo wget https://github.com/containernetworking/plugins/releases/download/v$CNI_PLUGINS_VERSION/cni-plugins-linux-amd64-v$CNI_PLUGINS_VERSION.tgz -sudo tar -xzvf cni-plugins-linux-amd64-v$CNI_PLUGINS_VERSION.tgz -C /opt/cni/bin +sudo wget https://github.com/containernetworking/plugins/releases/download/$CNI_PLUGINS_VERSION/cni-plugins-linux-amd64-$CNI_PLUGINS_VERSION.tgz +sudo tar -xzvf cni-plugins-linux-amd64-$CNI_PLUGINS_VERSION.tgz -C /opt/cni/bin sudo install -D misc/performance/containerd_config.toml /etc/containerd/config.toml sudo systemctl restart containerd sudo install -D misc/performance/nydusd_config.json /etc/nydus/nydusd-config.fusedev.json diff --git a/misc/takeover/snapshotter_config.toml 
b/misc/takeover/snapshotter_config.toml deleted file mode 100644 index 1138d652d87..00000000000 --- a/misc/takeover/snapshotter_config.toml +++ /dev/null @@ -1,132 +0,0 @@ -version = 1 -# Snapshotter's own home directory where it stores and creates necessary resources -root = "/var/lib/containerd-nydus" -# The snapshotter's GRPC server socket, containerd will connect to plugin on this socket -address = "/run/containerd-nydus/containerd-nydus-grpc.sock" -daemon_mode = "dedicated" -# Whether snapshotter should try to clean up resources when it is closed -cleanup_on_close = false - -[system] -# Snapshotter's debug and trace HTTP server interface -enable = true -# Unix domain socket path where system controller is listening on -address = "/run/containerd-nydus/system.sock" - -[system.debug] -# Snapshotter can profile the CPU utilization of each nydusd daemon when it is being started. -# This option specifies the profile duration when nydusd is downloading and uncomproessing data. -daemon_cpu_profile_duration_secs = 5 -# Enable by assigning an address, empty indicates pprof server is disabled -pprof_address = "" - -[daemon] -# Specify a configuration file for nydusd -nydusd_config = "/etc/nydus/nydusd-config.fusedev.json" -nydusd_path = "/usr/local/bin/nydusd" -nydusimage_path = "/usr/local/bin/nydus-image" -# fusedev or fscache -fs_driver = "fusedev" -# How to process when daemon dies: "none", "restart" or "failover" -recover_policy = "failover" -# Nydusd worker thread number to handle FUSE or fscache requests, [0-1024]. -# Setting to 0 will use the default configuration of nydusd. -threads_number = 4 -# Log rotation size for nydusd, in unit MB(megabytes) -log_rotation_size = 100 - -[cgroup] -# Whether to use separate cgroup for nydusd. -enable = true -# The memory limit for nydusd cgroup, which contains all nydusd processes. -# Percentage is supported as well, please ensure it is end with "%". -# The default unit is bytes. 
Acceptable values include "209715200", "200MiB", "200Mi" and "10%". -memory_limit = "" - -[log] -# Print logs to stdout rather than logging files -log_to_stdout = false -# Snapshotter's log level -level = "info" -log_rotation_compress = true -log_rotation_local_time = true -# Max number of days to retain logs -log_rotation_max_age = 7 -log_rotation_max_backups = 5 -# In unit MB(megabytes) -log_rotation_max_size = 100 - -[metrics] -# Enable by assigning an address, empty indicates metrics server is disabled -address = ":9110" - -[remote] -convert_vpc_registry = false - -[remote.mirrors_config] -# Snapshotter will overwrite daemon's mirrors configuration -# if the values loaded from this driectory are not null before starting a daemon. -# Set to "" or an empty directory to disable it. -#dir = "/etc/nydus/certs.d" - -[remote.auth] -# Fetch the private registry auth by listening to K8s API server -enable_kubeconfig_keychain = false -# synchronize `kubernetes.io/dockerconfigjson` secret from kubernetes API server with specified kubeconfig (default `$KUBECONFIG` or `~/.kube/config`) -kubeconfig_path = "" -# Fetch the private registry auth as CRI image service proxy -enable_cri_keychain = false -# the target image service when using image proxy -#image_service_address = "/run/containerd/containerd.sock" - -[snapshot] -# Let containerd use nydus-overlayfs mount helper -enable_nydus_overlayfs = false -# Insert Kata Virtual Volume option to `Mount.Options` -enable_kata_volume = false -# Whether to remove resources when a snapshot is removed -sync_remove = false - -[cache_manager] -# Disable or enable recyclebin -disable = false -# How long to keep deleted files in recyclebin -gc_period = "24h" -# Directory to host cached files -cache_dir = "" - -[image] -public_key_file = "" -validate_signature = false - -# The configuraions for features that are not production ready -[experimental] -# Whether to enable stargz support -enable_stargz = false -# Whether to enable referrers 
support -# The option enables trying to fetch the Nydus image associated with the OCI image and run it. -# Also see https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers -enable_referrer_detect = false -# Whether to enable authentication support -# The option enables nydus snapshot to provide backend information to nydusd. -enable_backend_source = false -[experimental.tarfs] -# Whether to enable nydus tarfs mode. Tarfs is supported by: -# - The EROFS filesystem driver since Linux 6.4 -# - Nydus Image Service release v2.3 -enable_tarfs = false -# Mount rafs on host by loopdev and EROFS -mount_tarfs_on_host = false -# Only enable nydus tarfs mode for images with `tarfs hint` label when true -tarfs_hint = false -# Maximum of concurrence to converting OCIv1 images to tarfs, 0 means default -max_concurrent_proc = 0 -# Mode to export tarfs images: -# - "none" or "": do not export tarfs -# - "layer_verity_only": only generate disk verity information for a layer blob -# - "image_verity_only": only generate disk verity information for all blobs of an image -# - "layer_block": generate a raw block disk image with tarfs for a layer -# - "image_block": generate a raw block disk image with tarfs for an image -# - "layer_block_with_verity": generate a raw block disk image with tarfs for a layer with dm-verity info -# - "image_block_with_verity": generate a raw block disk image with tarfs for an image with dm-verity info -export_mode = "" diff --git a/rafs/src/metadata/direct_v6.rs b/rafs/src/metadata/direct_v6.rs index 3330aea9451..558702cc267 100644 --- a/rafs/src/metadata/direct_v6.rs +++ b/rafs/src/metadata/direct_v6.rs @@ -206,6 +206,7 @@ impl DirectSuperBlockV6 { let mut blob_table = RafsV6BlobTable::new(); let meta = &old_state.meta; r.seek(SeekFrom::Start(meta.blob_table_offset))?; + blob_table.load(r, meta.blob_table_size, meta.chunk_size, meta.flags)?; let blob_extra_infos = rafsv6_load_blob_extra_info(meta, r)?; @@ -1324,6 +1325,7 @@ 
impl RafsInodeExt for OndiskInodeWrapper { /// It depends on Self::validate() to ensure valid memory layout. fn get_chunk_info(&self, idx: u32) -> Result> { let state = self.state(); + let inode = self.disk_inode(&state); if !self.is_reg() || idx >= self.get_chunk_count() { return Err(enoent!("invalid chunk info")); @@ -1362,6 +1364,7 @@ impl RafsInodeExt for OndiskInodeWrapper { if chunk_map.is_none() { *chunk_map = Some(self.mapping.load_chunk_map()?); } + match chunk_map.as_ref().unwrap().get(chunk_addr) { None => Err(enoent!(format!( "failed to get chunk info for chunk {}/{}/{}", diff --git a/rafs/src/metadata/inode.rs b/rafs/src/metadata/inode.rs index 938b058ae6d..8301539e57a 100644 --- a/rafs/src/metadata/inode.rs +++ b/rafs/src/metadata/inode.rs @@ -17,8 +17,9 @@ use crate::metadata::direct_v6::OndiskInodeWrapper as OndiskInodeWrapperV6; use crate::metadata::layout::v5::{RafsV5ChunkInfo, RafsV5Inode}; use crate::metadata::layout::v6::{RafsV6InodeCompact, RafsV6InodeExtended}; use crate::metadata::layout::RafsXAttrs; -use crate::metadata::{Inode, RafsVersion}; +use crate::metadata::RafsVersion; use crate::RafsInodeExt; +use nydus_utils::metrics::Inode; /// An inode object wrapper for different RAFS versions. 
#[derive(Clone)] @@ -183,7 +184,7 @@ impl InodeWrapper { match self { InodeWrapper::V5(i) => i.is_sock(), InodeWrapper::V6(i) => i.is_sock(), - InodeWrapper::Ref(i) => i.as_inode().is_dir(), + InodeWrapper::Ref(i) => i.as_inode().is_sock(), } } diff --git a/rafs/src/metadata/layout/v6.rs b/rafs/src/metadata/layout/v6.rs index 6a64607fb07..4f02a6ccd20 100644 --- a/rafs/src/metadata/layout/v6.rs +++ b/rafs/src/metadata/layout/v6.rs @@ -1328,7 +1328,6 @@ impl RafsV6Device { } Err(_) => return Err(einval!("blob_id in RAFS v6 device entry is invalid")), } - if self.blocks() == 0 { let msg = format!("invalid blocks {} in Rafs v6 device entry", self.blocks()); return Err(einval!(msg)); @@ -1691,7 +1690,6 @@ impl RafsV6Blob { ); return false; } - let blob_features = match BlobFeatures::try_from(self.features) { Ok(v) => v, Err(_) => return false, @@ -1773,7 +1771,7 @@ impl RafsV6Blob { #[derive(Clone, Debug, Default)] pub struct RafsV6BlobTable { /// Base blob information array. - entries: Vec>, + pub entries: Vec>, } impl RafsV6BlobTable { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b36fa2701c4..c1f5c7bd1fd 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.72.1" +channel = "1.75.0" components = ["rustfmt", "clippy"] diff --git a/smoke/.golangci.yml b/smoke/.golangci.yml index 734653d6721..98c90d53cf4 100644 --- a/smoke/.golangci.yml +++ b/smoke/.golangci.yml @@ -8,7 +8,7 @@ linters: - goimports - revive - ineffassign - - vet + - govet - unused - misspell disable: @@ -16,6 +16,3 @@ linters: run: deadline: 4m - skip-dirs: - - misc - diff --git a/smoke/Makefile b/smoke/Makefile index 05562e6f769..4703117b6cb 100644 --- a/smoke/Makefile +++ b/smoke/Makefile @@ -13,6 +13,7 @@ build: # NYDUS_BUILDER=/path/to/latest/nydus-image \ # NYDUS_NYDUSD=/path/to/latest/nydusd \ # NYDUS_NYDUSIFY=/path/to/latest/nydusify \ +# SKIP_CASES=compressor=lz4_block,fs_version=5 \ # make test test: build golangci-lint run diff 
--git a/smoke/tests/api_test.go b/smoke/tests/api_test.go index 33cea47e81a..807c1c28189 100644 --- a/smoke/tests/api_test.go +++ b/smoke/tests/api_test.go @@ -5,13 +5,11 @@ package tests import ( - "context" "fmt" "io" "os" "path/filepath" "testing" - "time" "github.com/containerd/log" "github.com/containerd/nydus-snapshotter/pkg/converter" @@ -33,7 +31,7 @@ func (a *APIV1TestSuite) TestDaemonStatus(t *testing.T) { rootFs := texture.MakeLowerLayer(t, filepath.Join(ctx.Env.WorkDir, "root-fs")) - rafs := a.rootFsToRafs(t, ctx, rootFs) + rafs := a.buildLayer(t, ctx, rootFs) nydusd, err := tool.NewNydusd(tool.NydusdConfig{ NydusdPath: ctx.Binary.Nydusd, @@ -60,17 +58,8 @@ func (a *APIV1TestSuite) TestDaemonStatus(t *testing.T) { } }() - // The implementation of runNydusd() has checked stats, however, - // it's clear of semantic to check stats again. - newCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - select { - case <-tool.CheckReady(newCtx, nydusd.APISockPath): - return - case <-time.After(50 * time.Millisecond): - require.Fail(t, "nydusd status is not RUNNING") - } + err = nydusd.WaitStatus("RUNNING") + require.NoError(t, err) } func (a *APIV1TestSuite) TestMetrics(t *testing.T) { @@ -82,7 +71,7 @@ func (a *APIV1TestSuite) TestMetrics(t *testing.T) { rootFs := texture.MakeLowerLayer(t, filepath.Join(ctx.Env.WorkDir, "root-fs")) - rafs := a.rootFsToRafs(t, ctx, rootFs) + rafs := a.buildLayer(t, ctx, rootFs) nydusd, err := tool.NewNydusd(tool.NydusdConfig{ NydusdPath: ctx.Binary.Nydusd, @@ -164,7 +153,7 @@ func (a *APIV1TestSuite) TestPrefetch(t *testing.T) { filepath.Join(ctx.Env.WorkDir, "root-fs"), texture.LargerFileMaker("large-blob.bin", 5)) - rafs := a.rootFsToRafs(t, ctx, rootFs) + rafs := a.buildLayer(t, ctx, rootFs) config := tool.NydusdConfig{ NydusdPath: ctx.Binary.Nydusd, @@ -195,7 +184,6 @@ func (a *APIV1TestSuite) TestPrefetch(t *testing.T) { config.RafsMode = ctx.Runtime.RafsMode err = nydusd.MountByAPI(config) 
require.NoError(t, err) - time.Sleep(time.Millisecond * 15) bcm, err := nydusd.GetBlobCacheMetrics("") require.NoError(t, err) @@ -205,7 +193,49 @@ func (a *APIV1TestSuite) TestPrefetch(t *testing.T) { require.NoError(t, err) } -func (a *APIV1TestSuite) rootFsToRafs(t *testing.T, ctx *tool.Context, rootFs *tool.Layer) string { +func (a *APIV1TestSuite) TestMount(t *testing.T) { + + ctx := tool.DefaultContext(t) + + ctx.PrepareWorkDir(t) + defer ctx.Destroy(t) + + rootFs := texture.MakeLowerLayer(t, filepath.Join(ctx.Env.WorkDir, "rootfs")) + + rafs := a.buildLayer(t, ctx, rootFs) + + config := tool.NydusdConfig{ + NydusdPath: ctx.Binary.Nydusd, + MountPath: ctx.Env.MountDir, + APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"), + ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"), + } + nydusd, err := tool.NewNydusd(config) + require.NoError(t, err) + + err = nydusd.Mount() + require.NoError(t, err) + + config.BootstrapPath = rafs + config.MountPath = "/mount" + config.BackendType = "localfs" + config.BackendConfig = fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir) + config.BlobCacheDir = ctx.Env.CacheDir + config.CacheType = ctx.Runtime.CacheType + config.CacheCompressed = ctx.Runtime.CacheCompressed + config.RafsMode = ctx.Runtime.RafsMode + config.EnablePrefetch = ctx.Runtime.EnablePrefetch + config.DigestValidate = false + config.AmplifyIO = ctx.Runtime.AmplifyIO + err = nydusd.MountByAPI(config) + require.NoError(t, err) + + defer nydusd.Umount() + defer nydusd.UmountByAPI(config.MountPath) + nydusd.VerifyByPath(t, rootFs.FileTree, config.MountPath) +} + +func (a *APIV1TestSuite) buildLayer(t *testing.T, ctx *tool.Context, rootFs *tool.Layer) string { digest := rootFs.Pack(t, converter.PackOption{ BuilderPath: ctx.Binary.Builder, diff --git a/smoke/tests/benchmark_test.go b/smoke/tests/benchmark_test.go index fc7cdf67bff..6957090a445 100644 --- a/smoke/tests/benchmark_test.go +++ b/smoke/tests/benchmark_test.go @@ -18,7 +18,7 @@ 
import ( // Environment Requirement: Containerd, nerdctl >= 0.22, nydus-snapshotter, nydusd, nydus-image and nydusify. // Prepare: setup nydus for containerd, reference: https://github.com/dragonflyoss/nydus/blob/master/docs/containerd-env-setup.md. -// TestBenchmark will dump json file(benchmark.json) which includes container e2e time, image size, read-amount and read-cout. +// TestBenchmark will dump json file(benchmark.json) which includes container e2e time, image size, read-amount and read-count. // Example: // { // e2e_time: 2747131 @@ -65,25 +65,25 @@ func (b *BenchmarkTestSuite) TestBenchmark(t *testing.T) { image = "wordpress:6.1.1" } else { if !tool.SupportContainerImage(tool.ImageRepo(b.t, image)) { - b.t.Fatalf("Benchmark don't support image " + image) + b.t.Fatalf("Benchmark don't support %s image ", image) } } targetImageSize, conversionElapsed := b.prepareImage(b.t, ctx, image) - // run contaienr + // run container b.testContainerName = uuid.NewString() - containerMetic := tool.RunContainer(b.t, b.testImage, b.snapshotter, b.testContainerName) + containerMetric := tool.RunContainer(b.t, b.testImage, b.snapshotter, b.testContainerName) b.metric = tool.ContainerMetrics{ - E2ETime: containerMetic.E2ETime, + E2ETime: containerMetric.E2ETime, ConversionElapsed: time.Duration(conversionElapsed), - ReadCount: containerMetic.ReadCount, - ReadAmountTotal: containerMetic.ReadAmountTotal, + ReadCount: containerMetric.ReadCount, + ReadAmountTotal: containerMetric.ReadAmountTotal, ImageSize: targetImageSize, } - // save metirc + // save metric b.dumpMetric() - t.Logf(fmt.Sprintf("Metric: E2ETime %d ConversionElapsed %s ReadCount %d ReadAmount %d ImageSize %d", b.metric.E2ETime, b.metric.ConversionElapsed, b.metric.ReadCount, b.metric.ReadAmountTotal, b.metric.ImageSize)) + t.Logf("Metric: E2ETime %d ConversionElapsed %s ReadCount %d ReadAmount %d ImageSize %d", b.metric.E2ETime, b.metric.ConversionElapsed, b.metric.ReadCount, b.metric.ReadAmountTotal, 
b.metric.ImageSize) } func (b *BenchmarkTestSuite) prepareImage(t *testing.T, ctx *tool.Context, image string) (int64, int64) { @@ -120,18 +120,18 @@ func (b *BenchmarkTestSuite) prepareImage(t *testing.T, ctx *tool.Context, image t.Fatalf("can't read convert metric file") return 0, 0 } - var convertMetirc map[string]int64 - err = json.Unmarshal(metricData, &convertMetirc) + var convertMetric map[string]int64 + err = json.Unmarshal(metricData, &convertMetric) if err != nil { t.Fatalf("can't parsing convert metric file") return 0, 0 } if b.snapshotter == "nydus" { b.testImage = target - return convertMetirc["TargetImageSize"], convertMetirc["ConversionElapsed"] + return convertMetric["TargetImageSize"], convertMetric["ConversionElapsed"] } b.testImage = source - return convertMetirc["SourceImageSize"], 0 + return convertMetric["SourceImageSize"], 0 } func (b *BenchmarkTestSuite) dumpMetric() { diff --git a/smoke/tests/blobcache_test.go b/smoke/tests/blobcache_test.go index 4a701170eb7..ffda24c1b17 100644 --- a/smoke/tests/blobcache_test.go +++ b/smoke/tests/blobcache_test.go @@ -1,3 +1,7 @@ +// Copyright 2023 Nydus Developers. All rights reserved. 
+// +// SPDX-License-Identifier: Apache-2.0 + package tests import ( diff --git a/smoke/tests/commit_test.go b/smoke/tests/commit_test.go index 2c1a0d1e0b6..4f9c16e75cf 100644 --- a/smoke/tests/commit_test.go +++ b/smoke/tests/commit_test.go @@ -36,14 +36,14 @@ func (c *CommitTestSuite) TestCommitContainer() test.Generator { ctx.Build.FSVersion = scenario.GetString(paramFSVersion) image, committedImage := c.prepareImage(c.t, ctx, scenario.GetString(paramImage)) - return scenario.Str(), func(t *testing.T) { + return scenario.Str(), func(_ *testing.T) { c.TestCommitAndCheck(*ctx, image, committedImage) } } } -func (c *CommitTestSuite) TestCommitAndCheck(ctx tool.Context, image, commmitedImage string) { - // run nydus contaienr +func (c *CommitTestSuite) TestCommitAndCheck(ctx tool.Context, image, commitedImage string) { + // run nydus container containerName := uuid.NewString() runContainerCmd := fmt.Sprintf("sudo nerdctl --snapshotter nydus run -d -t --insecure-registry --name=%s %s sh", containerName, image) containerID := strings.Trim(tool.RunWithOutput(runContainerCmd), "\n") @@ -60,13 +60,13 @@ func (c *CommitTestSuite) TestCommitAndCheck(ctx tool.Context, image, commmitedI // commit container committedContainerName := fmt.Sprintf("%s-committed", containerName) - commitCmd := fmt.Sprintf("sudo %s commit --container %s --target %s", ctx.Binary.Nydusify, containerID, commmitedImage) + commitCmd := fmt.Sprintf("sudo %s commit --container %s --target %s", ctx.Binary.Nydusify, containerID, commitedImage) tool.RunWithoutOutput(c.t, commitCmd) // run committed container - runCommittedContainerCmd := fmt.Sprintf("sudo nerdctl --snapshotter nydus run -d -t --insecure-registry --name=%s %s sh", committedContainerName, commmitedImage) + runCommittedContainerCmd := fmt.Sprintf("sudo nerdctl --snapshotter nydus run -d -t --insecure-registry --name=%s %s sh", committedContainerName, commitedImage) tool.RunWithOutput(runCommittedContainerCmd) - defer tool.ClearContainer(c.t, 
commmitedImage, "nydus", committedContainerName) + defer tool.ClearContainer(c.t, commitedImage, "nydus", committedContainerName) // check committed file content checkFileContent(c.t, committedContainerName, "/root/commit", "This is Nydus commit") diff --git a/smoke/tests/hot_upgrade_test.go b/smoke/tests/hot_upgrade_test.go new file mode 100644 index 00000000000..d785decea4e --- /dev/null +++ b/smoke/tests/hot_upgrade_test.go @@ -0,0 +1,156 @@ +// Copyright 2024 Nydus Developers. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 + +package tests + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/containerd/nydus-snapshotter/pkg/converter" + "github.com/containerd/nydus-snapshotter/pkg/supervisor" + "github.com/dragonflyoss/nydus/smoke/tests/texture" + "github.com/dragonflyoss/nydus/smoke/tests/tool" + "github.com/dragonflyoss/nydus/smoke/tests/tool/test" + "github.com/stretchr/testify/require" +) + +type Snapshotter struct { +} + +type HotUpgradeTestSuite struct { + t *testing.T +} + +func (c *HotUpgradeTestSuite) buildLayer(t *testing.T, ctx *tool.Context, rootFs *tool.Layer) string { + digest := rootFs.Pack(t, + converter.PackOption{ + BuilderPath: ctx.Binary.Builder, + Compressor: "lz4_block", + FsVersion: "5", + }, + ctx.Env.BlobDir) + _, bootstrap := tool.MergeLayers(t, *ctx, + converter.MergeOption{ + BuilderPath: ctx.Binary.Builder, + }, + []converter.Layer{ + {Digest: digest}, + }) + return bootstrap +} + +func (c *HotUpgradeTestSuite) newNydusd(t *testing.T, ctx *tool.Context, bootstrap, name string, upgrade bool) *tool.Nydusd { + config := tool.NydusdConfig{ + NydusdPath: ctx.Binary.Nydusd, + MountPath: ctx.Env.MountDir, + APISockPath: filepath.Join(ctx.Env.WorkDir, fmt.Sprintf("nydusd-api-%s.sock", name)), + ConfigPath: filepath.Join(ctx.Env.WorkDir, fmt.Sprintf("nydusd-config.fusedev-%s.json", name)), + SupervisorSockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-supervisor.sock"), + } + if upgrade { + 
config.Upgrade = true + } + + nydusd, err := tool.NewNydusd(config) + require.NoError(t, err) + + _, err = nydusd.Run() + require.NoError(t, err) + + if upgrade { + err = nydusd.WaitStatus("INIT") + } else { + err = nydusd.WaitStatus("RUNNING") + } + require.NoError(t, err) + + config.BootstrapPath = bootstrap + config.MountPath = "/" + config.BackendType = "localfs" + config.BackendConfig = fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir) + config.EnablePrefetch = true + config.PrefetchFiles = []string{"/"} + config.BlobCacheDir = ctx.Env.CacheDir + config.CacheType = ctx.Runtime.CacheType + config.CacheCompressed = ctx.Runtime.CacheCompressed + config.RafsMode = ctx.Runtime.RafsMode + + err = nydusd.MountByAPI(config) + require.NoError(t, err) + + return nydusd +} + +func (c *HotUpgradeTestSuite) TestHotUpgrade(t *testing.T) { + ctx := tool.DefaultContext(t) + ctx.PrepareWorkDir(t) + defer ctx.Destroy(t) + + // Build nydus layer + layer := texture.MakeLowerLayer(t, filepath.Join(ctx.Env.WorkDir, "root")) + bootstrap := c.buildLayer(t, ctx, layer) + + // Start snapshotter simulator + ss, err := supervisor.NewSupervisorSet(filepath.Join(ctx.Env.WorkDir)) + require.NoError(t, err) + supervisor := ss.NewSupervisor("nydusd-supervisor") + defer ss.DestroySupervisor("nydusd-supervisor") + + // Start old nydusd to mount rootfs + oldNydusd := c.newNydusd(t, ctx, bootstrap, "old", false) + defer oldNydusd.Umount() + + // Old nydusd's state should be RUNNING + err = oldNydusd.WaitStatus("RUNNING") + require.NoError(t, err) + + // Verify filesytem on new nydusd + oldNydusd.Verify(t, layer.FileTree) + + // Snapshotter receive fuse fd from old nydusd + err = supervisor.FetchDaemonStates(oldNydusd.SendFd) + require.NoError(t, err) + + // Start new nydusd in upgrade mode (don't mount) + newNydusd := c.newNydusd(t, ctx, bootstrap, "new", true) + defer newNydusd.Umount() + + // New nydusd's state should be INIT + err = newNydusd.WaitStatus("INIT") + require.NoError(t, err) + + // 
Tells old nydusd to exit + err = oldNydusd.Exit() + require.NoError(t, err) + + // Send fuse fd to new nydusd + err = supervisor.SendStatesTimeout(time.Second * 5) + require.NoError(t, err) + err = newNydusd.Takeover() + require.NoError(t, err) + + // New nydusd's state should be RUNNING | READY + // Only have RUNNING state for older nydusd version (v1.x) + err = newNydusd.WaitStatus("RUNNING", "READY") + require.NoError(t, err) + + // Snapshotter receive fuse fd from new nydusd + err = supervisor.FetchDaemonStates(newNydusd.SendFd) + require.NoError(t, err) + + // Start new nydusd to serve mountpoint + // It's unnecessary for older nydusd version (v1.x) + err = newNydusd.StartByAPI() + require.NoError(t, err) + + // Verify filesystem on new nydusd + newNydusd.Verify(t, layer.FileTree) +} + +func TestHotUpgrade(t *testing.T) { + test.Run(t, &HotUpgradeTestSuite{t: t}) +} diff --git a/smoke/tests/image_test.go b/smoke/tests/image_test.go index e4f27dc6765..b9c8ef666b8 100644 --- a/smoke/tests/image_test.go +++ b/smoke/tests/image_test.go @@ -6,6 +6,7 @@ package tests import ( "fmt" + "os" "path/filepath" "strings" "testing" @@ -13,6 +14,7 @@ import ( "github.com/dragonflyoss/nydus/smoke/tests/tool" "github.com/dragonflyoss/nydus/smoke/tests/tool/test" "github.com/google/uuid" + "github.com/stretchr/testify/require" ) const ( @@ -45,7 +47,7 @@ func (i *ImageTestSuite) TestConvertImages() test.Generator { } // Zran and Batch can not work together. - // Zran and Encrpt can not work together. + // Zran and Encrypt can not work together. 
return (param.GetBool(paramZran) && param.GetString(paramBatch) != "0") || (param.GetBool(paramZran) && param.GetBool(paramEncrypt)) }) @@ -121,10 +123,12 @@ func (i *ImageTestSuite) TestConvertAndCopyImage(t *testing.T, ctx tool.Context, ) tool.RunWithoutOutput(t, checkCmd) - if !testCopy { - return + if testCopy { + testNydusifyCopy(t, ctx, source, target, logLevel, nydusifyPath) } +} +func testNydusifyCopy(t *testing.T, ctx tool.Context, source, target, logLevel, nydusifyPath string) { // Copy image targetCopied := fmt.Sprintf("%s_copied", target) copyCmd := fmt.Sprintf( @@ -135,11 +139,38 @@ func (i *ImageTestSuite) TestConvertAndCopyImage(t *testing.T, ctx tool.Context, tool.RunWithoutOutput(t, copyCmd) // Check copied image - checkCmd = fmt.Sprintf( + checkCmd := fmt.Sprintf( "%s %s check --source %s --target %s --nydus-image %s --nydusd %s --work-dir %s", nydusifyPath, logLevel, source, targetCopied, ctx.Binary.Builder, ctx.Binary.Nydusd, filepath.Join(ctx.Env.WorkDir, "check"), ) tool.RunWithoutOutput(t, checkCmd) + + // Save image + targetSaved := fmt.Sprintf("file://%s", filepath.Join(ctx.Env.WorkDir, "saved.tar")) + saveCmd := fmt.Sprintf( + "%s %s copy --source %s --target %s --nydus-image %s --work-dir %s", + ctx.Binary.Nydusify, logLevel, target, targetSaved, ctx.Binary.Builder, filepath.Join(ctx.Env.WorkDir, "save"), + ) + tool.RunWithoutOutput(t, saveCmd) + + // Check saved image + _, err := os.Stat(filepath.Join(ctx.Env.WorkDir, "saved.tar")) + require.NoError(t, err) + + // Load image + targetLoaded := fmt.Sprintf("%s_loaded", target) + loadCmd := fmt.Sprintf( + "%s %s copy --source %s --target %s --nydus-image %s --work-dir %s", + ctx.Binary.Nydusify, logLevel, targetSaved, targetLoaded, ctx.Binary.Builder, filepath.Join(ctx.Env.WorkDir, "load"), + ) + tool.RunWithoutOutput(t, loadCmd) + + // Check loaded image + checkCmd = fmt.Sprintf( + "%s %s check --source %s --target %s --nydus-image %s --nydusd %s --work-dir %s", + nydusifyPath, logLevel, 
source, targetLoaded, ctx.Binary.Builder, ctx.Binary.Nydusd, filepath.Join(ctx.Env.WorkDir, "check"), + ) + tool.RunWithoutOutput(t, checkCmd) } func (i *ImageTestSuite) TestGenerateChunkdicts() test.Generator { diff --git a/smoke/tests/main_test.go b/smoke/tests/main_test.go index e93598802a0..0b134112bb3 100644 --- a/smoke/tests/main_test.go +++ b/smoke/tests/main_test.go @@ -22,8 +22,16 @@ func TestMain(m *testing.M) { registryPort = "5077" os.Setenv("REGISTRY_PORT", registryPort) } - reg := tool.NewRegistry() + + var reg *tool.Registry + if os.Getenv("DISABLE_REGISTRY") == "" { + reg = tool.NewRegistry() + } + code := m.Run() - reg.Destroy() + + if reg != nil { + reg.Destroy() + } os.Exit(code) } diff --git a/smoke/tests/native_layer_test.go b/smoke/tests/native_layer_test.go index 77619a4aed8..8e72cb56a14 100644 --- a/smoke/tests/native_layer_test.go +++ b/smoke/tests/native_layer_test.go @@ -45,7 +45,6 @@ func (n *NativeLayerTestSuite) TestMakeLayers() test.Generator { Dimension(paramEncrypt, []interface{}{false, true}). Dimension(paramAmplifyIO, []interface{}{uint64(0x100000)}). 
Skip(func(param *tool.DescartesItem) bool { - // rafs v6 not support cached mode nor dummy cache if param.GetString(paramFSVersion) == "6" { return param.GetString(paramRafsMode) == "cached" || param.GetString(paramCacheType) == "" @@ -70,17 +69,16 @@ func (n *NativeLayerTestSuite) TestMakeLayers() test.Generator { } scenario := scenarios.Next() - ctx := tool.DefaultContext(n.t) - ctx.Build.Compressor = scenario.GetString(paramCompressor) - ctx.Build.FSVersion = scenario.GetString(paramFSVersion) - ctx.Build.ChunkSize = scenario.GetString(paramChunkSize) - ctx.Runtime.CacheType = scenario.GetString(paramCacheType) - ctx.Runtime.CacheCompressed = scenario.GetBool(paramCacheCompressed) - ctx.Runtime.RafsMode = scenario.GetString(paramRafsMode) - ctx.Runtime.EnablePrefetch = scenario.GetBool(paramEnablePrefetch) - ctx.Runtime.AmplifyIO = scenario.GetUInt64(paramAmplifyIO) - return scenario.Str(), func(t *testing.T) { + ctx := tool.DefaultContext(n.t) + ctx.Build.Compressor = scenario.GetString(paramCompressor) + ctx.Build.FSVersion = scenario.GetString(paramFSVersion) + ctx.Build.ChunkSize = scenario.GetString(paramChunkSize) + ctx.Runtime.CacheType = scenario.GetString(paramCacheType) + ctx.Runtime.CacheCompressed = scenario.GetBool(paramCacheCompressed) + ctx.Runtime.RafsMode = scenario.GetString(paramRafsMode) + ctx.Runtime.EnablePrefetch = scenario.GetBool(paramEnablePrefetch) + ctx.Runtime.AmplifyIO = scenario.GetUInt64(paramAmplifyIO) n.testMakeLayers(*ctx, t) } } @@ -105,7 +103,6 @@ func (n *NativeLayerTestSuite) TestAmplifyIO() test.Generator { /* Amplify io - target param */ Dimension(paramAmplifyIO, []interface{}{uint64(0x0), uint64(0x100000), uint64(0x10000000)}). 
Skip(func(param *tool.DescartesItem) bool { - // Rafs v6 not support cached mode nor dummy cache if param.GetString(paramFSVersion) == "6" { return param.GetString(paramRafsMode) == "cached" || param.GetString(paramCacheType) == "" @@ -130,17 +127,16 @@ func (n *NativeLayerTestSuite) TestAmplifyIO() test.Generator { } scenario := scenarios.Next() - ctx := tool.DefaultContext(n.t) - ctx.Build.Compressor = scenario.GetString(paramCompressor) - ctx.Build.FSVersion = scenario.GetString(paramFSVersion) - ctx.Build.ChunkSize = scenario.GetString(paramChunkSize) - ctx.Runtime.CacheType = scenario.GetString(paramCacheType) - ctx.Runtime.CacheCompressed = scenario.GetBool(paramCacheCompressed) - ctx.Runtime.RafsMode = scenario.GetString(paramRafsMode) - ctx.Runtime.EnablePrefetch = scenario.GetBool(paramEnablePrefetch) - ctx.Runtime.AmplifyIO = scenario.GetUInt64(paramAmplifyIO) - return scenario.Str(), func(t *testing.T) { + ctx := tool.DefaultContext(n.t) + ctx.Build.Compressor = scenario.GetString(paramCompressor) + ctx.Build.FSVersion = scenario.GetString(paramFSVersion) + ctx.Build.ChunkSize = scenario.GetString(paramChunkSize) + ctx.Runtime.CacheType = scenario.GetString(paramCacheType) + ctx.Runtime.CacheCompressed = scenario.GetBool(paramCacheCompressed) + ctx.Runtime.RafsMode = scenario.GetString(paramRafsMode) + ctx.Runtime.EnablePrefetch = scenario.GetBool(paramEnablePrefetch) + ctx.Runtime.AmplifyIO = scenario.GetUInt64(paramAmplifyIO) n.testMakeLayers(*ctx, t) } } diff --git a/smoke/tests/overlay_fs_test.go b/smoke/tests/overlay_fs_test.go index 818bfe93ecb..d3a92238aa4 100644 --- a/smoke/tests/overlay_fs_test.go +++ b/smoke/tests/overlay_fs_test.go @@ -51,6 +51,7 @@ func (ts *OverlayFsTestSuite) prepareTestEnv(t *testing.T) *tool.Context { // Verify lower layer mounted by nydusd ctx.Env.BootstrapPath = lowerBootstrap tool.Verify(t, *ctx, lowerLayer.FileTree) + return ctx } diff --git a/smoke/tests/performance_test.go b/smoke/tests/performance_test.go index 
ce28ed3c2d6..5b24cd7679d 100644 --- a/smoke/tests/performance_test.go +++ b/smoke/tests/performance_test.go @@ -47,13 +47,13 @@ func (p *PerformanceTestSuite) TestPerformance(_ *testing.T) { image = "wordpress:6.1.1" } else { if !tool.SupportContainerImage(tool.ImageRepo(p.t, image)) { - p.t.Fatalf("Unsupport image " + image) + p.t.Fatalf("Unsupport %s image ", image) } } // prepare test image p.prepareTestImage(p.t, ctx, image) - // run Contaienr + // run Container p.testContainerName = uuid.NewString() tool.RunContainerWithBaseline(p.t, p.testImage, p.testContainerName, mode) } diff --git a/smoke/tests/takeover_test.go b/smoke/tests/takeover_test.go index f556be0b930..5c47eee0fa4 100644 --- a/smoke/tests/takeover_test.go +++ b/smoke/tests/takeover_test.go @@ -56,8 +56,8 @@ func (f *TakeoverTestSuit) clear() { tool.RunWithoutOutput(f.t, fmt.Sprintf("sudo nerdctl --snapshotter %s image rm %s", snapshotter, f.testImage)) } -func (f *TakeoverTestSuit) rmContainer(conatinerName string) { - tool.RunWithoutOutput(f.t, fmt.Sprintf("sudo nerdctl --snapshotter %s rm -f %s", snapshotter, conatinerName)) +func (f *TakeoverTestSuit) rmContainer(containerName string) { + tool.RunWithoutOutput(f.t, fmt.Sprintf("sudo nerdctl --snapshotter %s rm -f %s", snapshotter, containerName)) } func (f *TakeoverTestSuit) TestFailover(t *testing.T) { @@ -82,10 +82,10 @@ func (f *TakeoverTestSuit) TestFailover(t *testing.T) { // check the container by requesting its wait url runArgs := tool.GetRunArgs(t, imageName) resp, err := http.Get(runArgs.WaitURL) - require.NoError(t, err, "access to the wait url of the recoverd container") + require.NoError(t, err, "access to the wait url of the recovered container") defer resp.Body.Close() if resp.StatusCode/100 != 2 { - t.Fatalf("Failed to access the wait url of the recoverd container") + t.Fatalf("Failed to access the wait url of the recovered container") } } @@ -120,10 +120,10 @@ func (f *TakeoverTestSuit) TestHotUpgrade(t *testing.T) { // check 
the container by requesting its wait url runArgs := tool.GetRunArgs(t, imageName) resp, err := http.Get(runArgs.WaitURL) - require.NoError(t, err, "access to the wait url of the recoverd container") + require.NoError(t, err, "access to the wait url of the recovered container") defer resp.Body.Close() if resp.StatusCode/100 != 2 { - t.Fatalf("Failed to access the wait url of the recoverd container") + t.Fatalf("Failed to access the wait url of the recovered container") } } diff --git a/smoke/tests/tool/container.go b/smoke/tests/tool/container.go index 97b9ad170e6..fff2d027e92 100644 --- a/smoke/tests/tool/container.go +++ b/smoke/tests/tool/container.go @@ -154,22 +154,22 @@ func RunContainerWithBaseline(t *testing.T, image string, containerName string, runURLWaitContainer(t, image, "nydus", containerName, args) defer ClearContainer(t, image, "nydus", containerName) } else { - t.Fatalf(fmt.Sprintf("%s is not in URL_WAIT", image)) + t.Fatalf("%s is not in URL_WAIT", image) } backendMetrics, err := getContainerBackendMetrics(t) if err != nil { - t.Logf(err.Error()) + t.Logf("Can't get containerd backend metrics: %s", err.Error()) } if backendMetrics.ReadAmountTotal > uint64(float64(args.BaselineReadAmount[mode])*1.05) || backendMetrics.ReadCount > uint64(float64(args.BaselineReadCount[mode])*1.05) { - t.Fatalf(fmt.Sprintf("Performance reduction with ReadAmount %d and ReadCount %d", backendMetrics.ReadAmountTotal, backendMetrics.ReadCount)) + t.Fatalf("Performance reduction with ReadAmount %d and ReadCount %d", backendMetrics.ReadAmountTotal, backendMetrics.ReadCount) } - t.Logf(fmt.Sprintf("Performance Test: ReadAmount %d and ReadCount %d", backendMetrics.ReadAmountTotal, backendMetrics.ReadCount)) + t.Logf("Performance Test: ReadAmount %d and ReadCount %d", backendMetrics.ReadAmountTotal, backendMetrics.ReadCount) } // RunContainer and return container metric func RunContainer(t *testing.T, image string, snapshotter string, containerName string) *ContainerMetrics { 
- var containerMetic ContainerMetrics + var containerMetric ContainerMetrics startTime := time.Now() // runContainer @@ -182,17 +182,17 @@ func RunContainer(t *testing.T, image string, snapshotter string, containerName defer ClearContainer(t, image, snapshotter, containerName) } - containerMetic.E2ETime = time.Since(startTime) + containerMetric.E2ETime = time.Since(startTime) if snapshotter == "nydus" { backendMetrics, err := getContainerBackendMetrics(t) if err != nil { - t.Logf(err.Error()) + t.Logf("Can't get containerd backend metrics: %s", err.Error()) } - containerMetic.ReadAmountTotal = backendMetrics.ReadAmountTotal - containerMetic.ReadCount = backendMetrics.ReadCount + containerMetric.ReadAmountTotal = backendMetrics.ReadAmountTotal + containerMetric.ReadCount = backendMetrics.ReadCount } - return &containerMetic + return &containerMetric } // RunContainerSimple just runs a container simply @@ -256,11 +256,11 @@ func getContainerBackendMetrics(t *testing.T) (*ContainerMetrics, error) { return &info, nil } -// searchAPISockPath search sock filepath in nydusd work dir, default in "/var/lib/containerd-nydus/socket" +// searchAPISockPath search sock filepath in nydusd work dir, default in "/var/lib/containerd/io.containerd.snapshotter.v1.nydus/socket" func searchAPISockPath(t *testing.T) string { var apiSockPath string - err := filepath.Walk("/var/lib/containerd-nydus/socket", func(path string, info os.FileInfo, err error) error { + err := filepath.Walk("/var/lib/containerd/io.containerd.snapshotter.v1.nydus/socket", func(path string, info os.FileInfo, err error) error { if err != nil { return err } diff --git a/smoke/tests/tool/iterator.go b/smoke/tests/tool/iterator.go index 8622643df45..bdd360a5522 100644 --- a/smoke/tests/tool/iterator.go +++ b/smoke/tests/tool/iterator.go @@ -2,10 +2,25 @@ package tool import ( "fmt" + "os" "sort" "strings" ) +func isIgnoredByEnv(param *DescartesItem) bool { + if skipCases := os.Getenv("SKIP_CASES"); skipCases != "" { + 
kvs := strings.Split(skipCases, ",") + for _, kv := range kvs { + k := strings.Split(kv, "=")[0] + v := strings.Split(kv, "=")[1] + if param.GetString(k) == v { + return true + } + } + } + return false +} + type DescartesItem struct { vals map[string]interface{} } @@ -80,7 +95,7 @@ func (d *DescartesItem) GetUInt64(name string) uint64 { // fmt.Println(item.Str()) // } type DescartesIterator struct { - cursores []int + cursors []int valLists [][]interface{} cursorMap map[string]int skip func(item *DescartesItem) bool @@ -103,7 +118,7 @@ func (c *DescartesIterator) Next() *DescartesItem { return nil } - c.cursores = c.nextCursors + c.cursors = c.nextCursors result := c.nextItem c.clearNext() @@ -118,8 +133,8 @@ func (c *DescartesIterator) HasNext() bool { func (c *DescartesIterator) calNext() { - cursors := make([]int, len(c.cursores)) - copy(cursors, c.cursores) + cursors := make([]int, len(c.cursors)) + copy(cursors, c.cursors) item := &DescartesItem{vals: make(map[string]interface{})} for { @@ -141,7 +156,7 @@ func (c *DescartesIterator) calNext() { for name, idx := range c.cursorMap { item.vals[name] = c.valLists[idx][cursors[idx]] } - if c.skip == nil || !c.skip(item) { + if !isIgnoredByEnv(item) && (c.skip == nil || !c.skip(item)) { c.haveNext(cursors, item) return } @@ -171,11 +186,11 @@ func (c *DescartesIterator) Dimension(name string, vals []interface{}) *Descarte c.cursorMap = make(map[string]int) } - c.cursores = append(c.cursores, 0) + c.cursors = append(c.cursors, 0) c.valLists = append(c.valLists, vals) - c.cursorMap[name] = len(c.cursores) - 1 + c.cursorMap[name] = len(c.cursors) - 1 - c.cursores[0] = -1 + c.cursors[0] = -1 return c } diff --git a/smoke/tests/tool/layer.go b/smoke/tests/tool/layer.go index 523ea301198..9075f9e2188 100644 --- a/smoke/tests/tool/layer.go +++ b/smoke/tests/tool/layer.go @@ -232,6 +232,9 @@ func (l *Layer) recordFileTree(t *testing.T) { l.FileTree = map[string]*File{} filepath.Walk(l.workDir, func(path string, _ 
os.FileInfo, _ error) error { targetPath := l.TargetPath(t, path) + if targetPath == "." || targetPath == ".." { + return nil + } l.FileTree[targetPath] = NewFile(t, path, targetPath) return nil }) diff --git a/smoke/tests/tool/nydusd.go b/smoke/tests/tool/nydusd.go index d1f155309b0..c340cce88b1 100644 --- a/smoke/tests/tool/nydusd.go +++ b/smoke/tests/tool/nydusd.go @@ -10,15 +10,20 @@ import ( "encoding/json" "fmt" "io" + "io/fs" "net" "net/http" "os" "os/exec" + "path/filepath" "strings" + "testing" "text/template" "time" + "github.com/google/uuid" "github.com/pkg/errors" + "github.com/stretchr/testify/require" ) type GlobalMetrics struct { @@ -69,6 +74,9 @@ type NydusdConfig struct { AccessPattern bool PrefetchFiles []string AmplifyIO uint64 + // Hot Upgrade config. + Upgrade bool + SupervisorSockPath string // Overlay config. OvlUpperDir string OvlWorkDir string @@ -76,6 +84,8 @@ type NydusdConfig struct { } type Nydusd struct { + client *http.Client + cmd *exec.Cmd NydusdConfig } @@ -168,8 +178,34 @@ func makeConfig(tplType TemplateType, conf NydusdConfig) error { return nil } -func CheckReady(ctx context.Context, sock string) <-chan bool { - ready := make(chan bool) +func newNydusd(conf NydusdConfig) (*Nydusd, error) { + args := []string{ + "--mountpoint", + conf.MountPath, + "--apisock", + conf.APISockPath, + "--log-level", + "error", + } + if len(conf.ConfigPath) > 0 { + args = append(args, "--config", conf.ConfigPath) + } + if len(conf.BootstrapPath) > 0 { + args = append(args, "--bootstrap", conf.BootstrapPath) + } + if conf.Upgrade { + args = append(args, "--upgrade") + } + if len(conf.SupervisorSockPath) > 0 { + args = append(args, "--supervisor", conf.SupervisorSockPath, "--id", uuid.NewString()) + } + if conf.Writable { + args = append(args, "--writable") + } + + cmd := exec.Command(conf.NydusdPath, args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr transport := &http.Transport{ MaxIdleConns: 10, @@ -180,7 +216,7 @@ func CheckReady(ctx context.Context, sock string) <-chan bool { Timeout: 5 * time.Second, KeepAlive: 5 * time.Second, } - return dialer.DialContext(ctx, "unix", sock) + return dialer.DialContext(ctx, "unix", conf.APISockPath) }, } @@ -189,122 +225,103 @@ func CheckReady(ctx context.Context, sock string) <-chan bool { Transport: transport, } - go func() { - for { - select { - case <-ctx.Done(): - return - default: - } - - resp, err := client.Get(fmt.Sprintf("http://unix%s", "/api/v1/daemon")) - if err != nil { - continue - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - continue - } - - var info daemonInfo - if err = json.Unmarshal(body, &info); err != nil { - continue - } - - if info.State == "RUNNING" { - ready <- true - break - } - } - }() + nydusd := &Nydusd{ + client: client, + cmd: cmd, + NydusdConfig: conf, + } - return ready + return nydusd, nil } func NewNydusd(conf NydusdConfig) (*Nydusd, error) { if err := makeConfig(NydusdConfigTpl, conf); err != nil { return nil, errors.Wrap(err, "create config file for Nydusd") } - return &Nydusd{ - NydusdConfig: conf, - }, nil + + nydusd, err := newNydusd(conf) + if err != nil { + return nil, err + } + + return nydusd, nil } func NewNydusdWithOverlay(conf NydusdConfig) (*Nydusd, error) { if err := makeConfig(NydusdOvlConfigTpl, conf); err != nil { return nil, errors.Wrap(err, "create config file for Nydusd") } - return &Nydusd{ - NydusdConfig: conf, - }, nil -} -func (nydusd *Nydusd) Mount() error { - _ = nydusd.Umount() - - args := []string{ - "--mountpoint", - nydusd.MountPath, - "--apisock", - nydusd.APISockPath, - "--log-level", - "error", + nydusd, err := newNydusd(conf) + if err != nil { + return nil, err } - if len(nydusd.ConfigPath) > 0 { - args = append(args, "--config", nydusd.ConfigPath) + + return nydusd, nil +} + +func NewNydusdWithContext(ctx 
Context) (*Nydusd, error) { + conf := NydusdConfig{ + EnablePrefetch: ctx.Runtime.EnablePrefetch, + NydusdPath: ctx.Binary.Nydusd, + BootstrapPath: ctx.Env.BootstrapPath, + ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"), + BackendType: "localfs", + BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir), + BlobCacheDir: ctx.Env.CacheDir, + APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"), + MountPath: filepath.Join(ctx.Env.WorkDir, "mnt"), + CacheType: ctx.Runtime.CacheType, + CacheCompressed: ctx.Runtime.CacheCompressed, + RafsMode: ctx.Runtime.RafsMode, + DigestValidate: false, + AmplifyIO: ctx.Runtime.AmplifyIO, } - if len(nydusd.BootstrapPath) > 0 { - args = append(args, "--bootstrap", nydusd.BootstrapPath) + + if err := makeConfig(NydusdConfigTpl, conf); err != nil { + return nil, errors.Wrap(err, "create config file for Nydusd") } - if nydusd.Writable { - args = append(args, "--writable") + + nydusd, err := newNydusd(conf) + if err != nil { + return nil, err } - cmd := exec.Command(nydusd.NydusdPath, args...) 
- cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr + return nydusd, nil +} + +func (nydusd *Nydusd) Run() (chan error, error) { + errChan := make(chan error) + if err := nydusd.cmd.Start(); err != nil { + return errChan, err + } - runErr := make(chan error) go func() { - runErr <- cmd.Run() + errChan <- nydusd.cmd.Wait() }() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + time.Sleep(2 * time.Second) - ready := CheckReady(ctx, nydusd.APISockPath) + return errChan, nil +} - select { - case err := <-runErr: - if err != nil { - return errors.Wrap(err, "run Nydusd binary") - } - case <-ready: - return nil - case <-time.After(10 * time.Second): - return errors.New("timeout to wait Nydusd ready") +func (nydusd *Nydusd) Mount() error { + _, err := nydusd.Run() + if err != nil { + return err } - return nil + return nydusd.WaitStatus("RUNNING") } func (nydusd *Nydusd) MountByAPI(config NydusdConfig) error { + tpl := template.Must(template.New("").Parse(configTpl)) - err := makeConfig(NydusdConfigTpl, config) - if err != nil { - return err - } - f, err := os.Open(config.ConfigPath) - if err != nil { - return err - } - defer f.Close() - rafsConfig, err := io.ReadAll(f) - if err != nil { - return err + var ret bytes.Buffer + if err := tpl.Execute(&ret, config); err != nil { + return errors.New("prepare config template for Nydusd") } + rafsConfig := ret.String() nydusdConfig := struct { Bootstrap string `json:"source"` @@ -313,39 +330,23 @@ func (nydusd *Nydusd) MountByAPI(config NydusdConfig) error { PrefetchFiles []string `json:"prefetch_files"` }{ Bootstrap: config.BootstrapPath, - RafsConfig: string(rafsConfig), + RafsConfig: rafsConfig, FsType: "rafs", PrefetchFiles: config.PrefetchFiles, } - transport := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - 
KeepAlive: 5 * time.Second, - } - return dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, - } - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, - } - body, err := json.Marshal(nydusdConfig) if err != nil { return err } - _, err = client.Post( + _, err = nydusd.client.Post( fmt.Sprintf("http://unix/api/v1/mount?mountpoint=%s", config.MountPath), "application/json", bytes.NewBuffer(body), ) return err + } func (nydusd *Nydusd) Umount() error { @@ -359,27 +360,121 @@ func (nydusd *Nydusd) Umount() error { return nil } -func (nydusd *Nydusd) GetGlobalMetrics() (*GlobalMetrics, error) { +func (nydusd *Nydusd) UmountByAPI(subPath string) error { + url := fmt.Sprintf("http://unix/api/v1/mount?mountpoint=%s", subPath) + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return err + } + resp, err := nydusd.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() - transport := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, + return nil +} + +func (nydusd *Nydusd) WaitStatus(states ...string) error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + var currentState string + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout to wait nydusd state, expected: %s, current: %s", states, currentState) + default: + } + + resp, err := nydusd.client.Get(fmt.Sprintf("http://unix%s", "/api/v1/daemon")) + if err != nil { + continue + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + continue + } + + var info daemonInfo + if err = json.Unmarshal(body, &info); err != nil { + continue + } + currentState = info.State + + for _, state := range states { + if currentState == state { + return nil } - 
return dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, + } } +} - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, +func (nydusd *Nydusd) StartByAPI() error { + req, err := http.NewRequest("PUT", "http://unix/api/v1/daemon/start", nil) + if err != nil { + return err + } + + resp, err := nydusd.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (nydusd *Nydusd) SendFd() error { + req, err := http.NewRequest("PUT", "http://unix/api/v1/daemon/fuse/sendfd", nil) + if err != nil { + return err } - resp, err := client.Get(fmt.Sprintf("http://unix%s", "/api/v1/metrics")) + resp, err := nydusd.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (nydusd *Nydusd) Takeover() error { + req, err := http.NewRequest("PUT", "http://unix/api/v1/daemon/fuse/takeover", nil) + if err != nil { + return err + } + + resp, err := nydusd.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (nydusd *Nydusd) Exit() error { + req, err := http.NewRequest("PUT", "http://unix/api/v1/daemon/exit", nil) + if err != nil { + return err + } + + resp, err := nydusd.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (nydusd *Nydusd) GetGlobalMetrics() (*GlobalMetrics, error) { + resp, err := nydusd.client.Get(fmt.Sprintf("http://unix%s", "/api/v1/metrics")) if err != nil { return nil, err } @@ -399,25 +494,7 @@ func (nydusd *Nydusd) GetGlobalMetrics() (*GlobalMetrics, error) { } func (nydusd *Nydusd) GetFilesMetrics(id string) (map[string]FileMetrics, error) { - transport := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, - } - return 
dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, - } - - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, - } - - resp, err := client.Get(fmt.Sprintf("http://unix/api/v1/metrics/files?id=%s", id)) + resp, err := nydusd.client.Get(fmt.Sprintf("http://unix/api/v1/metrics/files?id=%s", id)) if err != nil { return nil, err } @@ -437,25 +514,7 @@ func (nydusd *Nydusd) GetFilesMetrics(id string) (map[string]FileMetrics, error) } func (nydusd *Nydusd) GetBackendMetrics(id string) (*BackendMetrics, error) { - transport := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, - } - return dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, - } - - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, - } - - resp, err := client.Get(fmt.Sprintf("http://unix/api/v1/metrics/backend?id=%s", id)) + resp, err := nydusd.client.Get(fmt.Sprintf("http://unix/api/v1/metrics/backend?id=%s", id)) if err != nil { return nil, err } @@ -475,25 +534,7 @@ func (nydusd *Nydusd) GetBackendMetrics(id string) (*BackendMetrics, error) { } func (nydusd *Nydusd) GetLatestFileMetrics() ([][]uint64, error) { - transport := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, - } - return dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, - } - - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, - } - - resp, err := client.Get("http://unix/api/v1/metrics/files?latest=true") + resp, err := nydusd.client.Get("http://unix/api/v1/metrics/files?latest=true") if err != nil { return 
nil, err } @@ -513,30 +554,12 @@ func (nydusd *Nydusd) GetLatestFileMetrics() ([][]uint64, error) { } func (nydusd *Nydusd) GetAccessPatternMetrics(id string) ([]AccessPatternMetrics, error) { - transport := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, - } - return dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, - } - - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, - } - args := "" if len(id) > 0 { args += "?id=" + id } - resp, err := client.Get(fmt.Sprintf("http://unix/api/v1/metrics/pattern%s", args)) + resp, err := nydusd.client.Get(fmt.Sprintf("http://unix/api/v1/metrics/pattern%s", args)) if err != nil { return nil, err } @@ -560,31 +583,12 @@ func (nydusd *Nydusd) GetAccessPatternMetrics(id string) ([]AccessPatternMetrics } func (nydusd *Nydusd) GetBlobCacheMetrics(id string) (*BlobCacheMetrics, error) { - - transport := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, - } - return dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, - } - - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, - } - args := "" if len(id) > 0 { args += "?id=" + id } - resp, err := client.Get(fmt.Sprintf("http://unix/api/v1/metrics/blobcache%s", args)) + resp, err := nydusd.client.Get(fmt.Sprintf("http://unix/api/v1/metrics/blobcache%s", args)) if err != nil { return nil, err } @@ -604,26 +608,7 @@ func (nydusd *Nydusd) GetBlobCacheMetrics(id string) (*BlobCacheMetrics, error) } func (nydusd *Nydusd) GetInflightMetrics() (*InflightMetrics, error) { - - transport := 
&http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, - } - return dialer.DialContext(ctx, "unix", nydusd.APISockPath) - }, - } - - client := &http.Client{ - Timeout: 30 * time.Second, - Transport: transport, - } - - resp, err := client.Get("http://unix/api/v1/metrics/inflight") + resp, err := nydusd.client.Get("http://unix/api/v1/metrics/inflight") if err != nil { return nil, err } @@ -645,3 +630,47 @@ func (nydusd *Nydusd) GetInflightMetrics() (*InflightMetrics, error) { return &info, err } + +func (nydusd *Nydusd) Verify(t *testing.T, expectedFileTree map[string]*File) { + nydusd.VerifyByPath(t, expectedFileTree, "") +} + +func (nydusd *Nydusd) VerifyByPath(t *testing.T, expectedFileTree map[string]*File, subPath string) { + actualFiles := map[string]*File{} + mountPath := filepath.Join(nydusd.MountPath, subPath) + err := filepath.WalkDir(mountPath, func(path string, _ fs.DirEntry, err error) error { + require.Nil(t, err) + targetPath, err := filepath.Rel(mountPath, path) + require.NoError(t, err) + if targetPath == "." || targetPath == ".." 
{ + return nil + } + file := NewFile(t, path, targetPath) + actualFiles[targetPath] = file + if expectedFileTree[targetPath] != nil { + expectedFileTree[targetPath].Compare(t, file) + } else { + t.Fatalf("not found file %s in OCI layer", targetPath) + } + + return nil + }) + require.NoError(t, err) + + for targetPath, file := range expectedFileTree { + if actualFiles[targetPath] != nil { + actualFiles[targetPath].Compare(t, file) + } else { + t.Fatalf("not found file %s in nydus layer: %s %s", targetPath, nydusd.MountPath, nydusd.BootstrapPath) + } + } +} + +func Verify(t *testing.T, ctx Context, expectedFileTree map[string]*File) { + nydusd, err := NewNydusdWithContext(ctx) + require.NoError(t, err) + err = nydusd.Mount() + require.NoError(t, err) + defer nydusd.Umount() + nydusd.Verify(t, expectedFileTree) +} diff --git a/smoke/tests/tool/snapshotter.go b/smoke/tests/tool/snapshotter.go index c301e46c2a8..de65f80538e 100644 --- a/smoke/tests/tool/snapshotter.go +++ b/smoke/tests/tool/snapshotter.go @@ -17,7 +17,7 @@ import ( "github.com/pkg/errors" ) -// SnapshotterClient commnicates with nydus-snapshotter via +// SnapshotterClient communicates with nydus-snapshotter via // the system controller endpoint unix socket of nydus-snapshotter. type SnapshotterClient struct { client *http.Client diff --git a/smoke/tests/tool/test/suite.go b/smoke/tests/tool/test/suite.go index 705730ed233..c6b8b9b696d 100644 --- a/smoke/tests/tool/test/suite.go +++ b/smoke/tests/tool/test/suite.go @@ -37,7 +37,7 @@ type Generator func() (name string, testCase Case) // asynchronous/synchronized control is suite-leveled. // // Compared with github.com/onsi/ginkgo, this framework provides simpler way to organize -// cases into suite, which requires less learing of terms and less nested definitions. +// cases into suite, which requires less learning of terms and less nested definitions. // Moreover, the asynchronous run is more golang-natived, which requires no other binary. 
// // Compared with github.com/stretchr/testify, this framework provides asynchronous mode @@ -161,7 +161,7 @@ type Generator func() (name string, testCase Case) // // `go test -v --parallel 4` // 1. The cases are parallel executed, which leads to random completion. -// 2. The dynamic tests are named automicly in lack of customized name. +// 2. The dynamic tests are named automatically in lack of customized name. // // --- PASS: Test1 (0.00s) // --- PASS: Test1/TestDynamicTest_4 (5.00s) diff --git a/smoke/tests/tool/util.go b/smoke/tests/tool/util.go index 427f7fa4984..0812c92dd23 100644 --- a/smoke/tests/tool/util.go +++ b/smoke/tests/tool/util.go @@ -82,6 +82,6 @@ func ImageRepo(t *testing.T, image string) string { if len(parts) > 0 { return parts[0] } - t.Fatalf("Can't get image repo of " + image) + t.Fatalf("Can't get %s repo", image) return "" } diff --git a/smoke/tests/tool/verify.go b/smoke/tests/tool/verify.go deleted file mode 100644 index b3dd177d373..00000000000 --- a/smoke/tests/tool/verify.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2023 Nydus Developers. All rights reserved. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package tool - -import ( - "fmt" - "io/fs" - "path/filepath" - "testing" - - "github.com/containerd/log" - "github.com/stretchr/testify/require" -) - -func Verify(t *testing.T, ctx Context, expectedFiles map[string]*File) { - config := NydusdConfig{ - EnablePrefetch: ctx.Runtime.EnablePrefetch, - NydusdPath: ctx.Binary.Nydusd, - BootstrapPath: ctx.Env.BootstrapPath, - ConfigPath: filepath.Join(ctx.Env.WorkDir, "nydusd-config.fusedev.json"), - BackendType: "localfs", - BackendConfig: fmt.Sprintf(`{"dir": "%s"}`, ctx.Env.BlobDir), - BlobCacheDir: ctx.Env.CacheDir, - APISockPath: filepath.Join(ctx.Env.WorkDir, "nydusd-api.sock"), - MountPath: ctx.Env.MountDir, - CacheType: ctx.Runtime.CacheType, - CacheCompressed: ctx.Runtime.CacheCompressed, - RafsMode: ctx.Runtime.RafsMode, - DigestValidate: false, - AmplifyIO: ctx.Runtime.AmplifyIO, - } - - nydusd, err := NewNydusd(config) - require.NoError(t, err) - err = nydusd.Mount() - require.NoError(t, err) - defer func() { - if err := nydusd.Umount(); err != nil { - log.L.WithError(err).Errorf("umount") - } - }() - - actualFiles := map[string]*File{} - err = filepath.WalkDir(ctx.Env.MountDir, func(path string, _ fs.DirEntry, err error) error { - require.Nil(t, err) - - targetPath, err := filepath.Rel(ctx.Env.MountDir, path) - require.NoError(t, err) - - file := NewFile(t, path, targetPath) - actualFiles[targetPath] = file - if expectedFiles[targetPath] != nil { - expectedFiles[targetPath].Compare(t, file) - } else { - t.Fatalf("not found file %s in OCI layer", targetPath) - } - - return nil - }) - require.NoError(t, err) - - for targetPath, file := range expectedFiles { - if actualFiles[targetPath] != nil { - actualFiles[targetPath].Compare(t, file) - } else { - t.Fatalf("not found file %s in nydus layer", targetPath) - } - } -} diff --git a/src/bin/nydus-image/deduplicate.rs b/src/bin/nydus-image/deduplicate.rs index c28130e023f..cf4fb9d640f 100644 --- 
a/src/bin/nydus-image/deduplicate.rs +++ b/src/bin/nydus-image/deduplicate.rs @@ -47,6 +47,7 @@ impl From for DatabaseError { } } +#[allow(dead_code)] pub trait Database { /// Creates a new chunk in the database. fn create_chunk_table(&self) -> Result<()>; @@ -186,7 +187,6 @@ pub fn update_ctx_from_parent_bootstrap( bootstrap_path: &PathBuf, ) -> Result<()> { let (sb, _) = RafsSuper::load_from_file(bootstrap_path, Arc::new(ConfigV2::default()), false)?; - // Obtain the features of the first blob to use as the features for the blobs in chunkdict. if let Some(first_blob) = sb.superblock.get_blob_infos().first() { ctx.blob_features = first_blob.features(); @@ -271,7 +271,7 @@ impl Deduplicate { version: String, ) -> anyhow::Result<()> { let process_chunk = &mut |t: &Tree| -> Result<()> { - let node = t.lock_node(); + let node = t.borrow_mut_node(); for chunk in &node.chunks { let index = chunk.inner.blob_index(); let chunk_blob_id = blob_infos[index as usize].blob_id(); @@ -345,7 +345,7 @@ impl Algorithm { } info!( "Chunkdict size is {}", - chunkdict_size as f64 / 1024 as f64 / 1024 as f64 + chunkdict_size as f64 / 1024_f64 / 1024_f64 ); for chunk in all_chunks { if !core_image.contains(&chunk.image_reference) @@ -500,7 +500,7 @@ impl Algorithm { for chunk in all_chunks { image_chunks .entry(chunk.image_reference.clone()) - .or_insert(Vec::new()) + .or_default() .push(chunk.clone()); } for (index, chunks) in image_chunks { @@ -527,7 +527,7 @@ impl Algorithm { for chunk in chunks { let entry = image_chunks .entry(chunk.image_reference.clone()) - .or_insert(Vec::new()); + .or_default(); entry.push(chunk.clone()); } @@ -543,7 +543,7 @@ impl Algorithm { for chunk in chunk_list { let entry = version_chunks .entry(CustomString(chunk.version.clone())) - .or_insert(Vec::new()); + .or_default(); entry.push(chunk.clone()); } @@ -651,10 +651,7 @@ impl Algorithm { for (index, point) in data_point.iter().enumerate() { if point.clustered { let cluster_id = point.cluster_id; - 
cluster_map - .entry(cluster_id) - .or_insert(Vec::new()) - .push(index); + cluster_map.entry(cluster_id).or_default().push(index); } } @@ -793,7 +790,7 @@ impl Algorithm { } info!( "All chunk size is {}", - all_chunks_size as f64 / 1024 as f64 / 1024 as f64 + all_chunks_size as f64 / 1024_f64 / 1024_f64 ); let train_percentage = 0.7; @@ -805,7 +802,7 @@ impl Algorithm { } info!( "Train set size is {}", - train_set_size as f64 / 1024 as f64 / 1024 as f64 + train_set_size as f64 / 1024_f64 / 1024_f64 ); let mut test_set_size = 0; @@ -814,7 +811,7 @@ impl Algorithm { } info!( "Test set size is {}", - test_set_size as f64 / 1024 as f64 / 1024 as f64 + test_set_size as f64 / 1024_f64 / 1024_f64 ); let mut version_datadict: HashMap> = HashMap::new(); @@ -883,7 +880,7 @@ impl Algorithm { } info!( "After deduplicating test set size is {} and deduplicating rate is {} ", - min_test_size as f64 / 1024 as f64 / 1024 as f64, + min_test_size as f64 / 1024_f64 / 1024_f64, 1.0 - (min_test_size as f64) / (test_set_size as f64) ); Ok((min_data_dict, datadict)) @@ -900,6 +897,7 @@ struct DataPoint { cluster_id: i32, } +#[allow(dead_code)] pub trait Table: Sync + Send + Sized + 'static where Err: std::error::Error + 'static, diff --git a/src/bin/nydus-image/inspect.rs b/src/bin/nydus-image/inspect.rs index 0a0e720f72d..13827f6926e 100644 --- a/src/bin/nydus-image/inspect.rs +++ b/src/bin/nydus-image/inspect.rs @@ -392,7 +392,7 @@ RAFS Blob Size: {rafs_size} } } } else { - let file_path = self.rafs_meta.path_from_ino(ino as u64)?; + let file_path = self.rafs_meta.path_from_ino(ino)?; file_paths.push(file_path); }; Ok(file_paths) diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs index 5fa3a3a3c10..65b2e38c777 100644 --- a/src/bin/nydus-image/main.rs +++ b/src/bin/nydus-image/main.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -#![deny(warnings)] +// #![deny(warnings)] #[macro_use(crate_authors)] extern crate clap; #[macro_use] @@ -18,9 +18,10 @@ use 
crate::deduplicate::{ SqliteDatabase, }; use std::convert::TryFrom; -use std::fs::{self, metadata, DirEntry, File, OpenOptions}; +use std::fs::{self, metadata, DirEntry, OpenOptions}; use std::os::unix::fs::FileTypeExt; use std::path::{Path, PathBuf}; +use std::result::Result::Ok; use std::sync::{Arc, Mutex}; use anyhow::{bail, Context, Result}; @@ -33,8 +34,10 @@ use nydus_builder::{ parse_chunk_dict_arg, ArtifactStorage, BlobCacheGenerator, BlobCompactor, BlobManager, BootstrapManager, BuildContext, BuildOutput, Builder, ChunkdictBlobInfo, ChunkdictChunkInfo, ConversionType, DirectoryBuilder, Feature, Features, Generator, HashChunkDict, Merger, - Prefetch, PrefetchPolicy, StargzBuilder, TarballBuilder, WhiteoutSpec, + Prefetch, PrefetchPolicy, StargzBuilder, TarballBuilder, Tree, TreeNode, WhiteoutSpec, }; + +use nydus_rafs::metadata::layout::v6::RafsV6BlobTable; use nydus_rafs::metadata::{MergeError, RafsSuper, RafsSuperConfig, RafsVersion}; use nydus_storage::backend::localfs::LocalFs; use nydus_storage::backend::BlobBackend; @@ -48,6 +51,7 @@ use nydus_utils::{ }; use serde::{Deserialize, Serialize}; +use crate::prefetch::update_ctx_from_bootstrap; use crate::unpack::{OCIUnpacker, Unpacker}; use crate::validator::Validator; @@ -58,6 +62,7 @@ use std::str::FromStr; mod deduplicate; mod inspect; +mod prefetch; mod stat; mod unpack; mod validator; @@ -529,6 +534,36 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .arg(arg_output_json.clone()), ); + let app = app.subcommand( + App::new("optimize") + .about("Optimize By Prefetch") + .arg( + Arg::new("bootstrap") + .help("File path of RAFS metadata") + .short('B') + .long("bootstrap") + .required(true), + ) + .arg( + Arg::new("prefetch-files") + .long("prefetch-files") + .short('p') + .help("Prefetch files") + .action(ArgAction::Set) + .num_args(1), + ) + .arg(arg_config.clone()) + .arg( + Arg::new("blob-dir") + .long("blob-dir") + .short('D') + .conflicts_with("config") + .help( + "Directory for localfs 
storage backend, hosting data blobs and cache files", + ), + ), + ); + #[cfg(target_os = "linux")] let app = app.subcommand( App::new("export") @@ -671,16 +706,58 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .required(true), ) .arg( - Arg::new("config") - .long("config") - .short('C') - .help("config to compactor") - .required(true), + Arg::new("backend-type") + .long("backend-type") + .help(format!( + "Type of backend [possible values: {}]", + BlobFactory::supported_backends() + .into_iter() + .filter(|x| x != "localfs") + .collect::>() + .join(", ") + )) + .required(false) ) .arg( Arg::new("backend-config") .long("backend-config") - .help("config file of backend") + .help("Config string of backend") + .required(false), + ) + .arg( + Arg::new("backend-config-file") + .long("backend-config-file") + .help("Config file of backend") + .conflicts_with("backend-config") + .required(false), + ) + .arg( + Arg::new("min-used-ratio") + .long("min-used-ratio") + .help("Lower bound of used ratio for blobs to be kept, possible values: 0-99, 0 means disable") + ) + .arg( + Arg::new("compact-blob-size") + .long("compact-blob-size") + .help("Upper bound of blob size for blobs to be compacted, in bytes") + ) + .arg( + Arg::new("max-compact-size") + .long("max-compact-size") + .help("Upper bound of compacted blob size, in bytes") + ) + .arg( + Arg::new("layers-to-compact") + .long("layers-to-compact") + .help("If number of blobs >= layers_to_compact, do compact. 
0 means always compact") + ) + .arg( + Arg::new("blob-dir") + .long("blob-dir") + .short('D') + .help( + "Local blobs dir for blobs not in backend, also for output blobs", + ) .required(true), ) .arg( arg_chunk_dict ) @@ -703,10 +780,31 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { .help("File path of RAFS metadata") .required_unless_present("bootstrap"), ) + .arg( + Arg::new("backend-type") + .long("backend-type") + .help(format!( + "Type of backend [possible values: {}]", + BlobFactory::supported_backends() + .into_iter() + .filter(|x| x != "localfs") + .collect::>() + .join(", ") + )) + .required(false) + .group("backend"), + ) .arg( Arg::new("backend-config") .long("backend-config") - .help("config file of backend") + .help("Config string of backend") + .required(false), + ) + .arg( + Arg::new("backend-config-file") + .long("backend-config-file") + .help("Config file of backend") + .conflicts_with("backend-config") .required(false), ) .arg( @@ -721,24 +819,29 @@ fn prepare_cmd_args(bti_string: &'static str) -> App { Arg::new("blob") .long("blob") .short('b') - .help("path to RAFS data blob file") - .required(false), + .help("Path to RAFS data blob file") + .required(false) + .group("backend"), ) .arg( Arg::new("blob-dir") .long("blob-dir") .short('D') - .conflicts_with("config") .help( "Directory for localfs storage backend, hosting data blobs and cache files", - ), + ) + .group("backend"), ) - .arg(arg_config) .arg( Arg::new("output") .long("output") - .help("path for output tar file") + .help("Path for output tar file") .required(true), + ) + .group( + clap::ArgGroup::new("backend") + .args(["backend-type", "blob", "blob-dir"]) + .required(false), ), ) } @@ -808,6 +911,8 @@ fn main() -> Result<()> { Command::compact(matches, &build_info) } else if let Some(matches) = cmd.subcommand_matches("unpack") { Command::unpack(matches) + } else if let Some(matches) = cmd.subcommand_matches("optimize") { + Command::optimize(matches) } else { #[cfg(target_os 
= "linux")] if let Some(matches) = cmd.subcommand_matches("export") { @@ -1090,6 +1195,7 @@ impl Command { features, encrypt, ); + build_ctx.set_fs_version(version); build_ctx.set_chunk_size(chunk_size); build_ctx.set_batch_size(batch_size); @@ -1183,6 +1289,7 @@ impl Command { | ConversionType::TarToStargz | ConversionType::TargzToStargz => unimplemented!(), }; + let build_output = timing_tracer!( { builder @@ -1217,7 +1324,7 @@ impl Command { check_bootstrap_versions_consistency(&mut build_ctx, &source_bootstrap_paths)?; update_ctx_from_parent_bootstrap(&mut build_ctx, &source_bootstrap_paths[0])?; - for (_, bootstrap_path) in source_bootstrap_paths.iter().enumerate() { + for bootstrap_path in source_bootstrap_paths.iter() { let path_name = bootstrap_path.as_path(); // Extract the image name and version name from the bootstrap directory. @@ -1436,17 +1543,19 @@ impl Command { } fn compact(matches: &ArgMatches, build_info: &BuildTimeInfo) -> Result<()> { - let config = - Self::get_configuration(matches).context("failed to get configuration information")?; - config - .internal - .set_blob_accessible(matches.get_one::("config").is_some()); let bootstrap_path = PathBuf::from(Self::get_bootstrap(matches)?); let dst_bootstrap = match matches.get_one::("output-bootstrap") { None => bootstrap_path.with_extension("bootstrap.compact"), Some(s) => PathBuf::from(s), }; + let (config, backend) = match Self::get_backend(matches, "compactor") { + Ok((c, b)) => (c, b), + Err(e) => { + bail!("{}, --blob-dir or --backend-type must be specified", e); + } + }; + let (rs, _) = RafsSuper::load_from_file(&bootstrap_path, config.clone(), false)?; info!("load bootstrap {:?} successfully", bootstrap_path); let chunk_dict = match matches.get_one::("chunk-dict") { @@ -1458,18 +1567,26 @@ impl Command { )?), }; - let backend = Self::get_backend(matches, "compactor")?; + let mut compact_config: nydus_builder::CompactConfig = Default::default(); + if let Some(min_used_ratio) = 
matches.get_one::("min-used-ratio") { + compact_config.min_used_ratio = min_used_ratio.parse()?; + } + if let Some(compact_blob_size) = matches.get_one::("compact-blob-size") { + compact_config.compact_blob_size = compact_blob_size.parse()?; + } + if let Some(max_compact_size) = matches.get_one::("max-compact-size") { + compact_config.max_compact_size = max_compact_size.parse()?; + } + if let Some(layers_to_compact) = matches.get_one::("layers-to-compact") { + compact_config.layers_to_compact = layers_to_compact.parse()?; + } - let config_file_path = matches.get_one::("config").unwrap(); - let file = File::open(config_file_path) - .with_context(|| format!("failed to open config file {}", config_file_path))?; - let config = serde_json::from_reader(file) - .with_context(|| format!("invalid config file {}", config_file_path))?; + compact_config.blobs_dir = matches.get_one::("blob-dir").unwrap().clone(); let version = rs.meta.version.try_into().unwrap(); let compressor = rs.meta.get_compressor(); if let Some(build_output) = - BlobCompactor::compact(rs, dst_bootstrap, chunk_dict, backend, &config)? + BlobCompactor::compact(rs, dst_bootstrap, chunk_dict, backend, &compact_config)? 
{ OutputSerializer::dump(matches, build_output, build_info, compressor, version)?; } @@ -1478,19 +1595,15 @@ impl Command { fn unpack(matches: &ArgMatches) -> Result<()> { let bootstrap = Self::get_bootstrap(matches)?; - let config = Self::get_configuration(matches)?; - config - .internal - .set_blob_accessible(matches.get_one::("config").is_some()); let output = matches.get_one::("output").expect("pass in output"); if output.is_empty() { return Err(anyhow!("invalid empty --output option")); } - let blob = matches.get_one::("blob").map(|s| s.as_str()); - let backend: Option> = match blob { - Some(blob_path) => { - let blob_path = PathBuf::from(blob_path); + let (config, backend): (Arc, Arc) = + // if --blob is specified, use localfs backend and default config + if let Some(p) = matches.get_one::("blob") { + let blob_path = PathBuf::from(p); let local_fs_conf = LocalFsConfig { blob_file: blob_path.to_str().unwrap().to_owned(), dir: Default::default(), @@ -1499,21 +1612,20 @@ impl Command { let local_fs = LocalFs::new(&local_fs_conf, Some("unpacker")) .with_context(|| format!("fail to create local backend for {:?}", blob_path))?; - Some(Arc::new(local_fs)) - } - None => { - if let Some(backend) = &config.backend { - Some(BlobFactory::new_backend(&backend, "unpacker")?) - } else { - match Self::get_backend(matches, "unpacker") { - Ok(backend) => Some(backend), - Err(_) => bail!("one of `--blob`, `--blob-dir` and `--backend-config` must be specified"), + (Arc::new(ConfigV2::default()), Arc::new(local_fs)) + } else { + match Self::get_backend(matches, "unpacker") { + Ok((c, b)) => (c, b), + Err(e) => { + bail!( + "{}, --blob, --blob-dir or --backend-type must be specified", + e + ); } } - } - }; + }; - OCIUnpacker::new(bootstrap, backend, output) + OCIUnpacker::new(bootstrap, Some(backend), output) .with_context(|| "fail to create unpacker")? 
.unpack(config) .with_context(|| "fail to unpack") @@ -1561,6 +1673,52 @@ impl Command { Ok(()) } + fn optimize(matches: &ArgMatches) -> Result<()> { + let blobs_dir_path = Self::get_blobs_dir(matches).unwrap(); + let prefetch_files = Self::get_prefetch_files(matches).unwrap(); + prefetch_files.iter().for_each(|f| println!("{}", f)); + let bootstrap_path = Self::get_bootstrap(matches)?; + let config = Self::get_configuration(matches)?; + config.internal.set_blob_accessible(true); + let mut build_ctx = BuildContext { + blob_id: String::from("Prefetch-blob"), + compressor: compress::Algorithm::Zstd, + blob_inline_meta: true, + ..Default::default() + }; + + let sb = update_ctx_from_bootstrap(&mut build_ctx, config, bootstrap_path)?; + let mut tree = Tree::from_bootstrap(&sb, &mut ()).unwrap(); + + let mut prefetch_nodes: Vec = Vec::new(); + // Init prefetch nodes + for f in prefetch_files.iter() { + let path = PathBuf::from(f); + if let Some(tree) = tree.get_node(&path) { + prefetch_nodes.push(tree.node.clone()); + } + } + + let bootstrap_path = ArtifactStorage::SingleFile(PathBuf::from("nydus_prefetch_bootstrap")); + let mut bootstrap_mgr = BootstrapManager::new(Some(bootstrap_path), None); + let blobs = sb.superblock.get_blob_infos(); + let mut rafsv6table = RafsV6BlobTable::new(); + for blob in &blobs { + rafsv6table.entries.push(blob.clone()); + } + + Generator::generate_prefetch( + &mut tree, + &mut build_ctx, + &mut bootstrap_mgr, + &mut rafsv6table, + blobs_dir_path.to_path_buf(), + prefetch_nodes, + ) + .unwrap(); + Ok(()) + } + fn inspect(matches: &ArgMatches) -> Result<()> { let bootstrap_path = Self::get_bootstrap(matches)?; let mut config = Self::get_configuration(matches)?; @@ -1661,6 +1819,32 @@ impl Command { } } + fn get_blobs_dir(matches: &ArgMatches) -> Result<&Path> { + match matches.get_one::("blob-dir") { + Some(s) => Ok(Path::new(s)), + None => bail!("missing parameter `blob-dir`"), + } + } + + fn get_prefetch_files(matches: &ArgMatches) -> 
Result> { + match matches.get_one::("prefetch-files") { + Some(v) => { + let content = std::fs::read_to_string(v) + .map_err(|_| anyhow!("failed to read prefetch files from {}", v))?; + + let mut prefetch_files: Vec = Vec::new(); + for line in content.lines() { + if line.is_empty() || line.trim().is_empty() { + continue; + } + prefetch_files.push(line.trim().to_string()); + } + Ok(prefetch_files) + } + None => panic!("missing parameter `prefetch-files`"), + } + } + fn get_bootstrap_storage(matches: &ArgMatches) -> Result { if let Some(s) = matches.get_one::("bootstrap") { Ok(ArtifactStorage::SingleFile(s.into())) @@ -1778,15 +1962,35 @@ impl Command { fn get_backend( matches: &ArgMatches, blob_id: &str, - ) -> Result> { - let cfg_file = matches - .get_one::("backend-config") - .context("missing backend-config argument")?; - let cfg = ConfigV2::from_file(cfg_file)?; - let backend_cfg = cfg.get_backend_config()?; - let backend = BlobFactory::new_backend(backend_cfg, blob_id)?; - - Ok(backend) + ) -> Result<(Arc, Arc)> { + let config: Arc; + let backend: Arc; + if let Some(backend_type) = matches.get_one::("backend-type") { + let content = + if let Some(backend_file) = matches.get_one::("backend-config-file") { + fs::read_to_string(backend_file).with_context(|| { + format!("fail to read backend config file {:?}", backend_file) + })? 
+ } else if let Some(backend_config) = matches.get_one::("backend-config") { + backend_config.clone() + } else { + bail!("--backend-config or --backend-config-file must be specified"); + }; + + if backend_type == "localfs" { + bail!("Use --blob-dir to specify localfs backend"); + } else { + backend = BlobFactory::new_backend_from_json(backend_type, &content, blob_id)?; + config = Arc::new(ConfigV2::default()); + } + } else if let Some(dir) = matches.get_one::("blob-dir") { + config = Arc::new(ConfigV2::new_localfs("", dir)?); + backend = BlobFactory::new_backend(config.backend.as_ref().unwrap(), blob_id)?; + } else { + return Err(anyhow!("invalid backend configuration")); + } + + Ok((config, backend)) } fn get_blob_id(matches: &ArgMatches) -> Result { diff --git a/src/bin/nydus-image/optimize.rs b/src/bin/nydus-image/optimize.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/bin/nydus-image/prefetch.rs b/src/bin/nydus-image/prefetch.rs new file mode 100644 index 00000000000..a9325db81ea --- /dev/null +++ b/src/bin/nydus-image/prefetch.rs @@ -0,0 +1,27 @@ +use anyhow::{Context, Result}; +use nydus_api::ConfigV2; +use nydus_builder::{BuildContext, ConversionType}; +use nydus_rafs::metadata::RafsSuper; +use nydus_rafs::metadata::RafsVersion; +use std::result::Result::Ok; +use std::{path::Path, sync::Arc}; + +pub fn update_ctx_from_bootstrap( + ctx: &mut BuildContext, + config: Arc, + bootstrap_path: &Path, +) -> Result { + let (sb, _) = RafsSuper::load_from_file(bootstrap_path, config, false)?; + + ctx.blob_features = sb.superblock.get_blob_infos().first().unwrap().features(); + + let config = sb.meta.get_config(); + if config.is_tarfs_mode { + ctx.conversion_type = ConversionType::TarToRafs; + } + + ctx.fs_version = + RafsVersion::try_from(sb.meta.version).context("Failed to get RAFS version")?; + ctx.compressor = config.compressor; + Ok(sb) +} diff --git a/src/bin/nydus-image/stat.rs b/src/bin/nydus-image/stat.rs index 2ee8c796d97..c1e494a408c 
100644 --- a/src/bin/nydus-image/stat.rs +++ b/src/bin/nydus-image/stat.rs @@ -171,7 +171,7 @@ impl ImageStat { }; let pre = &mut |t: &Tree| -> Result<()> { - let node = t.lock_node(); + let node = t.borrow_mut_node(); if node.is_reg() { image.files += 1; if node.is_hardlink() { diff --git a/src/bin/nydus-image/validator.rs b/src/bin/nydus-image/validator.rs index 95e8355c277..143a5088493 100644 --- a/src/bin/nydus-image/validator.rs +++ b/src/bin/nydus-image/validator.rs @@ -33,7 +33,7 @@ impl Validator { let tree = Tree::from_bootstrap(&self.sb, &mut ()).context(err)?; let pre = &mut |t: &Tree| -> Result<()> { - let node = t.lock_node(); + let node = t.borrow_mut_node(); if verbosity { println!("inode: {}", node); for chunk in &node.chunks { diff --git a/src/bin/nydusctl/main.rs b/src/bin/nydusctl/main.rs index 0d0741fc631..c8512646bfd 100644 --- a/src/bin/nydusctl/main.rs +++ b/src/bin/nydusctl/main.rs @@ -26,16 +26,15 @@ use commands::{ CommandBackend, CommandCache, CommandDaemon, CommandFsStats, CommandMount, CommandUmount, }; use nydus::get_build_time_info; -use nydus_api::BuildTimeInfo; lazy_static! 
{ - static ref BTI: BuildTimeInfo = get_build_time_info().1; + static ref BTI_STRING: String = get_build_time_info().0; } #[tokio::main] async fn main() -> Result<()> { let app = Command::new("A client to query and configure the nydusd daemon\n") - .version(BTI.package_ver.as_str()) + .version(BTI_STRING.as_str()) .author(crate_authors!()) .arg( Arg::new("sock") diff --git a/storage/src/backend/connection.rs b/storage/src/backend/connection.rs index 6b6b2e69e43..75de84a3640 100644 --- a/storage/src/backend/connection.rs +++ b/storage/src/backend/connection.rs @@ -7,7 +7,7 @@ use std::cell::RefCell; use std::collections::HashMap; use std::io::{Read, Result}; use std::str::FromStr; -use std::sync::atomic::{AtomicBool, AtomicI16, AtomicU8, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicI16, AtomicU64, AtomicU8, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use std::{fmt, thread}; @@ -185,14 +185,16 @@ struct ProxyHealth { status: AtomicBool, ping_url: Option, check_interval: Duration, + check_pause_elapsed: u64, } impl ProxyHealth { - fn new(check_interval: u64, ping_url: Option) -> Self { + fn new(check_interval: u64, check_pause_elapsed: u64, ping_url: Option) -> Self { ProxyHealth { status: AtomicBool::from(true), ping_url, check_interval: Duration::from_secs(check_interval), + check_pause_elapsed, } } @@ -265,6 +267,8 @@ pub(crate) struct Connection { proxy: Option>, pub mirrors: Vec>, pub shutdown: AtomicBool, + /// Timestamp of connection's last active request, represents as duration since UNIX_EPOCH in seconds. 
+ last_active: Arc, } #[derive(Debug)] @@ -314,7 +318,11 @@ impl Connection { }; Some(Arc::new(Proxy { client: Self::build_connection(&config.proxy.url, config)?, - health: ProxyHealth::new(config.proxy.check_interval, ping_url), + health: ProxyHealth::new( + config.proxy.check_interval, + config.proxy.check_pause_elapsed, + ping_url, + ), fallback: config.proxy.fallback, use_http: config.proxy.use_http, replace_scheme: AtomicI16::new(SCHEME_REVERSION_CACHE_UNSET), @@ -340,9 +348,15 @@ impl Connection { proxy, mirrors, shutdown: AtomicBool::new(false), + last_active: Arc::new(AtomicU64::new( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), + )), }); - // Start proxy's health checking thread. + // Start proxy's health checking thread. connection.start_proxy_health_thread(config.connect_timeout as u64); // Start mirrors' health checking thread. @@ -355,37 +369,47 @@ impl Connection { if let Some(proxy) = self.proxy.as_ref() { if proxy.health.ping_url.is_some() { let proxy = proxy.clone(); - // Spawn thread to update the health status of proxy server + let last_active = Arc::clone(&self.last_active); + + // Spawn thread to update the health status of proxy server. 
thread::spawn(move || { let ping_url = proxy.health.ping_url.as_ref().unwrap(); let mut last_success = true; loop { - let client = Client::new(); - let _ = client - .get(ping_url.clone()) - .timeout(Duration::from_secs(connect_timeout as u64)) - .send() - .map(|resp| { - let success = is_success_status(resp.status()); - if last_success && !success { - warn!( - "Detected proxy unhealthy when pinging proxy, response status {}", - resp.status() - ); - } else if !last_success && success { - info!("Backend proxy recovered") - } - last_success = success; - proxy.health.set(success); - }) - .map_err(|e| { - if last_success { - warn!("Detected proxy unhealthy when ping proxy, {}", e); - } - last_success = false; - proxy.health.set(false) - }); + let elapsed = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + - last_active.load(Ordering::Relaxed); + // If the connection is not active for a set time, skip proxy health check. + if elapsed <= proxy.health.check_pause_elapsed { + let client = Client::new(); + let _ = client + .get(ping_url.clone()) + .timeout(Duration::from_secs(connect_timeout as u64)) + .send() + .map(|resp| { + let success = is_success_status(resp.status()); + if last_success && !success { + warn!( + "Detected proxy unhealthy when pinging proxy, response status {}", + resp.status() + ); + } else if !last_success && success { + info!("Backend proxy recovered") + } + last_success = success; + proxy.health.set(success); + }) + .map_err(|e| { + if last_success { + warn!("Detected proxy unhealthy when ping proxy, {}", e); + } + last_success = false; + proxy.health.set(false) + }); + } thread::sleep(proxy.health.check_interval); } @@ -397,6 +421,9 @@ impl Connection { fn start_mirrors_health_thread(&self, timeout: u64) { for mirror in self.mirrors.iter() { let mirror_cloned = mirror.clone(); + let last_active = Arc::clone(&self.last_active); + + // Spawn thread to update the health status of mirror server. 
thread::spawn(move || { let mirror_health_url = if mirror_cloned.config.ping_url.is_empty() { format!("{}/v2", mirror_cloned.config.host) @@ -410,35 +437,43 @@ impl Connection { let client = Client::new(); loop { - // Try to recover the mirror server when it is unavailable. - if !mirror_cloned.status.load(Ordering::Relaxed) { - info!( - "[mirror] server unhealthy, try to recover: {}", - mirror_cloned.config.host - ); + let elapsed = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + - last_active.load(Ordering::Relaxed); + // If the connection is not active for a set time, skip mirror health check. + if elapsed <= mirror_cloned.config.health_check_pause_elapsed { + // Try to recover the mirror server when it is unavailable. + if !mirror_cloned.status.load(Ordering::Relaxed) { + info!( + "[mirror] server unhealthy, try to recover: {}", + mirror_cloned.config.host + ); - let _ = client - .get(mirror_health_url.as_str()) - .timeout(Duration::from_secs(timeout as u64)) - .send() - .map(|resp| { - // If the response status is less than StatusCode::INTERNAL_SERVER_ERROR, - // the mirror server is recovered. - if resp.status() < StatusCode::INTERNAL_SERVER_ERROR { - info!( - "[mirror] server recovered: {}", - mirror_cloned.config.host + let _ = client + .get(mirror_health_url.as_str()) + .timeout(Duration::from_secs(timeout as u64)) + .send() + .map(|resp| { + // If the response status is less than StatusCode::INTERNAL_SERVER_ERROR, + // the mirror server is recovered. 
+ if resp.status() < StatusCode::INTERNAL_SERVER_ERROR { + info!( + "[mirror] server recovered: {}", + mirror_cloned.config.host + ); + mirror_cloned.failed_times.store(0, Ordering::Relaxed); + mirror_cloned.status.store(true, Ordering::Relaxed); + } + }) + .map_err(|e| { + warn!( + "[mirror] failed to recover server: {}, {}", + mirror_cloned.config.host, e ); - mirror_cloned.failed_times.store(0, Ordering::Relaxed); - mirror_cloned.status.store(true, Ordering::Relaxed); - } - }) - .map_err(|e| { - warn!( - "[mirror] failed to recover server: {}, {}", - mirror_cloned.config.host, e - ); - }); + }); + } } thread::sleep(Duration::from_secs( @@ -467,6 +502,13 @@ impl Connection { if self.shutdown.load(Ordering::Acquire) { return Err(ConnectionError::Disconnected); } + self.last_active.store( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(), + Ordering::Relaxed, + ); if let Some(proxy) = &self.proxy { if proxy.health.ok() { @@ -713,7 +755,7 @@ mod tests { #[test] fn test_proxy_health() { - let checker = ProxyHealth::new(5, None); + let checker = ProxyHealth::new(5, 300, None); assert!(checker.ok()); assert!(checker.ok()); @@ -741,6 +783,7 @@ mod tests { assert_eq!(config.connect_timeout, 5); assert_eq!(config.retry_limit, 0); assert_eq!(config.proxy.check_interval, 5); + assert_eq!(config.proxy.check_pause_elapsed, 300); assert!(config.proxy.fallback); assert_eq!(config.proxy.ping_url, ""); assert_eq!(config.proxy.url, ""); diff --git a/storage/src/cache/worker.rs b/storage/src/cache/worker.rs index d76f4fe251d..e174feaebbe 100644 --- a/storage/src/cache/worker.rs +++ b/storage/src/cache/worker.rs @@ -507,8 +507,8 @@ mod tests { assert!(mgr .send_prefetch_message(AsyncPrefetchMessage::RateLimiter(u64::MAX)) .is_ok()); - assert_eq!(mgr.prefetch_inflight.load(Ordering::Acquire), 3); - thread::sleep(Duration::from_secs(1)); + assert!(mgr.prefetch_inflight.load(Ordering::Acquire) <= 3); + thread::sleep(Duration::from_secs(2)); 
assert!(mgr.prefetch_inflight.load(Ordering::Acquire) <= 2); assert!(mgr.prefetch_inflight.load(Ordering::Acquire) >= 1); thread::sleep(Duration::from_secs(3)); diff --git a/storage/src/device.rs b/storage/src/device.rs index 6e6cbc15ed6..c8b44347377 100644 --- a/storage/src/device.rs +++ b/storage/src/device.rs @@ -229,6 +229,36 @@ impl BlobInfo { blob_info } + /// Set the chunk count + pub fn set_chunk_count(&mut self, count: usize) { + self.chunk_count = count as u32; + } + + /// Set compressed size + pub fn set_compressed_size(&mut self, size: usize) { + self.compressed_size = size as u64; + } + + /// Set uncompressed size + pub fn set_uncompressed_size(&mut self, size: usize) { + self.uncompressed_size = size as u64; + } + + /// Set meta ci compressed size + pub fn set_meta_ci_compressed_size(&mut self, size: usize) { + self.meta_ci_compressed_size = size as u64; + } + + /// Set meta ci uncompressed size + pub fn set_meta_ci_uncompressed_size(&mut self, size: usize) { + self.meta_ci_uncompressed_size = size as u64; + } + + /// Set meta ci offset + pub fn set_meta_ci_offset(&mut self, size: usize) { + self.meta_ci_offset = size as u64; + } + /// Set the is_chunkdict_generated flag. pub fn set_chunkdict_generated(&mut self, is_chunkdict_generated: bool) { self.is_chunkdict_generated = is_chunkdict_generated; @@ -258,6 +288,11 @@ impl BlobInfo { self.blob_id.clone() } + /// Set the blob id + pub fn set_blob_id(&mut self, blob_id: String) { + self.blob_id = blob_id + } + /// Get raw blob id, without special handling of `inlined-meta` case. 
pub fn raw_blob_id(&self) -> &str { &self.blob_id diff --git a/storage/src/factory.rs b/storage/src/factory.rs index ef74a129b8d..e441e26209c 100644 --- a/storage/src/factory.rs +++ b/storage/src/factory.rs @@ -17,7 +17,10 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use lazy_static::lazy_static; -use nydus_api::{default_user_io_batch_size, BackendConfigV2, ConfigV2}; +use nydus_api::{ + default_user_io_batch_size, BackendConfigV2, ConfigV2, HttpProxyConfig, LocalDiskConfig, + LocalFsConfig, OssConfig, RegistryConfig, S3Config, +}; use tokio::runtime::{Builder, Runtime}; use tokio::time; @@ -201,6 +204,24 @@ impl BlobFactory { } } + pub fn supported_backends() -> Vec { + let backends = vec![ + #[cfg(feature = "backend-oss")] + "oss".to_string(), + #[cfg(feature = "backend-s3")] + "s3".to_string(), + #[cfg(feature = "backend-registry")] + "registry".to_string(), + #[cfg(feature = "backend-localfs")] + "localfs".to_string(), + #[cfg(feature = "backend-localdisk")] + "localdisk".to_string(), + #[cfg(feature = "backend-http-proxy")] + "http-proxy".to_string(), + ]; + backends + } + /// Create a storage backend for the blob with id `blob_id`. 
#[allow(unused_variables)] pub fn new_backend( @@ -245,6 +266,49 @@ impl BlobFactory { } } + pub fn new_backend_from_json( + backend_type: &str, + content: &str, + blob_id: &str, + ) -> IOResult> { + match backend_type { + #[cfg(feature = "backend-oss")] + "oss" => { + let cfg = serde_json::from_str::(&content)?; + Ok(Arc::new(oss::Oss::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-s3")] + "s3" => { + let cfg = serde_json::from_str::(&content)?; + Ok(Arc::new(s3::S3::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-registry")] + "registry" => { + let cfg = serde_json::from_str::(&content)?; + Ok(Arc::new(registry::Registry::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-localfs")] + "localfs" => { + let cfg = serde_json::from_str::(&content)?; + Ok(Arc::new(localfs::LocalFs::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-localdisk")] + "localdisk" => { + let cfg = serde_json::from_str::(&content)?; + Ok(Arc::new(localdisk::LocalDisk::new(&cfg, Some(blob_id))?)) + } + #[cfg(feature = "backend-http-proxy")] + "http-proxy" => { + let cfg = serde_json::from_str::(&content)?; + Ok(Arc::new(http_proxy::HttpProxy::new(&cfg, Some(blob_id))?)) + } + _ => Err(einval!(format!( + "unsupported backend type '{}'", + backend_type + ))), + } + } + fn check_cache_stat(&self) { let mgrs = self.mgrs.lock().unwrap(); for (_key, mgr) in mgrs.iter() { diff --git a/storage/src/meta/mod.rs b/storage/src/meta/mod.rs index 9e9d40334c3..054db02b1e7 100644 --- a/storage/src/meta/mod.rs +++ b/storage/src/meta/mod.rs @@ -753,15 +753,35 @@ impl BlobCompressionContextInfo { let expected_raw_size = (compressed_size + BLOB_CCT_HEADER_SIZE) as usize; let mut raw_data = alloc_buf(expected_raw_size); - let read_size = reader - .read_all(&mut raw_data, blob_info.meta_ci_offset()) - .map_err(|e| { - eio!(format!( - "failed to read metadata for blob {} from backend, {}", - blob_info.blob_id(), - e - )) - })?; + let read_size = (|| { + // The maximum retry 
times + let mut retry_count = 3; + + loop { + match reader.read_all(&mut raw_data, blob_info.meta_ci_offset()) { + Ok(size) => return Ok(size), + Err(e) => { + // Handle BackendError, retry a maximum of three times. + if retry_count > 0 { + warn!( + "failed to read metadata for blob {} from backend, {}, retry read metadata", + blob_info.blob_id(), + e + ); + retry_count -= 1; + continue; + } + + return Err(eio!(format!( + "failed to read metadata for blob {} from backend, {}", + blob_info.blob_id(), + e + ))); + } + } + } + })()?; + if read_size != expected_raw_size { return Err(eio!(format!( "failed to read metadata for blob {} from backend, compressor {}, got {} bytes, expect {} bytes", @@ -1080,6 +1100,7 @@ impl BlobCompressionContext { } } +#[derive(Clone)] /// A customized array to host chunk information table for a blob. pub enum BlobMetaChunkArray { /// V1 chunk compression information array. diff --git a/storage/src/meta/toc.rs b/storage/src/meta/toc.rs index 91fc8ea2601..3215b017f4a 100644 --- a/storage/src/meta/toc.rs +++ b/storage/src/meta/toc.rs @@ -272,6 +272,7 @@ impl TocEntry { } } +#[derive(Clone)] /// Container to host a group of ToC entries. 
pub struct TocEntryList { entries: Vec, diff --git a/tests/bats/Makefile b/tests/bats/Makefile index c43bb98667e..2bfd2fb3fbf 100644 --- a/tests/bats/Makefile +++ b/tests/bats/Makefile @@ -8,7 +8,6 @@ ci: bash -f ./install_bats.sh bats --formatter tap build_docker_image.bats bats --formatter tap compile_nydusd.bats - bats --formatter tap compile_ctr_remote.bats bats --formatter tap compile_nydus_snapshotter.bats bats --formatter tap run_container_with_rafs.bats bats --formatter tap run_container_with_zran.bats diff --git a/tests/bats/compile_ctr_remote.bats b/tests/bats/compile_ctr_remote.bats deleted file mode 100644 index 3542bbbda6e..00000000000 --- a/tests/bats/compile_ctr_remote.bats +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/bats - -load "${BATS_TEST_DIRNAME}/common_tests.sh" - -@test "compile ctr remote" { - docker run --rm -v $repo_base_dir:/image-service $compile_image bash -c 'cd /image-service/contrib/ctr-remote && make clean && make' - if [ -f "${repo_base_dir}/contrib/ctr-remote/bin/ctr-remote" ]; then - /usr/bin/cp -f ${repo_base_dir}/contrib/ctr-remote/bin/ctr-remote /usr/local/bin/ - else - echo "cannot find ctr-remote binary" - exit 1 - fi -} - diff --git a/tests/bats/run_container_with_rafs.bats b/tests/bats/run_container_with_rafs.bats index 2034e5d5de6..e03bfde6d10 100644 --- a/tests/bats/run_container_with_rafs.bats +++ b/tests/bats/run_container_with_rafs.bats @@ -7,7 +7,7 @@ setup() { run_nydus_snapshotter config_containerd_for_nydus ctr images ls | grep -q "${nydus_rafs_image}" && ctr images rm $nydus_rafs_image - ctr-remote images rpull $nydus_rafs_image + nerdctl pull --snapshotter=nydus $nydus_rafs_image } @test "run container with rafs" { diff --git a/tests/bats/run_container_with_zran.bats b/tests/bats/run_container_with_zran.bats index bee0645e08b..2e7fccb8737 100644 --- a/tests/bats/run_container_with_zran.bats +++ b/tests/bats/run_container_with_zran.bats @@ -7,7 +7,7 @@ setup() { run_nydus_snapshotter config_containerd_for_nydus ctr 
images ls | grep -q "${nydus_zran_image}" && ctr images rm $nydus_zran_image - ctr-remote images rpull $nydus_zran_image + nerdctl pull --snapshotter=nydus $nydus_zran_image } @test "run container with zran" { diff --git a/utils/src/digest.rs b/utils/src/digest.rs index 12e74486f3b..39df1fc8304 100644 --- a/utils/src/digest.rs +++ b/utils/src/digest.rs @@ -328,7 +328,7 @@ mod test { let text = b"The quick brown fox jumps over the lazy dog"; let d1 = RafsDigest::from_buf(text, Algorithm::Blake3); - let d2 = RafsDigest::try_from(d1.data).unwrap(); + let d2 = RafsDigest::from(d1.data); let s1: String = d1.into(); let s2: String = d2.into(); print!("{:?}", d1); From bf8a535b1ed4c110db169cfee2f01c5bd9fde808 Mon Sep 17 00:00:00 2001 From: daiyongxuan <2423226609@qq.com> Date: Sun, 27 Oct 2024 07:26:19 +0000 Subject: [PATCH 2/3] style: fix code formatting and conventions Signed-off-by: daiyongxuan <2423226609@qq.com> --- builder/src/chunkdict_generator.rs | 63 +++++++++++------------------- src/bin/nydus-image/main.rs | 4 +- 2 files changed, 25 insertions(+), 42 deletions(-) diff --git a/builder/src/chunkdict_generator.rs b/builder/src/chunkdict_generator.rs index f8dc8a16786..6b03ad5609e 100644 --- a/builder/src/chunkdict_generator.rs +++ b/builder/src/chunkdict_generator.rs @@ -27,7 +27,7 @@ use nydus_rafs::metadata::chunk::ChunkWrapper; use nydus_rafs::metadata::inode::InodeWrapper; use nydus_rafs::metadata::layout::v6::RafsV6BlobTable; use nydus_rafs::metadata::layout::{RafsBlobTable, RafsXAttrs}; -use nydus_storage::device::{BlobFeatures, BlobInfo}; +use nydus_storage::device::BlobInfo; use nydus_storage::meta::BatchContextGenerator; use nydus_storage::meta::BlobChunkInfoV1Ondisk; use nydus_utils::compress; @@ -69,12 +69,9 @@ pub struct ChunkdictBlobInfo { pub blob_meta_ci_offset: u64, } -// TODO(daiyongxuan): implement Read Trait for BlobNodeReader #[derive(Debug)] -#[allow(dead_code)] pub struct BlobNodeReader { blob: Arc, - start: u64, end: u64, position: u64, } 
@@ -83,7 +80,6 @@ impl BlobNodeReader { pub fn new(blob: Arc, start: u64, end: u64) -> Result { let mut reader = BlobNodeReader { blob, - start, end, position: start, }; @@ -109,12 +105,6 @@ impl Read for BlobNodeReader { /// Struct to generate chunkdict RAFS bootstrap. pub struct Generator {} -#[allow(dead_code)] -struct BlobIdAndCompressor { - pub blob_id: String, - pub compressor: compress::Algorithm, -} - struct PrefetchBlobState { blob_info: BlobInfo, blob_ctx: BlobContext, @@ -126,23 +116,18 @@ impl PrefetchBlobState { fn new(ctx: &BuildContext, blob_layer_num: u32, blobs_dir_path: &Path) -> Result { let mut blob_info = BlobInfo::new( blob_layer_num, - String::from("Prefetch-blob"), + String::from("prefetch-blob"), 0, 0, ctx.chunk_size, u32::MAX, - BlobFeatures::ALIGNED - | BlobFeatures::INLINED_CHUNK_DIGEST - | BlobFeatures::HAS_TAR_HEADER - | BlobFeatures::HAS_TOC - | BlobFeatures::CAP_TAR_TOC, + ctx.blob_features, ); - blob_info.set_compressor(Algorithm::Zstd); + blob_info.set_compressor(ctx.compressor); let mut blob_ctx = BlobContext::from(ctx, &blob_info, ChunkSource::Build)?; - blob_ctx.chunk_count = 0; blob_ctx.blob_meta_info_enabled = true; let blob_writer = ArtifactWriter::new(crate::ArtifactStorage::SingleFile( - blobs_dir_path.join("Prefetch-blob"), + blobs_dir_path.join("prefetch-blob"), )) .map(|writer| Box::new(writer) as Box)?; Ok(Self { @@ -193,23 +178,23 @@ impl Generator { tree: &mut Tree, ctx: &mut BuildContext, bootstrap_mgr: &mut BootstrapManager, - blobtable: &mut RafsV6BlobTable, + blob_table: &mut RafsV6BlobTable, blobs_dir_path: PathBuf, prefetch_nodes: Vec, ) -> Result<()> { // create a new blob for prefetch layer - let blob_layer_num = blobtable.entries.len(); + let blob_layer_num = blob_table.entries.len(); let mut blob_state = PrefetchBlobState::new(&ctx, blob_layer_num as u32, &blobs_dir_path).unwrap(); - let mut batch = BatchContextGenerator::new(4096).unwrap(); + let mut batch = BatchContextGenerator::new(0).unwrap(); for node 
in &prefetch_nodes { Self::process_prefetch_node( tree, &node, &mut blob_state, &mut batch, - blobtable, + blob_table, &blobs_dir_path, ); } @@ -226,11 +211,11 @@ impl Generator { ); } - Self::finalize_blob(ctx, blobtable, &mut blob_state); + Self::finalize_blob(ctx, blob_table, &mut blob_state); debug!("prefetch blob id: {}", ctx.blob_id); - Self::build_and_dump_bootstrap(tree, ctx, bootstrap_mgr, blobtable)?; + Self::build_and_dump_bootstrap(tree, ctx, bootstrap_mgr, blob_table)?; Ok(()) } @@ -238,7 +223,7 @@ impl Generator { tree: &mut Tree, ctx: &mut BuildContext, bootstrap_mgr: &mut BootstrapManager, - blobtable: &mut RafsV6BlobTable, + blob_table: &mut RafsV6BlobTable, ) -> Result<()> { let mut bootstrap_ctx = bootstrap_mgr.create_ctx()?; let mut bootstrap = Bootstrap::new(tree.clone())?; @@ -248,20 +233,20 @@ impl Generator { // Verify and update prefetch blob assert!( - blobtable + blob_table .entries .iter() - .filter(|blob| blob.blob_id() == "Prefetch-blob") + .filter(|blob| blob.blob_id() == "prefetch-blob") .count() == 1, - "Expected exactly one Prefetch-blob" + "Expected exactly one prefetch-blob" ); // Rewrite prefetch blob id - blobtable + blob_table .entries .iter_mut() - .filter(|blob| blob.blob_id() == "Prefetch-blob") + .filter(|blob| blob.blob_id() == "prefetch-blob") .for_each(|blob| { let mut info = (**blob).clone(); info.set_blob_id(ctx.blob_id.clone()); @@ -269,7 +254,7 @@ impl Generator { }); // Dump bootstrap - let blob_table_withprefetch = RafsBlobTable::V6(blobtable.clone()); + let blob_table_withprefetch = RafsBlobTable::V6(blob_table.clone()); bootstrap.dump( ctx, &mut bootstrap_mgr.bootstrap_storage, @@ -282,19 +267,17 @@ impl Generator { fn finalize_blob( ctx: &mut BuildContext, - blobtable: &mut RafsV6BlobTable, + blob_table: &mut RafsV6BlobTable, blob_state: &mut PrefetchBlobState, ) { - blobtable.entries.push(blob_state.blob_info.clone().into()); + blob_table.entries.push(blob_state.blob_info.clone().into()); let mut blob_mgr = 
BlobManager::new(nydus_utils::digest::Algorithm::Blake3); blob_mgr.add_blob(blob_state.blob_ctx.clone()); blob_mgr.set_current_blob_index(0); Blob::finalize_blob_data(&ctx, &mut blob_mgr, blob_state.blob_writer.as_mut()).unwrap(); if let Some((_, blob_ctx)) = blob_mgr.get_current_blob() { Blob::dump_meta_data(&ctx, blob_ctx, blob_state.blob_writer.as_mut()).unwrap(); - } else { - panic!(); - } + }; ctx.blob_id = String::from(""); blob_mgr.get_current_blob().unwrap().1.blob_id = String::from(""); finalize_blob(ctx, &mut blob_mgr, blob_state.blob_writer.as_mut()).unwrap(); @@ -317,7 +300,7 @@ impl Generator { node: &TreeNode, prefetch_state: &mut PrefetchBlobState, batch: &mut BatchContextGenerator, - blobtable: &RafsV6BlobTable, + blob_table: &RafsV6BlobTable, blobs_dir_path: &Path, ) { let tree_node = tree @@ -330,7 +313,7 @@ impl Generator { child .chunks .first() - .and_then(|chunk| blobtable.entries.get(chunk.inner.blob_index() as usize)) + .and_then(|chunk| blob_table.entries.get(chunk.inner.blob_index() as usize)) .map(|entry| entry.blob_id()) .unwrap() }; diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs index d1002a8f5b0..af17a0dc91e 100644 --- a/src/bin/nydus-image/main.rs +++ b/src/bin/nydus-image/main.rs @@ -1681,7 +1681,7 @@ impl Command { let config = Self::get_configuration(matches)?; config.internal.set_blob_accessible(true); let mut build_ctx = BuildContext { - blob_id: String::from("Prefetch-blob"), + blob_id: String::from("prefetch-blob"), compressor: compress::Algorithm::Zstd, blob_inline_meta: true, ..Default::default() @@ -1841,7 +1841,7 @@ impl Command { } Ok(prefetch_files) } - None => panic!("missing parameter `prefetch-files`"), + None => bail!("missing parameter `prefetch-files`"), } } From cf0fe995f88c8bebd6b1174a2f9d433858d8bc48 Mon Sep 17 00:00:00 2001 From: daiyongxuan <2423226609@qq.com> Date: Sun, 27 Oct 2024 16:19:43 +0000 Subject: [PATCH 3/3] style: add some error handle --- builder/src/chunkdict_generator.rs | 
56 ++++++------------------------ src/bin/nydus-image/main.rs | 3 +- 2 files changed, 12 insertions(+), 47 deletions(-) diff --git a/builder/src/chunkdict_generator.rs b/builder/src/chunkdict_generator.rs index 6b03ad5609e..a57dcd97f34 100644 --- a/builder/src/chunkdict_generator.rs +++ b/builder/src/chunkdict_generator.rs @@ -37,7 +37,6 @@ use sha2::digest::Update; use crate::finalize_blob; use crate::Artifact; -use core::panic; use std::fs::File; use std::io::Read; use std::io::Seek; @@ -69,39 +68,6 @@ pub struct ChunkdictBlobInfo { pub blob_meta_ci_offset: u64, } -#[derive(Debug)] -pub struct BlobNodeReader { - blob: Arc, - end: u64, - position: u64, -} - -impl BlobNodeReader { - pub fn new(blob: Arc, start: u64, end: u64) -> Result { - let mut reader = BlobNodeReader { - blob, - end, - position: start, - }; - reader.blob.seek(std::io::SeekFrom::Start(start))?; - Ok(reader) - } -} - -impl Read for BlobNodeReader { - fn read(&mut self, buf: &mut [u8]) -> Result { - // EOF - if self.position > self.end { - return std::io::Result::Ok(0); - } - let max_read = (self.end - self.position) as usize; - let to_read = std::cmp::min(buf.len(), max_read); - let bytes_read = self.blob.read(&mut buf[..to_read])?; - self.position += bytes_read as u64; - std::io::Result::Ok(bytes_read) - } -} - /// Struct to generate chunkdict RAFS bootstrap. 
pub struct Generator {} @@ -126,8 +92,8 @@ impl PrefetchBlobState { blob_info.set_compressor(ctx.compressor); let mut blob_ctx = BlobContext::from(ctx, &blob_info, ChunkSource::Build)?; blob_ctx.blob_meta_info_enabled = true; - let blob_writer = ArtifactWriter::new(crate::ArtifactStorage::SingleFile( - blobs_dir_path.join("prefetch-blob"), + let blob_writer = ArtifactWriter::new(crate::ArtifactStorage::FileDir( + blobs_dir_path.to_path_buf(), )) .map(|writer| Box::new(writer) as Box)?; Ok(Self { @@ -317,7 +283,7 @@ impl Generator { .map(|entry| entry.blob_id()) .unwrap() }; - let blob_file = Arc::new(File::open(blobs_dir_path.join(blob_id)).unwrap()); + let mut blob_file = Arc::new(File::open(blobs_dir_path.join(blob_id)).unwrap()); { let mut child = tree_node.borrow_mut(); child.layer_idx = prefetch_state.blob_info.blob_index() as u16; @@ -332,15 +298,13 @@ impl Generator { for chunk in chunks { let inner = Arc::make_mut(&mut chunk.inner); - let mut reader = BlobNodeReader::new( - Arc::clone(&blob_file), - inner.compressed_offset(), - inner.compressed_offset() + inner.compressed_size() as u64, - ) - .unwrap(); - let buf = &mut vec![0u8; inner.compressed_size() as usize]; - reader.read_exact(buf).unwrap(); - prefetch_state.blob_writer.write_all(buf).unwrap(); + + let mut buf = vec![0u8; inner.compressed_size() as usize]; + blob_file + .seek(std::io::SeekFrom::Start(inner.compressed_offset())) + .unwrap(); + blob_file.read_exact(&mut buf).unwrap(); + prefetch_state.blob_writer.write_all(&buf).unwrap(); let info = batch .generate_chunk_info( blob_ctx.current_compressed_offset, diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs index af17a0dc91e..b3a69c84933 100644 --- a/src/bin/nydus-image/main.rs +++ b/src/bin/nydus-image/main.rs @@ -1715,7 +1715,8 @@ impl Command { blobs_dir_path.to_path_buf(), prefetch_nodes, ) - .unwrap(); + .with_context(|| "Failed to generate prefetch bootstrap")?; + Ok(()) }