Skip to content

Commit

Permalink
Fix clippy warnings
Browse files · Browse the repository at this point in the history
  • Loading branch information
jssblck committed Feb 28, 2025
1 parent fcafb5f commit 7364d5d
Show file tree
Hide file tree
Showing 7 changed files with 113 additions and 96 deletions.
3 changes: 2 additions & 1 deletion bin/src/extract.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use circe_lib::{
daemon::Daemon, registry::Registry, Authentication, Filters, ImageSource, LayerDescriptor, Platform, Reference,
daemon::Daemon, registry::Registry, Authentication, Filters, ImageSource, LayerDescriptor,
Platform, Reference,
};
use clap::{Args, Parser, ValueEnum};
use color_eyre::eyre::{bail, Context, Result};
Expand Down
5 changes: 1 addition & 4 deletions bin/src/list.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,10 +54,7 @@ pub async fn main(opts: Options) -> Result<()> {
let mut listing = HashMap::new();
for (descriptor, layer) in layers.into_iter().zip(1usize..) {
info!(layer = %descriptor, %layer, "reading layer");
let files = source
.list_files(&descriptor)
.await
.context("list files")?;
let files = source.list_files(&descriptor).await.context("list files")?;

debug!(layer = %descriptor, files = %files.len(), "listed files");
listing.insert(descriptor.digest.to_string(), files);
Expand Down
140 changes: 75 additions & 65 deletions lib/src/daemon.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@ use tokio_tar::Archive;
use tracing::{debug, info, warn};

use crate::{
transform::Chunk,
Digest, FilterMatch, Filters, ImageSource, LayerDescriptor, LayerMediaType, Platform, Reference,
transform::Chunk, Digest, FilterMatch, Filters, ImageSource, LayerDescriptor, LayerMediaType,
Platform, Reference,
};

/// Each instance represents a Docker daemon connection for a specific image.
Expand Down Expand Up @@ -66,8 +66,7 @@ impl Daemon {
/// The platform to use for the daemon.
platform: Option<Platform>,
) -> Result<Self> {
let docker = Docker::connect_with_local_defaults()
.context("connect to Docker daemon")?;
let docker = Docker::connect_with_local_defaults().context("connect to Docker daemon")?;

// Verify Docker daemon is accessible
docker
Expand All @@ -93,14 +92,18 @@ impl Daemon {
..Default::default()
});

let images = self.docker.list_images(options).await.context("list images")?;

let images = self
.docker
.list_images(options)
.await
.context("list images")?;

let mut image_tags = Vec::new();
for image in images {
// RepoTags is a Vec, not an Option
image_tags.extend(image.repo_tags);
}

Ok(image_tags)
}

Expand All @@ -112,15 +115,19 @@ impl Daemon {
..Default::default()
});

let images = self.docker.list_images(options).await.context("list images")?;

let images = self
.docker
.list_images(options)
.await
.context("list images")?;

for image in images {
// RepoTags is a Vec, not an Option
if image.repo_tags.iter().any(|tag| tag == &image_name) {
return Ok(true);
}
}

Ok(false)
}

Expand All @@ -133,31 +140,31 @@ impl Daemon {
}

info!("Pulling image {} from registry", self.reference);

let tag = match &self.reference.version {
crate::Version::Tag(tag) => tag.clone(),
crate::Version::Digest(_) => {
warn!("Using digest for Docker daemon pull is not supported, falling back to 'latest'");
"latest".to_string()
}
};

let mut options = CreateImageOptions {
from_image: self.reference.repository.clone(),
tag,
..Default::default()
};

// Apply platform if specified
if let Some(platform) = &self.platform {
info!("Requesting image for platform: {}", platform);
options.platform = platform.to_string();
}

let options = Some(options);

let mut pull_stream = self.docker.create_image(options, None, None);

while let Some(info) = pull_stream.next().await {
match info {
Ok(info) => debug!(?info, "Pull progress"),
Expand All @@ -176,41 +183,43 @@ impl Daemon {
// Create a temporary directory for the export
let temp_dir = tempfile::TempDir::new().context("create temporary directory")?;
let export_path = temp_dir.path().join("image.tar");

// Create a container from the image (we don't need to run it)
let container_config = Config {
image: Some(self.reference.to_string()),
cmd: Some(vec!["true".to_string()]),
..Default::default()
};

// Platform is applied at image pull time, not container creation time
// We've already applied platform constraints during the ensure_image step

let container = self.docker.create_container::<String, String>(None, container_config)

let container = self
.docker
.create_container::<String, String>(None, container_config)
.await
.context("create temporary container")?;

// Export the container to a tar file
let export_stream = self.docker.export_container(&container.id);



// Write the export stream to a file
let mut file = tokio::fs::File::create(&export_path)
.await
.context("create export file")?;

let mut export_stream = Box::pin(export_stream);
while let Some(chunk) = export_stream.next().await {
let chunk = chunk.context("read export chunk")?;
file.write_all(&chunk).await.context("write export chunk")?;
}

// Remove the temporary container
self.docker.remove_container(&container.id, None)
self.docker
.remove_container(&container.id, None)
.await
.context("remove temporary container")?;

Ok((temp_dir, export_path))
}

Expand All @@ -220,28 +229,30 @@ impl Daemon {
// This is a simplified implementation as we don't have the same concept of layers
// as in the OCI registry. Instead, we'll generate layer descriptors based on the
// contents of the exported tar file.

// Export the image to a tarball
let (_temp_dir, export_path) = self.export_image().await?;

// Read the tarball to get the layers
let tar_data = tokio::fs::read(&export_path).await.context("read export tar")?;

let tar_data = tokio::fs::read(&export_path)
.await
.context("read export tar")?;

// We're going to simulate layers based on the tar entries
// This is a simplified approach - in a real implementation, we'd parse the manifest.json
// from the exported tarball to get the actual layers

// For now, we'll just return a single "layer" that represents the entire exported image
let digest_hex = blake3::hash(&tar_data).to_hex().to_string();
let digest_str = format!("sha256:{}", digest_hex);
let digest = Digest::from_str(&digest_str).context("parse digest")?;

let layer = LayerDescriptor::builder()
.digest(digest)
.size(tar_data.len() as i64)
.media_type(LayerMediaType::Oci(vec![]))
.build();

if self.layer_filters.matches(&layer) {
Ok(vec![layer])
} else {
Expand All @@ -265,13 +276,15 @@ impl Daemon {
) -> Result<impl Stream<Item = Chunk>> {
// Export the image to a tarball
let (_temp_dir, export_path) = self.export_image().await?;

// Read the tarball
let tar_data = tokio::fs::read(&export_path).await.context("read export tar")?;

let tar_data = tokio::fs::read(&export_path)
.await
.context("read export tar")?;

// Create a stream from the tar data
let stream = futures_lite::stream::once(Ok(Bytes::from(tar_data)) as Chunk);

Ok(stream)
}

Expand All @@ -281,22 +294,22 @@ impl Daemon {
let reader = tokio_util::io::StreamReader::new(stream);
let mut archive = Archive::new(reader);
let mut entries = archive.entries().context("read entries from tar")?;

let mut files = Vec::new();
while let Some(entry) = entries.next().await {
let entry = entry.context("read entry")?;
let path = entry.path().context("read entry path")?;

// Apply file filters if they exist
if !self.file_filters.matches(&path.to_path_buf()) {
debug!(?path, "skip: path filter");
continue;
}

debug!(?path, "enumerate");
files.push(path.to_string_lossy().to_string());
}

Ok(files)
}

Expand All @@ -305,13 +318,13 @@ impl Daemon {
let stream = self.pull_layer_internal(layer).await?;
let reader = tokio_util::io::StreamReader::new(stream);
let mut archive = Archive::new(reader);

// Just unpack the whole archive - we'll let tokio_tar handle the details
// This is a simplification, but should work for our purposes
archive.unpack(output).await.context("unpack archive")?;

debug!("Applied layer to {}", output.display());

Ok(())
}
}
Expand All @@ -325,28 +338,30 @@ impl ImageSource for Daemon {
// This is a simplified implementation as we don't have the same concept of layers
// as in the OCI registry. Instead, we'll generate layer descriptors based on the
// contents of the exported tar file.

// Export the image to a tarball
let (_temp_dir, export_path) = self.export_image().await?;

// Read the tarball to get the layers
let tar_data = tokio::fs::read(&export_path).await.context("read export tar")?;

let tar_data = tokio::fs::read(&export_path)
.await
.context("read export tar")?;

// We're going to simulate layers based on the tar entries
// This is a simplified approach - in a real implementation, we'd parse the manifest.json
// from the exported tarball to get the actual layers

// For now, we'll just return a single "layer" that represents the entire exported image
let digest_hex = blake3::hash(&tar_data).to_hex().to_string();
let digest_str = format!("sha256:{}", digest_hex);
let digest = Digest::from_str(&digest_str).context("parse digest")?;

let layer = LayerDescriptor::builder()
.digest(digest)
.size(tar_data.len() as i64)
.media_type(LayerMediaType::Oci(vec![]))
.build();

if self.layer_filters.matches(&layer) {
Ok(vec![layer])
} else {
Expand All @@ -361,22 +376,22 @@ impl ImageSource for Daemon {
let reader = tokio_util::io::StreamReader::new(stream);
let mut archive = Archive::new(reader);
let mut entries = archive.entries().context("read entries from tar")?;

let mut files = Vec::new();
while let Some(entry) = entries.next().await {
let entry = entry.context("read entry")?;
let path = entry.path().context("read entry path")?;

// Apply file filters if they exist
if !self.file_filters.matches(&path.to_path_buf()) {
debug!(?path, "skip: path filter");
continue;
}

debug!(?path, "enumerate");
files.push(path.to_string_lossy().to_string());
}

Ok(files)
}

Expand All @@ -386,26 +401,21 @@ impl ImageSource for Daemon {
let stream = self.pull_layer_internal(layer).await?;
let reader = tokio_util::io::StreamReader::new(stream);
let mut archive = Archive::new(reader);

// Just unpack the whole archive - we'll let tokio_tar handle the details
// This is a simplification, but should work for our purposes
archive.unpack(output).await.context("unpack archive")?;

debug!("Applied layer to {}", output.display());

Ok(())
}
}

/// Checks if Docker daemon is available.
pub async fn is_daemon_available() -> bool {
match Docker::connect_with_local_defaults() {
Ok(docker) => {
match docker.version().await {
Ok(_) => true,
Err(_) => false,
}
}
Ok(docker) => docker.version().await.is_ok(),
Err(_) => false,
}
}
}
Loading

0 comments on commit 7364d5d

Please sign in to comment.