-
Notifications
You must be signed in to change notification settings - Fork 3.1k
Proposal for support for runtime composefs validation #28658
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
88628dd
33c1f72
022c992
01f4e91
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,170 @@ | ||
| #!/bin/bash | ||
| set -euo pipefail | ||
|
|
||
| usage() { | ||
| echo "Usage: $0 [--root STORAGE_ROOT] [--key KEY_PREFIX] SOURCE_IMAGE DEST_IMAGE" | ||
| echo "" | ||
| echo "Reads composefs blob fs-verity digests from container storage for each" | ||
| echo "layer of SOURCE_IMAGE, creates a new OCI image with" | ||
| echo "io.containers.composefs.digest annotations on each layer descriptor," | ||
| echo "signs it with cosign, and pushes the signed image to DEST_IMAGE using" | ||
| echo "skopeo copy." | ||
| echo "" | ||
| echo "SOURCE_IMAGE is a local podman image name/id." | ||
| echo "DEST_IMAGE is a remote image reference (e.g. docker://registry/repo:tag)." | ||
| echo "" | ||
| echo "Options:" | ||
| echo " --root STORAGE_ROOT Use a custom storage root" | ||
| echo " --key KEY_PREFIX Path prefix for cosign key pair (default: cosign)" | ||
| echo " Uses KEY_PREFIX.key and KEY_PREFIX.pub." | ||
| echo " If they don't exist, generates a new key pair." | ||
| exit 1 | ||
| } | ||
|
|
||
| STORAGE_ROOT="" | ||
| PODMAN_ARGS=() | ||
| KEY_PREFIX="cosign" | ||
|
|
||
| while [[ $# -gt 0 ]]; do | ||
| case "$1" in | ||
| --root) | ||
| STORAGE_ROOT="$2" | ||
| shift 2 | ||
| ;; | ||
| --key) | ||
| KEY_PREFIX="$2" | ||
| shift 2 | ||
| ;; | ||
| -h|--help) | ||
| usage | ||
| ;; | ||
| *) | ||
| break | ||
| ;; | ||
| esac | ||
| done | ||
|
|
||
| if [[ $# -ne 2 ]]; then | ||
| usage | ||
| fi | ||
|
|
||
| SOURCE_IMAGE="$1" | ||
| DEST_IMAGE="$2" | ||
|
|
||
| if [[ -n "$STORAGE_ROOT" ]]; then | ||
| PODMAN_ARGS+=(--root "$STORAGE_ROOT") | ||
| fi | ||
|
|
||
| if [[ -z "$STORAGE_ROOT" ]]; then | ||
| STORAGE_ROOT=$(podman info --format '{{.Store.GraphRoot}}') | ||
| fi | ||
|
|
||
| # Generate cosign key pair if it doesn't exist | ||
| if [[ ! -f "${KEY_PREFIX}.key" ]]; then | ||
| echo "Generating cosign key pair at ${KEY_PREFIX}.key / ${KEY_PREFIX}.pub" | ||
| COSIGN_PASSWORD="" cosign generate-key-pair --output-key-prefix "$KEY_PREFIX" | ||
| fi | ||
|
|
||
| LAYERS_JSON="$STORAGE_ROOT/overlay-layers/layers.json" | ||
| if [[ ! -f "$LAYERS_JSON" ]]; then | ||
| echo "ERROR: layers.json not found at $LAYERS_JSON" >&2 | ||
| exit 1 | ||
| fi | ||
|
|
||
| # Get image ID | ||
| IMAGE_ID=$(podman "${PODMAN_ARGS[@]}" image inspect "$SOURCE_IMAGE" --format '{{.Id}}') | ||
| if [[ -z "$IMAGE_ID" ]]; then | ||
| echo "ERROR: could not find image $SOURCE_IMAGE" >&2 | ||
| exit 1 | ||
| fi | ||
|
|
||
| # Get diff IDs for the image layers (bottom-to-top order) | ||
| mapfile -t DIFF_IDS < <(podman "${PODMAN_ARGS[@]}" image inspect "$SOURCE_IMAGE" \ | ||
| --format '{{range .RootFS.Layers}}{{.}}{{"\n"}}{{end}}' | grep -v '^$') | ||
|
|
||
| echo "Image has ${#DIFF_IDS[@]} layers" | ||
|
|
||
| # For each diff ID, find the storage layer ID and measure its composefs blob | ||
| VERITY_DIGESTS=() | ||
| for diff_id in "${DIFF_IDS[@]}"; do | ||
| layer_id=$(jq -r --arg did "$diff_id" '.[] | select(."diff-digest" == $did) | .id' "$LAYERS_JSON") | ||
| if [[ -z "$layer_id" ]]; then | ||
| echo "ERROR: no storage layer for diff-digest $diff_id" >&2 | ||
| exit 1 | ||
| fi | ||
|
|
||
| blob_path="$STORAGE_ROOT/overlay/$layer_id/composefs-data/composefs.blob" | ||
| if [[ ! -f "$blob_path" ]]; then | ||
| echo "ERROR: no composefs blob at $blob_path" >&2 | ||
| exit 1 | ||
| fi | ||
|
|
||
| verity=$(fsverity measure "$blob_path" | awk '{print $1}') | ||
| echo " layer $layer_id -> $verity" | ||
| VERITY_DIGESTS+=("$verity") | ||
| done | ||
|
|
||
| # Create temporary OCI layout directories | ||
| TMPDIR=$(mktemp -d) | ||
| trap 'rm -rf "$TMPDIR"' EXIT | ||
|
|
||
| OCI_DIR="$TMPDIR/oci" | ||
| ZST_DIR="$TMPDIR/zst" | ||
|
|
||
| echo "Exporting image to OCI layout..." | ||
| podman "${PODMAN_ARGS[@]}" save --format oci-dir -o "$OCI_DIR" "$SOURCE_IMAGE" | ||
|
|
||
| # Convert to zstd:chunked first, before adding annotations. | ||
| # Recompression changes layer digests and rewrites the manifest, | ||
| # which would discard any annotations we added. | ||
| echo "Converting to zstd:chunked..." | ||
| skopeo copy \ | ||
| --dest-compress-format zstd:chunked \ | ||
| --dest-force-compress-format \ | ||
| "oci:${OCI_DIR}" \ | ||
| "oci:${ZST_DIR}" | ||
|
|
||
| # Find the manifest in the zstd:chunked OCI layout | ||
| MANIFEST_DIGEST=$(jq -r '.manifests[0].digest' "$ZST_DIR/index.json") | ||
| MANIFEST_PATH="$ZST_DIR/blobs/${MANIFEST_DIGEST/://}" | ||
|
|
||
| echo "Adding verity annotations to manifest..." | ||
|
|
||
| # Build jq filter to add annotation to each layer | ||
| JQ_FILTER='.' | ||
| for i in "${!VERITY_DIGESTS[@]}"; do | ||
| digest="${VERITY_DIGESTS[$i]}" | ||
| JQ_FILTER="$JQ_FILTER | .layers[$i].annotations.\"io.containers.composefs.digest\" = \"$digest\"" | ||
| done | ||
|
|
||
| jq "$JQ_FILTER" "$MANIFEST_PATH" > "$TMPDIR/manifest.json" | ||
|
|
||
| echo "Layer annotations:" | ||
| jq '.layers[].annotations' "$TMPDIR/manifest.json" | ||
|
|
||
| # Replace the manifest blob and update the index with the new digest | ||
| NEW_MANIFEST_DIGEST="sha256:$(sha256sum "$TMPDIR/manifest.json" | awk '{print $1}')" | ||
| NEW_MANIFEST_SIZE=$(stat -c%s "$TMPDIR/manifest.json") | ||
| NEW_BLOB_PATH="$ZST_DIR/blobs/${NEW_MANIFEST_DIGEST/://}" | ||
|
|
||
| cp "$TMPDIR/manifest.json" "$NEW_BLOB_PATH" | ||
|
|
||
| # Remove old manifest blob if digest changed | ||
| if [[ "$MANIFEST_DIGEST" != "$NEW_MANIFEST_DIGEST" ]]; then | ||
| rm -f "$MANIFEST_PATH" | ||
| fi | ||
|
|
||
| # Update index.json with new digest and size | ||
| jq --arg digest "$NEW_MANIFEST_DIGEST" --argjson size "$NEW_MANIFEST_SIZE" \ | ||
| '.manifests[0].digest = $digest | .manifests[0].size = $size' \ | ||
| "$ZST_DIR/index.json" > "$TMPDIR/index.json" | ||
| cp "$TMPDIR/index.json" "$ZST_DIR/index.json" | ||
|
|
||
| echo "Copying and signing image to $DEST_IMAGE..." | ||
| skopeo copy \ | ||
| --sign-by-sigstore-private-key "${KEY_PREFIX}.key" \ | ||
| "oci:${ZST_DIR}" \ | ||
| "$DEST_IMAGE" | ||
|
|
||
| echo "Done: signed image with verity annotations pushed to $DEST_IMAGE" | ||
| echo "Verify with: cosign verify --key ${KEY_PREFIX}.pub --insecure-ignore-tlog $DEST_IMAGE" |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -17,6 +17,10 @@ import ( | |
| "github.com/sirupsen/logrus" | ||
| "go.podman.io/common/libimage" | ||
| "go.podman.io/common/libnetwork/pasta" | ||
| dockerTransport "go.podman.io/image/v5/docker" | ||
| "go.podman.io/image/v5/docker/reference" | ||
| "go.podman.io/image/v5/image" | ||
| "go.podman.io/image/v5/signature" | ||
| "go.podman.io/podman/v6/libpod" | ||
| "go.podman.io/podman/v6/libpod/define" | ||
| "go.podman.io/podman/v6/pkg/namespaces" | ||
|
|
@@ -188,6 +192,21 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener | |
| } | ||
|
|
||
| options = append(options, libpod.WithRootFSFromImage(newImage.ID(), resolvedImageName, s.RawImageName)) | ||
|
|
||
| if s.SignaturePolicy != "" { | ||
| requireSigned := s.SignaturePolicy == "require" | ||
| if err := validateManifestSignature(ctx, rt, newImage, requireSigned); err != nil { | ||
| return nil, nil, nil, err | ||
| } | ||
| } | ||
|
|
||
| if s.VerityEnforce != nil && *s.VerityEnforce { | ||
| digests, err := extractVerityDigests(imageData) | ||
| if err != nil { | ||
| return nil, nil, nil, err | ||
| } | ||
| options = append(options, libpod.WithVerityEnforce(digests)) | ||
| } | ||
| } | ||
|
|
||
| _, err = rt.LookupPod(s.Hostname) | ||
|
|
@@ -760,3 +779,82 @@ func Inherit(infra *libpod.Container, s *specgen.SpecGenerator, rt *libpod.Runti | |
| func applyInfraInherit(compatibleOptions *libpod.InfraInherit, s *specgen.SpecGenerator) error { | ||
| return copier.CopyWithOption(s, compatibleOptions, copier.Option{IgnoreEmpty: true}) | ||
| } | ||
|
|
||
| const verityDigestAnnotation = "io.containers.composefs.digest" | ||
|
|
||
| func extractVerityDigests(imageData *libimage.ImageData) ([][]string, error) { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. A further reason not to do this in Specgen: You don't know what storage driver is in use by Libpod, so you could try to run this check on a system using a btrfs or zfs store. From my initial read of the code it'll probably work but you definitely aren't getting the benefits you expect if composefs isn't the backing store. |
||
| if len(imageData.LayersData) == 0 { | ||
| return nil, fmt.Errorf("verity enforcement: image has no layer data") | ||
| } | ||
| digests := make([][]string, len(imageData.LayersData)) | ||
| for i, layer := range imageData.LayersData { | ||
| val, ok := layer.Annotations[verityDigestAnnotation] | ||
| if !ok || val == "" { | ||
| return nil, fmt.Errorf("verity enforcement: layer %d missing %s annotation", i, verityDigestAnnotation) | ||
| } | ||
| parts := strings.Split(val, ",") | ||
| allowed := make([]string, 0, len(parts)) | ||
| for _, p := range parts { | ||
| d := strings.TrimSpace(p) | ||
| if d != "" { | ||
| allowed = append(allowed, d) | ||
| } | ||
| } | ||
| if len(allowed) == 0 { | ||
| return nil, fmt.Errorf("verity enforcement: layer %d has empty %s annotation", i, verityDigestAnnotation) | ||
| } | ||
| digests[i] = allowed | ||
| } | ||
| return digests, nil | ||
| } | ||
|
|
||
| func validateManifestSignature(ctx context.Context, rt *libpod.Runtime, img *libimage.Image, requireSigned bool) error { | ||
| names := img.Names() | ||
| if len(names) == 0 { | ||
| return fmt.Errorf("manifest signature verification failed: image has no names") | ||
| } | ||
|
|
||
| named, err := reference.ParseNormalizedNamed(names[0]) | ||
| if err != nil { | ||
| return fmt.Errorf("parsing image name %q: %w", names[0], err) | ||
| } | ||
| dockerRef, err := dockerTransport.NewReference(named) | ||
| if err != nil { | ||
| return fmt.Errorf("creating docker reference for %q: %w", names[0], err) | ||
| } | ||
|
|
||
| policy, err := signature.DefaultPolicy(rt.SystemContext()) | ||
| if err != nil { | ||
| return fmt.Errorf("loading signature policy: %w", err) | ||
| } | ||
| pc, err := signature.NewPolicyContext(policy) | ||
| if err != nil { | ||
| return fmt.Errorf("creating policy context: %w", err) | ||
| } | ||
| defer pc.Destroy() | ||
|
|
||
| if requireSigned { | ||
| pc.RequireSignatureVerification(true) | ||
| } | ||
|
|
||
| src, err := img.ImageSource(ctx) | ||
| if err != nil { | ||
| return fmt.Errorf("getting image source: %w", err) | ||
| } | ||
|
|
||
| // This will access the cached manifest from ImageSource that was also | ||
| // used in image.Inspect(), which means we can trust the parsed ImageData | ||
| // from it with no risk for TOCTOU races. | ||
| // We use UnparsedInstanceWithReference to override Reference() with the | ||
| // docker transport reference so that policy.json lookup matches "docker" | ||
| // transport entries rather than "containers-storage". | ||
| unparsed := image.UnparsedInstanceWithReference( | ||
| image.UnparsedInstance(src, nil), | ||
| dockerRef, | ||
| ) | ||
| allowed, err := pc.IsRunningImageAllowed(ctx, unparsed) | ||
| if !allowed { | ||
| return fmt.Errorf("manifest signature verification failed: %w", err) | ||
| } | ||
| return nil | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I feel like there's a TOCTOU risk doing this in Specgen. The container isn't created yet; if the image tag we want to use is replaced before the container is created, this check is subverted.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To add to this: anything in specgen is by design not doing runtime validation — specgen runs once, when the container is created. For podman stop/start it will not be called again.
Yes, that does not matter for the quadlet use case, but still: if such a CLI option exists, it must work with all of podman, not just quadlet.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
OK, never mind — I fully read the code now. I think the TOCTOU does not matter, because you pass in the verity digests, and if the image was replaced in between, then at mount time the digests will be invalid and cause a failure, as they should.