Mirror of https://github.com/containers/image.git

Stop using deprecated digest.Digest.Hex

Should not change behavior.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač 2024-01-17 23:02:58 +01:00
parent 562e58d09d
commit b71a3e3a3d
14 changed files with 34 additions and 34 deletions
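For readers who don't have go-digest in their head: in github.com/opencontainers/go-digest, a digest is a string of the form "<algorithm>:<encoded>", and Encoded() is the non-deprecated name for what Hex() used to return, so the substitution below is a pure rename. A minimal sketch (illustrative program, not part of this commit):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// A digest.Digest is a string of the form "<algorithm>:<encoded>".
	d := digest.Canonical.FromString("hello")

	fmt.Println(d.Algorithm()) // "sha256"
	// Encoded() returns the hex-encoded part after the separator; it is the
	// documented replacement for the deprecated Hex() and yields the same
	// value, which is why this commit should not change behavior.
	fmt.Println(d.Encoded())
}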

View File

@@ -164,7 +164,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
return fmt.Errorf("marshaling layer config: %w", err)
}
delete(layerConfig, "layer_id")
-layerID := digest.Canonical.FromBytes(b).Hex()
+layerID := digest.Canonical.FromBytes(b).Encoded()
layerConfig["id"] = layerID
configBytes, err := json.Marshal(layerConfig)
@@ -309,10 +309,10 @@ func (w *Writer) Close() error {
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
-if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
-return configDigest.Hex() + ".json", nil
+return configDigest.Encoded() + ".json", nil
}
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
@@ -320,15 +320,15 @@ func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
-if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
return "", err
}
-// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+// Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way
// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
-return layerDigest.Hex() + ".tar", nil
+return layerDigest.Encoded() + ".tar", nil
}
type tarFI struct {

View File

@@ -288,10 +288,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
// base is not nil from the caller
// NOTE: Keep this in sync with docs/signature-protocols.md!
func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
-if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
return nil, err
}
sigURL := *base
-sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
return &sigURL, nil
}
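A note on the comment above: digest.Digest is a plain string type, so an unvalidated value can contain path separators after the ":" (or no ":" at all, in which case Encoded() panics). A minimal sketch of what the Validate() check guards against before the encoded value is spliced into a URL path (the input value is a made-up example):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Nothing prevents constructing a malformed or malicious digest string.
	bad := digest.Digest("sha256:../../escape")

	if err := bad.Validate(); err != nil {
		// Validate rejects it, which is why the code above checks the digest
		// before calling Encoded() and building a path from the result.
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println(bad.Encoded())
}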

View File

@@ -366,7 +366,7 @@ func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string)
if err := blobDigest.Validate(); err != nil {
return "", err
}
-parts := append([]string{blobDigest.Hex()}, others...)
+parts := append([]string{blobDigest.Encoded()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}

View File

@@ -342,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
if err != nil {
return "", err
}
-return digest.FromBytes(image).Hex(), nil
+return digest.FromBytes(image).Encoded(), nil
}

View File

@@ -295,7 +295,7 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
return "", err
}
-return m.ConfigDescriptor.Digest.Hex(), nil
+return m.ConfigDescriptor.Digest.Encoded(), nil
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image

View File

@@ -260,7 +260,7 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
-return m.Config.Digest.Hex(), nil
+return m.Config.Digest.Encoded(), nil
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image

View File

@@ -318,7 +318,7 @@ func loadFixture(t *testing.T, fixtureName string) string {
func assertBlobExists(t *testing.T, blobsDir string, blobDigest string) {
digest, err := digest.Parse(blobDigest)
require.NoError(t, err)
-blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex())
+blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Encoded())
_, err = os.Stat(blobPath)
require.NoError(t, err)
}
@@ -326,7 +326,7 @@ func assertBlobExists(t *testing.T, blobsDir string, blobDigest string) {
func assertBlobDoesNotExist(t *testing.T, blobsDir string, blobDigest string) {
digest, err := digest.Parse(blobDigest)
require.NoError(t, err)
-blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex())
+blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Encoded())
_, err = os.Stat(blobPath)
require.True(t, os.IsNotExist(err))
}

View File

@@ -256,5 +256,5 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
} else {
blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
}
-return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
}

View File

@@ -164,7 +164,7 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
return private.UploadedBlob{}, err
}
-hash := blobDigest.Hex()
+hash := blobDigest.Encoded()
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
@@ -282,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest,
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
-ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
-destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
+destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
return err
}
@@ -323,7 +323,7 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle,
}
func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
-ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
destinationPath := filepath.Dir(blob.BlobPath)
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
@@ -348,10 +348,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
d.repo = repo
}
-if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
+if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return false, private.ReusedBlob{}, err
}
-branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
@@ -479,7 +479,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
-hash := layer.Digest.Hex()
+hash := layer.Digest.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
@@ -488,7 +488,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
-hash := layer.BlobSum.Hex()
+hash := layer.BlobSum.Encoded()
if err = checkLayer(hash); err != nil {
return err
}

View File

@@ -289,7 +289,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return nil, -1, err
}
-blob := info.Digest.Hex()
+blob := info.Digest.Encoded()
// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
if s.compressed == nil {
@@ -301,7 +301,7 @@ }
}
compressedBlob, isCompressed := s.compressed[info.Digest]
if isCompressed {
-blob = compressedBlob.Hex()
+blob = compressedBlob.Encoded()
}
branch := fmt.Sprintf("ociimage/%s", blob)
@@ -424,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
layerBlobs := man.LayerInfos()
for _, layerBlob := range layerBlobs {
-branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return nil, err

View File

@@ -111,7 +111,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
History: []imgspecv1.History{
{
Created: &created,
-CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
+CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator),
Comment: "imported from SIF, uuid: " + sifImg.ID(),
},
{

View File

@@ -564,7 +564,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
}
// ordinaryImageID is a digest of a config, which is a JSON value.
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
-tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
+tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
return tocImageID
}
@@ -651,11 +651,11 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
defer s.lock.Unlock()
if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
-return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
+return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
}
if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
-return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
+return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
}
return "", false
}
@@ -731,7 +731,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
id := layerIDComponent
if !layerIDComponentStandalone || parentLayer != "" {
-id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
+id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
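For context on the hunk above: layer IDs are chained, so each ID folds in the parent's ID plus a per-layer component, and Encoded() supplies the hex string that becomes the ID. A rough sketch of that pattern (hypothetical helper, simplified from the real commitLayer logic):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// chainedLayerID is a hypothetical helper mirroring the pattern above: unless a
// standalone component with no parent can be used directly, the ID is the
// encoded canonical digest of "<parentID>+<component>".
func chainedLayerID(parentLayer, layerIDComponent string, standalone bool) string {
	if standalone && parentLayer == "" {
		return layerIDComponent
	}
	return digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
}

func main() {
	base := chainedLayerID("", "aaaa", true)
	child := chainedLayerID(base, "bbbb", true)
	fmt.Println(base, child)
}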

View File

@@ -437,8 +437,8 @@ func TestWriteRead(t *testing.T) {
manifest = strings.ReplaceAll(manifest, "%ch", config.compressedDigest.String())
manifest = strings.ReplaceAll(manifest, "%ls", fmt.Sprintf("%d", layer.compressedSize))
manifest = strings.ReplaceAll(manifest, "%cs", fmt.Sprintf("%d", config.compressedSize))
-manifest = strings.ReplaceAll(manifest, "%li", layer.compressedDigest.Hex())
-manifest = strings.ReplaceAll(manifest, "%ci", config.compressedDigest.Hex())
+manifest = strings.ReplaceAll(manifest, "%li", layer.compressedDigest.Encoded())
+manifest = strings.ReplaceAll(manifest, "%ci", config.compressedDigest.Encoded())
t.Logf("this manifest is %q", manifest)
err = dest.PutManifest(context.Background(), []byte(manifest), nil)
require.NoError(t, err)

View File

@@ -117,7 +117,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
history = append(history, imgspecv1.History{
Created: &blobTime,
-CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
+CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Encoded(), os.PathSeparator),
Comment: comment,
})
// Use the mtime of the most recently modified file as the image's creation time.