fix: handle error on Close() of writable objects
For objects (particularly files) that we write to, we need to check the return value of f.Close() and pass the error back to the caller if it fails.

Signed-off-by: Adam Eijdenberg <adam@continusec.com>
parent 16ec61f640
commit bcbb44651e
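The fix applies one pattern throughout: give the function a named error result, and fold the error from a deferred Close() into it. A plain `defer f.Close()` discards that error, and for writable files Close() may be the first place the OS reports a write failure. A minimal standalone sketch of the pattern (writeFile and its arguments are hypothetical, not part of this commit):

package example

import "os"

// writeFile is a hypothetical helper showing the commit's pattern: retErr is
// a named result, so the deferred closure can report a Close() failure to the
// caller unless an earlier error already takes precedence.
func writeFile(path string, data []byte) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		closeErr := f.Close()
		if retErr == nil {
			retErr = closeErr
		}
	}()

	_, err = f.Write(data)
	return err
}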
@@ -178,7 +178,7 @@ func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, opti
 // tar converts the directory at src and saves it to dst
 // if contentModTimes is non-nil, tar header entries times are set to this
-func tarDirectory(src, dst string, contentModTimes *time.Time) error {
+func tarDirectory(src, dst string, contentModTimes *time.Time) (retErr error) {
 	// input is a stream of bytes from the archive of the directory at path
 	input, err := archive.TarWithOptions(src, &archive.TarOptions{
 		Compression: archive.Uncompressed,
@@ -197,7 +197,14 @@ func tarDirectory(src, dst string, contentModTimes *time.Time) error {
 	if err != nil {
 		return fmt.Errorf("creating tar file %q: %w", dst, err)
 	}
-	defer outFile.Close()
+
+	// since we are writing to this file, make sure we handle errors
+	defer func() {
+		closeErr := outFile.Close()
+		if retErr == nil {
+			retErr = closeErr
+		}
+	}()
 
 	// copies the contents of the directory to the tar file
 	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
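Since Go 1.20, errors.Join offers a more compact equivalent that additionally preserves both errors when the write and the Close() both fail; the commit instead keeps only the first error. A sketch of that alternative (not what the diff above uses):

package example

import (
	"errors"
	"os"
)

// writeFileJoin is a hypothetical variant of the pattern using errors.Join.
func writeFileJoin(path string, data []byte) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		// Joins the Close() error with any earlier error; nil inputs vanish.
		retErr = errors.Join(retErr, f.Close())
	}()
	_, err = f.Write(data)
	return err
}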
@@ -159,7 +159,7 @@ func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
 	return saveJSON(ref.indexPath(), index)
 }
 
-func saveJSON(path string, content any) error {
+func saveJSON(path string, content any) (retErr error) {
 	// If the file already exists, get its mode to preserve it
 	var mode fs.FileMode
 	existingfi, err := os.Stat(path)
@@ -177,7 +177,13 @@ func saveJSON(path string, content any) error {
 	if err != nil {
 		return err
 	}
-	defer file.Close()
+	// since we are writing to this file, make sure we handle errors
+	defer func() {
+		closeErr := file.Close()
+		if retErr == nil {
+			retErr = closeErr
+		}
+	}()
 
 	return json.NewEncoder(file).Encode(content)
 }
@@ -115,7 +115,7 @@ func (d *ociImageDestination) Close() error {
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (_ private.UploadedBlob, retErr error) {
 	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
 	if err != nil {
 		return private.UploadedBlob{}, err
@@ -124,7 +124,10 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	explicitClosed := false
 	defer func() {
 		if !explicitClosed {
-			blobFile.Close()
+			closeErr := blobFile.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
 		}
 		if !succeeded {
 			os.Remove(blobFile.Name())
@@ -176,7 +179,10 @@ func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDiges
 	}
 
 	// need to explicitly close the file, since a rename won't otherwise work on Windows
-	blobFile.Close()
+	err = blobFile.Close()
+	if err != nil {
+		return err
+	}
 	*closed = true
 
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
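When a file must be closed explicitly before a rename, the Close() error can be checked inline rather than in a defer. A minimal sketch under that assumption (writeThenRename and its arguments are hypothetical):

package example

import "os"

// writeThenRename is a hypothetical helper: write to a temp file, close it,
// then move it into place. Close() must succeed before Rename(), both because
// Windows refuses to rename open files and because a failed Close() can mean
// the data never fully reached disk.
func writeThenRename(tmp *os.File, dst string, data []byte) error {
	if _, err := tmp.Write(data); err != nil {
		tmp.Close() // best-effort cleanup; the Write error takes precedence
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), dst)
}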
@@ -323,7 +329,7 @@ type PutBlobFromLocalFileOption struct{}
 // It computes, and returns, the digest and size of the used file.
 //
 // This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
-func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (_ digest.Digest, _ int64, retErr error) {
 	d, ok := dest.(*ociImageDestination)
 	if !ok {
 		return "", -1, errors.New("caller error: PutBlobFromLocalFile called with a non-oci: destination")
@@ -337,7 +343,10 @@ func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file
 	}
 	defer func() {
 		if !blobFileClosed {
-			blobFile.Close()
+			closeErr := blobFile.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
 		}
 		if !succeeded {
 			os.Remove(blobFile.Name())
@@ -143,16 +143,24 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		return private.UploadedBlob{}, err
 	}
 
-	digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
-
 	blobPath := filepath.Join(tmpDir, "content")
 	blobFile, err := os.Create(blobPath)
 	if err != nil {
 		return private.UploadedBlob{}, err
 	}
-	defer blobFile.Close()
 
-	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-	size, err := io.Copy(blobFile, stream)
+	digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
+	size, err := func() (_ int64, retErr error) { // A scope for defer
+		// since we are writing to this file, make sure we handle errors
+		defer func() {
+			closeErr := blobFile.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
+		}()
+		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+		return io.Copy(blobFile, stream)
+	}()
 	if err != nil {
 		return private.UploadedBlob{}, err
 	}
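Where the file must be fully closed, with its Close() error observed, before the enclosing function continues, the commit wraps the write in an immediately invoked closure so the defer fires at the end of that scope rather than at function exit. A minimal standalone sketch of this "scope for defer" shape (copyToFile is hypothetical):

package example

import (
	"io"
	"os"
)

// copyToFile is a hypothetical helper: the closure bounds the lifetime of f,
// so f is closed, and its Close() error folded into retErr, before the caller
// goes on to use the fully written file.
func copyToFile(path string, src io.Reader) (int64, error) {
	f, err := os.Create(path)
	if err != nil {
		return 0, err
	}
	return func() (_ int64, retErr error) { // A scope for defer
		defer func() {
			closeErr := f.Close()
			if retErr == nil {
				retErr = closeErr
			}
		}()
		return io.Copy(f, src)
	}()
}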
@@ -247,9 +255,15 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
 	return err
 }
 
-func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
+func generateTarSplitMetadata(output *bytes.Buffer, file string) (_ digest.Digest, _ int64, retErr error) {
 	mfz := pgzip.NewWriter(output)
-	defer mfz.Close()
+	// since we are writing to this, make sure we handle errors
+	defer func() {
+		closeErr := mfz.Close()
+		if retErr == nil {
+			retErr = closeErr
+		}
+	}()
 	metaPacker := storage.NewJSONPacker(mfz)
 
 	stream, err := os.OpenFile(file, os.O_RDONLY, 0)
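For compressing writers like pgzip, checking the Close() error matters even more than for plain files: Close() flushes buffered compressed data and writes the gzip footer, so ignoring it can silently truncate the output. A minimal illustration (compressInto is hypothetical and uses the standard library gzip rather than pgzip):

package example

import (
	"bytes"
	"compress/gzip"
)

// compressInto is a hypothetical helper: without checking zw.Close(), the
// final compressed block and gzip footer could be lost without any error.
func compressInto(buf *bytes.Buffer, data []byte) (retErr error) {
	zw := gzip.NewWriter(buf)
	defer func() {
		closeErr := zw.Close()
		if retErr == nil {
			retErr = closeErr
		}
	}()
	_, err := zw.Write(data)
	return err
}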
@@ -87,14 +87,20 @@ func new2(path string) (*cache, error) {
 	if err != nil {
 		return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
 	}
-	defer db.Close()
-
-	// We don’t check the schema before every operation, because that would be costly
-	// and because we assume schema changes will be handled by using a different path.
-	if err := ensureDBHasCurrentSchema(db); err != nil {
+	err = func() (retErr error) { // A scope for defer
+		defer func() {
+			closeErr := db.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
+		}()
+		// We don’t check the schema before every operation, because that would be costly
+		// and because we assume schema changes will be handled by using a different path.
+		return ensureDBHasCurrentSchema(db)
+	}()
+	if err != nil {
 		return nil, err
 	}
 
 	return &cache{
 		path:     path,
 		refCount: 0,
@@ -147,25 +153,30 @@ func (sqc *cache) Close() {
 type void struct{} // So that we don’t have to write struct{}{} all over the place
 
 // transaction calls fn within a read-write transaction in sqc.
-func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
-	db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) {
+	db, closeDB, err := func() (*sql.DB, func() error, error) { // A scope for defer
 		sqc.lock.Lock()
 		defer sqc.lock.Unlock()
 
 		if sqc.db != nil {
-			return sqc.db, func() {}, nil
+			return sqc.db, func() error { return nil }, nil
 		}
 		db, err := rawOpen(sqc.path)
 		if err != nil {
 			return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
 		}
-		return db, func() { db.Close() }, nil
+		return db, db.Close, nil
 	}()
 	if err != nil {
 		var zeroRes T // A zero value of T
		return zeroRes, err
 	}
-	defer closeDB()
+	defer func() {
+		closeErr := closeDB()
+		if retErr == nil {
+			retErr = closeErr
+		}
+	}()
 
 	return dbTransaction(db, fn)
 }
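Changing the cleanup callback's type from func() to func() error is what lets the Close() failure propagate: the shared-connection case returns a nil-returning no-op, and the owned-connection case returns db.Close directly as a method value. A generic sketch of that shape (Resource and acquire are hypothetical):

package example

import "errors"

// Resource is a hypothetical handle whose Close() can fail.
type Resource struct{ closed bool }

func (r *Resource) Close() error {
	if r.closed {
		return errors.New("already closed")
	}
	r.closed = true
	return nil
}

// acquire returns the resource plus a cleanup whose error the caller must
// check; a shared resource gets a nil-returning no-op, an owned one gets
// its Close method directly.
func acquire(shared *Resource) (*Resource, func() error, error) {
	if shared != nil {
		return shared, func() error { return nil }, nil
	}
	r := &Resource{}
	return r, r.Close, nil
}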
@@ -134,7 +134,7 @@ func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Nam
 // editShortNameAlias loads the aliases.conf file and changes it. If value is
 // set, it adds the name-value pair as a new alias. Otherwise, it will remove
 // name from the config.
-func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
+func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) {
 	if err := validateShortName(name); err != nil {
 		return err
 	}
@@ -178,7 +178,13 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er
 	if err != nil {
 		return err
 	}
-	defer f.Close()
+	// since we are writing to this file, make sure we handle err on Close()
+	defer func() {
+		closeErr := f.Close()
+		if retErr == nil {
+			retErr = closeErr
+		}
+	}()
 
 	encoder := toml.NewEncoder(f)
 	return encoder.Encode(conf)
sif/load.go
@@ -186,12 +186,18 @@ func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir
 	// has an -o option that allows extracting a squashfs from the SIF file directly,
 	// but that version is not currently available in RHEL 8.
 	logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
-	if err := func() error { // A scope for defer
+	if err := func() (retErr error) { // A scope for defer
 		f, err := os.Create(squashFSPath)
 		if err != nil {
 			return err
 		}
-		defer f.Close()
+		// since we are writing to this file, make sure we handle err on Close()
+		defer func() {
+			closeErr := f.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
+		}()
 		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 		if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
 			return err
@@ -272,43 +272,56 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
 	if err != nil {
 		return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
 	}
-	defer file.Close()
-	counter := ioutils.NewWriteCounter(file)
-	stream = io.TeeReader(stream, counter)
-	digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
-	decompressed, err := archive.DecompressStream(stream)
-	if err != nil {
-		return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
-	}
+	blobDigest, diffID, count, err := func() (_, _ digest.Digest, _ int64, retErr error) { // A scope for defer
+		// since we are writing to this file, make sure we handle err on Close()
+		defer func() {
+			closeErr := file.Close()
+			if retErr == nil {
+				retErr = closeErr
+			}
+		}()
+		counter := ioutils.NewWriteCounter(file)
+		stream = io.TeeReader(stream, counter)
+		digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
+		decompressed, err := archive.DecompressStream(stream)
+		if err != nil {
+			return "", "", 0, fmt.Errorf("setting up to decompress blob: %w", err)
+		}
+		defer decompressed.Close()
 
-	diffID := digest.Canonical.Digester()
-	// Copy the data to the file.
-	// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
-	_, err = io.Copy(diffID.Hash(), decompressed)
-	decompressed.Close()
+		diffID := digest.Canonical.Digester()
+		// Copy the data to the file.
+		// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
+		_, err = io.Copy(diffID.Hash(), decompressed)
+		if err != nil {
+			return "", "", 0, fmt.Errorf("storing blob to file %q: %w", filename, err)
+		}
+
+		return digester.Digest(), diffID.Digest(), counter.Count, nil
+	}()
 	if err != nil {
-		return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
+		return private.UploadedBlob{}, err
 	}
 
 	// Determine blob properties, and fail if information that we were given about the blob
 	// is known to be incorrect.
-	blobDigest := digester.Digest()
 	blobSize := blobinfo.Size
 	if blobSize < 0 {
-		blobSize = counter.Count
-	} else if blobinfo.Size != counter.Count {
+		blobSize = count
+	} else if blobinfo.Size != count {
 		return private.UploadedBlob{}, ErrBlobSizeMismatch
 	}
 
 	// Record information about the blob.
 	s.lock.Lock()
-	s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest()
-	s.lockProtected.fileSizes[blobDigest] = counter.Count
+	s.lockProtected.blobDiffIDs[blobDigest] = diffID
+	s.lockProtected.fileSizes[blobDigest] = count
 	s.lockProtected.filenames[blobDigest] = filename
 	s.lock.Unlock()
 	// This is safe because we have just computed diffID, and blobDigest was either computed
 	// by us, or validated by the caller (usually copy.digestingReader).
-	options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+	options.Cache.RecordDigestUncompressedPair(blobDigest, diffID)
 	return private.UploadedBlob{
 		Digest: blobDigest,
 		Size:   blobSize,
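In this larger hunk the closure returns every value computed while the file was open (blobDigest, diffID, count) instead of leaving them in outer variables, so all state derived from the open file only becomes visible once the file has been successfully closed. A compact sketch of that shape (hashAndCount is hypothetical):

package example

import (
	"crypto/sha256"
	"encoding/hex"
	"io"
	"os"
)

// hashAndCount is a hypothetical helper mirroring the hunk above: the digest
// and byte count are returned from the closure, so the rest of the function
// sees them only after Close() has succeeded.
func hashAndCount(path string, src io.Reader) (string, int64, error) {
	f, err := os.Create(path)
	if err != nil {
		return "", 0, err
	}
	sum, count, err := func() (_ string, _ int64, retErr error) { // A scope for defer
		defer func() {
			closeErr := f.Close()
			if retErr == nil {
				retErr = closeErr
			}
		}()
		h := sha256.New()
		n, err := io.Copy(io.MultiWriter(f, h), src)
		if err != nil {
			return "", 0, err
		}
		return hex.EncodeToString(h.Sum(nil)), n, nil
	}()
	if err != nil {
		return "", 0, err
	}
	return sum, count, nil
}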