mirror of
https://github.com/quay/quay.git
synced 2025-07-28 20:22:05 +03:00
Have the BlobUpload cleanup worker run on a single instance only (#239)
Instead of running simultaneously across multiple nodes, the worker now runs on only a single instance at a time, guarded by a global lock. This reduces load on the DB, and the previous concurrent behavior was unnecessary given the small size of the BlobUpload table. Fixes https://issues.redhat.com/browse/PROJQUAY-365
This commit is contained in:
@ -1,29 +1,45 @@
|
||||
import logging
|
||||
import logging.config
|
||||
|
||||
from datetime import timedelta
|
||||
from datetime import timedelta, datetime
|
||||
|
||||
from app import app, storage
|
||||
from data.database import UseThenDisconnect
|
||||
from workers.blobuploadcleanupworker.models_pre_oci import pre_oci_model as model
|
||||
from workers.worker import Worker
|
||||
from util.log import logfile_path
|
||||
from util.locking import GlobalLock, LockNotAcquiredException
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DELETION_DATE_THRESHOLD = timedelta(days=2)
|
||||
BLOBUPLOAD_CLEANUP_FREQUENCY = app.config.get("BLOBUPLOAD_CLEANUP_FREQUENCY", 60 * 60)
|
||||
LOCK_TTL = 60 * 20 # 20 minutes
|
||||
|
||||
|
||||
class BlobUploadCleanupWorker(Worker):
|
||||
def __init__(self):
    """Worker that periodically garbage collects stale BlobUpload rows."""
    super(BlobUploadCleanupWorker, self).__init__()
    # Schedule the lock-guarded cleanup pass to run on the configured interval.
    self.add_operation(self._try_cleanup_uploads, BLOBUPLOAD_CLEANUP_FREQUENCY)
|
||||
|
||||
def _cleanup_uploads(self):
|
||||
def _try_cleanup_uploads(self):
    """
    Performs garbage collection on the blobupload table, guarded by a
    global lock so only one instance runs the cleanup at a time.

    If the lock is already held by another instance, this cycle is a no-op.
    """
    cleanup_lock = GlobalLock("BLOB_CLEANUP", lock_ttl=LOCK_TTL)
    try:
        with cleanup_lock:
            self._cleanup_uploads()
    except LockNotAcquiredException:
        # Another instance is performing the cleanup; skip this cycle.
        logger.debug("Could not acquire global lock for blob upload cleanup worker")
|
||||
|
||||
def _cleanup_uploads(self):
|
||||
"""
|
||||
Performs cleanup on the blobupload table.
|
||||
"""
|
||||
logger.debug("Performing blob upload cleanup")
|
||||
|
||||
while True:
|
||||
# Find all blob uploads older than DELETION_DATE_THRESHOLD (two days) and delete them.
|
||||
with UseThenDisconnect(app.config):
|
||||
@ -34,6 +50,8 @@ class BlobUploadCleanupWorker(Worker):
|
||||
|
||||
# Remove the stale upload from storage.
|
||||
logger.debug("Removing stale blob upload %s", stale_upload.uuid)
|
||||
assert stale_upload.created <= (datetime.utcnow() - DELETION_DATE_THRESHOLD)
|
||||
|
||||
try:
|
||||
storage.cancel_chunked_upload(
|
||||
[stale_upload.location_name], stale_upload.uuid, stale_upload.storage_metadata
|
||||
|
Reference in New Issue
Block a user