mirror of https://github.com/quay/quay.git synced 2026-01-26 06:21:37 +03:00
quay/workers/gc/gcworker.py
Ivan Bazulic 62e42f8f54 storage: Disable pushes on registry (PROJQUAY-6870) (#2755)
* storage: Disable pushes on registry (PROJQUAY-6870)

The current read-only option for Quay is sometimes not feasible, since it requires inserting a service key and making other manual config changes. For instance, if you want to recalculate quota on the registry but would still like to allow all registry operations (including the UI), blocking only pushes until the recalculation is done, setting the whole registry `read-only` does not work, since it makes the database read-only as well.

This PR introduces a new flag called `DISABLE_PUSHES` which allows all registry operations to continue (changing tags, editing repos, creating/deleting robot accounts, creating users, etc.) but disables pushes of new images to the registry (i.e. backend storage will not change). If the registry already contains the image and a new tag is simply being added, that operation still succeeds.
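
For illustration, the gating behavior amounts to a check like the sketch below. The helper name and wiring are hypothetical (the actual change lives in the v2 endpoint code); only the `DISABLE_PUSHES` config key and the 405 error payload are taken from this PR:

~~~python
from flask import jsonify

from app import app


# Hypothetical sketch of the push gate; not the literal code in this PR.
def reject_if_pushes_disabled():
    if app.config.get("DISABLE_PUSHES", False):
        response = jsonify(
            {
                "errors": [
                    {
                        "code": "METHOD NOT ALLOWED",
                        "detail": {},
                        "message": "Pushes to the registry are currently disabled. "
                        "Please contact the administrator for more information.",
                    }
                ]
            }
        )
        response.status_code = 405
        return response
    return None  # pushes are enabled; let the request proceed
~~~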

When a push is rejected, messages like the following appear in the logs:

~~~
gunicorn-registry stdout | 2024-03-13 20:19:49,414 [369] [DEBUG] [endpoints.v2] sending response: b'{"errors":[{"code":"METHOD NOT ALLOWED","detail":{},"message":"Pushes to the registry are currently disabled. Please contact the administrator for more information."}]}\n'
gunicorn-registry stdout | 2024-03-13 20:19:49,414 [369] [INFO] [gunicorn.access] 172.17.0.1 - - [13/Mar/2024:20:19:49 +0000] "PUT /v2/ibazulic/mariadb/manifests/sha256:c4694ba424e0259694a5117bbb510d67340051f0bdb7f9fa8033941a2d66e53e HTTP/1.1" 405 169 "-" "skopeo/1.9.3"
nginx stdout | 172.17.0.1 (-) - - [13/Mar/2024:20:19:49 +0000] "PUT /v2/ibazulic/mariadb/manifests/sha256:c4694ba424e0259694a5117bbb510d67340051f0bdb7f9fa8033941a2d66e53e HTTP/1.1" 405 169 "-" "skopeo/1.9.3" (0.002 3813 0.002)
~~~

The flag defaults to `False` (pushes enabled).
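
To opt in, the flag would be set in the registry's `config.yaml`, e.g. (excerpt shown with the non-default value):

~~~yaml
# config.yaml (excerpt)
DISABLE_PUSHES: true
~~~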

* Removed constraint on storage replication when pushes are disabled

* Rebase

* Fix isort sorting

* Fix isort sorting #2

* Removed constraint on storage replication when pushes are disabled

* Rebase

* Remove constraint on storage replication worker

* Fix linting on config.py
2024-08-07 15:40:10 -04:00

119 lines · 4.1 KiB · Python

~~~python
import logging
import time
from contextlib import contextmanager

import features
from app import app
from data.database import Repository, RepositoryState, UseThenDisconnect
from data.model.gc import garbage_collect_repo
from data.model.repository import get_random_gc_policy
from data.registry_model import registry_model
from notifications.notificationevent import RepoImageExpiryEvent
from util.locking import GlobalLock, LockNotAcquiredException
from util.metrics.prometheus import gc_iterations
from util.notification import scan_for_image_expiry_notifications
from workers.gunicorn_worker import GunicornWorker
from workers.worker import Worker

logger = logging.getLogger(__name__)

REPOSITORY_GC_TIMEOUT = 3 * 60 * 60  # 3h
LOCK_TIMEOUT_PADDING = 60  # 60 seconds


@contextmanager
def empty_context():
    yield None


class GarbageCollectionWorker(Worker):
    def __init__(self):
        super(GarbageCollectionWorker, self).__init__()
        self.add_operation(
            self._garbage_collection_repos, app.config.get("GARBAGE_COLLECTION_FREQUENCY", 30)
        )
        if features.IMAGE_EXPIRY_TRIGGER:
            self.add_operation(
                self._scan_notifications, app.config.get("GARBAGE_COLLECTION_FREQUENCY", 30)
            )

    def _scan_notifications(self):
        # Scan for tags that are expiring based on configured RepositoryNotifications.
        scan_for_image_expiry_notifications(event_name=RepoImageExpiryEvent.event_name())

    def _garbage_collection_repos(self, skip_lock_for_testing=False):
        """
        Performs garbage collection on repositories.
        """
        with UseThenDisconnect(app.config):
            policy = get_random_gc_policy()
            if policy is None:
                logger.debug("No GC policies found")
                return

            repo_ref = registry_model.find_repository_with_garbage(policy)
            if repo_ref is None:
                logger.debug("No repository with garbage found")
                return

            assert features.GARBAGE_COLLECTION

            try:
                with (
                    GlobalLock(
                        "REPO_GARBAGE_COLLECTION_%s" % repo_ref.id,
                        lock_ttl=REPOSITORY_GC_TIMEOUT + LOCK_TIMEOUT_PADDING,
                    )
                    if not skip_lock_for_testing
                    else empty_context()
                ):
                    try:
                        repository = Repository.get(id=repo_ref.id)
                    except Repository.DoesNotExist:
                        return

                    logger.debug(
                        "Starting GC of repository #%s (%s)", repository.id, repository.name
                    )
                    garbage_collect_repo(repository)
                    logger.debug(
                        "Finished GC of repository #%s (%s)", repository.id, repository.name
                    )

                    gc_iterations.inc()
            except LockNotAcquiredException:
                logger.debug("Could not acquire repo lock for garbage collection")


def create_gunicorn_worker():
    """
    follows the gunicorn application factory pattern, enabling
    a quay worker to run as a gunicorn worker thread.

    this is useful when utilizing gunicorn's hot reload in local dev.

    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
    """
    worker = GunicornWorker(__name__, app, GarbageCollectionWorker(), features.GARBAGE_COLLECTION)
    return worker


if __name__ == "__main__":
    if app.config.get("ACCOUNT_RECOVERY_MODE", False):
        logger.debug("Quay running in account recovery mode")
        while True:
            time.sleep(100000)

    if not features.GARBAGE_COLLECTION:
        logger.debug("Garbage collection is disabled; skipping")
        while True:
            time.sleep(100000)

    if app.config.get("DISABLE_PUSHES", False):
        logger.debug("Pushes to the registry are disabled; skipping startup")
        while True:
            time.sleep(100000)

    GlobalLock.configure(app.config)
    worker = GarbageCollectionWorker()
    worker.start()
~~~
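
The `skip_lock_for_testing` parameter suggests how a GC pass might be exercised directly, e.g. from a test; a minimal sketch (the usage below is hypothetical, not part of the file above):

~~~python
# Hypothetical direct invocation, e.g. from a test; passing
# skip_lock_for_testing=True substitutes empty_context() for GlobalLock,
# so no distributed lock service needs to be configured.
from workers.gc.gcworker import GarbageCollectionWorker

worker = GarbageCollectionWorker()
worker._garbage_collection_repos(skip_lock_for_testing=True)
~~~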