Mirror of https://github.com/quay/quay.git, synced 2025-04-18 10:44:06 +03:00
registry: implements the OCI 1.1 referrers API (PROJQUAY-7280) (#2597)
* registry: implements the OCI 1.1 referrers API

  Migrations:
  - Adds a subject column for lookup
  - Adds a subject_backfilled column to track the status of backfilling existing manifests
  - Adds a manifest_json column making use of Postgres' JSONB support, for future use

  Manifestsubjectbackfillworker: indexes existing manifests for a possible existing subject field.

* Deprecate IGNORE_UNKNOWN_MEDIATYPES

* Cleanup
This commit is contained in:
parent 1cc6d5292d
commit 4546163e83
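Before the diff, a quick sketch of the shapes this change deals with: an OCI 1.1 referrer is a manifest whose subject descriptor points at another manifest, and the new endpoint answers GET /v2/<repository>/referrers/<digest> with an OCI image index listing every stored manifest whose subject matches that digest. The digests, sizes, and artifactType below are made up for illustration.

# A referrer: an OCI image manifest whose "subject" points at the manifest it annotates.
# All digests, sizes, and the artifactType are hypothetical.
referrer_manifest = {
    "schemaVersion": 2,
    "mediaType": "application/vnd.oci.image.manifest.v1+json",
    "artifactType": "application/vnd.example.signature.v1+json",
    "config": {
        "mediaType": "application/vnd.oci.image.config.v1+json",
        "digest": "sha256:" + "a" * 64,
        "size": 145,
    },
    "layers": [],
    "subject": {
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "digest": "sha256:" + "b" * 64,  # the manifest being referred to
        "size": 525,
    },
}

# Listing its referrers afterwards:
#   GET /v2/<repository>/referrers/sha256:bbb...  ->  an OCI image index whose
#   "manifests" entries describe every stored manifest with that subject.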
.github/workflows/CI.yaml (vendored): 13 changes
@@ -259,6 +259,19 @@ jobs:
          name: cypress-videos
          path: web/cypress/videos

      - name: Create report
        run: |
          mkdir -p .logs/
          docker logs quay-quay >.logs/quay.log 2>&1 || true
        if: always()

      - name: Upload Quay logs
        uses: actions/upload-artifact@v3
        with:
          name: logs
          path: .logs/
        if: always()

  mysql:
    name: E2E MySQL Test
    runs-on: ubuntu-20.04
@@ -48,6 +48,7 @@ def registry_services():
        "servicekey": {"autostart": "true"},
        "repomirrorworker": {"autostart": "false"},
        "manifestbackfillworker": {"autostart": "true"},
        "manifestsubjectbackfillworker": {"autostart": "true"},
        "securityscanningnotificationworker": {"autostart": "true"},
        "config-editor": {"autostart": "false"},
        "quotatotalworker": {"autostart": "true"},

@@ -87,6 +88,7 @@ def config_services():
        "servicekey": {"autostart": "false"},
        "repomirrorworker": {"autostart": "false"},
        "manifestbackfillworker": {"autostart": "false"},
        "manifestsubjectbackfillworker": {"autostart": "false"},
        "securityscanningnotificationworker": {"autostart": "false"},
        "config-editor": {"autostart": "true"},
        "quotatotalworker": {"autostart": "false"},
@@ -320,6 +320,18 @@ autostart = {{ config['manifestbackfillworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:manifestsubjectbackfillworker]
environment=
    PYTHONPATH=%(ENV_QUAYDIR)s
command={% if hotreload -%}
    gunicorn --timeout=600 -b 'unix:/tmp/manifestsubjectbackfillworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.manifestsubjectbackfillworker:create_gunicorn_worker()'
    {% else -%}
    python -m workers.manifestsubjectbackfillworker
    {% endif -%}
autostart = {{ config['manifestsubjectbackfillworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:securityscanningnotificationworker]
environment=
    PYTHONPATH=%(ENV_QUAYDIR)s
@@ -765,12 +765,12 @@ class DefaultConfig(ImmutableConfig):
        ],
    }

    IGNORE_UNKNOWN_MEDIATYPES = False

    # Feature Flag: Whether to allow Helm OCI content types.
    # See: https://helm.sh/docs/topics/registries/
    FEATURE_HELM_OCI_SUPPORT = True

    FEATURE_REFERRERS_API = False

    # The set of hostnames disallowed from webhooks, beyond localhost (which will
    # not work due to running inside a container).
    WEBHOOK_HOSTNAME_BLACKLIST: Optional[List[str]] = []

@@ -784,8 +784,9 @@ class DefaultConfig(ImmutableConfig):
    # Feature Flag: Whether the repository action count worker is enabled.
    FEATURE_REPOSITORY_ACTION_COUNTER = True

    # TEMP FEATURE: Backfill the sizes of manifests.
    # TEMP FEATURE: Backfill the sizes and subjects of manifests.
    FEATURE_MANIFEST_SIZE_BACKFILL = True
    FEATURE_MANIFEST_SUBJECT_BACKFILL = False

    # Repos created by push default to private visibility
    CREATE_PRIVATE_REPO_ON_PUSH = True
data/cache/cache_key.py (vendored): 8 changes
@@ -93,3 +93,11 @@ def for_repository_manifest(repository_id, digest, cache_config):
    """
    cache_ttl = cache_config.get("repository_manifest_cache_ttl", "300s")
    return CacheKey("repository_manifest__%s_%s" % (repository_id, digest), cache_ttl)


def for_manifest_referrers(repository_id, manifest_digest, cache_config):
    """
    Returns a cache key for listing a manifest's referrers.
    """
    cache_ttl = cache_config.get("manifest_referrers_cache_ttl", "60s")
    return CacheKey(f"manifest_referrers__{repository_id}_{manifest_digest}", cache_ttl)
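As a quick orientation on the new cache key, a hedged sketch of calling it directly; the repository id and digest are made up, and the TTL override key is the same one read above.

from data.cache.cache_key import for_manifest_referrers

key = for_manifest_referrers(
    1,  # hypothetical repository id
    "sha256:" + "b" * 64,  # hypothetical subject digest
    {"manifest_referrers_cache_ttl": "120s"},  # overrides the 60s default above
)
print(key)  # a CacheKey composed from the repository id, digest, and the 120s TTL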
@@ -1729,6 +1729,8 @@ class Manifest(BaseModel):

    config_media_type = CharField(null=True)
    layers_compressed_size = BigIntegerField(null=True)
    subject = CharField(null=True)
    subject_backfilled = BooleanField(default=False)

    class Meta:
        database = db

@@ -1737,6 +1739,7 @@ class Manifest(BaseModel):
            (("repository", "digest"), True),
            (("repository", "media_type"), False),
            (("repository", "config_media_type"), False),
            (("repository", "subject"), False),
        )
@@ -0,0 +1,42 @@
"""Create manifestsubject table

Revision ID: 946f0e90f9c9
Revises: 2062bbd5ef0e
Create Date: 2023-11-17 12:06:16.662150

"""

# revision identifiers, used by Alembic.
revision = "946f0e90f9c9"
down_revision = "36cd2b747a08"

import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector


def upgrade(op, tables, tester):
    bind = op.get_bind()

    inspector = Inspector.from_engine(bind)
    manifest_columns = inspector.get_columns("manifest")
    manifest_indexes = inspector.get_indexes("manifest")

    if not "subject" in [c["name"] for c in manifest_columns]:
        op.add_column("manifest", sa.Column("subject", sa.String(length=255), nullable=True))

    if not "subject_backfilled" in [c["name"] for c in manifest_columns]:
        op.add_column("manifest", sa.Column("subject_backfilled", sa.Boolean()))

    if not "manifest_repository_id_subject" in [i["name"] for i in manifest_indexes]:
        op.create_index(
            "manifest_repository_id_subject",
            "manifest",
            ["repository_id", "subject"],
            unique=False,
        )


def downgrade(op, tables, tester):
    op.drop_index("manifest_repository_id_subject", table_name="manifest")
    op.drop_column("manifest", "subject")
    op.drop_column("manifest", "subject_backfilled")
@@ -349,6 +349,18 @@ def _check_manifest_used(manifest_id):
    except ManifestChild.DoesNotExist:
        pass

    Referrer = Manifest.alias()
    # Check if the manifest is the subject of another manifest.
    # Note: Manifest referrers with a valid subject field are created with a non-expiring
    # hidden tag, in order to prevent GC from inadvertently removing a referrer.
    try:
        Manifest.select().join(Referrer, on=(Manifest.digest == Referrer.subject)).where(
            Manifest.id == manifest_id
        ).get()

        return True
    except Manifest.DoesNotExist:
        pass

    return False
@@ -140,6 +140,18 @@ def _lookup_manifest(repository_id, manifest_digest, allow_dead=False, allow_hid
        return None


def lookup_manifest_referrers(repository_id, manifest_digest, config_media_type=None):
    query = (
        Manifest.select()
        .where(Manifest.repository == repository_id)
        .where(Manifest.subject == manifest_digest)
    )
    if config_media_type is not None:
        query = query.where(Manifest.config_media_type == config_media_type)

    return query


@overload
def create_manifest(
    repository_id: int,

@@ -175,9 +187,13 @@ def create_manifest(
            repository=repository_id,
            digest=manifest.digest,
            media_type=media_type,
            manifest_bytes=manifest.bytes.as_encoded_str(),
            manifest_bytes=manifest.bytes.as_encoded_str(),  # TODO(kleesc): Remove once fully on JSONB only
            config_media_type=manifest.config_media_type,
            layers_compressed_size=manifest.layers_compressed_size,
            subject_backfilled=True,  # TODO(kleesc): Remove once backfill is done
            subject=manifest.subject.digest
            if manifest.subject
            else None,  # TODO(kleesc): Remove once fully on JSONB only
        )
    except IntegrityError as e:
        # NOTE: An IntegrityError means (barring a bug) that the manifest was created by

@@ -378,6 +394,7 @@ def _create_manifest(
        create_temporary_tag_if_necessary(
            manifest,
            temp_tag_expiration_sec,
            skip_expiration=manifest_interface_instance.subject is not None,
        )

        # Define the labels for the manifest (if any).
@@ -24,6 +24,17 @@ class RepositoryContentRetriever(ContentRetriever):
    def for_repository(cls, repository_id, storage):
        return RepositoryContentRetriever(repository_id, storage)

    def get_manifest_with_digest(self, digest):
        query = (
            Manifest.select()
            .where(Manifest.repository == self.repository_id)
            .where(Manifest.digest == digest)
        )
        try:
            return query.get()
        except Manifest.DoesNotExist:
            return None

    def get_manifest_bytes_with_digest(self, digest):
        """
        Returns the bytes of the manifest with the given digest or None if none found.
@@ -326,14 +326,18 @@ def get_expired_tag(repository_id, tag_name):
        return None


def create_temporary_tag_if_necessary(manifest, expiration_sec):
def create_temporary_tag_if_necessary(manifest, expiration_sec, skip_expiration=False):
    """
    Creates a temporary tag pointing to the given manifest, with the given expiration in seconds,
    unless there is an existing tag that will keep the manifest around.
    """
    tag_name = "$temp-%s" % str(uuid.uuid4())
    now_ms = get_epoch_timestamp_ms()
    end_ms = now_ms + (expiration_sec * 1000)
    if skip_expiration:
        # Skip expiration for hidden tags used for OCI artifacts referring to a subject manifest
        end_ms = None
    else:
        end_ms = now_ms + (expiration_sec * 1000)

    # Check if there is an existing tag on the manifest that won't expire within the
    # timeframe. If so, no need for a temporary tag.
@ -1,5 +1,6 @@
|
||||
import json
|
||||
from test.fixtures import *
|
||||
import random
|
||||
import string
|
||||
|
||||
import pytest
|
||||
from playhouse.test_utils import assert_query_count
|
||||
@ -19,6 +20,7 @@ from data.model.oci.manifest import (
|
||||
CreateManifestException,
|
||||
get_or_create_manifest,
|
||||
lookup_manifest,
|
||||
lookup_manifest_referrers,
|
||||
)
|
||||
from data.model.oci.retriever import RepositoryContentRetriever
|
||||
from data.model.oci.tag import filter_to_alive_tags, get_tag
|
||||
@ -31,8 +33,11 @@ from image.docker.schema2.manifest import (
|
||||
DockerSchema2Manifest,
|
||||
DockerSchema2ManifestBuilder,
|
||||
)
|
||||
from image.oci.config import OCIConfig
|
||||
from image.oci.manifest import OCIManifestBuilder
|
||||
from image.shared.interfaces import ContentRetriever
|
||||
from image.shared.schemas import parse_manifest_from_bytes
|
||||
from test.fixtures import *
|
||||
from util.bytes import Bytes
|
||||
|
||||
|
||||
@ -594,3 +599,73 @@ def test_create_manifest_cannot_load_config_blob(initialized_db):
|
||||
get_or_create_manifest(
|
||||
repository, manifest, storage, retriever=broken_retriever, raise_on_error=True
|
||||
)
|
||||
|
||||
|
||||
def test_get_or_create_manifest_with_subject(initialized_db):
|
||||
def generate_random_data_for_layer():
|
||||
charset = string.ascii_uppercase + string.ascii_lowercase + string.digits
|
||||
return "".join(random.choice(charset) for _ in range(random.randrange(1, 20)))
|
||||
|
||||
repository = create_repository("devtable", "newrepo", None)
|
||||
|
||||
# Manifest 1
|
||||
# Add a blob containing the config.
|
||||
config1 = {
|
||||
"os": "linux",
|
||||
"architecture": "amd64",
|
||||
"rootfs": {"type": "layers", "diff_ids": []},
|
||||
"history": [],
|
||||
}
|
||||
config1_json = json.dumps(config1)
|
||||
_, config1_digest = _populate_blob(config1_json)
|
||||
|
||||
# Add a blob of random data.
|
||||
random_data1 = generate_random_data_for_layer()
|
||||
_, random_digest1 = _populate_blob(random_data1)
|
||||
|
||||
oci_builder1 = OCIManifestBuilder()
|
||||
oci_builder1.set_config_digest(config1_digest, len(config1_json.encode("utf-8")))
|
||||
oci_builder1.add_layer(random_digest1, len(random_data1.encode("utf-8")))
|
||||
oci_manifest1 = oci_builder1.build()
|
||||
|
||||
# Manifest 2
|
||||
# Add a blob containing the config.
|
||||
config2 = {
|
||||
"os": "linux",
|
||||
"architecture": "amd64",
|
||||
"rootfs": {"type": "layers", "diff_ids": []},
|
||||
"history": [],
|
||||
}
|
||||
config2_json = json.dumps(config2)
|
||||
_, config2_digest = _populate_blob(config2_json)
|
||||
|
||||
# Add a blob of random data.
|
||||
random_data2 = generate_random_data_for_layer()
|
||||
_, random_digest2 = _populate_blob(random_data1)
|
||||
|
||||
oci_builder2 = OCIManifestBuilder()
|
||||
oci_builder2.set_config_digest(config2_digest, len(config2_json.encode("utf-8")))
|
||||
oci_builder2.add_layer(random_digest2, len(random_data2.encode("utf-8")))
|
||||
oci_builder2.set_subject(
|
||||
oci_manifest1.digest, len(oci_manifest1.bytes.as_encoded_str()), oci_manifest1.media_type
|
||||
)
|
||||
oci_manifest2 = oci_builder2.build()
|
||||
|
||||
manifest1_created = get_or_create_manifest(repository, oci_manifest1, storage)
|
||||
assert manifest1_created
|
||||
|
||||
manifest2_created = get_or_create_manifest(repository, oci_manifest2, storage)
|
||||
assert manifest2_created
|
||||
|
||||
assert (
|
||||
oci_manifest2.subject
|
||||
and oci_manifest2.subject.digest == oci_manifest1.digest
|
||||
and oci_manifest2.subject.size == len(oci_manifest1.bytes.as_encoded_str())
|
||||
and oci_manifest2.subject.mediatype == oci_manifest1.media_type
|
||||
)
|
||||
|
||||
referrers = lookup_manifest_referrers(repository.id, oci_manifest1.digest)
|
||||
assert referrers and len(list(referrers)) == 1
|
||||
|
||||
referrer = referrers[0]
|
||||
assert referrer.digest == oci_manifest2.digest
|
||||
|
@ -1,6 +1,7 @@
|
||||
import hashlib
|
||||
import json
|
||||
import random
|
||||
import string
|
||||
from contextlib import contextmanager
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
@ -625,3 +626,80 @@ def test_purge_repository_storage_blob(default_tag_policy, initialized_db):
|
||||
assert not storage.exists(
|
||||
{preferred}, storage.blob_path(removed_blob_from_storage.content_checksum)
|
||||
)
|
||||
|
||||
|
||||
def test_delete_manifests_with_subject(initialized_db):
|
||||
def generate_random_data_for_layer():
|
||||
charset = string.ascii_uppercase + string.ascii_lowercase + string.digits
|
||||
return "".join(random.choice(charset) for _ in range(random.randrange(1, 20)))
|
||||
|
||||
repository = create_repository("devtable", "newrepo")
|
||||
|
||||
config1 = {
|
||||
"os": "linux",
|
||||
"architecture": "amd64",
|
||||
"rootfs": {"type": "layers", "diff_ids": []},
|
||||
"history": [],
|
||||
}
|
||||
config1_json = json.dumps(config1)
|
||||
_, config1_digest = _populate_blob(repository, config1_json.encode("ascii"))
|
||||
|
||||
# Add a blob of random data.
|
||||
random_data1 = generate_random_data_for_layer()
|
||||
_, random_digest1 = _populate_blob(repository, random_data1.encode("ascii"))
|
||||
|
||||
oci_builder1 = OCIManifestBuilder()
|
||||
oci_builder1.set_config_digest(config1_digest, len(config1_json.encode("utf-8")))
|
||||
oci_builder1.add_layer(random_digest1, len(random_data1.encode("utf-8")))
|
||||
oci_manifest1 = oci_builder1.build()
|
||||
|
||||
# Manifest 2
|
||||
# Add a blob containing the config.
|
||||
config2 = {
|
||||
"os": "linux",
|
||||
"architecture": "amd64",
|
||||
"rootfs": {"type": "layers", "diff_ids": []},
|
||||
"history": [],
|
||||
}
|
||||
config2_json = json.dumps(config2)
|
||||
_, config2_digest = _populate_blob(repository, config2_json.encode("ascii"))
|
||||
|
||||
# Add a blob of random data.
|
||||
random_data2 = generate_random_data_for_layer()
|
||||
_, random_digest2 = _populate_blob(repository, random_data1.encode("ascii"))
|
||||
|
||||
oci_builder2 = OCIManifestBuilder()
|
||||
oci_builder2.set_config_digest(config2_digest, len(config2_json.encode("utf-8")))
|
||||
oci_builder2.add_layer(random_digest2, len(random_data2.encode("utf-8")))
|
||||
oci_builder2.set_subject(
|
||||
oci_manifest1.digest, len(oci_manifest1.bytes.as_encoded_str()), oci_manifest1.media_type
|
||||
)
|
||||
oci_manifest2 = oci_builder2.build()
|
||||
|
||||
manifest1_created = model.oci.manifest.get_or_create_manifest(
|
||||
repository, oci_manifest1, storage
|
||||
)
|
||||
assert manifest1_created
|
||||
|
||||
# Delete temp tags for GC check
|
||||
Tag.delete().where(Tag.manifest == manifest1_created.manifest.id).execute()
|
||||
|
||||
# Subject does not have referrers yet
|
||||
assert not model.gc._check_manifest_used(manifest1_created.manifest.id)
|
||||
|
||||
manifest2_created = model.oci.manifest.get_or_create_manifest(
|
||||
repository, oci_manifest2, storage
|
||||
)
|
||||
assert manifest2_created
|
||||
|
||||
# Check that the "temp" tag won't expire for the referrer
|
||||
tag2 = Tag.select().where(Tag.manifest == manifest2_created.manifest.id).get()
|
||||
assert tag2.lifetime_end_ms is None
|
||||
|
||||
assert model.gc._check_manifest_used(manifest1_created.manifest.id)
|
||||
|
||||
# The referrer should also be considered in use even without a tag,
|
||||
# otherwise GC would delete a valid manifest referrer.
|
||||
# These are kept alive with a "non-temporary" hidden tag.
|
||||
# In order to clean these up, they need to be manually deleted for now.
|
||||
assert model.gc._check_manifest_used(manifest2_created.manifest.id)
|
||||
|
@ -12,6 +12,9 @@ from data.database import Tag as TagTable
|
||||
from data.database import get_epoch_timestamp_ms
|
||||
from data.registry_model.datatype import datatype, optionalinput, requiresinput
|
||||
from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
|
||||
from image.oci import OCI_IMAGE_INDEX_CONTENT_TYPE
|
||||
from image.oci.index import INDEX_DIGEST_KEY, INDEX_MANIFESTS_KEY, OCIIndex
|
||||
from image.oci.manifest import OCI_MANIFEST_ARTIFACT_TYPE_KEY
|
||||
from image.shared import ManifestException
|
||||
from image.shared.schemas import is_manifest_list_type, parse_manifest_from_bytes
|
||||
from util.bytes import Bytes
|
||||
@ -334,7 +337,9 @@ class Manifest(
|
||||
"""
|
||||
assert self.internal_manifest_bytes
|
||||
return parse_manifest_from_bytes(
|
||||
self.internal_manifest_bytes, self.media_type, validate=validate
|
||||
self.internal_manifest_bytes,
|
||||
self.media_type,
|
||||
validate=validate,
|
||||
)
|
||||
|
||||
@property
|
||||
@ -355,6 +360,17 @@ class Manifest(
|
||||
return None
|
||||
return result
|
||||
|
||||
@property
|
||||
def artifact_type(self):
|
||||
"""
|
||||
Returns the manifest's artifact type, if any.
|
||||
"""
|
||||
parsed = self.get_parsed_manifest()
|
||||
if parsed is None:
|
||||
return None
|
||||
|
||||
return parsed.artifact_type
|
||||
|
||||
@property
|
||||
def has_been_scanned(self):
|
||||
"""
|
||||
@ -446,6 +462,53 @@ class Manifest(
|
||||
)
|
||||
|
||||
|
||||
class ManifestIndex(Manifest):
    """
    ManifestIndex represents a Manifest whose content type is that of a manifest list.
    """
|
||||
|
||||
@classmethod
|
||||
def for_manifest_index(cls, manifest, legacy_id_handler, legacy_image_row=None):
|
||||
if manifest is None:
|
||||
return None
|
||||
|
||||
# NOTE: `manifest_bytes` will be None if not selected by certain join queries.
|
||||
manifest_bytes = (
|
||||
Bytes.for_string_or_unicode(manifest.manifest_bytes)
|
||||
if manifest.manifest_bytes is not None
|
||||
else None
|
||||
)
|
||||
return ManifestIndex(
|
||||
db_id=manifest.id,
|
||||
digest=manifest.digest,
|
||||
internal_manifest_bytes=manifest_bytes,
|
||||
media_type=ManifestTable.media_type.get_name(manifest.media_type_id),
|
||||
_layers_compressed_size=manifest.layers_compressed_size,
|
||||
config_media_type=manifest.config_media_type,
|
||||
inputs=dict(
|
||||
legacy_id_handler=legacy_id_handler,
|
||||
legacy_image_row=legacy_image_row,
|
||||
repository=RepositoryReference.for_id(manifest.repository_id),
|
||||
),
|
||||
)
|
||||
|
||||
def manifests(self, retriever, legacy_id_handler, legacy_image_row=None):
|
||||
assert self.is_manifest_list and self.media_type == OCI_IMAGE_INDEX_CONTENT_TYPE
|
||||
|
||||
parsed = self.get_parsed_manifest()
|
||||
assert isinstance(parsed, OCIIndex)
|
||||
|
||||
manifests = parsed.manifest_dict[INDEX_MANIFESTS_KEY]
|
||||
|
||||
ret = [
|
||||
Manifest.for_manifest(m, legacy_id_handler)
|
||||
for m in [
|
||||
retriever.get_manifest_with_digest(m_obj[INDEX_DIGEST_KEY]) for m_obj in manifests
|
||||
]
|
||||
]
|
||||
return ret
|
||||
|
||||
|
||||
class LegacyImage(
|
||||
namedtuple(
|
||||
"LegacyImage",
|
||||
|
@ -26,6 +26,7 @@ from data.registry_model.datatypes import (
|
||||
LegacyImage,
|
||||
LikelyVulnerableTag,
|
||||
Manifest,
|
||||
ManifestIndex,
|
||||
ManifestLayer,
|
||||
RepositoryReference,
|
||||
SecurityScanStatus,
|
||||
@ -35,11 +36,9 @@ from data.registry_model.datatypes import (
|
||||
from data.registry_model.interface import RegistryDataInterface
|
||||
from data.registry_model.label_handlers import LABEL_EXPIRY_KEY, apply_label_to_manifest
|
||||
from data.registry_model.shared import SyntheticIDHandler
|
||||
from image.docker.schema1 import (
|
||||
DOCKER_SCHEMA1_CONTENT_TYPES,
|
||||
DockerSchema1ManifestBuilder,
|
||||
)
|
||||
from image.docker.schema1 import DOCKER_SCHEMA1_CONTENT_TYPES
|
||||
from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES
|
||||
from image.oci import OCI_IMAGE_INDEX_CONTENT_TYPE
|
||||
from image.shared import ManifestException
|
||||
from util.bytes import Bytes
|
||||
from util.timedeltastring import convert_to_timedelta
|
||||
@ -172,6 +171,70 @@ class OCIModel(RegistryDataInterface):
|
||||
|
||||
return Manifest.for_manifest(manifest, self._legacy_image_id_handler)
|
||||
|
||||
def lookup_cached_referrers_for_manifest(
|
||||
self, model_cache, repository_ref, manifest, artifact_type=None
|
||||
):
|
||||
def load_referrers():
|
||||
return self.lookup_referrers_for_manifest(repository_ref, manifest, artifact_type)
|
||||
|
||||
referrers_cache_key = cache_key.for_manifest_referrers(
|
||||
repository_ref, manifest.digest, model_cache.cache_config
|
||||
)
|
||||
result = model_cache.retrieve(referrers_cache_key, load_referrers)
|
||||
try:
|
||||
return [Manifest.from_dict(referrer_dict) for referrer_dict in result]
|
||||
except FromDictionaryException:
|
||||
return self.lookup_referrers_for_manifest(repository_ref, manifest, artifact_type)
|
||||
|
||||
def lookup_referrers_for_manifest(self, repository_ref, manifest, artifact_type=None):
|
||||
"""
|
||||
Looks up the referrers of a manifest under a repository.
|
||||
Returns a manifest index.
|
||||
"""
|
||||
|
||||
referrers = oci.manifest.lookup_manifest_referrers(
|
||||
manifest.repository._db_id, manifest.digest, artifact_type
|
||||
)
|
||||
|
||||
referrers_manifests = [
|
||||
Manifest.for_manifest(referrer, self._legacy_image_id_handler) for referrer in referrers
|
||||
]
|
||||
referrers_digests = {r.digest for r in referrers}
|
||||
|
||||
# Check for existing image indices with referrers tag schema
|
||||
referrers_tag_schema_index = self.lookup_referrers_for_tag_schema(manifest)
|
||||
if referrers_tag_schema_index:
|
||||
for m in referrers_tag_schema_index:
|
||||
if (
|
||||
m.digest in referrers_digests
|
||||
or artifact_type is not None
|
||||
and artifact_type != m.artifact_type
|
||||
):
|
||||
continue
|
||||
referrers_manifests.append(m)
|
||||
|
||||
return referrers_manifests
|
||||
|
||||
def lookup_referrers_for_tag_schema(self, manifest):
|
||||
retriever = RepositoryContentRetriever(manifest.repository._db_id, None)
|
||||
|
||||
referrers_tag_schema_tag = oci.tag.get_tag(
|
||||
manifest.repository._db_id,
|
||||
"-".join(manifest.digest.split(":", 1)),
|
||||
)
|
||||
|
||||
if (
|
||||
referrers_tag_schema_tag
|
||||
and referrers_tag_schema_tag.manifest.media_type.name == OCI_IMAGE_INDEX_CONTENT_TYPE
|
||||
):
|
||||
tag_schema_index = ManifestIndex.for_manifest_index(
|
||||
referrers_tag_schema_tag.manifest, self._legacy_image_id_handler
|
||||
)
|
||||
if tag_schema_index:
|
||||
return tag_schema_index.manifests(retriever, self._legacy_image_id_handler)
|
||||
|
||||
return []
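A note on the fallback above: clients that pushed referrers before the registry exposed the referrers API store them under the OCI referrers tag schema, where the subject digest becomes a tag name by replacing the first ':' with '-'. A minimal sketch of that derivation, mirroring the "-".join(...) call above, with a made-up digest:

subject_digest = "sha256:" + "b" * 64  # hypothetical digest
fallback_tag = "-".join(subject_digest.split(":", 1))
assert fallback_tag == "sha256-" + "b" * 64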
|
||||
|
||||
def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
|
||||
"""
|
||||
Creates a label on the manifest with the given key and value.
|
||||
|
@@ -251,4 +251,4 @@ def v2_support_enabled():
    return response


from endpoints.v2 import blob, catalog, manifest, tag, v2auth
from endpoints.v2 import blob, catalog, manifest, referrers, tag, v2auth

@@ -359,7 +359,6 @@ def _parse_manifest(content_type, request_data):
        return parse_manifest_from_bytes(
            Bytes.for_string_or_unicode(request_data),
            content_type,
            ignore_unknown_mediatypes=app.config.get("IGNORE_UNKNOWN_MEDIATYPES"),
        )
    except ManifestException as me:
        logger.exception("failed to parse manifest when writing by tagname")
endpoints/v2/referrers.py (new file): 72 lines
@@ -0,0 +1,72 @@
from flask import Response, request

import features
from app import model_cache
from auth.registry_jwt_auth import process_registry_jwt_auth
from data.model import ManifestDoesNotExist, RepositoryDoesNotExist
from data.registry_model import registry_model
from digest import digest_tools
from endpoints.decorators import (
    anon_protect,
    check_readonly,
    disallow_for_account_recovery_mode,
    inject_registry_model,
    parse_repository_name,
    route_show_if,
)
from endpoints.v2 import require_repo_read, v2_bp
from endpoints.v2.errors import ManifestUnknown, NameUnknown
from image.oci.index import OCIIndexBuilder
from image.shared.schemas import parse_manifest_from_bytes
from util.bytes import Bytes
from util.http import abort

BASE_REFERRERS_ROUTE = '/<repopath:repository>/referrers/<regex("{0}"):manifest_ref>'
MANIFEST_REFERRERS_ROUTE = BASE_REFERRERS_ROUTE.format(digest_tools.DIGEST_PATTERN)


@v2_bp.route(MANIFEST_REFERRERS_ROUTE, methods=["GET"])
@route_show_if(features.REFERRERS_API)
@disallow_for_account_recovery_mode
@parse_repository_name()
@process_registry_jwt_auth(scopes=["pull"])
@require_repo_read(allow_for_superuser=True)
@anon_protect
@inject_registry_model()
def list_manifest_referrers(namespace_name, repo_name, manifest_ref, registry_model):
    try:
        repository_ref = registry_model.lookup_repository(
            namespace_name, repo_name, raise_on_error=True, manifest_ref=manifest_ref
        )
    except RepositoryDoesNotExist as e:
        raise NameUnknown("repository not found")

    try:
        manifest = registry_model.lookup_manifest_by_digest(
            repository_ref, manifest_ref, raise_on_error=True
        )
    except ManifestDoesNotExist as e:
        raise ManifestUnknown(str(e))

    artifact_type = request.args.get("artifactType", None)

    referrers = registry_model.lookup_cached_referrers_for_manifest(
        model_cache, repository_ref, manifest, artifact_type
    )
    index = _build_referrers_index_for_manifests(referrers)
    headers = {"Content-Type": index.media_type}
    if artifact_type is not None:
        headers["OCI-Filters-Applied"] = "artifactType"

    return Response(index.bytes.as_unicode(), status=200, headers=headers)


def _build_referrers_index_for_manifests(referrers):
    index_builder = OCIIndexBuilder()

    for referrer in referrers:
        parsed_referrer = referrer.get_parsed_manifest()
        index_builder.add_manifest(parsed_referrer)

    index = index_builder.build()
    return index
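To show the filter path through this endpoint from the client side, a hedged sketch; the host, repository, digest, token, and artifact type are all hypothetical, and only the artifactType query parameter and OCI-Filters-Applied header handled above are assumed.

import requests

QUAY = "https://quay.example.com"  # hypothetical host
REPO = "devtable/newrepo"          # hypothetical repository
DIGEST = "sha256:" + "b" * 64      # hypothetical subject digest
TOKEN = "..."                      # registry JWT with pull scope

resp = requests.get(
    f"{QUAY}/v2/{REPO}/referrers/{DIGEST}",
    params={"artifactType": "application/vnd.example.signature.v1+json"},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()

# When the filter was applied, the endpoint advertises that back to the client.
assert resp.headers.get("OCI-Filters-Applied") == "artifactType"
for descriptor in resp.json().get("manifests", []):
    print(descriptor["digest"], descriptor.get("artifactType"))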
@@ -211,3 +211,7 @@ ENTITLEMENT_RECONCILIATION_MARKETPLACE_ENDPOINT: FeatureNameValue
RH_MARKETPLACE: FeatureNameValue

AUTO_PRUNE: FeatureNameValue

# Referrers API
MANIFEST_SUBJECT_BACKFILL: FeatureNameValue
REFERRERS_API: FeatureNameValue
@ -182,7 +182,7 @@ class DockerSchema1Manifest(ManifestInterface):
|
||||
],
|
||||
}
|
||||
|
||||
def __init__(self, manifest_bytes, validate=True, ignore_unknown_mediatypes=False):
|
||||
def __init__(self, manifest_bytes, validate=True):
|
||||
assert isinstance(manifest_bytes, Bytes)
|
||||
|
||||
self._layers = None
|
||||
|
@ -145,7 +145,7 @@ class DockerSchema2Manifest(ManifestInterface):
|
||||
],
|
||||
}
|
||||
|
||||
def __init__(self, manifest_bytes, validate=False, ignore_unknown_mediatypes=False):
|
||||
def __init__(self, manifest_bytes, validate=False):
|
||||
assert isinstance(manifest_bytes, Bytes)
|
||||
|
||||
self._payload = manifest_bytes
|
||||
|
@ -10,7 +10,6 @@ def get_descriptor_schema(
|
||||
allowed_media_types,
|
||||
additional_properties=None,
|
||||
additional_required=None,
|
||||
ignore_unknown_mediatypes=False,
|
||||
):
|
||||
properties = {
|
||||
DESCRIPTOR_MEDIATYPE_KEY: {
|
||||
@ -47,9 +46,6 @@ def get_descriptor_schema(
|
||||
},
|
||||
}
|
||||
|
||||
if not ignore_unknown_mediatypes:
|
||||
properties[DESCRIPTOR_MEDIATYPE_KEY]["enum"] = allowed_media_types
|
||||
|
||||
if additional_properties:
|
||||
properties.update(additional_properties)
|
||||
|
||||
|
@ -114,7 +114,6 @@ class OCIIndex(ManifestListInterface):
|
||||
"description": "The manifests field contains a list of manifests for specific platforms",
|
||||
"items": get_descriptor_schema(
|
||||
allowed_media_types=ALLOWED_MEDIA_TYPES,
|
||||
ignore_unknown_mediatypes=self._ignore_unknown_mediatypes,
|
||||
additional_properties={
|
||||
INDEX_PLATFORM_KEY: {
|
||||
"type": "object",
|
||||
@ -164,10 +163,7 @@ class OCIIndex(ManifestListInterface):
|
||||
additional_required=[],
|
||||
),
|
||||
},
|
||||
INDEX_SUBJECT_KEY: get_descriptor_schema(
|
||||
[],
|
||||
ignore_unknown_mediatypes=True,
|
||||
),
|
||||
INDEX_SUBJECT_KEY: get_descriptor_schema([]),
|
||||
INDEX_ANNOTATIONS_KEY: {
|
||||
"type": "object",
|
||||
"description": "The annotations, if any, on this index",
|
||||
@ -181,12 +177,11 @@ class OCIIndex(ManifestListInterface):
|
||||
}
|
||||
return METASCHEMA
|
||||
|
||||
def __init__(self, manifest_bytes, ignore_unknown_mediatypes=False):
|
||||
def __init__(self, manifest_bytes):
|
||||
assert isinstance(manifest_bytes, Bytes)
|
||||
|
||||
self._layers = None
|
||||
self._manifest_bytes = manifest_bytes
|
||||
self._ignore_unknown_mediatypes = ignore_unknown_mediatypes
|
||||
|
||||
try:
|
||||
self._parsed = json.loads(manifest_bytes.as_unicode())
|
||||
@ -302,7 +297,6 @@ class OCIIndex(ManifestListInterface):
|
||||
INDEX_DIGEST_KEY,
|
||||
INDEX_SIZE_KEY,
|
||||
INDEX_MEDIATYPE_KEY,
|
||||
ignore_unknown_mediatypes=self._ignore_unknown_mediatypes,
|
||||
)
|
||||
for m in manifests
|
||||
]
|
||||
@ -415,7 +409,7 @@ class OCIIndexBuilder(object):
|
||||
self.manifests = []
|
||||
self.annotations = {}
|
||||
|
||||
def add_manifest(self, manifest, architecture, os):
|
||||
def add_manifest(self, manifest, architecture=None, os=None):
|
||||
"""
|
||||
Adds a manifest to the list.
|
||||
"""
|
||||
@ -438,18 +432,25 @@ class OCIIndexBuilder(object):
|
||||
"""
|
||||
Adds a manifest to the list.
|
||||
"""
|
||||
platform_dict = {}
|
||||
if architecture:
|
||||
platform_dict[INDEX_ARCHITECTURE_KEY] = architecture
|
||||
|
||||
if os:
|
||||
platform_dict[INDEX_OS_KEY] = os
|
||||
|
||||
self.manifests.append(
|
||||
(
|
||||
manifest_digest,
|
||||
manifest_size,
|
||||
media_type,
|
||||
{
|
||||
INDEX_ARCHITECTURE_KEY: architecture,
|
||||
INDEX_OS_KEY: os,
|
||||
},
|
||||
platform_dict,
|
||||
)
|
||||
)
|
||||
|
||||
def set_subject(self, digest, size, mediatype):
|
||||
self.subject = OCIManifestDescriptor(digest=digest, size=size, mediatype=mediatype)
|
||||
|
||||
def build(self):
|
||||
"""
|
||||
Builds and returns the DockerSchema2ManifestList.
|
||||
@ -459,17 +460,20 @@ class OCIIndexBuilder(object):
|
||||
manifest_list_dict = {
|
||||
INDEX_VERSION_KEY: 2,
|
||||
INDEX_MEDIATYPE_KEY: OCI_IMAGE_INDEX_CONTENT_TYPE,
|
||||
INDEX_MANIFESTS_KEY: [
|
||||
{
|
||||
INDEX_MEDIATYPE_KEY: manifest[2],
|
||||
INDEX_DIGEST_KEY: manifest[0],
|
||||
INDEX_SIZE_KEY: manifest[1],
|
||||
INDEX_PLATFORM_KEY: manifest[3],
|
||||
}
|
||||
for manifest in self.manifests
|
||||
],
|
||||
INDEX_MANIFESTS_KEY: [],
|
||||
}
|
||||
|
||||
for manifest in self.manifests:
|
||||
manifest_dict = {
|
||||
INDEX_MEDIATYPE_KEY: manifest[2],
|
||||
INDEX_DIGEST_KEY: manifest[0],
|
||||
INDEX_SIZE_KEY: manifest[1],
|
||||
}
|
||||
if manifest[3]:
|
||||
manifest_dict[INDEX_PLATFORM_KEY] = manifest[3]
|
||||
|
||||
manifest_list_dict[INDEX_MANIFESTS_KEY].append(manifest_dict)
|
||||
|
||||
if self.annotations:
|
||||
manifest_list_dict[INDEX_ANNOTATIONS_KEY] = self.annotations
|
||||
|
||||
|
@ -125,22 +125,15 @@ class OCIManifest(ManifestInterface):
|
||||
"type": "string",
|
||||
"description": "Type of an artifact when the manifest is used for an artifact.",
|
||||
},
|
||||
OCI_MANIFEST_CONFIG_KEY: get_descriptor_schema(
|
||||
ALLOWED_ARTIFACT_TYPES,
|
||||
ignore_unknown_mediatypes=self._ignore_unknown_mediatypes,
|
||||
),
|
||||
OCI_MANIFEST_CONFIG_KEY: get_descriptor_schema(ALLOWED_ARTIFACT_TYPES),
|
||||
OCI_MANIFEST_LAYERS_KEY: {
|
||||
"type": "array",
|
||||
"description": "The array MUST have the base layer at index 0. Subsequent layers MUST then follow in stack order (i.e. from layers[0] to layers[len(layers)-1])",
|
||||
"items": get_descriptor_schema(
|
||||
OCI_IMAGE_LAYER_CONTENT_TYPES + ADDITIONAL_LAYER_CONTENT_TYPES,
|
||||
ignore_unknown_mediatypes=self._ignore_unknown_mediatypes,
|
||||
OCI_IMAGE_LAYER_CONTENT_TYPES + ADDITIONAL_LAYER_CONTENT_TYPES
|
||||
),
|
||||
},
|
||||
OCI_MANIFEST_SUBJECT_KEY: get_descriptor_schema(
|
||||
[],
|
||||
ignore_unknown_mediatypes=True,
|
||||
),
|
||||
OCI_MANIFEST_SUBJECT_KEY: get_descriptor_schema([]),
|
||||
OCI_MANIFEST_ANNOTATIONS_KEY: {
|
||||
"type": "object",
|
||||
"description": "The annotations, if any, on this manifest",
|
||||
@ -156,14 +149,13 @@ class OCIManifest(ManifestInterface):
|
||||
|
||||
return METASCHEMA
|
||||
|
||||
def __init__(self, manifest_bytes, validate=False, ignore_unknown_mediatypes=False):
|
||||
def __init__(self, manifest_bytes, validate=False):
|
||||
assert isinstance(manifest_bytes, Bytes)
|
||||
|
||||
self._payload = manifest_bytes
|
||||
|
||||
self._filesystem_layers = None
|
||||
self._cached_built_config = None
|
||||
self._ignore_unknown_mediatypes = ignore_unknown_mediatypes
|
||||
|
||||
try:
|
||||
self._parsed = json.loads(self._payload.as_unicode())
|
||||
@ -538,7 +530,9 @@ class OCIManifestBuilder(object):
|
||||
|
||||
def __init__(self):
|
||||
self.config = None
|
||||
self.subject = None
|
||||
self.filesystem_layers = []
|
||||
self.annotations = {}
|
||||
|
||||
def clone(self):
|
||||
cloned = OCIManifestBuilder()
|
||||
@ -558,6 +552,15 @@ class OCIManifestBuilder(object):
|
||||
"""
|
||||
self.config = OCIManifestConfig(size=config_size, digest=config_digest)
|
||||
|
||||
def set_subject(self, digest, size, mediatype):
|
||||
self.subject = OCIManifestDescriptor(digest=digest, size=size, mediatype=mediatype)
|
||||
|
||||
def add_annotation(self, key, value):
|
||||
"""
|
||||
Adds an annotation to the index
|
||||
"""
|
||||
self.annotations[key] = value
|
||||
|
||||
def add_layer(self, digest, size, urls=None):
|
||||
"""
|
||||
Adds a filesystem layer to the manifest.
|
||||
@ -607,5 +610,15 @@ class OCIManifestBuilder(object):
|
||||
OCI_MANIFEST_LAYERS_KEY: [_build_layer(layer) for layer in self.filesystem_layers],
|
||||
}
|
||||
|
||||
if self.annotations:
|
||||
manifest_dict[OCI_MANIFEST_ANNOTATIONS_KEY] = self.annotations
|
||||
|
||||
if self.subject:
|
||||
manifest_dict[OCI_MANIFEST_SUBJECT_KEY] = {
|
||||
OCI_MANIFEST_MEDIATYPE_KEY: self.subject.mediatype,
|
||||
OCI_MANIFEST_SIZE_KEY: self.subject.size,
|
||||
OCI_MANIFEST_DIGEST_KEY: self.subject.digest,
|
||||
}
|
||||
|
||||
json_str = json.dumps(manifest_dict, ensure_ascii=ensure_ascii, indent=3)
|
||||
return OCIManifest(Bytes.for_string_or_unicode(json_str))
|
||||
|
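Putting the builder additions together, a hedged sketch of producing an artifact manifest that refers to another manifest; the digests and sizes are made up, and only the builder methods shown in this diff (set_config_digest, add_layer, set_subject, build) are assumed.

from image.oci.manifest import OCIManifestBuilder

builder = OCIManifestBuilder()
builder.set_config_digest("sha256:" + "c" * 64, 145)  # hypothetical config blob
builder.add_layer("sha256:" + "d" * 64, 2807)         # hypothetical layer blob
builder.set_subject(
    "sha256:" + "b" * 64,  # the manifest being referred to (hypothetical)
    525,
    "application/vnd.oci.image.manifest.v1+json",
)
artifact_manifest = builder.build()
# The serialized manifest now carries a "subject" descriptor, which create_manifest()
# earlier in this diff stores in the new Manifest.subject column for referrers lookups.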
@ -173,68 +173,6 @@ def test_get_schema1_manifest():
|
||||
assert via_convert.digest == schema1.digest
|
||||
|
||||
|
||||
@pytest.mark.parametrize("ignore_unknown_mediatypes", [True, False])
|
||||
def test_validate_manifest_invalid_config_type(ignore_unknown_mediatypes):
|
||||
manifest_bytes = """{
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": "application/some.other.thing",
|
||||
"digest": "sha256:6bd578ec7d1e7381f63184dfe5fbe7f2f15805ecc4bfd485e286b76b1e796524",
|
||||
"size": 145
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/tar+gzip",
|
||||
"digest": "sha256:ce879e86a8f71031c0f1ab149a26b000b3b5b8810d8d047f240ef69a6b2516ee",
|
||||
"size": 2807
|
||||
}
|
||||
]
|
||||
}"""
|
||||
|
||||
if ignore_unknown_mediatypes:
|
||||
OCIManifest(
|
||||
Bytes.for_string_or_unicode(manifest_bytes),
|
||||
ignore_unknown_mediatypes=ignore_unknown_mediatypes,
|
||||
)
|
||||
else:
|
||||
with pytest.raises(MalformedOCIManifest):
|
||||
OCIManifest(Bytes.for_string_or_unicode(manifest_bytes))
|
||||
|
||||
|
||||
@pytest.mark.parametrize("ignore_unknown_mediatypes", [True, False])
|
||||
def test_validate_manifest_with_subject_artifact_type(ignore_unknown_mediatypes):
|
||||
manifest_bytes = """{
|
||||
"schemaVersion": 2,
|
||||
"artifactType": "application/some.thing",
|
||||
"config": {
|
||||
"mediaType": "application/some.other.thing",
|
||||
"digest": "sha256:6bd578ec7d1e7381f63184dfe5fbe7f2f15805ecc4bfd485e286b76b1e796524",
|
||||
"size": 145
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/tar+gzip",
|
||||
"digest": "sha256:ce879e86a8f71031c0f1ab149a26b000b3b5b8810d8d047f240ef69a6b2516ee",
|
||||
"size": 2807
|
||||
}
|
||||
],
|
||||
"subject": {
|
||||
"mediaType": "application/vnd.oci.image.config.v1+json",
|
||||
"size": 7023,
|
||||
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
|
||||
}
|
||||
}"""
|
||||
|
||||
if ignore_unknown_mediatypes:
|
||||
OCIManifest(
|
||||
Bytes.for_string_or_unicode(manifest_bytes),
|
||||
ignore_unknown_mediatypes=ignore_unknown_mediatypes,
|
||||
)
|
||||
else:
|
||||
with pytest.raises(MalformedOCIManifest):
|
||||
OCIManifest(Bytes.for_string_or_unicode(manifest_bytes))
|
||||
|
||||
|
||||
def test_get_schema1_manifest_missing_history():
|
||||
retriever = ContentRetrieverForTesting.for_config(
|
||||
{
|
||||
|
@ -25,7 +25,6 @@ def parse_manifest_from_bytes(
|
||||
media_type,
|
||||
validate=True,
|
||||
sparse_manifest_support=False,
|
||||
ignore_unknown_mediatypes=False,
|
||||
):
|
||||
"""
|
||||
Parses and returns a manifest from the given bytes, for the given media type.
|
||||
@ -44,10 +43,10 @@ def parse_manifest_from_bytes(
|
||||
return DockerSchema2ManifestList(manifest_bytes)
|
||||
|
||||
if media_type == OCI_IMAGE_MANIFEST_CONTENT_TYPE:
|
||||
return OCIManifest(manifest_bytes, ignore_unknown_mediatypes=ignore_unknown_mediatypes)
|
||||
return OCIManifest(manifest_bytes)
|
||||
|
||||
if media_type == OCI_IMAGE_INDEX_CONTENT_TYPE:
|
||||
return OCIIndex(manifest_bytes, ignore_unknown_mediatypes=ignore_unknown_mediatypes)
|
||||
return OCIIndex(manifest_bytes)
|
||||
|
||||
if media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
|
||||
return DockerSchema1Manifest(manifest_bytes, validate=validate)
|
||||
|
@ -65,7 +65,6 @@ class LazyManifestLoader(object):
|
||||
digest_key,
|
||||
size_key,
|
||||
media_type_key,
|
||||
ignore_unknown_mediatypes=False,
|
||||
):
|
||||
self._manifest_data = manifest_data
|
||||
self._content_retriever = content_retriever
|
||||
@ -74,7 +73,6 @@ class LazyManifestLoader(object):
|
||||
self._size_key = size_key
|
||||
self._media_type_key = media_type_key
|
||||
self._supported_types = supported_types
|
||||
self._ignore_unknown_mediatypes = ignore_unknown_mediatypes
|
||||
|
||||
@property
|
||||
def manifest_obj(self):
|
||||
@ -108,5 +106,4 @@ class LazyManifestLoader(object):
|
||||
return self._supported_types[content_type](
|
||||
Bytes.for_string_or_unicode(manifest_bytes),
|
||||
validate=False,
|
||||
ignore_unknown_mediatypes=self._ignore_unknown_mediatypes,
|
||||
)
|
||||
|
@ -1330,9 +1330,9 @@ CONFIG_SCHEMA = {
|
||||
],
|
||||
},
|
||||
},
|
||||
"IGNORE_UNKNOWN_MEDIATYPES": {
|
||||
"FEATURE_REFERRERS_API": {
|
||||
"type": "boolean",
|
||||
"description": "If set to true, ignore ALLOWED_OCI_ARTIFACT_TYPES",
|
||||
"description": "Enables OCI 1.1's referrers API",
|
||||
"x-example": False,
|
||||
},
|
||||
# Clean partial uploads during S3 multipart upload
|
||||
@ -1402,6 +1402,11 @@ CONFIG_SCHEMA = {
|
||||
"description": "Use Red Hat Export Compliance Service during Red Hat SSO (only used in Quay.io)",
|
||||
"x-example": False,
|
||||
},
|
||||
"FEATURE_MANIFEST_SUBJECT_BACKFILL": {
|
||||
"type": "boolean",
|
||||
"description": "Enable the backfill worker to index existing manifest subjects",
|
||||
"x-example": True,
|
||||
},
|
||||
"UI_V2_FEEDBACK_FORM": {
|
||||
"type": "string",
|
||||
"description": "User feedback form for UI-V2",
|
||||
|
@ -365,6 +365,7 @@ DROP INDEX IF EXISTS public.manifestblob_repository_id;
|
||||
DROP INDEX IF EXISTS public.manifestblob_manifest_id_blob_id;
|
||||
DROP INDEX IF EXISTS public.manifestblob_manifest_id;
|
||||
DROP INDEX IF EXISTS public.manifestblob_blob_id;
|
||||
DROP INDEX IF EXISTS public.manifest_repository_id_subject;
|
||||
DROP INDEX IF EXISTS public.manifest_repository_id_media_type_id;
|
||||
DROP INDEX IF EXISTS public.manifest_repository_id_digest;
|
||||
DROP INDEX IF EXISTS public.manifest_repository_id_config_media_type;
|
||||
@ -2316,7 +2317,9 @@ CREATE TABLE public.manifest (
|
||||
media_type_id integer NOT NULL,
|
||||
manifest_bytes text NOT NULL,
|
||||
config_media_type character varying(255),
|
||||
layers_compressed_size bigint
|
||||
layers_compressed_size bigint,
|
||||
subject character varying(255),
|
||||
subject_backfilled boolean
|
||||
);
|
||||
|
||||
|
||||
@ -5623,7 +5626,7 @@ COPY public.accesstokenkind (id, name) FROM stdin;
|
||||
--
|
||||
|
||||
COPY public.alembic_version (version_num) FROM stdin;
|
||||
b4da5b09c8df
|
||||
946f0e90f9c9
|
||||
\.
|
||||
|
||||
|
||||
@ -6298,6 +6301,8 @@ COPY public.logentrykind (id, name) FROM stdin;
|
||||
108 create_repository_autoprune_policy
|
||||
109 update_repository_autoprune_policy
|
||||
110 delete_repository_autoprune_policy
|
||||
111 enable_team_sync
|
||||
112 disable_team_sync
|
||||
\.
|
||||
|
||||
|
||||
@ -6313,6 +6318,7 @@ COPY public.loginservice (id, name) FROM stdin;
|
||||
5 keystone
|
||||
6 dex
|
||||
7 jwtauthn
|
||||
8 oidc
|
||||
\.
|
||||
|
||||
|
||||
@ -6320,19 +6326,19 @@ COPY public.loginservice (id, name) FROM stdin;
|
||||
-- Data for Name: manifest; Type: TABLE DATA; Schema: public; Owner: quay
|
||||
--
|
||||
|
||||
COPY public.manifest (id, repository_id, digest, media_type_id, manifest_bytes, config_media_type, layers_compressed_size) FROM stdin;
|
||||
1 1 sha256:f54a58bc1aac5ea1a25d796ae155dc228b3f0e11d046ae276b39c4bf2f13d8c4 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1469,\n "digest": "sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2479,\n "digest": "sha256:2db29710123e3e53a794f2694094b9b4338aa9ee5c40b930cb8063a1be392c54"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2479
|
||||
2 1 sha256:7b8b7289d0536a08eabdf71c20246e23f7116641db7e1d278592236ea4dcb30c 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1482,\n "digest": "sha256:c0218de6585df06a66d67b25237bdda42137c727c367373a32639710c7a9fa94"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3684,\n "digest": "sha256:b921b04d0447ddcd82a9220d887cd146f6ef39e20a938ee5e19a90fc3323e030"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3684
|
||||
3 1 sha256:f130bd2d67e6e9280ac6d0a6c83857bfaf70234e8ef4236876eccfbd30973b1c 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1482,\n "digest": "sha256:1ec996c686eb87d8f091080ec29dd1862b39b5822ddfd8f9a1e2c9288fad89fe"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2993,\n "digest": "sha256:9b157615502ddff86482f7fe2fa7a074db74a62fce12b4e8507827ac8f08d0ce"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2993
|
||||
4 1 sha256:432f982638b3aefab73cc58ab28f5c16e96fdb504e8c134fc58dff4bae8bf338 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1485,\n "digest": "sha256:46331d942d6350436f64e614d75725f6de3bb5c63e266e236e04389820a234c4"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3208,\n "digest": "sha256:7050e35b49f5e348c4809f5eff915842962cb813f32062d3bbdd35c750dd7d01"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3208
|
||||
5 1 sha256:995efde2e81b21d1ea7066aa77a59298a62a9e9fbb4b77f36c189774ec9b1089 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1468,\n "digest": "sha256:36d89aa75357c8f99e359f8cabc0aae667d47d8f25ed51cbe66e148e3a77e19c"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2736,\n "digest": "sha256:7f0d4fad461d1ac69488092b5914b5ec642133c0fb884539045de33fbcd2eadb"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2736
|
||||
6 1 sha256:eb11b1a194ff8e236a01eff392c4e1296a53b0fb4780d8b0382f7996a15d5392 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1473,\n "digest": "sha256:5004e9d559e7a75f42249ddeca4d5764fa4db05592a7a9a641e4ac37cc619ba1"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 4092,\n "digest": "sha256:bbc6052697e5fdcd1b311e0b3f65189ffbe354cf8ae97e7a55d588e855097174"\n }\n ]\n} application/vnd.docker.container.image.v1+json 4092
|
||||
7 1 sha256:b836bb24a270b9cc935962d8228517fde0f16990e88893d935efcb1b14c0017a 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1471,\n "digest": "sha256:61fff98d5ca765a4351964c8f4b5fb1a0d2c48458026f5452a389eb52d146fe8"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3929,\n "digest": "sha256:33450689bfb495ed64ead935c9933f1d6b3e42fe369b8de9680cf4ff9d89ce5c"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3929
|
||||
8 1 sha256:98c9722322be649df94780d3fbe594fce7996234b259f27eac9428b84050c849 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1471,\n "digest": "sha256:b3593dab05491cdf5ee88c29bee36603c0df0bc34798eed5067f6e1335a9d391"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3000,\n "digest": "sha256:3caa6dc69d0b73f21d29bfa75356395f2695a7abad34f010656740e90ddce399"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3000
|
||||
9 1 sha256:c7b6944911848ce39b44ed660d95fb54d69bbd531de724c7ce6fc9f743c0b861 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1469,\n "digest": "sha256:df5477cea5582b0ae6a31de2d1c9bbacb506091f42a3b0fe77a209006f409fd8"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3276,\n "digest": "sha256:abc70fcc95b2f52b325d69cc5c259dd9babb40a9df152e88b286fada1d3248bd"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3276
|
||||
10 1 sha256:7693efac53eb85ff1afb03f7f2560015c57ac2175707f1f141f31161634c9dba 15 {"manifests":[{"digest":"sha256:f54a58bc1aac5ea1a25d796ae155dc228b3f0e11d046ae276b39c4bf2f13d8c4","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"amd64","os":"linux"},"size":525},{"digest":"sha256:7b8b7289d0536a08eabdf71c20246e23f7116641db7e1d278592236ea4dcb30c","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"arm","os":"linux","variant":"v5"},"size":525},{"digest":"sha256:f130bd2d67e6e9280ac6d0a6c83857bfaf70234e8ef4236876eccfbd30973b1c","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"arm","os":"linux","variant":"v7"},"size":525},{"digest":"sha256:432f982638b3aefab73cc58ab28f5c16e96fdb504e8c134fc58dff4bae8bf338","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"arm64","os":"linux","variant":"v8"},"size":525},{"digest":"sha256:995efde2e81b21d1ea7066aa77a59298a62a9e9fbb4b77f36c189774ec9b1089","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"386","os":"linux"},"size":525},{"digest":"sha256:eb11b1a194ff8e236a01eff392c4e1296a53b0fb4780d8b0382f7996a15d5392","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"mips64le","os":"linux"},"size":525},{"digest":"sha256:b836bb24a270b9cc935962d8228517fde0f16990e88893d935efcb1b14c0017a","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"ppc64le","os":"linux"},"size":525},{"digest":"sha256:98c9722322be649df94780d3fbe594fce7996234b259f27eac9428b84050c849","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"riscv64","os":"linux"},"size":525},{"digest":"sha256:c7b6944911848ce39b44ed660d95fb54d69bbd531de724c7ce6fc9f743c0b861","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"s390x","os":"linux"},"size":525}],"mediaType":"application\\/vnd.docker.distribution.manifest.list.v2+json","schemaVersion":2} \N 0
|
||||
11 155 sha256:f54a58bc1aac5ea1a25d796ae155dc228b3f0e11d046ae276b39c4bf2f13d8c4 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1469,\n "digest": "sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2479,\n "digest": "sha256:2db29710123e3e53a794f2694094b9b4338aa9ee5c40b930cb8063a1be392c54"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2479
|
||||
12 1 sha256:7e9b6e7ba2842c91cf49f3e214d04a7a496f8214356f41d81a6e6dcad11f11e3 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1470,\n "digest": "sha256:9c7a54a9a43cca047013b82af109fe963fde787f63f9e016fdc3384500c2823d"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2457,\n "digest": "sha256:719385e32844401d57ecfd3eacab360bf551a1491c05b85806ed8f1b08d792f6"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2457
|
||||
COPY public.manifest (id, repository_id, digest, media_type_id, manifest_bytes, config_media_type, layers_compressed_size, subject, subject_backfilled) FROM stdin;
|
||||
1 1 sha256:f54a58bc1aac5ea1a25d796ae155dc228b3f0e11d046ae276b39c4bf2f13d8c4 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1469,\n "digest": "sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2479,\n "digest": "sha256:2db29710123e3e53a794f2694094b9b4338aa9ee5c40b930cb8063a1be392c54"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2479 \N \N
|
||||
2 1 sha256:7b8b7289d0536a08eabdf71c20246e23f7116641db7e1d278592236ea4dcb30c 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1482,\n "digest": "sha256:c0218de6585df06a66d67b25237bdda42137c727c367373a32639710c7a9fa94"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3684,\n "digest": "sha256:b921b04d0447ddcd82a9220d887cd146f6ef39e20a938ee5e19a90fc3323e030"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3684 \N \N
|
||||
3 1 sha256:f130bd2d67e6e9280ac6d0a6c83857bfaf70234e8ef4236876eccfbd30973b1c 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1482,\n "digest": "sha256:1ec996c686eb87d8f091080ec29dd1862b39b5822ddfd8f9a1e2c9288fad89fe"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2993,\n "digest": "sha256:9b157615502ddff86482f7fe2fa7a074db74a62fce12b4e8507827ac8f08d0ce"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2993 \N \N
|
||||
4 1 sha256:432f982638b3aefab73cc58ab28f5c16e96fdb504e8c134fc58dff4bae8bf338 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1485,\n "digest": "sha256:46331d942d6350436f64e614d75725f6de3bb5c63e266e236e04389820a234c4"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3208,\n "digest": "sha256:7050e35b49f5e348c4809f5eff915842962cb813f32062d3bbdd35c750dd7d01"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3208 \N \N
|
||||
5 1 sha256:995efde2e81b21d1ea7066aa77a59298a62a9e9fbb4b77f36c189774ec9b1089 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1468,\n "digest": "sha256:36d89aa75357c8f99e359f8cabc0aae667d47d8f25ed51cbe66e148e3a77e19c"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2736,\n "digest": "sha256:7f0d4fad461d1ac69488092b5914b5ec642133c0fb884539045de33fbcd2eadb"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2736 \N \N
|
||||
6 1 sha256:eb11b1a194ff8e236a01eff392c4e1296a53b0fb4780d8b0382f7996a15d5392 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1473,\n "digest": "sha256:5004e9d559e7a75f42249ddeca4d5764fa4db05592a7a9a641e4ac37cc619ba1"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 4092,\n "digest": "sha256:bbc6052697e5fdcd1b311e0b3f65189ffbe354cf8ae97e7a55d588e855097174"\n }\n ]\n} application/vnd.docker.container.image.v1+json 4092 \N \N
7 1 sha256:b836bb24a270b9cc935962d8228517fde0f16990e88893d935efcb1b14c0017a 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1471,\n "digest": "sha256:61fff98d5ca765a4351964c8f4b5fb1a0d2c48458026f5452a389eb52d146fe8"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3929,\n "digest": "sha256:33450689bfb495ed64ead935c9933f1d6b3e42fe369b8de9680cf4ff9d89ce5c"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3929 \N \N
8 1 sha256:98c9722322be649df94780d3fbe594fce7996234b259f27eac9428b84050c849 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1471,\n "digest": "sha256:b3593dab05491cdf5ee88c29bee36603c0df0bc34798eed5067f6e1335a9d391"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3000,\n "digest": "sha256:3caa6dc69d0b73f21d29bfa75356395f2695a7abad34f010656740e90ddce399"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3000 \N \N
9 1 sha256:c7b6944911848ce39b44ed660d95fb54d69bbd531de724c7ce6fc9f743c0b861 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1469,\n "digest": "sha256:df5477cea5582b0ae6a31de2d1c9bbacb506091f42a3b0fe77a209006f409fd8"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 3276,\n "digest": "sha256:abc70fcc95b2f52b325d69cc5c259dd9babb40a9df152e88b286fada1d3248bd"\n }\n ]\n} application/vnd.docker.container.image.v1+json 3276 \N \N
10 1 sha256:7693efac53eb85ff1afb03f7f2560015c57ac2175707f1f141f31161634c9dba 15 {"manifests":[{"digest":"sha256:f54a58bc1aac5ea1a25d796ae155dc228b3f0e11d046ae276b39c4bf2f13d8c4","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"amd64","os":"linux"},"size":525},{"digest":"sha256:7b8b7289d0536a08eabdf71c20246e23f7116641db7e1d278592236ea4dcb30c","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"arm","os":"linux","variant":"v5"},"size":525},{"digest":"sha256:f130bd2d67e6e9280ac6d0a6c83857bfaf70234e8ef4236876eccfbd30973b1c","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"arm","os":"linux","variant":"v7"},"size":525},{"digest":"sha256:432f982638b3aefab73cc58ab28f5c16e96fdb504e8c134fc58dff4bae8bf338","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"arm64","os":"linux","variant":"v8"},"size":525},{"digest":"sha256:995efde2e81b21d1ea7066aa77a59298a62a9e9fbb4b77f36c189774ec9b1089","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"386","os":"linux"},"size":525},{"digest":"sha256:eb11b1a194ff8e236a01eff392c4e1296a53b0fb4780d8b0382f7996a15d5392","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"mips64le","os":"linux"},"size":525},{"digest":"sha256:b836bb24a270b9cc935962d8228517fde0f16990e88893d935efcb1b14c0017a","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"ppc64le","os":"linux"},"size":525},{"digest":"sha256:98c9722322be649df94780d3fbe594fce7996234b259f27eac9428b84050c849","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"riscv64","os":"linux"},"size":525},{"digest":"sha256:c7b6944911848ce39b44ed660d95fb54d69bbd531de724c7ce6fc9f743c0b861","mediaType":"application\\/vnd.docker.distribution.manifest.v2+json","platform":{"architecture":"s390x","os":"linux"},"size":525}],"mediaType":"application\\/vnd.docker.distribution.manifest.list.v2+json","schemaVersion":2} \N 0 \N \N
11 155 sha256:f54a58bc1aac5ea1a25d796ae155dc228b3f0e11d046ae276b39c4bf2f13d8c4 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1469,\n "digest": "sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2479,\n "digest": "sha256:2db29710123e3e53a794f2694094b9b4338aa9ee5c40b930cb8063a1be392c54"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2479 \N \N
12 1 sha256:7e9b6e7ba2842c91cf49f3e214d04a7a496f8214356f41d81a6e6dcad11f11e3 16 {\n "schemaVersion": 2,\n "mediaType": "application/vnd.docker.distribution.manifest.v2+json",\n "config": {\n "mediaType": "application/vnd.docker.container.image.v1+json",\n "size": 1470,\n "digest": "sha256:9c7a54a9a43cca047013b82af109fe963fde787f63f9e016fdc3384500c2823d"\n },\n "layers": [\n {\n "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",\n "size": 2457,\n "digest": "sha256:719385e32844401d57ecfd3eacab360bf551a1491c05b85806ed8f1b08d792f6"\n }\n ]\n} application/vnd.docker.container.image.v1+json 2457 \N \N
\.
@ -6574,8 +6580,8 @@ COPY public.quayservice (id, name) FROM stdin;
--
COPY public.queueitem (id, queue_name, body, available_after, available, processing_expires, retries_remaining, state_id) FROM stdin;
2 namespacegc/3/ {"marker_id": 2, "original_username": "clair"} 2024-02-20 14:01:29.891875 t 2024-02-20 16:56:29.836048 5 ff6a0b72-d8cc-4efe-ad83-70225ac325e1
1 namespacegc/2/ {"marker_id": 1, "original_username": "quay"} 2024-02-20 14:01:34.91615 t 2024-02-20 16:56:34.901017 5 6e996e00-e92b-45a9-962f-7e7f3cdb9e63
1 namespacegc/2/ {"marker_id": 1, "original_username": "quay"} 2024-06-07 15:55:41.812038 t 2024-06-07 18:50:41.733122 5 348c6b78-b9a0-4db1-8fa5-f67e2e42d485
2 namespacegc/3/ {"marker_id": 2, "original_username": "clair"} 2024-06-07 15:55:46.859942 t 2024-06-07 18:50:46.830201 5 81d862c7-29b5-426d-86cf-55241ed696a6
\.
@ -7573,6 +7579,7 @@ COPY public.role (id, name) FROM stdin;
--
COPY public.servicekey (id, name, kid, service, jwk, metadata, created_date, expiration_date, rotation_duration, approval_id) FROM stdin;
2 http://localhost:8080 XGN_51pQtaIC1IH_QSSzG0lRI1jRwtwhAmYByS2oKO0 quay {"n": "nruebufHwRxWO3hvP7MzHA8NjGmUDyHvcHesTiRzdT0pz6w7B7rjWv90xRn0fanPMRtk5GRb3cA8uWuK5dmIOXzMB_Og4Hi1QqxS0vwheca8XZk3amEniEWIOQfb1n2E8--vVmAG9E4y0ZGXJzD-hadctz5Xoi6r3yUrBjqqjWQBffHvNdnSh2KsouX4nC1P3cTVjFime44cHA5nBNKRpOV1Q7XTMwYl2hUp7qRJ-5IAzad9n7XtoWsxwmFaAFg_MbOqA_ncPAmfvbthDJSGWkaB04dtze9qfWdbKD9VVl6QJH0ost0qn72LLS3e3sHPWu-jANf7Q-InP5-tjSgDUQ", "e": "AQAB", "kty": "RSA", "kid": "XGN_51pQtaIC1IH_QSSzG0lRI1jRwtwhAmYByS2oKO0"} {"created_by": "CLI tool"} 2024-06-07 15:53:13.557544 2024-06-07 17:53:13.494442 \N 2
\.
@ -7581,6 +7588,7 @@ COPY public.servicekey (id, name, kid, service, jwk, metadata, created_date, exp
--
COPY public.servicekeyapproval (id, approver_id, approval_type, approved_date, notes) FROM stdin;
2 \N ServiceKeyApprovalType.AUTOMATIC 2024-06-07 15:53:13.57574
\.
@ -8148,14 +8156,14 @@ SELECT pg_catalog.setval('public.logentry_id_seq', 1, false);
-- Name: logentrykind_id_seq; Type: SEQUENCE SET; Schema: public; Owner: quay
--

SELECT pg_catalog.setval('public.logentrykind_id_seq', 110, true);
SELECT pg_catalog.setval('public.logentrykind_id_seq', 112, true);


--
-- Name: loginservice_id_seq; Type: SEQUENCE SET; Schema: public; Owner: quay
--

SELECT pg_catalog.setval('public.loginservice_id_seq', 7, true);
SELECT pg_catalog.setval('public.loginservice_id_seq', 8, true);


--
@ -8232,7 +8240,7 @@ SELECT pg_catalog.setval('public.namespacegeorestriction_id_seq', 1, false);
-- Name: notification_id_seq; Type: SEQUENCE SET; Schema: public; Owner: quay
--

SELECT pg_catalog.setval('public.notification_id_seq', 1, true);
SELECT pg_catalog.setval('public.notification_id_seq', 2, true);


--
@ -8477,14 +8485,14 @@ SELECT pg_catalog.setval('public.role_id_seq', 3, true);
-- Name: servicekey_id_seq; Type: SEQUENCE SET; Schema: public; Owner: quay
--

SELECT pg_catalog.setval('public.servicekey_id_seq', 1, true);
SELECT pg_catalog.setval('public.servicekey_id_seq', 2, true);


--
-- Name: servicekeyapproval_id_seq; Type: SEQUENCE SET; Schema: public; Owner: quay
--

SELECT pg_catalog.setval('public.servicekeyapproval_id_seq', 1, true);
SELECT pg_catalog.setval('public.servicekeyapproval_id_seq', 2, true);


--
@ -10308,6 +10316,13 @@ CREATE UNIQUE INDEX manifest_repository_id_digest ON public.manifest USING btree
CREATE INDEX manifest_repository_id_media_type_id ON public.manifest USING btree (repository_id, media_type_id);


--
-- Name: manifest_repository_id_subject; Type: INDEX; Schema: public; Owner: quay
--

CREATE INDEX manifest_repository_id_subject ON public.manifest USING btree (repository_id, subject);
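The (repository_id, subject) index above presumably backs the OCI 1.1 referrers lookup: finding every manifest in a repository whose subject descriptor points at a given digest. A minimal peewee sketch of that lookup shape, assuming repo_id and subject_digest are already resolved; lookup_referrers is a hypothetical helper and not the actual endpoint code from this change:

# Hypothetical sketch only; not the registry endpoint implementation.
from data.database import Manifest

def lookup_referrers(repo_id, subject_digest):
    # Walks the (repository_id, subject) btree index created above.
    return list(
        Manifest.select().where(
            Manifest.repository == repo_id,
            Manifest.subject == subject_digest,
        )
    )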
--
-- Name: manifestblob_blob_id; Type: INDEX; Schema: public; Owner: quay
--
@ -69,6 +69,8 @@ const response = {
  },
  external_login: [],
  features: {
    REFERRERS_API: false,
    MANIFEST_SUBJECT_BACKFILL: false,
    SUPERUSERS_FULL_ACCESS: true,
    ACTION_LOG_ROTATION: false,
    ADVERTISE_V2: true,
116
workers/manifestsubjectbackfillworker.py
Normal file
@ -0,0 +1,116 @@
import json
import logging
import logging.config
import time

from peewee import fn

import features
from app import app
from data.database import Manifest
from image.shared.schemas import ManifestException, parse_manifest_from_bytes
from util.bytes import Bytes
from util.log import logfile_path
from util.migrate.allocator import yield_random_entries
from workers.gunicorn_worker import GunicornWorker
from workers.worker import Worker


logger = logging.getLogger(__name__)


WORKER_FREQUENCY = app.config.get("MANIFEST_SUBJECT_BACKFILL_WORKER_FREQUENCY", 60)


class ManifestSubjectBackfillWorker(Worker):
    """
    Worker which backfills the newly added subject field onto existing Manifest rows.
    """

    def __init__(self):
        super().__init__()
        self.add_operation(self._backfill_manifest_subject, WORKER_FREQUENCY)

    def _backfill_manifest_subject(self):
        try:
            # Check whether any manifest still needs backfilling. Note the explicit
            # parentheses: peewee expressions combined with `|` must be parenthesized,
            # since `|` binds tighter than `==` in Python.
            Manifest.select().where(
                (Manifest.subject_backfilled == False) | (Manifest.subject_backfilled >> None)
            ).get()
        except Manifest.DoesNotExist:
            logger.debug("Manifest subject backfill worker has completed; skipping")
            return False

        # Iterate over candidate rows in random batches so that multiple workers
        # can make progress concurrently without stepping on each other.
        iterator = yield_random_entries(
            lambda: Manifest.select().where(Manifest.subject_backfilled == False),
            Manifest.id,
            250,
            Manifest.select(fn.Max(Manifest.id)).scalar(),
            1,
        )

        for manifest_row, abt, _ in iterator:
            if manifest_row.subject_backfilled:
                logger.debug("Another worker preempted this worker")
                abt.set()
                continue

            logger.debug("Setting manifest subject for manifest %s", manifest_row.id)
            manifest_bytes = Bytes.for_string_or_unicode(manifest_row.manifest_bytes)

            subject = None
            try:
                parsed = parse_manifest_from_bytes(
                    manifest_bytes, manifest_row.media_type.name, validate=False
                )
                subject = parsed.subject
            except ManifestException as me:
                logger.warning(
                    "Got exception when trying to parse manifest %s: %s", manifest_row.id, me
                )

            # Conditional update acts as a compare-and-set so that concurrent
            # workers do not overwrite each other's progress.
            updated = (
                Manifest.update(
                    subject=subject,
                    subject_backfilled=True,
                )
                .where(Manifest.id == manifest_row.id, Manifest.subject_backfilled == False)
                .execute()
            )
            if updated != 1:
                logger.debug("Another worker preempted this worker")
                abt.set()
                continue

        return True


def create_gunicorn_worker():
    """
    Follows the gunicorn application factory pattern, enabling
    a quay worker to run as a gunicorn worker thread.

    This is useful when utilizing gunicorn's hot reload in local dev.

    Utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
    """
    worker = GunicornWorker(
        __name__, app, ManifestSubjectBackfillWorker(), features.MANIFEST_SUBJECT_BACKFILL
    )
    return worker


def main():
    logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)

    if app.config.get("ACCOUNT_RECOVERY_MODE", False):
        logger.debug("Quay running in account recovery mode")
        while True:
            time.sleep(100000)

    if not features.MANIFEST_SUBJECT_BACKFILL:
        logger.debug("Manifest subject backfill worker not enabled; skipping")
        while True:
            time.sleep(100000)

    worker = ManifestSubjectBackfillWorker()
    worker.start()


if __name__ == "__main__":
    main()
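For context on what the backfill extracts: under OCI 1.1, an image manifest may carry a subject descriptor naming the manifest it refers to; parse_manifest_from_bytes exposes it as parsed.subject, which the worker stores for the referrers lookup. A sketch of such a payload, with made-up digests and sizes purely for illustration:

# Illustrative only: an OCI 1.1 image manifest whose "subject" points at another manifest.
import json

referrer_manifest_bytes = json.dumps(
    {
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "digest": "sha256:" + "0" * 64,  # placeholder
            "size": 1469,
        },
        "layers": [
            {
                "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
                "digest": "sha256:" + "1" * 64,  # placeholder
                "size": 2479,
            }
        ],
        "subject": {
            "mediaType": "application/vnd.oci.image.manifest.v1+json",
            "digest": "sha256:" + "2" * 64,  # placeholder digest of the referenced manifest
            "size": 525,
        },
    }
).encode("utf-8")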
28
workers/test/test_manifestsubjectbackfillworker.py
Normal file
@ -0,0 +1,28 @@
import pytest

from data import database, model
from test.fixtures import *
from workers.manifestsubjectbackfillworker import ManifestSubjectBackfillWorker


def test_basic(initialized_db):
    worker = ManifestSubjectBackfillWorker()

    # By default, new manifests are already backfilled (i.e. the subject, if any,
    # is parsed at creation time), so there is nothing left to do.
    assert not worker._backfill_manifest_subject()

    # Mark all manifests as needing a subject backfill.
    database.Manifest.update(subject_backfilled=False).execute()

    assert worker._backfill_manifest_subject()

    for manifest_row in database.Manifest.select():
        assert manifest_row.subject_backfilled is True

        # If a subject was found, a manifest with that digest should exist in the
        # same repository.
        if manifest_row.subject is not None:
            database.Manifest.select().where(
                database.Manifest.repository == manifest_row.repository,
                database.Manifest.digest == manifest_row.subject,
            ).get()

    assert not worker._backfill_manifest_subject()