Mirror of https://github.com/quay/quay.git

[PROJQUAY-1021] task: Update "Black" to version 20.8b1

Authored by Kurtis Mullins on 2020-11-30 18:48:19 -05:00; committed by GitHub.
parent 28a5200d19
commit bd7252c536
234 changed files with 5302 additions and 2233 deletions


@@ -25,17 +25,12 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          # https://issues.redhat.com/browse/PROJQUAY-92
-          # https://github.com/psf/black/issues/1207#issuecomment-566249522
-          pip install black==19.10b0 --no-binary=regex
+          pip install black==20.8b1
           pip install flake8
       - name: Check Formatting (Black)
         run: |
-          # TODO(kleesc): Re-enable after buildman rewrite
-          black --line-length=100 --target-version=py38 --check --diff . --exclude "buildman"
+          black --line-length=100 --target-version=py38 --check --diff .
       - name: Check Formatting (Flake8)
         run: |
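For contributors who want to reproduce this CI gate locally before pushing, a minimal sketch (assuming Black 20.8b1 is installed in the active environment; the flags simply mirror the workflow step above, and the script name is illustrative):

    # check_black.py -- run the same Black check as the CI workflow (illustrative helper, not part of the repo)
    import subprocess
    import sys

    result = subprocess.run(
        ["black", "--line-length=100", "--target-version=py38", "--check", "--diff", "."]
    )
    sys.exit(result.returncode)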


@@ -177,5 +177,4 @@ generate-proto-py:

 black:
-	black --line-length 100 --target-version py36 --exclude "/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist|buildman)/" . # TODO(kleesc): Re-enable after buildman rewrite
+	black --line-length=100 --target-version=py38 --exclude "/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/" .

app.py

@@ -155,7 +155,11 @@ def _request_start():
         host, port = os.getenv("PYDEV_DEBUG").split(":")
         pydevd_pycharm.settrace(
-            host, port=int(port), stdoutToServer=True, stderrToServer=True, suspend=False,
+            host,
+            port=int(port),
+            stdoutToServer=True,
+            stderrToServer=True,
+            suspend=False,
         )
     logger.debug(


@@ -305,27 +305,42 @@ class ValidatedAuthContext(AuthContext):
         # TODO: Remove this all once the new code is fully deployed.
         if self.token:
             dict_data.update(
-                {"kind": "token", "token": self.token.get_code(),}
+                {
+                    "kind": "token",
+                    "token": self.token.get_code(),
+                }
             )
         if self.oauthtoken:
             dict_data.update(
-                {"kind": "oauth", "oauth": self.oauthtoken.uuid, "user": self.authed_user.username,}
+                {
+                    "kind": "oauth",
+                    "oauth": self.oauthtoken.uuid,
+                    "user": self.authed_user.username,
+                }
             )
         if self.user or self.robot:
             dict_data.update(
-                {"kind": "user", "user": self.authed_user.username,}
+                {
+                    "kind": "user",
+                    "user": self.authed_user.username,
+                }
             )
         if self.appspecifictoken:
             dict_data.update(
-                {"kind": "user", "user": self.authed_user.username,}
+                {
+                    "kind": "user",
+                    "user": self.authed_user.username,
+                }
             )
         if self.is_anonymous:
             dict_data.update(
-                {"kind": "anonymous",}
+                {
+                    "kind": "anonymous",
+                }
             )
         # End of legacy information.


@@ -104,7 +104,9 @@ class UserEntityHandler(ContextEntityHandler):
         return "user %s" % entity_reference.username

     def analytics_id_and_public_metadata(self, entity_reference):
-        return entity_reference.username, {"username": entity_reference.username,}
+        return entity_reference.username, {
+            "username": entity_reference.username,
+        }


 class RobotEntityHandler(ContextEntityHandler):

@@ -121,7 +123,10 @@ class RobotEntityHandler(ContextEntityHandler):
         return "robot %s" % entity_reference.username

     def analytics_id_and_public_metadata(self, entity_reference):
-        return entity_reference.username, {"username": entity_reference.username, "is_robot": True,}
+        return entity_reference.username, {
+            "username": entity_reference.username,
+            "is_robot": True,
+        }


 class TokenEntityHandler(ContextEntityHandler):

@@ -138,7 +143,9 @@ class TokenEntityHandler(ContextEntityHandler):
         return "token %s" % entity_reference.friendly_name

     def analytics_id_and_public_metadata(self, entity_reference):
-        return "token:%s" % entity_reference.id, {"token": entity_reference.friendly_name,}
+        return "token:%s" % entity_reference.id, {
+            "token": entity_reference.friendly_name,
+        }


 class OAuthTokenEntityHandler(ContextEntityHandler):


@@ -48,12 +48,20 @@ SCOPE_MAX_REPO_ROLES.update(

 SCOPE_MAX_TEAM_ROLES = defaultdict(lambda: None)
 SCOPE_MAX_TEAM_ROLES.update(
-    {scopes.CREATE_REPO: "creator", scopes.DIRECT_LOGIN: "admin", scopes.ORG_ADMIN: "admin",}
+    {
+        scopes.CREATE_REPO: "creator",
+        scopes.DIRECT_LOGIN: "admin",
+        scopes.ORG_ADMIN: "admin",
+    }
 )

 SCOPE_MAX_USER_ROLES = defaultdict(lambda: None)
 SCOPE_MAX_USER_ROLES.update(
-    {scopes.READ_USER: "read", scopes.DIRECT_LOGIN: "admin", scopes.ADMIN_USER: "admin",}
+    {
+        scopes.READ_USER: "read",
+        scopes.DIRECT_LOGIN: "admin",
+        scopes.ADMIN_USER: "admin",
+    }
 )


@@ -31,12 +31,18 @@ ACCESS_SCHEMA = {
         "description": "List of access granted to the subject",
         "items": {
             "type": "object",
-            "required": ["type", "name", "actions",],
+            "required": [
+                "type",
+                "name",
+                "actions",
+            ],
             "properties": {
                 "type": {
                     "type": "string",
                     "description": "We only allow repository permissions",
-                    "enum": ["repository",],
+                    "enum": [
+                        "repository",
+                    ],
                 },
                 "name": {
                     "type": "string",
@@ -45,7 +51,14 @@ ACCESS_SCHEMA = {
                 "actions": {
                     "type": "array",
                     "description": "List of specific verbs which can be performed against repository",
-                    "items": {"type": "string", "enum": ["push", "pull", "*",],},
+                    "items": {
+                        "type": "string",
+                        "enum": [
+                            "push",
+                            "pull",
+                            "*",
+                        ],
+                    },
                 },
             },
         },


@@ -24,7 +24,13 @@ def get_oauth_token(_):
         (ContextEntityKind.user, "devtable", model.user.get_user),
     ],
 )
-@pytest.mark.parametrize("v1_dict_format", [(True), (False),])
+@pytest.mark.parametrize(
+    "v1_dict_format",
+    [
+        (True),
+        (False),
+    ],
+)
 def test_signed_auth_context(kind, entity_reference, loader, v1_dict_format, initialized_db):
     if kind == ContextEntityKind.anonymous:
         validated = ValidatedAuthContext()


@@ -138,5 +138,6 @@ def test_invalid_unicode_3(app):
     header = "basic " + b64encode(auth).decode("ascii")
     result = validate_basic_auth(header)
     assert result == ValidateResult(
-        AuthKind.basic, error_message="Could not find robot with specified username",
+        AuthKind.basic,
+        error_message="Could not find robot with specified username",
     )


@@ -27,7 +27,13 @@ from test.fixtures import *

 def _access(typ="repository", name="somens/somerepo", actions=None):
     actions = [] if actions is None else actions
-    return [{"type": typ, "name": name, "actions": actions,}]
+    return [
+        {
+            "type": typ,
+            "name": name,
+            "actions": actions,
+        }
+    ]


 def _delete_field(token_data, field_name):

@@ -228,7 +234,13 @@ def test_mixing_keys_e2e(initialized_db):
         _parse_token(deleted_key_token)


-@pytest.mark.parametrize("token", ["someunicodetoken✡", "\xc9\xad\xbd",])
+@pytest.mark.parametrize(
+    "token",
+    [
+        "someunicodetoken✡",
+        "\xc9\xad\xbd",
+    ],
+)
 def test_unicode_token(token):
     with pytest.raises(InvalidJWTException):
         _parse_token(token)


@@ -34,6 +34,12 @@ def test_token(header, expected_result):
 def test_valid_grant():
     header = "token " + generate_signed_token({"a": "b"}, {"c": "d"})
     expected = ValidateResult(
-        AuthKind.signed_grant, signed_data={"grants": {"a": "b",}, "user_context": {"c": "d"},}
+        AuthKind.signed_grant,
+        signed_data={
+            "grants": {
+                "a": "b",
+            },
+            "user_context": {"c": "d"},
+        },
     )
     assert validate_signed_grant(header) == expected


@@ -94,13 +94,13 @@ class BaseAvatar(object):
         return self.get_data(external_user.username, external_user.email, "user")

     def get_data(self, name, email_or_id, kind="user"):
-        """ Computes and returns the full data block for the avatar:
+        """Computes and returns the full data block for the avatar:

         {
             'name': name,
             'hash': The gravatar hash, if any.
             'color': The color for the avatar
         }
         """
         colors = self.colors

         # Note: email_or_id may be None if gotten from external auth when email is disabled,


@@ -4,11 +4,12 @@ import logging

 from app import instance_keys
 from util.security import jwtutil
-from util.security.registry_jwt import \
-    generate_bearer_token, \
-    InvalidBearerTokenException, \
-    ALGORITHM, \
-    JWT_CLOCK_SKEW_SECONDS
+from util.security.registry_jwt import (
+    generate_bearer_token,
+    InvalidBearerTokenException,
+    ALGORITHM,
+    JWT_CLOCK_SKEW_SECONDS,
+)


 logger = logging.getLogger(__name__)

@@ -40,7 +41,7 @@ BUILD_TOKEN_CONTEXT_SCHEMA = {
             "type": "number",
             "description": "The number of seconds until the job expires",
         },
-    }
+    },
 }

@@ -54,7 +55,7 @@ def build_token(aud, token_type, build_id, job_id, expiration, instance_keys):
         "token_type": token_type,
         "build_id": build_id,
         "job_id": job_id,
-        "expiration": expiration
+        "expiration": expiration,
     }

     token = generate_bearer_token(aud, ANONYMOUS_SUB, token_data, {}, expiration, instance_keys)

@@ -87,7 +88,7 @@ def verify_build_token(token, aud, token_type, instance_keys):
             algorithms=[ALGORITHM],
             audience=aud,
             issuer=instance_keys.service_name,
-            leeway=JWT_CLOCK_SKEW_SECONDS
+            leeway=JWT_CLOCK_SKEW_SECONDS,
         )
     except jwtutil.InvalidTokenError as ite:
         logger.error("Invalid token reason: %s", ite)

@@ -99,19 +100,25 @@ def verify_build_token(token, aud, token_type, instance_keys):
     if payload["sub"] != ANONYMOUS_SUB:
         raise InvalidBuildTokenException("Wrong sub field in JWT")

-    if ("context" not in payload
+    if (
+        "context" not in payload
         or not payload["context"]["token_type"]
         or not payload["context"]["build_id"]
         or not payload["context"]["job_id"]
-        or not payload["context"]["expiration"]):
+        or not payload["context"]["expiration"]
+    ):
         raise InvalidBuildTokenException("Missing context field in JWT")

     try:
         jsonschema.validate(payload["context"], BUILD_TOKEN_CONTEXT_SCHEMA)
     except jsonschema.ValidationError:
-        raise InvalidBuildTokenException("Unable to validate build token context schema: malformed context")
+        raise InvalidBuildTokenException(
+            "Unable to validate build token context schema: malformed context"
+        )

     if payload["context"]["token_type"] != token_type:
-        raise InvalidBuildTokenException("Build token type in JWT does not match expected type: %s" % token_type)
+        raise InvalidBuildTokenException(
+            "Build token type in JWT does not match expected type: %s" % token_type
+        )

     return payload
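The context validation above leans on jsonschema; a self-contained sketch of the same pattern (the schema fields and values here are illustrative stand-ins, not the actual BUILD_TOKEN_CONTEXT_SCHEMA):

    import jsonschema

    # Hypothetical, trimmed-down stand-in for the build token context schema.
    CONTEXT_SCHEMA = {
        "type": "object",
        "required": ["token_type", "build_id", "job_id", "expiration"],
        "properties": {
            "token_type": {"type": "string"},
            "build_id": {"type": "string"},
            "job_id": {"type": "string"},
            "expiration": {"type": "number"},
        },
    }

    context = {
        "token_type": "build_job_token",  # placeholder value
        "build_id": "some-build",
        "job_id": "building/some-build",
        "expiration": 3600,
    }

    try:
        jsonschema.validate(context, CONTEXT_SCHEMA)
    except jsonschema.ValidationError:
        raise Exception("Unable to validate build token context schema: malformed context")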


@@ -11,7 +11,7 @@ from app import (
     build_logs,
     dockerfile_build_queue,
     instance_keys,
-    OVERRIDE_CONFIG_DIRECTORY
+    OVERRIDE_CONFIG_DIRECTORY,
 )

 from util.log import logfile_path

File diff suppressed because it is too large.


@@ -15,35 +15,35 @@ class BuildManagerStub(object):
             channel: A grpc.Channel.
         """
         self.Ping = channel.unary_unary(
-            '/buildman_pb.BuildManager/Ping',
+            "/buildman_pb.BuildManager/Ping",
             request_serializer=buildman__pb2.PingRequest.SerializeToString,
             response_deserializer=buildman__pb2.PingReply.FromString,
         )
         self.RegisterBuildJob = channel.unary_unary(
-            '/buildman_pb.BuildManager/RegisterBuildJob',
+            "/buildman_pb.BuildManager/RegisterBuildJob",
             request_serializer=buildman__pb2.BuildJobArgs.SerializeToString,
             response_deserializer=buildman__pb2.BuildPack.FromString,
         )
         self.Heartbeat = channel.stream_stream(
-            '/buildman_pb.BuildManager/Heartbeat',
+            "/buildman_pb.BuildManager/Heartbeat",
             request_serializer=buildman__pb2.HeartbeatRequest.SerializeToString,
             response_deserializer=buildman__pb2.HeartbeatResponse.FromString,
         )
         self.SetPhase = channel.unary_unary(
-            '/buildman_pb.BuildManager/SetPhase',
+            "/buildman_pb.BuildManager/SetPhase",
             request_serializer=buildman__pb2.SetPhaseRequest.SerializeToString,
             response_deserializer=buildman__pb2.SetPhaseResponse.FromString,
         )
         self.LogMessage = channel.stream_stream(
-            '/buildman_pb.BuildManager/LogMessage',
+            "/buildman_pb.BuildManager/LogMessage",
             request_serializer=buildman__pb2.LogMessageRequest.SerializeToString,
             response_deserializer=buildman__pb2.LogMessageResponse.FromString,
         )
         self.DetermineCachedTag = channel.unary_unary(
-            '/buildman_pb.BuildManager/DetermineCachedTag',
+            "/buildman_pb.BuildManager/DetermineCachedTag",
             request_serializer=buildman__pb2.CachedTagRequest.SerializeToString,
             response_deserializer=buildman__pb2.CachedTag.FromString,
         )


 class BuildManagerServicer(object):
@@ -52,174 +52,241 @@ class BuildManagerServicer(object):

     def Ping(self, request, context):
         """Missing associated documentation comment in .proto file."""
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")

     def RegisterBuildJob(self, request, context):
         """Missing associated documentation comment in .proto file."""
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")

     def Heartbeat(self, request_iterator, context):
         """Missing associated documentation comment in .proto file."""
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")

     def SetPhase(self, request, context):
         """Missing associated documentation comment in .proto file."""
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")

     def LogMessage(self, request_iterator, context):
         """Missing associated documentation comment in .proto file."""
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")

     def DetermineCachedTag(self, request, context):
         """Missing associated documentation comment in .proto file."""
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-        context.set_details('Method not implemented!')
-        raise NotImplementedError('Method not implemented!')
+        context.set_details("Method not implemented!")
+        raise NotImplementedError("Method not implemented!")
 def add_BuildManagerServicer_to_server(servicer, server):
     rpc_method_handlers = {
-        'Ping': grpc.unary_unary_rpc_method_handler(
+        "Ping": grpc.unary_unary_rpc_method_handler(
             servicer.Ping,
             request_deserializer=buildman__pb2.PingRequest.FromString,
             response_serializer=buildman__pb2.PingReply.SerializeToString,
         ),
-        'RegisterBuildJob': grpc.unary_unary_rpc_method_handler(
+        "RegisterBuildJob": grpc.unary_unary_rpc_method_handler(
             servicer.RegisterBuildJob,
             request_deserializer=buildman__pb2.BuildJobArgs.FromString,
             response_serializer=buildman__pb2.BuildPack.SerializeToString,
         ),
-        'Heartbeat': grpc.stream_stream_rpc_method_handler(
+        "Heartbeat": grpc.stream_stream_rpc_method_handler(
             servicer.Heartbeat,
             request_deserializer=buildman__pb2.HeartbeatRequest.FromString,
             response_serializer=buildman__pb2.HeartbeatResponse.SerializeToString,
         ),
-        'SetPhase': grpc.unary_unary_rpc_method_handler(
+        "SetPhase": grpc.unary_unary_rpc_method_handler(
             servicer.SetPhase,
             request_deserializer=buildman__pb2.SetPhaseRequest.FromString,
             response_serializer=buildman__pb2.SetPhaseResponse.SerializeToString,
         ),
-        'LogMessage': grpc.stream_stream_rpc_method_handler(
+        "LogMessage": grpc.stream_stream_rpc_method_handler(
             servicer.LogMessage,
             request_deserializer=buildman__pb2.LogMessageRequest.FromString,
             response_serializer=buildman__pb2.LogMessageResponse.SerializeToString,
         ),
-        'DetermineCachedTag': grpc.unary_unary_rpc_method_handler(
+        "DetermineCachedTag": grpc.unary_unary_rpc_method_handler(
             servicer.DetermineCachedTag,
             request_deserializer=buildman__pb2.CachedTagRequest.FromString,
             response_serializer=buildman__pb2.CachedTag.SerializeToString,
         ),
     }
     generic_handler = grpc.method_handlers_generic_handler(
-        'buildman_pb.BuildManager', rpc_method_handlers)
+        "buildman_pb.BuildManager", rpc_method_handlers
+    )
     server.add_generic_rpc_handlers((generic_handler,))
 # This class is part of an EXPERIMENTAL API.
 class BuildManager(object):
     """Missing associated documentation comment in .proto file."""

     @staticmethod
-    def Ping(request,
-            target,
-            options=(),
-            channel_credentials=None,
-            call_credentials=None,
-            compression=None,
-            wait_for_ready=None,
-            timeout=None,
-            metadata=None):
-        return grpc.experimental.unary_unary(request, target, '/buildman_pb.BuildManager/Ping',
-            buildman__pb2.PingRequest.SerializeToString,
-            buildman__pb2.PingReply.FromString,
-            options, channel_credentials,
-            call_credentials, compression, wait_for_ready, timeout, metadata)
+    def Ping(
+        request,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            "/buildman_pb.BuildManager/Ping",
+            buildman__pb2.PingRequest.SerializeToString,
+            buildman__pb2.PingReply.FromString,
+            options,
+            channel_credentials,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
     @staticmethod
-    def RegisterBuildJob(request,
-            target,
-            options=(),
-            channel_credentials=None,
-            call_credentials=None,
-            compression=None,
-            wait_for_ready=None,
-            timeout=None,
-            metadata=None):
-        return grpc.experimental.unary_unary(request, target, '/buildman_pb.BuildManager/RegisterBuildJob',
-            buildman__pb2.BuildJobArgs.SerializeToString,
-            buildman__pb2.BuildPack.FromString,
-            options, channel_credentials,
-            call_credentials, compression, wait_for_ready, timeout, metadata)
+    def RegisterBuildJob(
+        request,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            "/buildman_pb.BuildManager/RegisterBuildJob",
+            buildman__pb2.BuildJobArgs.SerializeToString,
+            buildman__pb2.BuildPack.FromString,
+            options,
+            channel_credentials,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
     @staticmethod
-    def Heartbeat(request_iterator,
-            target,
-            options=(),
-            channel_credentials=None,
-            call_credentials=None,
-            compression=None,
-            wait_for_ready=None,
-            timeout=None,
-            metadata=None):
-        return grpc.experimental.stream_stream(request_iterator, target, '/buildman_pb.BuildManager/Heartbeat',
-            buildman__pb2.HeartbeatRequest.SerializeToString,
-            buildman__pb2.HeartbeatResponse.FromString,
-            options, channel_credentials,
-            call_credentials, compression, wait_for_ready, timeout, metadata)
+    def Heartbeat(
+        request_iterator,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.stream_stream(
+            request_iterator,
+            target,
+            "/buildman_pb.BuildManager/Heartbeat",
+            buildman__pb2.HeartbeatRequest.SerializeToString,
+            buildman__pb2.HeartbeatResponse.FromString,
+            options,
+            channel_credentials,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
     @staticmethod
-    def SetPhase(request,
-            target,
-            options=(),
-            channel_credentials=None,
-            call_credentials=None,
-            compression=None,
-            wait_for_ready=None,
-            timeout=None,
-            metadata=None):
-        return grpc.experimental.unary_unary(request, target, '/buildman_pb.BuildManager/SetPhase',
-            buildman__pb2.SetPhaseRequest.SerializeToString,
-            buildman__pb2.SetPhaseResponse.FromString,
-            options, channel_credentials,
-            call_credentials, compression, wait_for_ready, timeout, metadata)
+    def SetPhase(
+        request,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            "/buildman_pb.BuildManager/SetPhase",
+            buildman__pb2.SetPhaseRequest.SerializeToString,
+            buildman__pb2.SetPhaseResponse.FromString,
+            options,
+            channel_credentials,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
     @staticmethod
-    def LogMessage(request_iterator,
-            target,
-            options=(),
-            channel_credentials=None,
-            call_credentials=None,
-            compression=None,
-            wait_for_ready=None,
-            timeout=None,
-            metadata=None):
-        return grpc.experimental.stream_stream(request_iterator, target, '/buildman_pb.BuildManager/LogMessage',
-            buildman__pb2.LogMessageRequest.SerializeToString,
-            buildman__pb2.LogMessageResponse.FromString,
-            options, channel_credentials,
-            call_credentials, compression, wait_for_ready, timeout, metadata)
+    def LogMessage(
+        request_iterator,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.stream_stream(
+            request_iterator,
+            target,
+            "/buildman_pb.BuildManager/LogMessage",
+            buildman__pb2.LogMessageRequest.SerializeToString,
+            buildman__pb2.LogMessageResponse.FromString,
+            options,
+            channel_credentials,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
     @staticmethod
-    def DetermineCachedTag(request,
-            target,
-            options=(),
-            channel_credentials=None,
-            call_credentials=None,
-            compression=None,
-            wait_for_ready=None,
-            timeout=None,
-            metadata=None):
-        return grpc.experimental.unary_unary(request, target, '/buildman_pb.BuildManager/DetermineCachedTag',
-            buildman__pb2.CachedTagRequest.SerializeToString,
-            buildman__pb2.CachedTag.FromString,
-            options, channel_credentials,
-            call_credentials, compression, wait_for_ready, timeout, metadata)
+    def DetermineCachedTag(
+        request,
+        target,
+        options=(),
+        channel_credentials=None,
+        call_credentials=None,
+        compression=None,
+        wait_for_ready=None,
+        timeout=None,
+        metadata=None,
+    ):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            "/buildman_pb.BuildManager/DetermineCachedTag",
+            buildman__pb2.CachedTagRequest.SerializeToString,
+            buildman__pb2.CachedTag.FromString,
+            options,
+            channel_credentials,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+        )
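For orientation, a hedged sketch of how a worker-side client might call this generated stub (the channel address is a placeholder; the import path follows the `from buildman.buildman_pb import ...` imports used elsewhere in this commit):

    import grpc

    from buildman.buildman_pb import buildman_pb2, buildman_pb2_grpc

    # Open a channel to a build manager and issue a Ping (address is illustrative).
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = buildman_pb2_grpc.BuildManagerStub(channel)
        reply = stub.Ping(buildman_pb2.PingRequest())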


@@ -9,7 +9,7 @@ from buildman.buildman_pb import buildman_pb2_grpc
 from buildman.build_token import (
     BUILD_JOB_REGISTRATION_TYPE,
     BUILD_JOB_TOKEN_TYPE,
-    InvalidBuildTokenException
+    InvalidBuildTokenException,
 )

 from data.database import BUILD_PHASE

@@ -36,7 +36,7 @@ class BuildManagerServicer(buildman_pb2_grpc.BuildManagerServicer):
         context.set_code(code)

     def _decode_build_token(self, token, token_type):
-        """ Return the build token context,
+        """Return the build token context,
         or an error message if there was an exception decoding the token."""
         msg = None
         try:

@@ -87,7 +87,7 @@ class BuildManagerServicer(buildman_pb2_grpc.BuildManagerServicer):
             tag_names=build_args.get("tag_names", ""),
             base_image=buildman_pb2.BuildPack.BaseImage(
                 **build_args.get("base_image", {}),
-            )
+            ),
         )

         git_package = build_args.get("git")

@@ -116,7 +116,9 @@ class BuildManagerServicer(buildman_pb2_grpc.BuildManagerServicer):
                 return buildman_pb2.HeartbeatResponse()

             job_id = decoded_token["job_id"]
-            yield buildman_pb2.HeartbeatResponse(reply=self._lifecycle_manager.job_heartbeat(job_id))
+            yield buildman_pb2.HeartbeatResponse(
+                reply=self._lifecycle_manager.job_heartbeat(job_id)
+            )

     def SetPhase(self, request, context):
         """Update the job phase."""

@@ -131,7 +133,9 @@ class BuildManagerServicer(buildman_pb2_grpc.BuildManagerServicer):
         job_id = decoded_token["job_id"]
         phase_metadata = {}
         if request.HasField("pull_metadata"):
-            phase_metadata.update(MessageToDict(request.pull_metadata, preserving_proto_field_name=True))
+            phase_metadata.update(
+                MessageToDict(request.pull_metadata, preserving_proto_field_name=True)
+            )

         updated = self._lifecycle_manager.update_job_phase(
             job_id,

@@ -139,8 +143,7 @@ class BuildManagerServicer(buildman_pb2_grpc.BuildManagerServicer):
             phase_metadata,
         )
         return buildman_pb2.SetPhaseResponse(
-            success=updated,
-            sequence_number=request.sequence_number
+            success=updated, sequence_number=request.sequence_number
         )

     def LogMessage(self, request_iterator, context):

@@ -169,8 +172,7 @@ class BuildManagerServicer(buildman_pb2_grpc.BuildManagerServicer):
                 log_message = req.log_message
                 logged = self._lifecycle_manager.append_build_log(build_id, log_message)
                 yield buildman_pb2.LogMessageResponse(
-                    success=logged,
-                    sequence_number=sequence_number
+                    success=logged, sequence_number=sequence_number
                 )
                 last_sequence_number = sequence_number


@@ -76,9 +76,11 @@ class CloudConfigContext(object):
             timeout_start_sec = int(timeout_start_sec)
             timeout_stop_sec = int(timeout_stop_sec)
         except (ValueError, TypeError):
-            logger.error("Invalid timeouts (%s, %s): values should be integers",
-                         timeout_start_sec,
-                         timeout_stop_sec)
+            logger.error(
+                "Invalid timeouts (%s, %s): values should be integers",
+                timeout_start_sec,
+                timeout_stop_sec,
+            )
             raise

         path = os.path.join(os.path.dirname(__file__), "templates")

@@ -113,9 +115,8 @@ class CloudConfigContext(object):
         data = "," + urlquote(content)
         return "data:" + data

     def registry(self, container_name):
-        """ Parse the registry from repositories of the following formats:
+        """Parse the registry from repositories of the following formats:
         quay.io/quay/quay:tagname -> quay.io
         localhost:5000/quay/quay:tagname -> localhost:5000
         localhost:5000/quay/quay -> localhost:5000
@@ -123,7 +124,7 @@ class CloudConfigContext(object):
         quay/quay -> ''
         mysql:latest -> ''
         mysql -> ''
         """
         num_slashes = container_name.count("/")
         if num_slashes == 2:
             return container_name[: container_name.find("/")]
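A standalone sketch of the parsing rule this docstring describes (re-implemented here for illustration only; the real method may handle additional cases beyond the two-slash one shown in the hunk):

    def registry_host(container_name: str) -> str:
        # Two slashes means the first path component is a registry host.
        if container_name.count("/") == 2:
            return container_name[: container_name.find("/")]
        return ""

    assert registry_host("quay.io/quay/quay:tagname") == "quay.io"
    assert registry_host("localhost:5000/quay/quay") == "localhost:5000"
    assert registry_host("mysql:latest") == ""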


@@ -49,7 +49,7 @@ class BuildStateInterface(ABC):
         """

     @abstractmethod
     def job_scheduled(self, job_id, execution_id, max_startup_time):
         """Mark the job as scheduled with execution_id. If the job is not started after
         max_startup_time, the job should get expired.
         """


@@ -120,9 +120,7 @@ class BuildJob(object):
         Returns the tag to pull to prime the cache or None if none.
         """
         cached_tag = self._determine_cached_tag_by_tag()
-        logger.debug(
-            "Determined cached tag %s for %s: %s", cached_tag, base_image_id
-        )
+        logger.debug("Determined cached tag %s for %s: %s", cached_tag, base_image_id)
         return cached_tag

     def _determine_cached_tag_by_tag(self):


@@ -16,7 +16,7 @@ from buildman.build_token import (
     verify_build_token,
     InvalidBearerTokenException,
     BUILD_JOB_REGISTRATION_TYPE,
-    BUILD_JOB_TOKEN_TYPE
+    BUILD_JOB_TOKEN_TYPE,
 )
 from buildman.interface import (
     BuildStateInterface,
@@ -24,7 +24,7 @@ from buildman.interface import (
     BuildJobDoesNotExistsError,
     BuildJobError,
     BuildJobResult,
-    RESULT_PHASES
+    RESULT_PHASES,
 )
 from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
 from buildman.manager.executor import PopenExecutor, EC2Executor, KubernetesExecutor
@@ -33,7 +33,7 @@ from buildman.orchestrator import (
     KeyEvent,
     OrchestratorError,
     OrchestratorConnectionError,
-    ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION
+    ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION,
 )

 from app import instance_keys
@@ -58,7 +58,7 @@ build_ack_duration = Histogram(
 build_duration = Histogram(
     "quay_build_duration_seconds",
     "seconds taken for a build's execution",
     labelnames=["executor", "job_status"],  # status in (COMPLETE, INCOMPLETE, ERROR)
 )

 JOB_PREFIX = "building/"
@@ -104,7 +104,9 @@ class EphemeralBuilderManager(BuildStateInterface):
         "kubernetes": KubernetesExecutor,
     }

-    def __init__(self, registry_hostname, manager_hostname, queue, build_logs, user_files, instance_keys):
+    def __init__(
+        self, registry_hostname, manager_hostname, queue, build_logs, user_files, instance_keys
+    ):
         self._registry_hostname = registry_hostname
         self._manager_hostname = manager_hostname
         self._queue = queue
@@ -150,21 +152,11 @@ class EphemeralBuilderManager(BuildStateInterface):

     def generate_build_token(self, token_type, build_id, job_id, expiration):
         return build_token(
-            self._manager_hostname,
-            token_type,
-            build_id,
-            job_id,
-            expiration,
-            self._instance_keys
+            self._manager_hostname, token_type, build_id, job_id, expiration, self._instance_keys
         )

     def verify_build_token(self, token, token_type):
-        return verify_build_token(
-            token,
-            self._manager_hostname,
-            token_type,
-            self._instance_keys
-        )
+        return verify_build_token(token, self._manager_hostname, token_type, self._instance_keys)

     def _config_prefix(self, key):
         if self._manager_config.get("ORCHESTRATOR") is None:
@@ -219,7 +211,7 @@ class EphemeralBuilderManager(BuildStateInterface):
         return build_job

     def create_job(self, build_id, build_metadata):
         """Create the job in the orchestrator.
         The job will expire if it is not scheduled within CREATED_JOB_TIMEOUT.
         """
         # Sets max threshold for build heartbeats. i.e max total running time of the build (default: 2h)
@@ -231,7 +223,10 @@ class EphemeralBuilderManager(BuildStateInterface):
         job_key = self._job_key(build_id)
         try:
             self._orchestrator.set_key(
-                job_key, json.dumps(build_metadata), overwrite=False, expiration=CREATED_JOB_TIMEOUT,
+                job_key,
+                json.dumps(build_metadata),
+                overwrite=False,
+                expiration=CREATED_JOB_TIMEOUT,
             )
         except KeyError:
             raise BuildJobAlreadyExistsError(job_key)
@@ -249,7 +244,10 @@ class EphemeralBuilderManager(BuildStateInterface):
             job_data = self._orchestrator.get_key(job_id)
             job_data_json = json.loads(job_data)
         except KeyError:
-            logger.warning("Failed to mark job %s as scheduled. Job no longer exists in the orchestrator", job_id)
+            logger.warning(
+                "Failed to mark job %s as scheduled. Job no longer exists in the orchestrator",
+                job_id,
+            )
             return False
         except Exception as e:
             logger.warning("Exception loading job %s from orchestrator: %s", job_id, e)
@@ -260,10 +258,7 @@ class EphemeralBuilderManager(BuildStateInterface):
         job_data_json["execution_id"] = execution_id
         try:
             self._orchestrator.set_key(
-                job_id,
-                json.dumps(job_data_json),
-                overwrite=True,
-                expiration=max_startup_time
+                job_id, json.dumps(job_data_json), overwrite=True, expiration=max_startup_time
             )
         except Exception as e:
             logger.warning("Exception updating job %s in orchestrator: %s", job_id, e)
@@ -274,7 +269,8 @@ class EphemeralBuilderManager(BuildStateInterface):
         if updated:
             self._queue.extend_processing(
                 build_job.job_item,
-                seconds_from_now=max_startup_time + 60,  # Add some leeway to allow the expiry event to complete
+                seconds_from_now=max_startup_time
+                + 60,  # Add some leeway to allow the expiry event to complete
                 minimum_extension=MINIMUM_JOB_EXTENSION,
             )
@@ -291,7 +287,7 @@ class EphemeralBuilderManager(BuildStateInterface):
         return updated

     def job_unschedulable(self, job_id):
-        """ Stop tracking the given unschedulable job.
+        """Stop tracking the given unschedulable job.
         Deletes any states that might have previously been stored in the orchestrator.
         """
         try:
@@ -301,14 +297,14 @@ class EphemeralBuilderManager(BuildStateInterface):
             logger.warning(
                 "Exception trying to mark job %s as unschedulable. Some state may not have been cleaned/updated: %s",
                 job_id,
-                e
+                e,
             )

     def on_job_complete(self, build_job, job_result, executor_name, execution_id):
         """Handle a completed job by updating the queue, job metrics, and cleaning up
         any remaining state.

         If the job result is INCOMPLETE, the job is requeued with its retry restored.
         If a job result is in EXPIRED or ERROR, the job is requeued, but it retry is not restored.
         If the job is cancelled, it is not requeued.
@@ -325,15 +321,29 @@ class EphemeralBuilderManager(BuildStateInterface):
         # Build timeout. No retry restored
         if job_result == BuildJobResult.EXPIRED:
             self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30)
-            logger.warning("Job %s completed with result %s. Requeuing build without restoring retry.", job_id, job_result)
+            logger.warning(
+                "Job %s completed with result %s. Requeuing build without restoring retry.",
+                job_id,
+                job_result,
+            )

         # Unfinished build due to internal error. Restore retry.
         elif job_result == BuildJobResult.INCOMPLETE:
-            logger.warning("Job %s completed with result %s. Requeuing build with retry restored.", job_id, job_result)
+            logger.warning(
+                "Job %s completed with result %s. Requeuing build with retry restored.",
+                job_id,
+                job_result,
+            )
             self._queue.incomplete(build_job.job_item, restore_retry=True, retry_after=30)

-        elif job_result in (BuildJobResult.ERROR, BuildJobResult.COMPLETE, BuildJobResult.CANCELLED):
-            logger.warning("Job %s completed with result %s. Marking build done in queue.", job_id, job_result)
+        elif job_result in (
+            BuildJobResult.ERROR,
+            BuildJobResult.COMPLETE,
+            BuildJobResult.CANCELLED,
+        ):
+            logger.warning(
+                "Job %s completed with result %s. Marking build done in queue.", job_id, job_result
+            )
             self._queue.complete(build_job.job_item)

         # Disable trigger if needed
@@ -352,7 +362,7 @@ class EphemeralBuilderManager(BuildStateInterface):
         logger.debug("Job completed for job %s with result %s", job_id, job_result)

     def start_job(self, job_id, max_build_time):
-        """ Starts the build job. This is invoked by the worker once the job has been created and
+        """Starts the build job. This is invoked by the worker once the job has been created and
         scheduled, returing the buildpack needed to start the actual build.
         """
         try:
@@ -412,7 +422,9 @@ class EphemeralBuilderManager(BuildStateInterface):
             return (None, None)

         # Generate the build token
-        token = self.generate_build_token(BUILD_JOB_TOKEN_TYPE, build_job.build_uuid, job_id, max_build_time)
+        token = self.generate_build_token(
+            BUILD_JOB_TOKEN_TYPE, build_job.build_uuid, job_id, max_build_time
+        )

         # Publish the time it took for a worker to ack the build
         self._write_duration_metric(build_ack_duration, build_job.build_uuid)
@@ -442,7 +454,7 @@ class EphemeralBuilderManager(BuildStateInterface):
                 "Job %s is already in a final completed phase (%s), cannot update to %s",
                 job_id,
                 build_job.repo_build.phase,
-                phase
+                phase,
             )
             return False
@@ -450,7 +462,9 @@ class EphemeralBuilderManager(BuildStateInterface):
         phase_metadata = phase_metadata or {}
         updated = model.build.update_phase_then_close(build_job.build_uuid, phase)
         if updated:
-            self.append_log_message(build_job.build_uuid, phase, self._build_logs.PHASE, phase_metadata)
+            self.append_log_message(
+                build_job.build_uuid, phase, self._build_logs.PHASE, phase_metadata
+            )

         # Check if on_job_complete needs to be called
         if updated and phase in EphemeralBuilderManager.COMPLETED_PHASES:
@@ -460,16 +474,22 @@ class EphemeralBuilderManager(BuildStateInterface):
             if phase == BUILD_PHASE.ERROR:
                 self.on_job_complete(build_job, BuildJobResult.ERROR, executor_name, execution_id)
             elif phase == BUILD_PHASE.COMPLETE:
-                self.on_job_complete(build_job, BuildJobResult.COMPLETE, executor_name, execution_id)
+                self.on_job_complete(
+                    build_job, BuildJobResult.COMPLETE, executor_name, execution_id
+                )
             elif phase == BUILD_PHASE.INTERNAL_ERROR:
-                self.on_job_complete(build_job, BuildJobResult.INCOMPLETE, executor_name, execution_id)
+                self.on_job_complete(
+                    build_job, BuildJobResult.INCOMPLETE, executor_name, execution_id
+                )
            elif phase == BUILD_PHASE.CANCELLED:
-                self.on_job_complete(build_job, BuildJobResult.CANCELLED, executor_name, execution_id)
+                self.on_job_complete(
+                    build_job, BuildJobResult.CANCELLED, executor_name, execution_id
+                )

         return updated

     def job_heartbeat(self, job_id):
         """Extend the processing time in the queue and updates the ttl of the job in the
         orchestrator.
         """
         try:
@@ -489,11 +509,14 @@ class EphemeralBuilderManager(BuildStateInterface):
         ttl = min(HEARTBEAT_PERIOD_SECONDS * 2, max_expiration_sec)

         # Update job expirations
-        if (job_data_json["last_heartbeat"] and
-            dateutil.parser.isoparse(job_data_json["last_heartbeat"]) < datetime.utcnow() - HEARTBEAT_DELTA):
+        if (
+            job_data_json["last_heartbeat"]
+            and dateutil.parser.isoparse(job_data_json["last_heartbeat"])
+            < datetime.utcnow() - HEARTBEAT_DELTA
+        ):
             logger.warning(
                 "Heartbeat expired for job %s. Marking job as expired. Last heartbeat received at %s",
-                job_data_json["last_heartbeat"]
+                job_data_json["last_heartbeat"],
             )
             self.update_job_phase(job_id, BUILD_PHASE.INTERNAL_ERROR)
             return False
@@ -507,9 +530,7 @@ class EphemeralBuilderManager(BuildStateInterface):
         )

         try:
-            self._orchestrator.set_key(
-                job_id, json.dumps(job_data_json), expiration=ttl
-            )
+            self._orchestrator.set_key(job_id, json.dumps(job_data_json), expiration=ttl)
         except OrchestratorConnectionError:
             logger.error(
                 "Could not update heartbeat for job %s. Orchestrator is not available", job_id
@@ -536,7 +557,9 @@ class EphemeralBuilderManager(BuildStateInterface):
                 job_data_json.get("execution_id"),
             )
         except KeyError:
-            logger.warning("Could not cleanup cancelled job %s. Job does not exist in orchestrator", job_id)
+            logger.warning(
+                "Could not cleanup cancelled job %s. Job does not exist in orchestrator", job_id
+            )

         return cancelled
@@ -561,24 +584,31 @@ class EphemeralBuilderManager(BuildStateInterface):
         allowed_worker_count = self._manager_config.get("ALLOWED_WORKER_COUNT", 1)
         if self._running_workers() >= allowed_worker_count:
-            logger.warning("Could not schedule build %s. Number of workers at capacity: %s.", build_id, self._running_workers())
+            logger.warning(
+                "Could not schedule build %s. Number of workers at capacity: %s.",
+                build_id,
+                self._running_workers(),
+            )
             return False, TOO_MANY_WORKERS_SLEEP_DURATION

         job_id = self._job_key(build_id)
         try:
             build_job = self._build_job_from_job_id(job_id)
         except BuildJobDoesNotExistsError as bjne:
-            logger.warning("Failed to schedule job %s - Job no longer exists in the orchestrator, likely expired: %s", job_id, bjne)
+            logger.warning(
+                "Failed to schedule job %s - Job no longer exists in the orchestrator, likely expired: %s",
+                job_id,
+                bjne,
+            )
             return False, CREATED_JOB_TIMEOUT_SLEEP_DURATION
         except BuildJobError as bje:
-            logger.warning("Failed to schedule job %s - Could not get job from orchestrator: %s", job_id, bje)
+            logger.warning(
+                "Failed to schedule job %s - Could not get job from orchestrator: %s", job_id, bje
+            )
             return False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION

         registration_token = self.generate_build_token(
-            BUILD_JOB_REGISTRATION_TYPE,
-            build_job.build_uuid,
-            job_id,
-            EPHEMERAL_SETUP_TIMEOUT
+            BUILD_JOB_REGISTRATION_TYPE, build_job.build_uuid, job_id, EPHEMERAL_SETUP_TIMEOUT
         )

         started_with_executor = None
@@ -606,12 +636,17 @@ class EphemeralBuilderManager(BuildStateInterface):
                 )
                 continue

-            logger.debug("Starting builder for job %s with selected executor: %s", job_id, executor.name)
+            logger.debug(
+                "Starting builder for job %s with selected executor: %s", job_id, executor.name
+            )

             try:
                 execution_id = executor.start_builder(registration_token, build_job.build_uuid)
             except:
-                logger.exception("Exception when starting builder for job: %s - Falling back to next configured executor", job_id)
+                logger.exception(
+                    "Exception when starting builder for job: %s - Falling back to next configured executor",
+                    job_id,
+                )
                 continue

             started_with_executor = executor
@@ -629,7 +664,10 @@ class EphemeralBuilderManager(BuildStateInterface):

         # Store metric data tracking job
         metric_spec = json.dumps(
-            {"executor_name": started_with_executor.name, "start_time": time.time(),}
+            {
+                "executor_name": started_with_executor.name,
+                "start_time": time.time(),
+            }
         )

         # Mark the job as scheduled
@@ -642,7 +680,7 @@ class EphemeralBuilderManager(BuildStateInterface):
         return True, None

     def _job_expired_callback(self, key_change):
-        """ Callback invoked when job key is changed, except for CREATE, SET events.
+        """Callback invoked when job key is changed, except for CREATE, SET events.
         DELETE and EXPIRE exvents make sure the build is marked as completed and remove any
         state tracking, executors left.
         """
@@ -670,7 +708,7 @@ class EphemeralBuilderManager(BuildStateInterface):
         self.on_job_complete(build_job, job_result, executor_name, execution_id)

     def _cleanup_job_from_orchestrator(self, build_job):
-        """ Cleanup the given job from the orchestrator.
+        """Cleanup the given job from the orchestrator.
         This includes any keys related to that job: job keys, expiry keys, metric keys, ...
         """
         lock_key = self._lock_key(build_job.build_uuid)
@@ -682,14 +720,14 @@ class EphemeralBuilderManager(BuildStateInterface):
         except KeyError:
             pass
         finally:
-            self._orchestrator.delete_key(lock_key) # Release lock
+            self._orchestrator.delete_key(lock_key)  # Release lock

     def append_build_log(self, build_id, log_message):
         """
         Append the logs from Docker's build output.

         This checks if the given message is a "STEP" line from Docker's output,
         and set the log type to "COMMAND" if so.

         See https://github.com/quay/quay-builder/blob/master/docker/log_writer.go
         to get the serialized message structure
         """
@@ -704,11 +742,13 @@ class EphemeralBuilderManager(BuildStateInterface):
             if key in log_data:
                 fully_unwrapped = log_data[key]
                 break

         current_log_string = str(fully_unwrapped)
         current_step = _extract_current_step(current_log_string)
         if current_step:
-            self.append_log_message(self, build_id, current_log_string, log_type=self._build_logs.COMMAND)
+            self.append_log_message(
+                self, build_id, current_log_string, log_type=self._build_logs.COMMAND
+            )
         else:
             self.append_log_message(self, build_id, current_log_string)
@@ -747,7 +787,9 @@ class EphemeralBuilderManager(BuildStateInterface):
         """Cleanup existing running executor running on `executor_name` with `execution_id`."""
         executor = self._executor_name_to_executor.get(executor_name)
         if executor is None:
-            logger.error("Could not find registered executor %s to terminate %s", executor_name, execution_id)
+            logger.error(
+                "Could not find registered executor %s to terminate %s", executor_name, execution_id
+            )
             return

         # Terminate the executor's execution
@@ -791,7 +833,7 @@ class EphemeralBuilderManager(BuildStateInterface):
                 self._queue.update_metrics()

             with database.CloseForLongOperation(app.config):
                 time.sleep(WORK_CHECK_TIMEOUT)

             logger.debug("Checking for more work from the build queue")

             processing_time = EPHEMERAL_SETUP_TIMEOUT + SETUP_LEEWAY_SECONDS
@@ -821,7 +863,10 @@ class EphemeralBuilderManager(BuildStateInterface):
                 logger.debug("Creating build job for build %s", build_id)
                 self.create_job(build_id, {"job_queue_item": build_job.job_item})
             except BuildJobAlreadyExistsError:
-                logger.warning("Attempted to create job %s that already exists. Cleaning up existing job and returning it to the queue.", job_id)
+                logger.warning(
+                    "Attempted to create job %s that already exists. Cleaning up existing job and returning it to the queue.",
+                    job_id,
+                )
                 self.job_unschedulable(job_id)
                 self._queue.incomplete(job_item, restore_retry=True)
                 continue
@ -864,4 +909,3 @@ def _extract_current_step(current_status_string):
step_increment = re.search(r"Step ([0-9]+) :", current_status_string) step_increment = re.search(r"Step ([0-9]+) :", current_status_string)
if step_increment: if step_increment:
return int(step_increment.group(1)) return int(step_increment.group(1))
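The _extract_current_step helper above is what lets append_build_log tag Docker's "Step N :" lines as COMMAND entries. A quick, self-contained check of that regex (the sample log lines are invented):

import re

def _extract_current_step(current_status_string):
    # Same pattern as above: matches lines of the form "Step <number> :".
    step_increment = re.search(r"Step ([0-9]+) :", current_status_string)
    if step_increment:
        return int(step_increment.group(1))

assert _extract_current_step("Step 4 : RUN apt-get update") == 4
assert _extract_current_step("Removing intermediate container 3d5f4e") is None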
@ -167,22 +167,24 @@ class BuilderExecutor(object):
server_addr = manager_hostname.split(":", 1)[0] + ":" + str(DEFAULT_GRPC_SERVER_PORT) server_addr = manager_hostname.split(":", 1)[0] + ":" + str(DEFAULT_GRPC_SERVER_PORT)
rendered_json = json.load( rendered_json = json.load(
io.StringIO(TEMPLATE.render( io.StringIO(
token=token, TEMPLATE.render(
build_uuid=build_uuid, token=token,
quay_username=quay_username, build_uuid=build_uuid,
quay_password=quay_password, quay_username=quay_username,
manager_hostname=server_addr, quay_password=quay_password,
worker_image=self.executor_config.get( manager_hostname=server_addr,
"WORKER_IMAGE", "quay.io/coreos/registry-build-worker" worker_image=self.executor_config.get(
), "WORKER_IMAGE", "quay.io/coreos/registry-build-worker"
worker_tag=self.executor_config["WORKER_TAG"], ),
volume_size=self.executor_config.get("VOLUME_SIZE", "42G"), worker_tag=self.executor_config["WORKER_TAG"],
max_lifetime_s=self.executor_config.get("MAX_LIFETIME_S", 10800), volume_size=self.executor_config.get("VOLUME_SIZE", "42G"),
ssh_authorized_keys=self.executor_config.get("SSH_AUTHORIZED_KEYS", []), max_lifetime_s=self.executor_config.get("MAX_LIFETIME_S", 10800),
container_runtime=self.executor_config.get("CONTAINER_RUNTIME", "docker"), ssh_authorized_keys=self.executor_config.get("SSH_AUTHORIZED_KEYS", []),
ca_cert=self.executor_config.get("CA_CERT", self._ca_cert()), container_runtime=self.executor_config.get("CONTAINER_RUNTIME", "docker"),
)) ca_cert=self.executor_config.get("CA_CERT", self._ca_cert()),
)
)
) )
return json.dumps(rendered_json) return json.dumps(rendered_json)
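generate_cloud_config above renders the Jinja template, immediately parses the result with json.load to confirm it is valid JSON, and then re-serializes it with json.dumps. A minimal sketch of that validate-by-parsing round trip; the template and variables here are invented and far smaller than the real cloud-config template:

import io
import json
from jinja2 import Template

# Illustrative stand-in for the builder cloud-config template.
TEMPLATE = Template('{"build_uuid": "{{ build_uuid }}", "worker_tag": "{{ worker_tag }}"}')

def render_config(build_uuid, worker_tag):
    # Rendering yields a string; parsing it right away catches template mistakes
    # that would produce invalid JSON, and dumps returns a normalized form.
    rendered_json = json.load(io.StringIO(TEMPLATE.render(build_uuid=build_uuid, worker_tag=worker_tag)))
    return json.dumps(rendered_json)

print(render_config("1234", "latest"))  # {"build_uuid": "1234", "worker_tag": "latest"}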
@ -193,10 +195,9 @@ class EC2Executor(BuilderExecutor):
Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud
providers. providers.
""" """
COREOS_STACK_ARCHITECTURE = "x86_64" COREOS_STACK_ARCHITECTURE = "x86_64"
COREOS_STACK_URL = ( COREOS_STACK_URL = "https://builds.coreos.fedoraproject.org/streams/%s.json"
"https://builds.coreos.fedoraproject.org/streams/%s.json"
)
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(EC2Executor, self).__init__(*args, **kwargs) super(EC2Executor, self).__init__(*args, **kwargs)
@ -215,9 +216,9 @@ class EC2Executor(BuilderExecutor):
@property @property
def running_builders_count(self): def running_builders_count(self):
ec2_conn = self._get_conn() ec2_conn = self._get_conn()
resp = ec2_conn.describe_instances(Filters=[ resp = ec2_conn.describe_instances(
{"Name": "tag:Name", "Values": ["Quay Ephemeral Builder"]} Filters=[{"Name": "tag:Name", "Values": ["Quay Ephemeral Builder"]}]
]) )
count = 0 count = 0
for reservation in resp["Reservations"]: for reservation in resp["Reservations"]:
@ -234,8 +235,10 @@ class EC2Executor(BuilderExecutor):
Retrieve the CoreOS AMI id from the canonical listing. Retrieve the CoreOS AMI id from the canonical listing.
""" """
stack_list_json = requests.get(EC2Executor.COREOS_STACK_URL % coreos_channel).json() stack_list_json = requests.get(EC2Executor.COREOS_STACK_URL % coreos_channel).json()
stack_amis = stack_list_json['architectures'][EC2Executor.COREOS_STACK_ARCHITECTURE]['images']['aws']['regions'] stack_amis = stack_list_json["architectures"][EC2Executor.COREOS_STACK_ARCHITECTURE][
return stack_amis[ec2_region]['image'] "images"
]["aws"]["regions"]
return stack_amis[ec2_region]["image"]
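get_coreos_ami above walks the Fedora CoreOS stream metadata along architectures -> x86_64 -> images -> aws -> regions -> <region> -> image. A minimal sketch of that lookup, assuming the stream document keeps the layout shown in the diff; the channel and region values are only examples, and the call does require network access:

import requests

COREOS_STACK_URL = "https://builds.coreos.fedoraproject.org/streams/%s.json"

def lookup_coreos_ami(channel, ec2_region, architecture="x86_64"):
    # Fetch the stream document for the channel (e.g. "stable") and walk the
    # same path as get_coreos_ami to find the per-region AWS image id.
    stream = requests.get(COREOS_STACK_URL % channel).json()
    regions = stream["architectures"][architecture]["images"]["aws"]["regions"]
    return regions[ec2_region]["image"]

# e.g. lookup_coreos_ami("stable", "us-east-1") -> "ami-..."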
@observe(build_start_duration, "ec2") @observe(build_start_duration, "ec2")
def start_builder(self, token, build_uuid): def start_builder(self, token, build_uuid):
@ -246,9 +249,7 @@ class EC2Executor(BuilderExecutor):
if coreos_ami is None: if coreos_ami is None:
coreos_ami = self.get_coreos_ami(region, channel) coreos_ami = self.get_coreos_ami(region, channel)
user_data = self.generate_cloud_config( user_data = self.generate_cloud_config(token, build_uuid, self.manager_hostname)
token, build_uuid, self.manager_hostname
)
logger.debug("Generated cloud config for build %s: %s", build_uuid, user_data) logger.debug("Generated cloud config for build %s: %s", build_uuid, user_data)
ec2_conn = self._get_conn() ec2_conn = self._get_conn()
@ -259,8 +260,8 @@ class EC2Executor(BuilderExecutor):
"Ebs": { "Ebs": {
"VolumeSize": int(self.executor_config.get("BLOCK_DEVICE_SIZE", 48)), "VolumeSize": int(self.executor_config.get("BLOCK_DEVICE_SIZE", 48)),
"VolumeType": "gp2", "VolumeType": "gp2",
"DeleteOnTermination":True, "DeleteOnTermination": True,
} },
} }
] ]
@ -282,7 +283,7 @@ class EC2Executor(BuilderExecutor):
{"Key": "Name", "Value": "Quay Ephemeral Builder"}, {"Key": "Name", "Value": "Quay Ephemeral Builder"},
{"Key": "RegistrationToken", "Value": token[:36]}, {"Key": "RegistrationToken", "Value": token[:36]},
{"Key": "BuildUUID", "Value": build_uuid}, {"Key": "BuildUUID", "Value": build_uuid},
] ],
} }
] ]
@ -297,7 +298,7 @@ class EC2Executor(BuilderExecutor):
NetworkInterfaces=interfaces, NetworkInterfaces=interfaces,
MinCount=1, MinCount=1,
MaxCount=1, MaxCount=1,
TagSpecifications=tag_specs TagSpecifications=tag_specs,
) )
except (ec2_conn.exceptions.ClientError, botocore.exceptions.ClientError) as ec2e: except (ec2_conn.exceptions.ClientError, botocore.exceptions.ClientError) as ec2e:
raise ExecutorException(ec2e) raise ExecutorException(ec2e)
@ -325,7 +326,9 @@ class EC2Executor(BuilderExecutor):
logger.exception("Exception when trying to terminate instance %s", builder_id) logger.exception("Exception when trying to terminate instance %s", builder_id)
raise raise
if builder_id not in [si["InstanceId"] for si in terminated_instances["TerminatingInstances"]]: if builder_id not in [
si["InstanceId"] for si in terminated_instances["TerminatingInstances"]
]:
raise ExecutorException("Unable to terminate instance: %s" % builder_id) raise ExecutorException("Unable to terminate instance: %s" % builder_id)
@ -528,7 +531,9 @@ class KubernetesExecutor(BuilderExecutor):
}, },
"spec": { "spec": {
"activeDeadlineSeconds": self.executor_config.get("MAXIMUM_JOB_TIME", 7200), "activeDeadlineSeconds": self.executor_config.get("MAXIMUM_JOB_TIME", 7200),
"ttlSecondsAfterFinished": self.executor_config.get("RETENTION_AFTER_FINISHED", 120), "ttlSecondsAfterFinished": self.executor_config.get(
"RETENTION_AFTER_FINISHED", 120
),
"template": { "template": {
"metadata": { "metadata": {
"labels": { "labels": {
@ -580,9 +585,7 @@ class KubernetesExecutor(BuilderExecutor):
@observe(build_start_duration, "k8s") @observe(build_start_duration, "k8s")
def start_builder(self, token, build_uuid): def start_builder(self, token, build_uuid):
# generate resource # generate resource
user_data = self.generate_cloud_config( user_data = self.generate_cloud_config(token, build_uuid, self.manager_hostname)
token, build_uuid, self.manager_hostname
)
resource = self._job_resource(build_uuid, user_data) resource = self._job_resource(build_uuid, user_data)
logger.debug("Using Kubernetes Distribution: %s", self._kubernetes_distribution()) logger.debug("Using Kubernetes Distribution: %s", self._kubernetes_distribution())
logger.debug("Generated kubernetes resource:\n%s", resource) logger.debug("Generated kubernetes resource:\n%s", resource)
@ -222,8 +222,11 @@ class MemoryOrchestrator(Orchestrator):
def get_prefixed_keys(self, prefix): def get_prefixed_keys(self, prefix):
return { return {
k: value for (k, value) in list(self.state.items()) k: value
if k.startswith(prefix) and not k.endswith(REDIS_EXPIRED_SUFFIX) and not k.endswith(REDIS_EXPIRING_SUFFIX) for (k, value) in list(self.state.items())
if k.startswith(prefix)
and not k.endswith(REDIS_EXPIRED_SUFFIX)
and not k.endswith(REDIS_EXPIRING_SUFFIX)
} }
def get_key(self, key): def get_key(self, key):
@ -261,7 +264,7 @@ class MemoryOrchestrator(Orchestrator):
self.set_key(key, "", overwrite=False, expiration=expiration) self.set_key(key, "", overwrite=False, expiration=expiration)
except KeyError: except KeyError:
return False return False
return True return True
def shutdown(self): def shutdown(self):
self.state = None self.state = None
@ -327,7 +330,9 @@ class RedisOrchestrator(Orchestrator):
self._pubsub_expiring.psubscribe( self._pubsub_expiring.psubscribe(
**{REDIS_EXPIRED_KEYSPACE_PATTERN % (db, "*"): self._expiring_key_handler} **{REDIS_EXPIRED_KEYSPACE_PATTERN % (db, "*"): self._expiring_key_handler}
) )
self._pubsub_expiring_thread = self._pubsub_expiring.run_in_thread(daemon=True, sleep_time=5) self._pubsub_expiring_thread = self._pubsub_expiring.run_in_thread(
daemon=True, sleep_time=5
)
def _expiring_key_handler(self, message): def _expiring_key_handler(self, message):
try: try:
@ -344,9 +349,7 @@ class RedisOrchestrator(Orchestrator):
# Mark key as expired. This key is used to track post job cleanup in the callback, # Mark key as expired. This key is used to track post job cleanup in the callback,
# to allow another manager to pickup the cleanup if this fails. # to allow another manager to pickup the cleanup if this fails.
self._client.set( self._client.set(slash_join(key, REDIS_EXPIRED_SUFFIX), expired_value)
slash_join(key, REDIS_EXPIRED_SUFFIX), expired_value
)
self._client.delete(key) self._client.delete(key)
except redis.ConnectionError: except redis.ConnectionError:
_sleep_orchestrator() _sleep_orchestrator()
@ -422,7 +425,9 @@ class RedisOrchestrator(Orchestrator):
# Yielding to the event loop is required, thus this cannot be written as a dict comprehension. # Yielding to the event loop is required, thus this cannot be written as a dict comprehension.
results = {} results = {}
for key in keys: for key in keys:
if key.decode("utf-8").endswith(REDIS_EXPIRING_SUFFIX) or key.decode("utf-8").endswith(REDIS_EXPIRED_SUFFIX): if key.decode("utf-8").endswith(REDIS_EXPIRING_SUFFIX) or key.decode("utf-8").endswith(
REDIS_EXPIRED_SUFFIX
):
continue continue
ttl = self._client.ttl(key) ttl = self._client.ttl(key)
if ttl == REDIS_NONEXPIRING_KEY: if ttl == REDIS_NONEXPIRING_KEY:
@ -477,14 +482,15 @@ class RedisOrchestrator(Orchestrator):
# The extra leeway is so the expire event handler has time to get the original value and publish the event. # The extra leeway is so the expire event handler has time to get the original value and publish the event.
self._client.set(key, value, xx=overwrite) self._client.set(key, value, xx=overwrite)
if expiration is not None: if expiration is not None:
self._client.expire(key, expiration+ONE_DAY) self._client.expire(key, expiration + ONE_DAY)
overwrite_expiring_key = self._client.exists( overwrite_expiring_key = self._client.exists(slash_join(key, REDIS_EXPIRING_SUFFIX))
slash_join(key, REDIS_EXPIRING_SUFFIX)
)
# The "expiring/*" are only used to publish the EXPIRE event. A separate key is needed # The "expiring/*" are only used to publish the EXPIRE event. A separate key is needed
# because the the EXPIRE event does not include the original key value. # because the the EXPIRE event does not include the original key value.
self._client.set( self._client.set(
slash_join(key, REDIS_EXPIRING_SUFFIX), "", xx=overwrite_expiring_key, ex=expiration slash_join(key, REDIS_EXPIRING_SUFFIX),
"",
xx=overwrite_expiring_key,
ex=expiration,
) )
# Remove any expired key that might have previously been created but not removed # Remove any expired key that might have previously been created but not removed
# if a new expiration is set. # if a new expiration is set.
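The comments in the hunk above explain the two-key scheme: Redis keyspace notifications for expired keys carry only the key name, so the value-bearing key is kept around for an extra day of leeway while an empty companion "<key>/expiring" key gets the real TTL and is the one whose expiry event the handler reacts to. A rough, self-contained sketch of that write path (the client setup and key names are illustrative, and it assumes the server has expired-event notifications enabled, e.g. notify-keyspace-events "Ex"):

import redis

ONE_DAY = 60 * 60 * 24
REDIS_EXPIRING_SUFFIX = "expiring"

def set_with_expiry_event(client, key, value, expiration):
    # Main key holds the value and outlives the nominal TTL by a day, so the
    # expiry handler can still read it when the companion key's event fires.
    client.set(key, value)
    client.expire(key, expiration + ONE_DAY)
    # Companion key is empty; its only purpose is to expire on time and
    # trigger the keyspace notification.
    client.set("%s/%s" % (key, REDIS_EXPIRING_SUFFIX), "", ex=expiration)

# set_with_expiry_event(redis.StrictRedis(), "building/1234", "payload", expiration=60)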
@ -63,11 +63,13 @@ class BuilderServer(object):
self._lifecycle_manager.initialize(self._lifecycle_manager_config) self._lifecycle_manager.initialize(self._lifecycle_manager_config)
logger.debug("Initializing the gRPC server") logger.debug("Initializing the gRPC server")
server = grpc.server(futures.ThreadPoolExecutor(max_workers=DEFAULT_GRPC_SERVER_WORKER_COUNT)) server = grpc.server(
futures.ThreadPoolExecutor(max_workers=DEFAULT_GRPC_SERVER_WORKER_COUNT)
)
buildman_pb2_grpc.add_BuildManagerServicer_to_server( buildman_pb2_grpc.add_BuildManagerServicer_to_server(
BuildManagerServicer(self._lifecycle_manager), server BuildManagerServicer(self._lifecycle_manager), server
) )
server.add_insecure_port("[::]:"+str(DEFAULT_GRPC_SERVER_PORT)) server.add_insecure_port("[::]:" + str(DEFAULT_GRPC_SERVER_PORT))
logger.debug("Starting the gRPC server...") logger.debug("Starting the gRPC server...")
server.start() server.start()
@ -130,7 +130,10 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
) )
self.manager.initialize( self.manager.initialize(
{"EXECUTOR": "test", "ORCHESTRATOR": {"MEM_CONFIG": None},} {
"EXECUTOR": "test",
"ORCHESTRATOR": {"MEM_CONFIG": None},
}
) )
# Ensure that that the realm and building callbacks have been registered # Ensure that that the realm and building callbacks have been registered
@ -212,9 +215,7 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID))
# Finish the job # Finish the job
await ( await (self.manager.job_completed(self.mock_job, BuildJobResult.COMPLETE, test_component))
self.manager.job_completed(self.mock_job, BuildJobResult.COMPLETE, test_component)
)
# Ensure that the executor kills the job. # Ensure that the executor kills the job.
self.assertEqual(self.test_executor.stop_builder.call_count, 1) self.assertEqual(self.test_executor.stop_builder.call_count, 1)
@ -517,7 +518,9 @@ class TestEphemeral(EphemeralBuilderTestCase):
def test_skip_invalid_executor(self): def test_skip_invalid_executor(self):
self.manager.initialize( self.manager.initialize(
{ {
"EXECUTORS": [{"EXECUTOR": "unknown", "MINIMUM_RETRY_THRESHOLD": 42},], "EXECUTORS": [
{"EXECUTOR": "unknown", "MINIMUM_RETRY_THRESHOLD": 42},
],
"ORCHESTRATOR": {"MEM_CONFIG": None}, "ORCHESTRATOR": {"MEM_CONFIG": None},
} }
) )
@ -529,7 +532,12 @@ class TestEphemeral(EphemeralBuilderTestCase):
EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor
self.manager.initialize( self.manager.initialize(
{ {
"EXECUTORS": [{"EXECUTOR": "test", "NAMESPACE_WHITELIST": ["something"],}], "EXECUTORS": [
{
"EXECUTOR": "test",
"NAMESPACE_WHITELIST": ["something"],
}
],
"ORCHESTRATOR": {"MEM_CONFIG": None}, "ORCHESTRATOR": {"MEM_CONFIG": None},
} }
) )
@ -549,7 +557,12 @@ class TestEphemeral(EphemeralBuilderTestCase):
EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor
self.manager.initialize( self.manager.initialize(
{ {
"EXECUTORS": [{"EXECUTOR": "test", "MINIMUM_RETRY_THRESHOLD": 2,}], "EXECUTORS": [
{
"EXECUTOR": "test",
"MINIMUM_RETRY_THRESHOLD": 2,
}
],
"ORCHESTRATOR": {"MEM_CONFIG": None}, "ORCHESTRATOR": {"MEM_CONFIG": None},
} }
) )
@ -578,7 +591,11 @@ class TestEphemeral(EphemeralBuilderTestCase):
"NAMESPACE_WHITELIST": ["something"], "NAMESPACE_WHITELIST": ["something"],
"MINIMUM_RETRY_THRESHOLD": 3, "MINIMUM_RETRY_THRESHOLD": 3,
}, },
{"NAME": "secondary", "EXECUTOR": "secondary", "MINIMUM_RETRY_THRESHOLD": 2,}, {
"NAME": "secondary",
"EXECUTOR": "secondary",
"MINIMUM_RETRY_THRESHOLD": 2,
},
], ],
"ALLOWED_WORKER_COUNT": 5, "ALLOWED_WORKER_COUNT": 5,
"ORCHESTRATOR": {"MEM_CONFIG": None}, "ORCHESTRATOR": {"MEM_CONFIG": None},
@ -661,7 +678,11 @@ class TestEphemeral(EphemeralBuilderTestCase):
EphemeralBuilderManager.EXECUTORS["bad"] = BadExecutor EphemeralBuilderManager.EXECUTORS["bad"] = BadExecutor
self.manager.initialize( self.manager.initialize(
{"EXECUTOR": "bad", "EXECUTOR_CONFIG": {}, "ORCHESTRATOR": {"MEM_CONFIG": None},} {
"EXECUTOR": "bad",
"EXECUTOR_CONFIG": {},
"ORCHESTRATOR": {"MEM_CONFIG": None},
}
) )
build_job = self._create_build_job(namespace="something", retries=3) build_job = self._create_build_job(namespace="something", retries=3)
@ -673,7 +694,11 @@ class TestEphemeral(EphemeralBuilderTestCase):
EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor
self.manager.initialize( self.manager.initialize(
{"EXECUTOR": "test", "EXECUTOR_CONFIG": {}, "ORCHESTRATOR": {"MEM_CONFIG": None},} {
"EXECUTOR": "test",
"EXECUTOR_CONFIG": {},
"ORCHESTRATOR": {"MEM_CONFIG": None},
}
) )
# Start the build job. # Start the build job.
@ -2,13 +2,14 @@ import pytest
import time import time
import uuid import uuid
from buildman.build_token import \ from buildman.build_token import (
build_token, \ build_token,
verify_build_token, \ verify_build_token,
ANONYMOUS_SUB, \ ANONYMOUS_SUB,
BUILD_JOB_REGISTRATION_TYPE, \ BUILD_JOB_REGISTRATION_TYPE,
BUILD_JOB_TOKEN_TYPE, \ BUILD_JOB_TOKEN_TYPE,
InvalidBuildTokenException InvalidBuildTokenException,
)
from test.fixtures import * from test.fixtures import *
@ -19,19 +20,24 @@ from app import app, instance_keys
"token_type, expected_exception", "token_type, expected_exception",
[ [
pytest.param(BUILD_JOB_REGISTRATION_TYPE, None, id="valid"), pytest.param(BUILD_JOB_REGISTRATION_TYPE, None, id="valid"),
pytest.param(BUILD_JOB_TOKEN_TYPE, "Build token type in JWT does not match expected type: %s" % BUILD_JOB_TOKEN_TYPE, id="Invalid token type"), pytest.param(
BUILD_JOB_TOKEN_TYPE,
"Build token type in JWT does not match expected type: %s" % BUILD_JOB_TOKEN_TYPE,
id="Invalid token type",
),
], ],
) )
def test_registration_build_token(initialized_db, token_type, expected_exception): def test_registration_build_token(initialized_db, token_type, expected_exception):
build_id = str(uuid.uuid4()) build_id = str(uuid.uuid4())
job_id = "building/" + build_id job_id = "building/" + build_id
token = build_token( token = build_token(
app.config["SERVER_HOSTNAME"], app.config["SERVER_HOSTNAME"],
BUILD_JOB_REGISTRATION_TYPE, BUILD_JOB_REGISTRATION_TYPE,
build_id, build_id,
job_id, job_id,
int(time.time()) + 360, int(time.time()) + 360,
instance_keys) instance_keys,
)
if expected_exception is not None: if expected_exception is not None:
with pytest.raises(InvalidBuildTokenException) as ibe: with pytest.raises(InvalidBuildTokenException) as ibe:
@ -6,7 +6,14 @@ from unittest.mock import patch, Mock
import fakeredis import fakeredis
from freezegun import freeze_time from freezegun import freeze_time
from buildman.orchestrator import MemoryOrchestrator, RedisOrchestrator, REDIS_EXPIRED_SUFFIX, REDIS_EXPIRING_SUFFIX, KeyEvent, KeyChange from buildman.orchestrator import (
MemoryOrchestrator,
RedisOrchestrator,
REDIS_EXPIRED_SUFFIX,
REDIS_EXPIRING_SUFFIX,
KeyEvent,
KeyChange,
)
from util import slash_join from util import slash_join
from test.fixtures import * from test.fixtures import *
@ -15,17 +22,17 @@ from test.fixtures import *
@pytest.fixture() @pytest.fixture()
def fake_redis(): def fake_redis():
def init_fake_strict_redis( def init_fake_strict_redis(
host="127.0.0.1", host="127.0.0.1",
port=6379, port=6379,
password=None, password=None,
db=0, db=0,
ssl_certfile=None, ssl_certfile=None,
ssl_keyfile=None, ssl_keyfile=None,
ssl_ca_certs=None, ssl_ca_certs=None,
ssl=False, ssl=False,
socket_connect_timeout=1, socket_connect_timeout=1,
socket_timeout=2, socket_timeout=2,
health_check_interval=2, health_check_interval=2,
): ):
fake_client = fakeredis.FakeStrictRedis( fake_client = fakeredis.FakeStrictRedis(
host=host, host=host,
@ -75,18 +82,18 @@ def test_get_prefixed_keys(orchestrator):
for x in range(keys_to_generate): for x in range(keys_to_generate):
orchestrator.set_key(slash_join(key_prefix, str(x)), "test_val") orchestrator.set_key(slash_join(key_prefix, str(x)), "test_val")
generated_keys.add(slash_join(key_prefix, str(x))) generated_keys.add(slash_join(key_prefix, str(x)))
assert len(orchestrator.get_prefixed_keys(key_prefix)) == keys_to_generate assert len(orchestrator.get_prefixed_keys(key_prefix)) == keys_to_generate
keys_to_remove = randrange(1, keys_to_generate) keys_to_remove = randrange(1, keys_to_generate)
for x in range(keys_to_remove): for x in range(keys_to_remove):
orchestrator.delete_key(slash_join(key_prefix, str(x))) orchestrator.delete_key(slash_join(key_prefix, str(x)))
generated_keys.remove(slash_join(key_prefix, str(x))) generated_keys.remove(slash_join(key_prefix, str(x)))
assert len(orchestrator.get_prefixed_keys(key_prefix)) == keys_to_generate - keys_to_remove assert len(orchestrator.get_prefixed_keys(key_prefix)) == keys_to_generate - keys_to_remove
for k in generated_keys: for k in generated_keys:
orchestrator.delete_key(k) orchestrator.delete_key(k)
assert len(orchestrator.get_prefixed_keys(key_prefix)) == 0 assert len(orchestrator.get_prefixed_keys(key_prefix)) == 0
@ -105,13 +112,13 @@ def test_set_key(orchestrator):
# Try overwriting some existing key without setting overwrite # Try overwriting some existing key without setting overwrite
with pytest.raises(KeyError): with pytest.raises(KeyError):
orchestrator.set_key(some_key, "test_val_3") orchestrator.set_key(some_key, "test_val_3")
# Try overwriting some existing key with overwrite set. # Try overwriting some existing key with overwrite set.
# Also expects a new expiration key to be created. # Also expects a new expiration key to be created.
orchestrator.set_key(some_key, "test_val_4", overwrite=True, expiration=360) orchestrator.set_key(some_key, "test_val_4", overwrite=True, expiration=360)
assert orchestrator.get_key(some_key) == "test_val_4" assert orchestrator.get_key(some_key) == "test_val_4"
assert orchestrator.get_key(slash_join(some_key, REDIS_EXPIRING_SUFFIX)) is not None assert orchestrator.get_key(slash_join(some_key, REDIS_EXPIRING_SUFFIX)) is not None
def test_on_key_change(orchestrator): def test_on_key_change(orchestrator):
key_prefix = "building/" key_prefix = "building/"
@ -129,7 +136,7 @@ def test_on_key_change(orchestrator):
"test_val", "test_val",
) )
) )
# SET # SET
orchestrator.set_key(slash_join(key_prefix, "key1"), "test_val", overwrite=True) orchestrator.set_key(slash_join(key_prefix, "key1"), "test_val", overwrite=True)
time.sleep(0.1) time.sleep(0.1)
@ -179,4 +186,3 @@ def test_delete_key(orchestrator):
orchestrator.delete_key(slash_join(key_prefix, "key1")) orchestrator.delete_key(slash_join(key_prefix, "key1"))
with pytest.raises(KeyError): with pytest.raises(KeyError):
orchestrator.get_key(slash_join(key_prefix, "key1")) orchestrator.get_key(slash_join(key_prefix, "key1"))
@ -22,14 +22,26 @@ NAMESPACES_SCHEMA = {
"type": "boolean", "type": "boolean",
"description": "True if the namespace is the user's personal namespace", "description": "True if the namespace is the user's personal namespace",
}, },
"score": {"type": "number", "description": "Score of the relevance of the namespace",}, "score": {
"type": "number",
"description": "Score of the relevance of the namespace",
},
"avatar_url": { "avatar_url": {
"type": ["string", "null"], "type": ["string", "null"],
"description": "URL of the avatar for this namespace", "description": "URL of the avatar for this namespace",
}, },
"url": {"type": "string", "description": "URL of the website to view the namespace",}, "url": {
"id": {"type": "string", "description": "Trigger-internal ID of the namespace",}, "type": "string",
"title": {"type": "string", "description": "Human-readable title of the namespace",}, "description": "URL of the website to view the namespace",
},
"id": {
"type": "string",
"description": "Trigger-internal ID of the namespace",
},
"title": {
"type": "string",
"description": "Human-readable title of the namespace",
},
}, },
"required": ["personal", "score", "avatar_url", "id", "title"], "required": ["personal", "score", "avatar_url", "id", "title"],
}, },
@ -64,7 +76,10 @@ BUILD_SOURCES_SCHEMA = {
"type": "boolean", "type": "boolean",
"description": "True if the current user has admin permissions on the repository", "description": "True if the current user has admin permissions on the repository",
}, },
"private": {"type": "boolean", "description": "True if the repository is private",}, "private": {
"type": "boolean",
"description": "True if the repository is private",
},
}, },
"required": [ "required": [
"name", "name",
@ -90,12 +105,24 @@ METADATA_SCHEMA = {
"type": "object", "type": "object",
"description": "The parsed information about the ref, if any", "description": "The parsed information about the ref, if any",
"properties": { "properties": {
"branch": {"type": "string", "description": "The branch name",}, "branch": {
"tag": {"type": "string", "description": "The tag name",}, "type": "string",
"remote": {"type": "string", "description": "The remote name",}, "description": "The branch name",
},
"tag": {
"type": "string",
"description": "The tag name",
},
"remote": {
"type": "string",
"description": "The remote name",
},
}, },
}, },
"git_url": {"type": "string", "description": "The GIT url to use for the checkout",}, "git_url": {
"type": "string",
"description": "The GIT url to use for the checkout",
},
"ref": { "ref": {
"type": "string", "type": "string",
"description": "git reference for a git commit", "description": "git reference for a git commit",
@ -113,14 +140,23 @@ METADATA_SCHEMA = {
"type": "string", "type": "string",
"description": "The short SHA for this git commit", "description": "The short SHA for this git commit",
}, },
"url": {"type": "string", "description": "URL to view a git commit",}, "url": {
"message": {"type": "string", "description": "git commit message",}, "type": "string",
"description": "URL to view a git commit",
},
"message": {
"type": "string",
"description": "git commit message",
},
"date": {"type": "string", "description": "timestamp for a git commit"}, "date": {"type": "string", "description": "timestamp for a git commit"},
"author": { "author": {
"type": "object", "type": "object",
"description": "metadata about the author of a git commit", "description": "metadata about the author of a git commit",
"properties": { "properties": {
"username": {"type": "string", "description": "username of the author",}, "username": {
"type": "string",
"description": "username of the author",
},
"url": { "url": {
"type": "string", "type": "string",
"description": "URL to view the profile of the author", "description": "URL to view the profile of the author",
@ -136,7 +172,10 @@ METADATA_SCHEMA = {
"type": "object", "type": "object",
"description": "metadata about the committer of a git commit", "description": "metadata about the committer of a git commit",
"properties": { "properties": {
"username": {"type": "string", "description": "username of the committer",}, "username": {
"type": "string",
"description": "username of the committer",
},
"url": { "url": {
"type": "string", "type": "string",
"description": "URL to view the profile of the committer", "description": "URL to view the profile of the committer",
@ -34,7 +34,11 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
"properties": { "properties": {
"repository": { "repository": {
"type": "object", "type": "object",
"properties": {"full_name": {"type": "string",},}, "properties": {
"full_name": {
"type": "string",
},
},
"required": ["full_name"], "required": ["full_name"],
}, # /Repository }, # /Repository
"push": { "push": {
@ -60,8 +64,12 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
"user": { "user": {
"type": "object", "type": "object",
"properties": { "properties": {
"display_name": {"type": "string",}, "display_name": {
"account_id": {"type": "string",}, "type": "string",
},
"account_id": {
"type": "string",
},
"links": { "links": {
"type": "object", "type": "object",
"properties": { "properties": {
@ -97,14 +105,22 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
"actor": { "actor": {
"type": "object", "type": "object",
"properties": { "properties": {
"account_id": {"type": "string",}, "account_id": {
"display_name": {"type": "string",}, "type": "string",
},
"display_name": {
"type": "string",
},
"links": { "links": {
"type": "object", "type": "object",
"properties": { "properties": {
"avatar": { "avatar": {
"type": "object", "type": "object",
"properties": {"href": {"type": "string",},}, "properties": {
"href": {
"type": "string",
},
},
"required": ["href"], "required": ["href"],
}, },
}, },
@ -118,10 +134,18 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
BITBUCKET_COMMIT_INFO_SCHEMA = { BITBUCKET_COMMIT_INFO_SCHEMA = {
"type": "object", "type": "object",
"properties": { "properties": {
"node": {"type": "string",}, "node": {
"message": {"type": "string",}, "type": "string",
"timestamp": {"type": "string",}, },
"raw_author": {"type": "string",}, "message": {
"type": "string",
},
"timestamp": {
"type": "string",
},
"raw_author": {
"type": "string",
},
}, },
"required": ["node", "message", "timestamp"], "required": ["node", "message", "timestamp"],
} }
@ -310,7 +334,10 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
# Add a deploy key to the repository. # Add a deploy key to the repository.
public_key, private_key = generate_ssh_keypair() public_key, private_key = generate_ssh_keypair()
config["credentials"] = [ config["credentials"] = [
{"name": "SSH Public Key", "value": public_key.decode("ascii"),}, {
"name": "SSH Public Key",
"value": public_key.decode("ascii"),
},
] ]
repository = self._get_repository_client() repository = self._get_repository_client()
@ -94,8 +94,14 @@ class CustomBuildTrigger(BuildTriggerHandler):
"type": "object", "type": "object",
"description": "metadata about a git commit", "description": "metadata about a git commit",
"properties": { "properties": {
"url": {"type": "string", "description": "URL to view a git commit",}, "url": {
"message": {"type": "string", "description": "git commit message",}, "type": "string",
"description": "URL to view a git commit",
},
"message": {
"type": "string",
"description": "git commit message",
},
"date": {"type": "string", "description": "timestamp for a git commit"}, "date": {"type": "string", "description": "timestamp for a git commit"},
"author": { "author": {
"type": "object", "type": "object",
@ -194,8 +200,14 @@ class CustomBuildTrigger(BuildTriggerHandler):
config = self.config config = self.config
public_key, private_key = generate_ssh_keypair() public_key, private_key = generate_ssh_keypair()
config["credentials"] = [ config["credentials"] = [
{"name": "SSH Public Key", "value": public_key.decode("ascii"),}, {
{"name": "Webhook Endpoint URL", "value": standard_webhook_url,}, "name": "SSH Public Key",
"value": public_key.decode("ascii"),
},
{
"name": "Webhook Endpoint URL",
"value": standard_webhook_url,
},
] ]
self.config = config self.config = config
return config, {"private_key": private_key.decode("ascii")} return config, {"private_key": private_key.decode("ascii")}
@ -40,14 +40,24 @@ logger = logging.getLogger(__name__)
GITHUB_WEBHOOK_PAYLOAD_SCHEMA = { GITHUB_WEBHOOK_PAYLOAD_SCHEMA = {
"type": "object", "type": "object",
"properties": { "properties": {
"ref": {"type": "string",}, "ref": {
"type": "string",
},
"head_commit": { "head_commit": {
"type": ["object", "null"], "type": ["object", "null"],
"properties": { "properties": {
"id": {"type": "string",}, "id": {
"url": {"type": "string",}, "type": "string",
"message": {"type": "string",}, },
"timestamp": {"type": "string",}, "url": {
"type": "string",
},
"message": {
"type": "string",
},
"timestamp": {
"type": "string",
},
"author": { "author": {
"type": "object", "type": "object",
"properties": { "properties": {
@ -69,7 +79,11 @@ GITHUB_WEBHOOK_PAYLOAD_SCHEMA = {
}, },
"repository": { "repository": {
"type": "object", "type": "object",
"properties": {"ssh_url": {"type": "string",},}, "properties": {
"ssh_url": {
"type": "string",
},
},
"required": ["ssh_url"], "required": ["ssh_url"],
}, },
}, },
@ -199,7 +213,10 @@ class GithubBuildTrigger(BuildTriggerHandler):
# Add a deploy key to the GitHub repository. # Add a deploy key to the GitHub repository.
public_key, private_key = generate_ssh_keypair() public_key, private_key = generate_ssh_keypair()
config["credentials"] = [ config["credentials"] = [
{"name": "SSH Public Key", "value": public_key.decode("ascii"),}, {
"name": "SSH Public Key",
"value": public_key.decode("ascii"),
},
] ]
try: try:
@ -33,11 +33,19 @@ logger = logging.getLogger(__name__)
GITLAB_WEBHOOK_PAYLOAD_SCHEMA = { GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
"type": "object", "type": "object",
"properties": { "properties": {
"ref": {"type": "string",}, "ref": {
"checkout_sha": {"type": ["string", "null"],}, "type": "string",
},
"checkout_sha": {
"type": ["string", "null"],
},
"repository": { "repository": {
"type": "object", "type": "object",
"properties": {"git_ssh_url": {"type": "string",},}, "properties": {
"git_ssh_url": {
"type": "string",
},
},
"required": ["git_ssh_url"], "required": ["git_ssh_url"],
}, },
"commits": { "commits": {
@ -45,13 +53,25 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
"items": { "items": {
"type": "object", "type": "object",
"properties": { "properties": {
"id": {"type": "string",}, "id": {
"url": {"type": ["string", "null"],}, "type": "string",
"message": {"type": "string",}, },
"timestamp": {"type": "string",}, "url": {
"type": ["string", "null"],
},
"message": {
"type": "string",
},
"timestamp": {
"type": "string",
},
"author": { "author": {
"type": "object", "type": "object",
"properties": {"email": {"type": "string",},}, "properties": {
"email": {
"type": "string",
},
},
"required": ["email"], "required": ["email"],
}, },
}, },
@ -227,7 +247,10 @@ class GitLabBuildTrigger(BuildTriggerHandler):
# Add a deploy key to the repository. # Add a deploy key to the repository.
public_key, private_key = generate_ssh_keypair() public_key, private_key = generate_ssh_keypair()
config["credentials"] = [ config["credentials"] = [
{"name": "SSH Public Key", "value": public_key.decode("ascii"),}, {
"name": "SSH Public Key",
"value": public_key.decode("ascii"),
},
] ]
key = gl_project.keys.create( key = gl_project.keys.create(
@ -60,7 +60,9 @@ def get_branch(branch_name):
return (False, None, None) return (False, None, None)
data = { data = {
"target": {"hash": "aaaaaaa",}, "target": {
"hash": "aaaaaaa",
},
} }
return (True, data, None) return (True, data, None)
@ -71,7 +73,9 @@ def get_tag(tag_name):
return (False, None, None) return (False, None, None)
data = { data = {
"target": {"hash": "aaaaaaa",}, "target": {
"hash": "aaaaaaa",
},
} }
return (True, data, None) return (True, data, None)
@ -24,13 +24,17 @@ def users_handler(url, request):
if url.query.find("knownuser") < 0: if url.query.find("knownuser") < 0:
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps([]), "content": json.dumps([]),
} }
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
[ [
{ {
@ -53,7 +57,9 @@ def user_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": 1, "id": 1,
@ -73,7 +79,9 @@ def project_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": 4, "id": 4,
@ -95,7 +103,9 @@ def project_tree_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
[ [
{ {
@ -117,13 +127,22 @@ def project_tags_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
[ [
{"name": "sometag", "commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",},}, {
"name": "sometag",
"commit": {
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
},
},
{ {
"name": "someothertag", "name": "someothertag",
"commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",}, "commit": {
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
},
}, },
] ]
), ),
@ -137,13 +156,22 @@ def project_branches_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
[ [
{"name": "master", "commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",},}, {
"name": "master",
"commit": {
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
},
},
{ {
"name": "otherbranch", "name": "otherbranch",
"commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",}, "commit": {
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
},
}, },
] ]
), ),
@ -157,7 +185,9 @@ def project_branch_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"name": "master", "name": "master",
@ -176,7 +206,9 @@ def project_branch_handler(_, request):
"short_id": "7b5c3cc", "short_id": "7b5c3cc",
"title": "add projects API", "title": "add projects API",
"message": "add projects API", "message": "add projects API",
"parent_ids": ["4ad91d3c1144c406e50c7b33bae684bd6837faf8",], "parent_ids": [
"4ad91d3c1144c406e50c7b33bae684bd6837faf8",
],
}, },
} }
), ),
@ -190,7 +222,9 @@ def namespace_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": 2, "id": 2,
@ -212,7 +246,9 @@ def user_namespace_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": 1, "id": 1,
@ -234,7 +270,9 @@ def namespaces_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
[ [
{ {
@ -265,7 +303,9 @@ def get_projects_handler(add_permissions_block):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
[ [
{ {
@ -306,7 +346,9 @@ def get_group_handler(null_avatar):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": 1, "id": 1,
@ -335,7 +377,9 @@ def dockerfile_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"file_name": "Dockerfile", "file_name": "Dockerfile",
@ -361,7 +405,9 @@ def sub_dockerfile_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"file_name": "Dockerfile", "file_name": "Dockerfile",
@ -385,7 +431,9 @@ def tag_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"name": "sometag", "name": "sometag",
@ -421,7 +469,9 @@ def commit_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f", "id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
@ -456,7 +506,9 @@ def create_deploykey_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": 1, "id": 1,
@ -476,7 +528,9 @@ def create_hook_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
{ {
"id": 1, "id": 1,
@ -505,7 +559,9 @@ def delete_hook_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps({}), "content": json.dumps({}),
} }
@ -517,7 +573,9 @@ def delete_deploykey_handker(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps({}), "content": json.dumps({}),
} }
@ -529,7 +587,9 @@ def user_projects_list_handler(_, request):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps( "content": json.dumps(
[ [
{ {
@ -77,7 +77,10 @@ def test_subdir_path_map(new_path, original_dictionary, output):
"config, metadata, expected_tags", "config, metadata, expected_tags",
[ [
pytest.param( pytest.param(
{}, {"commit": "hellothereiamacommit"}, ["helloth"], id="no ref and default options", {},
{"commit": "hellothereiamacommit"},
["helloth"],
id="no ref and default options",
), ),
pytest.param( pytest.param(
{}, {},
@ -102,7 +105,9 @@ def test_subdir_path_map(new_path, original_dictionary, output):
{ {
"commit": "hellothereiamacommit", "commit": "hellothereiamacommit",
"ref": "refs/heads/somebranch", "ref": "refs/heads/somebranch",
"commit_info": {"author": {"username": "someguy"},}, "commit_info": {
"author": {"username": "someguy"},
},
}, },
["author-someguy", "helloth"], ["author-someguy", "helloth"],
id="template test", id="template test",
@ -119,7 +124,9 @@ def test_subdir_path_map(new_path, original_dictionary, output):
"commit": "hellothereiamacommit", "commit": "hellothereiamacommit",
"ref": "refs/heads/somebranch", "ref": "refs/heads/somebranch",
"default_branch": "somebranch", "default_branch": "somebranch",
"commit_info": {"author": {"username": "someguy"},}, "commit_info": {
"author": {"username": "someguy"},
},
}, },
["author-someguy", "helloth", "latest"], ["author-someguy", "helloth", "latest"],
id="template test with default branch", id="template test with default branch",
@ -171,7 +178,9 @@ def test_subdir_path_map(new_path, original_dictionary, output):
"commit": "hellothereiamacommit", "commit": "hellothereiamacommit",
"ref": "refs/heads/somebranch", "ref": "refs/heads/somebranch",
"default_branch": "somebranch", "default_branch": "somebranch",
"commit_info": {"author": {"username": "someguy"},}, "commit_info": {
"author": {"username": "someguy"},
},
}, },
["author-someguy", "helloth", "latest", "somebranch"], ["author-someguy", "helloth", "latest", "somebranch"],
id="everything test", id="everything test",
@ -92,8 +92,12 @@ def test_custom_github():
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"date": "2015-09-11T14:26:16-04:00", "date": "2015-09-11T14:26:16-04:00",
"message": "Update Dockerfile", "message": "Update Dockerfile",
"committer": {"username": "jsmith",}, "committer": {
"author": {"username": "jsmith",}, "username": "jsmith",
},
"author": {
"username": "jsmith",
},
}, },
} }
@ -188,7 +192,10 @@ def test_bitbucket_commit():
"url": "https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62", "url": "https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
"date": "2012-07-24 00:26:36", "date": "2012-07-24 00:26:36",
"message": "making some changes\n", "message": "making some changes\n",
"author": {"avatar_url": "http://some/avatar/url", "username": "cooluser",}, "author": {
"avatar_url": "http://some/avatar/url",
"username": "cooluser",
},
}, },
} }
@ -230,8 +237,12 @@ def test_github_webhook_payload_slash_branch():
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"date": "2015-09-11T14:26:16-04:00", "date": "2015-09-11T14:26:16-04:00",
"message": "Update Dockerfile", "message": "Update Dockerfile",
"committer": {"username": "jsmith",}, "committer": {
"author": {"username": "jsmith",}, "username": "jsmith",
},
"author": {
"username": "jsmith",
},
}, },
} }
@ -248,8 +259,12 @@ def test_github_webhook_payload():
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"date": "2015-09-11T14:26:16-04:00", "date": "2015-09-11T14:26:16-04:00",
"message": "Update Dockerfile", "message": "Update Dockerfile",
"committer": {"username": "jsmith",}, "committer": {
"author": {"username": "jsmith",}, "username": "jsmith",
},
"author": {
"username": "jsmith",
},
}, },
} }
@ -124,5 +124,7 @@ if __name__ == "__main__":
limit_services(config, QUAY_SERVICES) limit_services(config, QUAY_SERVICES)
override_services(config, QUAY_OVERRIDE_SERVICES) override_services(config, QUAY_OVERRIDE_SERVICES)
generate_supervisord_config( generate_supervisord_config(
os.path.join(QUAYCONF_DIR, "supervisord.conf"), config, QUAY_LOGGING, os.path.join(QUAYCONF_DIR, "supervisord.conf"),
config,
QUAY_LOGGING,
) )
@ -8,7 +8,12 @@ from data.appr_model import tag as tag_model
def list_packages_query( def list_packages_query(
models_ref, namespace=None, media_type=None, search_query=None, username=None, limit=50, models_ref,
namespace=None,
media_type=None,
search_query=None,
username=None,
limit=50,
): ):
""" """
List and filter repository by search query. List and filter repository by search query.
@ -46,11 +46,11 @@ def get_app_release(repo, tag_name, media_type, models_ref):
def delete_app_release(repo, tag_name, media_type, models_ref): def delete_app_release(repo, tag_name, media_type, models_ref):
""" Terminate a Tag/media-type couple """Terminate a Tag/media-type couple
It find the corresponding tag/manifest and remove from the manifestlistmanifest the manifest It find the corresponding tag/manifest and remove from the manifestlistmanifest the manifest
1. it terminates the current tag (in all-cases) 1. it terminates the current tag (in all-cases)
2. if the new manifestlist is not empty, it creates a new tag for it 2. if the new manifestlist is not empty, it creates a new tag for it
""" """
ManifestListManifest = models_ref.ManifestListManifest ManifestListManifest = models_ref.ManifestListManifest
manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name
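The docstring above boils down to two steps: always terminate the current tag, and only create a replacement tag when the pruned manifest list is still non-empty. A schematic, self-contained illustration of that flow; the FakeTag/FakeManifest types are invented for the example and are not the real data-layer models:

from dataclasses import dataclass, field
from typing import List

@dataclass
class FakeManifest:
    media_type: str

@dataclass
class FakeTag:
    name: str
    manifests: List[FakeManifest] = field(default_factory=list)
    terminated: bool = False

def delete_app_release_outline(tags, tag_name, media_type):
    # 1. Always terminate the current tag for that name.
    tag = next(t for t in tags if t.name == tag_name and not t.terminated)
    remaining = [m for m in tag.manifests if m.media_type != media_type]
    tag.terminated = True
    # 2. Only re-create the tag if other media types remain in the manifest list.
    if remaining:
        tags.append(FakeTag(tag_name, remaining))

tags = [FakeTag("1.0", [FakeManifest("helm"), FakeManifest("docker")])]
delete_app_release_outline(tags, "1.0", "helm")
assert [m.media_type for m in tags[-1].manifests] == ["docker"]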
@ -348,7 +348,11 @@ class FakeSubscription(AttrDict):
class FakeStripe(object): class FakeStripe(object):
class Customer(AttrDict): class Customer(AttrDict):
FAKE_PLAN = AttrDict({"id": "bus-small",}) FAKE_PLAN = AttrDict(
{
"id": "bus-small",
}
)
FAKE_SUBSCRIPTION = AttrDict( FAKE_SUBSCRIPTION = AttrDict(
{ {
@ -373,7 +377,11 @@ class FakeStripe(object):
} }
) )
FAKE_CARD_LIST = AttrDict({"data": [FAKE_CARD],}) FAKE_CARD_LIST = AttrDict(
{
"data": [FAKE_CARD],
}
)
ACTIVE_CUSTOMERS = {} ACTIVE_CUSTOMERS = {}
@ -426,7 +434,11 @@ class FakeStripe(object):
class Invoice(AttrDict): class Invoice(AttrDict):
@staticmethod @staticmethod
def list(customer, count): def list(customer, count):
return AttrDict({"data": [],}) return AttrDict(
{
"data": [],
}
)
class Billing(object): class Billing(object):
data/cache/impl.py (vendored)
@ -195,7 +195,9 @@ class MemcachedModelCache(DataModelCache):
convert_to_timedelta(cache_key.expiration) if cache_key.expiration else None convert_to_timedelta(cache_key.expiration) if cache_key.expiration else None
) )
client.set( client.set(
cache_key.key, result, expire=int(expires.total_seconds()) if expires else None, cache_key.key,
result,
expire=int(expires.total_seconds()) if expires else None,
) )
logger.debug( logger.debug(
"Cached loaded result for key %s with expiration %s: %s", "Cached loaded result for key %s with expiration %s: %s",
@ -23,7 +23,13 @@ class MockClient(object):
pass pass
@pytest.mark.parametrize("cache_type", [(NoopDataModelCache), (InMemoryDataModelCache),]) @pytest.mark.parametrize(
"cache_type",
[
(NoopDataModelCache),
(InMemoryDataModelCache),
],
)
def test_caching(cache_type): def test_caching(cache_type):
key = CacheKey("foo", "60m") key = CacheKey("foo", "60m")
cache = cache_type() cache = cache_type()
@ -552,22 +552,22 @@ class EnumField(ForeignKeyField):
return _get_enum_field_values(self) return _get_enum_field_values(self)
def get_id(self, name): def get_id(self, name):
""" Returns the ForeignKeyId from the name field """Returns the ForeignKeyId from the name field
Example: Example:
>>> Repository.repo_kind.get_id("application") >>> Repository.repo_kind.get_id("application")
2 2
""" """
try: try:
return self.enum[name].value return self.enum[name].value
except KeyError: except KeyError:
raise self.rel_model.DoesNotExist raise self.rel_model.DoesNotExist
def get_name(self, value): def get_name(self, value):
""" Returns the name value from the ForeignKeyId """Returns the name value from the ForeignKeyId
Example: Example:
>>> Repository.repo_kind.get_name(2) >>> Repository.repo_kind.get_name(2)
"application" "application"
""" """
try: try:
return self.enum(value).name return self.enum(value).name
except ValueError: except ValueError:
@ -1941,7 +1941,10 @@ class RepoMirrorConfig(BaseModel):
# Mirror Configuration # Mirror Configuration
mirror_type = ClientEnumField(RepoMirrorType, default=RepoMirrorType.PULL) mirror_type = ClientEnumField(RepoMirrorType, default=RepoMirrorType.PULL)
internal_robot = QuayUserField(allows_robots=True, backref="mirrorpullrobot",) internal_robot = QuayUserField(
allows_robots=True,
backref="mirrorpullrobot",
)
external_reference = CharField() external_reference = CharField()
external_registry_username = EncryptedCharField(max_length=2048, null=True) external_registry_username = EncryptedCharField(max_length=2048, null=True)
external_registry_password = EncryptedCharField(max_length=2048, null=True) external_registry_password = EncryptedCharField(max_length=2048, null=True)
@ -1983,9 +1986,9 @@ class IndexerVersion(IntEnum):
class ManifestSecurityStatus(BaseModel): class ManifestSecurityStatus(BaseModel):
""" """
Represents the security scan status for a particular container image manifest. Represents the security scan status for a particular container image manifest.
Intended to replace the `security_indexed` and `security_indexed_engine` fields Intended to replace the `security_indexed` and `security_indexed_engine` fields
on the `Image` model. on the `Image` model.
""" """
@ -8,7 +8,10 @@ logger = logging.getLogger(__name__)
def _transition_model(*args, **kwargs): def _transition_model(*args, **kwargs):
return CombinedLogsModel(DocumentLogsModel(*args, **kwargs), TableLogsModel(*args, **kwargs),) return CombinedLogsModel(
DocumentLogsModel(*args, **kwargs),
TableLogsModel(*args, **kwargs),
)
_LOG_MODELS = { _LOG_MODELS = {
@ -67,8 +67,14 @@ def fake_elasticsearch(allow_wildcard=True):
docs[index_name].append(item) docs[index_name].append(item)
return { return {
"status_code": 204, "status_code": 204,
"headers": {"Content-Type": "application/json",}, "headers": {
"content": json.dumps({"result": "created",}), "Content-Type": "application/json",
},
"content": json.dumps(
{
"result": "created",
}
),
} }
@urlmatch(netloc=FAKE_ES_HOST, path=r"/([^/]+)$", method="DELETE") @urlmatch(netloc=FAKE_ES_HOST, path=r"/([^/]+)$", method="DELETE")
@ -86,7 +92,9 @@ def fake_elasticsearch(allow_wildcard=True):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": {"acknowledged": True}, "content": {"acknowledged": True},
} }
@ -107,7 +115,9 @@ def fake_elasticsearch(allow_wildcard=True):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps(found), "content": json.dumps(found),
} }
@ -189,7 +199,9 @@ def fake_elasticsearch(allow_wildcard=True):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps({"count": len(found)}), "content": json.dumps({"count": len(found)}),
} }
@ -200,7 +212,9 @@ def fake_elasticsearch(allow_wildcard=True):
if scroll_id in scrolls: if scroll_id in scrolls:
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps(scrolls[scroll_id]), "content": json.dumps(scrolls[scroll_id]),
} }
@ -346,8 +360,14 @@ def fake_elasticsearch(allow_wildcard=True):
return by_field_name return by_field_name
final_result = { final_result = {
"hits": {"hits": found, "total": len(found),}, "hits": {
"_shards": {"successful": 1, "total": 1,}, "hits": found,
"total": len(found),
},
"_shards": {
"successful": 1,
"total": 1,
},
"aggregations": _aggregate(request, found), "aggregations": _aggregate(request, found),
} }
@ -358,13 +378,21 @@ def fake_elasticsearch(allow_wildcard=True):
return { return {
"status_code": 200, "status_code": 200,
"headers": {"Content-Type": "application/json",}, "headers": {
"Content-Type": "application/json",
},
"content": json.dumps(final_result), "content": json.dumps(final_result),
} }
@urlmatch(netloc=FAKE_ES_HOST) @urlmatch(netloc=FAKE_ES_HOST)
def catchall_handler(url, request): def catchall_handler(url, request):
print("Unsupported URL: %s %s" % (request.method, url,)) print(
"Unsupported URL: %s %s"
% (
request.method,
url,
)
)
return {"status_code": 501} return {"status_code": 501}
handlers = [ handlers = [
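The fake Elasticsearch above is assembled from httmock handlers: each @urlmatch-decorated function intercepts matching requests and returns a canned response dict, and a catch-all handler reports anything unsupported. A minimal, self-contained example of that pattern (host, path, and payload are invented):

import json
import requests
from httmock import urlmatch, HTTMock

FAKE_HOST = "fakees.example.test"

@urlmatch(netloc=FAKE_HOST, path=r"/_count$", method="GET")
def count_handler(url, request):
    # Specific endpoint: return a canned JSON body.
    return {
        "status_code": 200,
        "headers": {"Content-Type": "application/json"},
        "content": json.dumps({"count": 1}),
    }

@urlmatch(netloc=FAKE_HOST)
def catchall_handler(url, request):
    # Anything not matched above is reported as unsupported, as in the module.
    return {"status_code": 501}

with HTTMock(count_handler, catchall_handler):
    assert requests.get("http://%s/_count" % FAKE_HOST).json() == {"count": 1}
    assert requests.post("http://%s/other" % FAKE_HOST).status_code == 501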
@ -274,7 +274,13 @@ AGGS_COUNT = [
] ]
COUNT_REQUEST = {"query": {"bool": {"filter": [{"term": {"repository_id": 1}}]}}} COUNT_REQUEST = {"query": {"bool": {"filter": [{"term": {"repository_id": 1}}]}}}
COUNT_RESPONSE = _status(_shards({"count": 1,})) COUNT_RESPONSE = _status(
_shards(
{
"count": 1,
}
)
)
# assume there are 2 pages # assume there are 2 pages
_scroll_id = "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAACEmFkk1aGlTRzdSUWllejZmYTlEYTN3SVEAAAAAAAAhJRZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lRAAAAAAAAHtAWLWZpaFZXVzVSTy1OTXA5V3MwcHZrZwAAAAAAAB7RFi1maWhWV1c1Uk8tTk1wOVdzMHB2a2cAAAAAAAAhJxZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lR" _scroll_id = "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAACEmFkk1aGlTRzdSUWllejZmYTlEYTN3SVEAAAAAAAAhJRZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lRAAAAAAAAHtAWLWZpaFZXVzVSTy1OTXA5V3MwcHZrZwAAAAAAAB7RFi1maWhWV1c1Uk8tTk1wOVdzMHB2a2cAAAAAAAAhJxZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lR"
@ -34,7 +34,11 @@ def combined_model():
def es_model(): def es_model():
return DocumentLogsModel( return DocumentLogsModel(
producer="elasticsearch", elasticsearch_config={"host": FAKE_ES_HOST, "port": 12345,} producer="elasticsearch",
elasticsearch_config={
"host": FAKE_ES_HOST,
"port": 12345,
},
) )
@ -74,7 +78,9 @@ def _lookup_logs(logs_model, start_time, end_time, **kwargs):
"devtable", "devtable",
"simple", "simple",
"devtable", "devtable",
{"performer_name": "devtable",}, {
"performer_name": "devtable",
},
True, True,
id="matching performer", id="matching performer",
), ),
@ -82,7 +88,9 @@ def _lookup_logs(logs_model, start_time, end_time, **kwargs):
"devtable", "devtable",
"simple", "simple",
"devtable", "devtable",
{"namespace_name": "devtable",}, {
"namespace_name": "devtable",
},
True, True,
id="matching namespace", id="matching namespace",
), ),
@ -90,7 +98,10 @@ def _lookup_logs(logs_model, start_time, end_time, **kwargs):
"devtable", "devtable",
"simple", "simple",
"devtable", "devtable",
{"namespace_name": "devtable", "repository_name": "simple",}, {
"namespace_name": "devtable",
"repository_name": "simple",
},
True, True,
id="matching repository", id="matching repository",
), ),
@ -98,7 +109,9 @@ def _lookup_logs(logs_model, start_time, end_time, **kwargs):
"devtable", "devtable",
"simple", "simple",
"devtable", "devtable",
{"performer_name": "public",}, {
"performer_name": "public",
},
False, False,
id="different performer", id="different performer",
), ),
@ -106,7 +119,9 @@ def _lookup_logs(logs_model, start_time, end_time, **kwargs):
"devtable", "devtable",
"simple", "simple",
"devtable", "devtable",
{"namespace_name": "public",}, {
"namespace_name": "public",
},
False, False,
id="different namespace", id="different namespace",
), ),
@ -114,7 +129,10 @@ def _lookup_logs(logs_model, start_time, end_time, **kwargs):
"devtable", "devtable",
"simple", "simple",
"devtable", "devtable",
{"namespace_name": "devtable", "repository_name": "complex",}, {
"namespace_name": "devtable",
"repository_name": "complex",
},
False, False,
id="different repository", id="different repository",
), ),
@ -564,8 +582,12 @@ def test_disabled_namespace(clear_db_logs):
], ],
), ),
pytest.param( pytest.param(
[AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)),], # 1 [
[AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)),], # 2 AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)),
], # 1
[
AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)),
], # 2
[ [
AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1 AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1
AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2 AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2

View File

@ -27,13 +27,19 @@ class Migration(object):
migration_object = { migration_object = {
"apiVersion": "dbaoperator.app-sre.redhat.com/v1alpha1", "apiVersion": "dbaoperator.app-sre.redhat.com/v1alpha1",
"kind": "DatabaseMigration", "kind": "DatabaseMigration",
"metadata": {"name": revision,}, "metadata": {
"name": revision,
},
"spec": { "spec": {
"previous": down_revision, "previous": down_revision,
"migrationContainerSpec": { "migrationContainerSpec": {
"name": revision, "name": revision,
"image": "quay.io/quay/quay", "image": "quay.io/quay/quay",
"command": ["/quay-registry/quay-entrypoint.sh", "migrate", revision,], "command": [
"/quay-registry/quay-entrypoint.sh",
"migrate",
revision,
],
}, },
"schemaHints": self._schema_hints, "schemaHints": self._schema_hints,
}, },
@ -49,7 +55,10 @@ class Migration(object):
self.hint_add_column(subop.table_name, subop.column) self.hint_add_column(subop.table_name, subop.column)
elif isinstance(subop, ops.CreateIndexOp): elif isinstance(subop, ops.CreateIndexOp):
self.hint_create_index( self.hint_create_index(
subop.index_name, subop.table_name, subop.columns, subop.unique, subop.index_name,
subop.table_name,
subop.columns,
subop.unique,
) )
elif isinstance(subop, ops.DropIndexOp): elif isinstance(subop, ops.DropIndexOp):
self.hint_drop_index(subop.index_name, subop.table_name) self.hint_drop_index(subop.index_name, subop.table_name)
@ -84,7 +93,10 @@ class Migration(object):
def hint_drop_table(self, table_name, **kwargs): def hint_drop_table(self, table_name, **kwargs):
self._schema_hints.append( self._schema_hints.append(
{"operation": "dropTable", "table": table_name,} {
"operation": "dropTable",
"table": table_name,
}
) )
def hint_add_column(self, table_name, column, *args, **kwargs): def hint_add_column(self, table_name, column, *args, **kwargs):
@ -109,7 +121,11 @@ class Migration(object):
def hint_drop_index(self, index_name, table_name, **kwargs): def hint_drop_index(self, index_name, table_name, **kwargs):
self._schema_hints.append( self._schema_hints.append(
{"operation": "dropIndex", "table": table_name, "indexName": index_name,} {
"operation": "dropIndex",
"table": table_name,
"indexName": index_name,
}
) )

View File

@ -104,7 +104,9 @@ def run_migrations_online():
engine = get_engine() engine = get_engine()
connection = engine.connect() connection = engine.connect()
context.configure( context.configure(
connection=connection, target_metadata=target_metadata, transactional_ddl=False, connection=connection,
target_metadata=target_metadata,
transactional_ddl=False,
) )
try: try:

View File

@ -18,7 +18,12 @@ from sqlalchemy.dialects import mysql
def upgrade(op, tables, tester): def upgrade(op, tables, tester):
for media_type in OCI_CONTENT_TYPES: for media_type in OCI_CONTENT_TYPES:
op.bulk_insert(tables.mediatype, [{"name": media_type},]) op.bulk_insert(
tables.mediatype,
[
{"name": media_type},
],
)
def downgrade(op, tables, tester): def downgrade(op, tables, tester):

View File

@ -124,7 +124,12 @@ def upgrade(op, tables, tester):
op.create_index("tag_tag_kind_id", "tag", ["tag_kind_id"], unique=False) op.create_index("tag_tag_kind_id", "tag", ["tag_kind_id"], unique=False)
# ### end Alembic commands ### # ### end Alembic commands ###
op.bulk_insert(tables.tagkind, [{"name": "tag"},]) op.bulk_insert(
tables.tagkind,
[
{"name": "tag"},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_table( tester.populate_table(

View File

@ -153,10 +153,10 @@ def upgrade(op, tables, tester):
def downgrade(op, tables, tester):
    """
    This will downgrade existing data but may not exactly match previous data structure. If the
    external_reference does not have three parts (registry, namespace, repository) then a failed
    value is inserted.
    """
    op.add_column(
        "repomirrorconfig", sa.Column("external_registry", sa.String(length=255), nullable=True)

View File

@ -42,7 +42,12 @@ def upgrade(op, tables, tester):
) )
# ### end Alembic commands ### # ### end Alembic commands ###
op.bulk_insert(tables.mediatype, [{"name": "text/markdown"},]) op.bulk_insert(
tables.mediatype,
[
{"name": "text/markdown"},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_column("messages", "media_type_id", tester.TestDataType.Foreign("mediatype")) tester.populate_column("messages", "media_type_id", tester.TestDataType.Foreign("mediatype"))

View File

@ -12,7 +12,12 @@ down_revision = "94836b099894"
def upgrade(op, tables, tester): def upgrade(op, tables, tester):
op.bulk_insert(tables.notificationkind, [{"name": "build_cancelled"},]) op.bulk_insert(
tables.notificationkind,
[
{"name": "build_cancelled"},
],
)
def downgrade(op, tables, tester): def downgrade(op, tables, tester):

View File

@ -25,10 +25,18 @@ def upgrade(op, tables, tester):
op.create_index("disablereason_name", "disablereason", ["name"], unique=True) op.create_index("disablereason_name", "disablereason", ["name"], unique=True)
op.bulk_insert( op.bulk_insert(
tables.disablereason, [{"id": 1, "name": "user_toggled"},], tables.disablereason,
[
{"id": 1, "name": "user_toggled"},
],
) )
op.bulk_insert(tables.logentrykind, [{"name": "toggle_repo_trigger"},]) op.bulk_insert(
tables.logentrykind,
[
{"name": "toggle_repo_trigger"},
],
)
op.add_column( op.add_column(
"repositorybuildtrigger", sa.Column("disabled_reason_id", sa.Integer(), nullable=True) "repositorybuildtrigger", sa.Column("disabled_reason_id", sa.Integer(), nullable=True)

View File

@ -38,7 +38,12 @@ def upgrade(op, tables, tester):
op.create_index("userprompt_user_id_kind_id", "userprompt", ["user_id", "kind_id"], unique=True) op.create_index("userprompt_user_id_kind_id", "userprompt", ["user_id", "kind_id"], unique=True)
### end Alembic commands ### ### end Alembic commands ###
op.bulk_insert(tables.userpromptkind, [{"name": "confirm_username"},]) op.bulk_insert(
tables.userpromptkind,
[
{"name": "confirm_username"},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_table( tester.populate_table(

View File

@ -49,7 +49,10 @@ def upgrade(op, tables, tester):
op.bulk_insert( op.bulk_insert(
tables.logentrykind, tables.logentrykind,
[{"name": "create_app_specific_token"}, {"name": "revoke_app_specific_token"},], [
{"name": "create_app_specific_token"},
{"name": "revoke_app_specific_token"},
],
) )
# ### population of test data ### # # ### population of test data ### #

View File

@ -451,11 +451,17 @@ def upgrade(op, tables, tester):
op.create_index("manifestlayerscan_layer_id", "manifestlayerscan", ["layer_id"], unique=True) op.create_index("manifestlayerscan_layer_id", "manifestlayerscan", ["layer_id"], unique=True)
blobplacementlocation_table = table( blobplacementlocation_table = table(
"blobplacementlocation", column("id", sa.Integer()), column("name", sa.String()), "blobplacementlocation",
column("id", sa.Integer()),
column("name", sa.String()),
) )
op.bulk_insert( op.bulk_insert(
blobplacementlocation_table, [{"name": "local_eu"}, {"name": "local_us"},], blobplacementlocation_table,
[
{"name": "local_eu"},
{"name": "local_us"},
],
) )
op.bulk_insert( op.bulk_insert(
@ -473,11 +479,19 @@ def upgrade(op, tables, tester):
], ],
) )
tagkind_table = table("tagkind", column("id", sa.Integer()), column("name", sa.String()),) tagkind_table = table(
"tagkind",
column("id", sa.Integer()),
column("name", sa.String()),
)
op.bulk_insert( op.bulk_insert(
tagkind_table, tagkind_table,
[{"id": 1, "name": "tag"}, {"id": 2, "name": "release"}, {"id": 3, "name": "channel"},], [
{"id": 1, "name": "tag"},
{"id": 2, "name": "release"},
{"id": 3, "name": "channel"},
],
) )

View File

@ -235,7 +235,12 @@ def upgrade(op, tables, tester):
# ### end Alembic commands ### # ### end Alembic commands ###
for media_type in DOCKER_SCHEMA1_CONTENT_TYPES: for media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
op.bulk_insert(tables.mediatype, [{"name": media_type},]) op.bulk_insert(
tables.mediatype,
[
{"name": media_type},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_table( tester.populate_table(

View File

@ -12,7 +12,12 @@ down_revision = "faf752bd2e0a"
def upgrade(op, tables, tester): def upgrade(op, tables, tester):
op.bulk_insert(tables.externalnotificationevent, [{"name": "build_cancelled"},]) op.bulk_insert(
tables.externalnotificationevent,
[
{"name": "build_cancelled"},
],
)
def downgrade(op, tables, tester): def downgrade(op, tables, tester):

View File

@ -24,7 +24,11 @@ def upgrade(op, tables, tester):
op.create_index("repositorykind_name", "repositorykind", ["name"], unique=True) op.create_index("repositorykind_name", "repositorykind", ["name"], unique=True)
op.bulk_insert( op.bulk_insert(
tables.repositorykind, [{"id": 1, "name": "image"}, {"id": 2, "name": "application"},], tables.repositorykind,
[
{"id": 1, "name": "image"},
{"id": 2, "name": "application"},
],
) )
op.add_column( op.add_column(

View File

@ -15,7 +15,12 @@ down_revision = "67f0abd172ae"
def upgrade(op, tables, tester): def upgrade(op, tables, tester):
for media_type in DOCKER_SCHEMA2_CONTENT_TYPES: for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
op.bulk_insert(tables.mediatype, [{"name": media_type},]) op.bulk_insert(
tables.mediatype,
[
{"name": media_type},
],
)
def downgrade(op, tables, tester): def downgrade(op, tables, tester):

View File

@ -1259,11 +1259,22 @@ def upgrade(op, tables, tester):
"tagmanifestlabel_repository_id", "tagmanifestlabel", ["repository_id"], unique=False "tagmanifestlabel_repository_id", "tagmanifestlabel", ["repository_id"], unique=False
) )
op.bulk_insert(tables.accesstokenkind, [{"name": "build-worker"}, {"name": "pushpull-token"},]) op.bulk_insert(
tables.accesstokenkind,
[
{"name": "build-worker"},
{"name": "pushpull-token"},
],
)
op.bulk_insert( op.bulk_insert(
tables.buildtriggerservice, tables.buildtriggerservice,
[{"name": "github"}, {"name": "gitlab"}, {"name": "bitbucket"}, {"name": "custom-git"},], [
{"name": "github"},
{"name": "gitlab"},
{"name": "bitbucket"},
{"name": "custom-git"},
],
) )
op.bulk_insert( op.bulk_insert(
@ -1304,9 +1315,20 @@ def upgrade(op, tables, tester):
], ],
) )
op.bulk_insert(tables.imagestoragesignaturekind, [{"name": "gpg2"},]) op.bulk_insert(
tables.imagestoragesignaturekind,
[
{"name": "gpg2"},
],
)
op.bulk_insert(tables.imagestoragetransformation, [{"name": "squash"}, {"name": "aci"},]) op.bulk_insert(
tables.imagestoragetransformation,
[
{"name": "squash"},
{"name": "aci"},
],
)
op.bulk_insert( op.bulk_insert(
tables.labelsourcetype, tables.labelsourcetype,
@ -1392,7 +1414,13 @@ def upgrade(op, tables, tester):
], ],
) )
op.bulk_insert(tables.mediatype, [{"name": "text/plain"}, {"name": "application/json"},]) op.bulk_insert(
tables.mediatype,
[
{"name": "text/plain"},
{"name": "application/json"},
],
)
op.bulk_insert( op.bulk_insert(
tables.notificationkind, tables.notificationkind,
@ -1412,11 +1440,31 @@ def upgrade(op, tables, tester):
], ],
) )
op.bulk_insert(tables.role, [{"name": "admin"}, {"name": "write"}, {"name": "read"},]) op.bulk_insert(
tables.role,
[
{"name": "admin"},
{"name": "write"},
{"name": "read"},
],
)
op.bulk_insert(tables.teamrole, [{"name": "admin"}, {"name": "creator"}, {"name": "member"},]) op.bulk_insert(
tables.teamrole,
[
{"name": "admin"},
{"name": "creator"},
{"name": "member"},
],
)
op.bulk_insert(tables.visibility, [{"name": "public"}, {"name": "private"},]) op.bulk_insert(
tables.visibility,
[
{"name": "public"},
{"name": "private"},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_table( tester.populate_table(
@ -1504,7 +1552,11 @@ def upgrade(op, tables, tester):
) )
tester.populate_table( tester.populate_table(
"messages", [("content", tester.TestDataType.String), ("uuid", tester.TestDataType.UUID),] "messages",
[
("content", tester.TestDataType.String),
("uuid", tester.TestDataType.UUID),
],
) )
tester.populate_table( tester.populate_table(

View File

@ -238,16 +238,30 @@ def downgrade(op, tables, tester):
# ### end Alembic commands ### # ### end Alembic commands ###
blobplacementlocation_table = table( blobplacementlocation_table = table(
"blobplacementlocation", column("id", sa.Integer()), column("name", sa.String()), "blobplacementlocation",
column("id", sa.Integer()),
column("name", sa.String()),
) )
op.bulk_insert( op.bulk_insert(
blobplacementlocation_table, [{"name": "local_eu"}, {"name": "local_us"},], blobplacementlocation_table,
[
{"name": "local_eu"},
{"name": "local_us"},
],
) )
tagkind_table = table("tagkind", column("id", sa.Integer()), column("name", sa.String()),) tagkind_table = table(
"tagkind",
column("id", sa.Integer()),
column("name", sa.String()),
)
op.bulk_insert( op.bulk_insert(
tagkind_table, tagkind_table,
[{"id": 1, "name": "tag"}, {"id": 2, "name": "release"}, {"id": 3, "name": "channel"},], [
{"id": 1, "name": "tag"},
{"id": 2, "name": "release"},
{"id": 3, "name": "channel"},
],
) )

View File

@ -12,7 +12,12 @@ down_revision = "dc4af11a5f90"
def upgrade(op, tables, tester): def upgrade(op, tables, tester):
op.bulk_insert(tables.logentrykind, [{"name": "change_tag_expiration"},]) op.bulk_insert(
tables.logentrykind,
[
{"name": "change_tag_expiration"},
],
)
def downgrade(op, tables, tester): def downgrade(op, tables, tester):

View File

@ -18,7 +18,12 @@ def upgrade(op, tables, tester):
"repositorynotification", "repositorynotification",
sa.Column("number_of_failures", sa.Integer(), nullable=False, server_default="0"), sa.Column("number_of_failures", sa.Integer(), nullable=False, server_default="0"),
) )
op.bulk_insert(tables.logentrykind, [{"name": "reset_repo_notification"},]) op.bulk_insert(
tables.logentrykind,
[
{"name": "reset_repo_notification"},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_column( tester.populate_column(

View File

@ -22,7 +22,12 @@ def upgrade(op, tables, tester):
), ),
) )
### end Alembic commands ### ### end Alembic commands ###
op.bulk_insert(tables.logentrykind, [{"name": "change_repo_trust"},]) op.bulk_insert(
tables.logentrykind,
[
{"name": "change_repo_trust"},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_column("repository", "trust_enabled", tester.TestDataType.Boolean) tester.populate_column("repository", "trust_enabled", tester.TestDataType.Boolean)

View File

@ -22,7 +22,13 @@ def upgrade(op, tables, tester):
op.add_column("user", sa.Column("given_name", UTF8CharField(length=255), nullable=True)) op.add_column("user", sa.Column("given_name", UTF8CharField(length=255), nullable=True))
### end Alembic commands ### ### end Alembic commands ###
op.bulk_insert(tables.userpromptkind, [{"name": "enter_name"}, {"name": "enter_company"},]) op.bulk_insert(
tables.userpromptkind,
[
{"name": "enter_name"},
{"name": "enter_company"},
],
)
# ### population of test data ### # # ### population of test data ### #
tester.populate_column("user", "company", tester.TestDataType.UTF8Char) tester.populate_column("user", "company", tester.TestDataType.UTF8Char)

View File

@ -229,7 +229,7 @@ def update_last_accessed(token_or_user):
def estimated_row_count(model_cls):
    """Returns the estimated number of rows in the given model. If available, uses engine-specific
    estimation (which is very fast) and otherwise falls back to .count()
    """
    return db_count_estimator(model_cls, db)

View File

@ -59,7 +59,7 @@ class _ManifestAlreadyExists(Exception):
def find_manifests_for_sec_notification(manifest_digest):
    """
    Finds all manifests matching the given digest that live in a repository with a registered
    notification event for security scan results.
    """
@ -375,14 +375,14 @@ def _build_blob_map(
    raise_on_error=False,
    require_empty_layer=True,
):
    """Builds a map containing the digest of each blob referenced by the given manifest,
    to its associated Blob row in the database. This method also verifies that the blob
    is accessible under the given repository. Returns None on error (unless raise_on_error
    is specified). If require_empty_layer is set to True, the method will check if the manifest
    references the special shared empty layer blob and, if so, add it to the map. Otherwise,
    the empty layer blob is only returned if it was *explicitly* referenced in the manifest.
    This is necessary because Docker V2_2/OCI manifests can implicitly reference an empty blob
    layer for image layers that only change metadata.
    """
# Ensure all the blobs in the manifest exist. # Ensure all the blobs in the manifest exist.

View File

@ -40,8 +40,8 @@ GC_CANDIDATE_COUNT = 500 # repositories
class RetargetTagException(Exception):
    """Exception raised when re-targetting a tag fails and explicit exception
    raising is requested."""
def get_tag_by_id(tag_id):
@ -79,8 +79,8 @@ def get_tag(repository_id, tag_name):
def tag_names_for_manifest(manifest_id, limit=None):
    """
    Returns the names of the tags pointing to the given manifest.
    """
    query = Tag.select(Tag.id, Tag.name).where(Tag.manifest == manifest_id)
@ -295,7 +295,11 @@ def create_temporary_tag_if_necessary(manifest, expiration_sec):
def retarget_tag(
    tag_name,
    manifest_id,
    is_reversion=False,
    now_ms=None,
    raise_on_error=False,
):
    """
    Creates or updates a tag with the specified name to point to the given manifest under its
@ -580,8 +584,8 @@ def tags_containing_legacy_image(image):
def find_repository_with_garbage(limit_to_gc_policy_s):
    """Returns a repository that has garbage (defined as an expired Tag that is past
    the repo's namespace's expiration window) or None if none.
    """
    expiration_timestamp = get_epoch_timestamp_ms() - (limit_to_gc_policy_s * 1000)

View File

@ -66,7 +66,11 @@ def test_lookup_manifest_dead_tag(initialized_db):
def create_manifest_for_testing(repository, differentiation_field="1"): def create_manifest_for_testing(repository, differentiation_field="1"):
# Populate a manifest. # Populate a manifest.
layer_json = json.dumps( layer_json = json.dumps(
{"config": {}, "rootfs": {"type": "layers", "diff_ids": []}, "history": [],} {
"config": {},
"rootfs": {"type": "layers", "diff_ids": []},
"history": [],
}
) )
# Add a blob containing the config. # Add a blob containing the config.
@ -125,7 +129,13 @@ def _populate_blob(content):
return blob, digest return blob, digest
@pytest.mark.parametrize("schema_version", [1, 2,]) @pytest.mark.parametrize(
"schema_version",
[
1,
2,
],
)
def test_get_or_create_manifest(schema_version, initialized_db): def test_get_or_create_manifest(schema_version, initialized_db):
repository = create_repository("devtable", "newrepo", None) repository = create_repository("devtable", "newrepo", None)
@ -137,10 +147,15 @@ def test_get_or_create_manifest(schema_version, initialized_db):
layer_json = json.dumps( layer_json = json.dumps(
{ {
"id": "somelegacyid", "id": "somelegacyid",
"config": {"Labels": expected_labels,}, "config": {
"Labels": expected_labels,
},
"rootfs": {"type": "layers", "diff_ids": []}, "rootfs": {"type": "layers", "diff_ids": []},
"history": [ "history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, {
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
], ],
} }
) )
@ -249,10 +264,15 @@ def test_get_or_create_manifest_list(initialized_db):
layer_json = json.dumps( layer_json = json.dumps(
{ {
"id": "somelegacyid", "id": "somelegacyid",
"config": {"Labels": expected_labels,}, "config": {
"Labels": expected_labels,
},
"rootfs": {"type": "layers", "diff_ids": []}, "rootfs": {"type": "layers", "diff_ids": []},
"history": [ "history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, {
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
], ],
} }
) )
@ -327,10 +347,15 @@ def test_get_or_create_manifest_list_duplicate_child_manifest(initialized_db):
layer_json = json.dumps( layer_json = json.dumps(
{ {
"id": "somelegacyid", "id": "somelegacyid",
"config": {"Labels": expected_labels,}, "config": {
"Labels": expected_labels,
},
"rootfs": {"type": "layers", "diff_ids": []}, "rootfs": {"type": "layers", "diff_ids": []},
"history": [ "history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, {
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
], ],
} }
) )
@ -394,8 +419,14 @@ def test_get_or_create_manifest_with_remote_layers(initialized_db):
"config": {}, "config": {},
"rootfs": {"type": "layers", "diff_ids": []}, "rootfs": {"type": "layers", "diff_ids": []},
"history": [ "history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, {
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, "created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
{
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
], ],
} }
) )
@ -451,7 +482,11 @@ def test_get_or_create_manifest_with_remote_layers(initialized_db):
def create_manifest_for_testing(repository, differentiation_field="1", include_shared_blob=False): def create_manifest_for_testing(repository, differentiation_field="1", include_shared_blob=False):
# Populate a manifest. # Populate a manifest.
layer_json = json.dumps( layer_json = json.dumps(
{"config": {}, "rootfs": {"type": "layers", "diff_ids": []}, "history": [],} {
"config": {},
"rootfs": {"type": "layers", "diff_ids": []},
"history": [],
}
) )
# Add a blob containing the config. # Add a blob containing the config.
@ -481,8 +516,14 @@ def test_retriever(initialized_db):
"config": {}, "config": {},
"rootfs": {"type": "layers", "diff_ids": []}, "rootfs": {"type": "layers", "diff_ids": []},
"history": [ "history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, {
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, "created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
{
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
], ],
} }
) )
@ -571,7 +612,10 @@ def test_create_manifest_cannot_load_config_blob(initialized_db):
"config": {}, "config": {},
"rootfs": {"type": "layers", "diff_ids": []}, "rootfs": {"type": "layers", "diff_ids": []},
"history": [ "history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, {
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
], ],
} }
) )

View File

@ -135,7 +135,11 @@ def test_get_tag(initialized_db):
@pytest.mark.parametrize( @pytest.mark.parametrize(
"namespace_name, repo_name", [("devtable", "simple"), ("devtable", "complex"),] "namespace_name, repo_name",
[
("devtable", "simple"),
("devtable", "complex"),
],
) )
def test_list_repository_tag_history(namespace_name, repo_name, initialized_db): def test_list_repository_tag_history(namespace_name, repo_name, initialized_db):
repo = get_repository(namespace_name, repo_name) repo = get_repository(namespace_name, repo_name)

View File

@ -114,8 +114,8 @@ def get_public_repo_visibility():
class _RepositoryExistsException(Exception):
    """Exception raised if a repository exists in create_repository. Used to breakout of
    the transaction.
    """
    def __init__(self, internal_exception):
@ -658,7 +658,12 @@ def mark_repository_for_deletion(namespace_name, repository_name, repository_gc_
# Add a queueitem to delete the repository. # Add a queueitem to delete the repository.
marker.queue_id = repository_gc_queue.put( marker.queue_id = repository_gc_queue.put(
[namespace_name, str(repo.id)], [namespace_name, str(repo.id)],
json.dumps({"marker_id": marker.id, "original_name": repository_name,}), json.dumps(
{
"marker_id": marker.id,
"original_name": repository_name,
}
),
) )
marker.save() marker.save()

View File

@ -171,8 +171,8 @@ def missing_counts_query(date):
def delete_expired_entries(repo, limit=50):
    """Deletes expired entries from the RepositoryActionCount table for a specific repository.
    Returns the number of entries removed.
    """
    threshold_date = datetime.today() - RAC_RETENTION_PERIOD
    found = list(

View File

@ -14,7 +14,16 @@ from test.fixtures import *
@pytest.mark.parametrize( @pytest.mark.parametrize(
"expiration", [(None), ("-1m"), ("-1d"), ("-1w"), ("10m"), ("10d"), ("10w"),] "expiration",
[
(None),
("-1m"),
("-1d"),
("-1w"),
("10m"),
("10d"),
("10w"),
],
) )
def test_gc(expiration, initialized_db): def test_gc(expiration, initialized_db):
user = model.user.get_user("devtable") user = model.user.get_user("devtable")
@ -83,8 +92,24 @@ def app_config():
yield _config.app_config yield _config.app_config
@pytest.mark.parametrize("expiration", [(None), ("10m"), ("10d"), ("10w"),]) @pytest.mark.parametrize(
@pytest.mark.parametrize("default_expiration", [(None), ("10m"), ("10d"), ("10w"),]) "expiration",
[
(None),
("10m"),
("10d"),
("10w"),
],
)
@pytest.mark.parametrize(
"default_expiration",
[
(None),
("10m"),
("10d"),
("10w"),
],
)
def test_create_access_token(expiration, default_expiration, initialized_db, app_config): def test_create_access_token(expiration, default_expiration, initialized_db, app_config):
user = model.user.get_user("devtable") user = model.user.get_user("devtable")
expiration_date = datetime.now() + convert_to_timedelta(expiration) if expiration else None expiration_date = datetime.now() + convert_to_timedelta(expiration) if expiration else None

View File

@ -61,10 +61,25 @@ def _get_visible_repositories_for_user(
continue continue
@pytest.mark.parametrize("username", ["devtable", "devtable+dtrobot", "public", "reader",]) @pytest.mark.parametrize(
"username",
[
"devtable",
"devtable+dtrobot",
"public",
"reader",
],
)
@pytest.mark.parametrize("include_public", [True, False]) @pytest.mark.parametrize("include_public", [True, False])
@pytest.mark.parametrize("filter_to_namespace", [True, False]) @pytest.mark.parametrize("filter_to_namespace", [True, False])
@pytest.mark.parametrize("repo_kind", [None, "image", "application",]) @pytest.mark.parametrize(
"repo_kind",
[
None,
"image",
"application",
],
)
def test_filter_repositories( def test_filter_repositories(
username, include_public, filter_to_namespace, repo_kind, initialized_db username, include_public, filter_to_namespace, repo_kind, initialized_db
): ):

View File

@ -108,7 +108,15 @@ def move_tag(repository, tag, image_ids, expect_gc=True):
# NOTE: Building root to leaf. # NOTE: Building root to leaf.
parent_id = None parent_id = None
for image_id in image_ids: for image_id in image_ids:
config = {"id": image_id, "config": {"Labels": {"foo": "bar", "meh": "grah",}}} config = {
"id": image_id,
"config": {
"Labels": {
"foo": "bar",
"meh": "grah",
}
},
}
if parent_id: if parent_id:
config["parent"] = parent_id config["parent"] = parent_id
@ -239,9 +247,11 @@ def assert_gc_integrity(expect_storage_removed=True):
            # with the same ID, make sure it does not have the same Docker Image ID.
            # See: https://www.sqlite.org/autoinc.html
            found_image = Image.get(id=removed_image_and_storage.id)
            assert (
                found_image.docker_image_id != removed_image_and_storage.docker_image_id
            ), "Found unexpected removed image %s under repo %s" % (
                found_image.id,
                found_image.repository,
            )
        except Image.DoesNotExist:
            pass
@ -621,7 +631,14 @@ def test_image_with_cas(default_tag_policy, initialized_db):
builder = DockerSchema1ManifestBuilder( builder = DockerSchema1ManifestBuilder(
repository.namespace_user.username, repository.name, "first" repository.namespace_user.username, repository.name, "first"
) )
builder.insert_layer(digest, json.dumps({"id": "i1",})) builder.insert_layer(
digest,
json.dumps(
{
"id": "i1",
}
),
)
# Store the manifest. # Store the manifest.
manifest = builder.build(docker_v2_signing_key) manifest = builder.build(docker_v2_signing_key)
@ -681,7 +698,14 @@ def test_images_shared_cas(default_tag_policy, initialized_db):
builder = DockerSchema1ManifestBuilder( builder = DockerSchema1ManifestBuilder(
repository.namespace_user.username, repository.name, "first" repository.namespace_user.username, repository.name, "first"
) )
builder.insert_layer(digest, json.dumps({"id": "i1",})) builder.insert_layer(
digest,
json.dumps(
{
"id": "i1",
}
),
)
manifest = builder.build(docker_v2_signing_key) manifest = builder.build(docker_v2_signing_key)
registry_model.create_manifest_and_retarget_tag( registry_model.create_manifest_and_retarget_tag(
repo_ref, manifest, "first", storage, raise_on_error=True repo_ref, manifest, "first", storage, raise_on_error=True
@ -695,7 +719,14 @@ def test_images_shared_cas(default_tag_policy, initialized_db):
builder = DockerSchema1ManifestBuilder( builder = DockerSchema1ManifestBuilder(
repository.namespace_user.username, repository.name, "second" repository.namespace_user.username, repository.name, "second"
) )
builder.insert_layer(digest, json.dumps({"id": "i2",})) builder.insert_layer(
digest,
json.dumps(
{
"id": "i2",
}
),
)
manifest = builder.build(docker_v2_signing_key) manifest = builder.build(docker_v2_signing_key)
created, _ = registry_model.create_manifest_and_retarget_tag( created, _ = registry_model.create_manifest_and_retarget_tag(
repo_ref, manifest, "second", storage, raise_on_error=True repo_ref, manifest, "second", storage, raise_on_error=True

View File

@ -5,8 +5,25 @@ from data.model.modelutil import paginate
from test.fixtures import * from test.fixtures import *
@pytest.mark.parametrize("page_size", [10, 20, 50, 100, 200, 500, 1000,]) @pytest.mark.parametrize(
@pytest.mark.parametrize("descending", [False, True,]) "page_size",
[
10,
20,
50,
100,
200,
500,
1000,
],
)
@pytest.mark.parametrize(
"descending",
[
False,
True,
],
)
def test_paginate(page_size, descending, initialized_db): def test_paginate(page_size, descending, initialized_db):
# Add a bunch of rows into a test table (`Role`). # Add a bunch of rows into a test table (`Role`).
for i in range(0, 522): for i in range(0, 522):

View File

@ -6,7 +6,13 @@ from data.queue import WorkQueue
from test.fixtures import * from test.fixtures import *
@pytest.mark.parametrize("deleted", [(True), (False),]) @pytest.mark.parametrize(
"deleted",
[
(True),
(False),
],
)
def test_get_organizations(deleted, initialized_db): def test_get_organizations(deleted, initialized_db):
# Delete an org. # Delete an org.
deleted_org = get_organization("sellnsmall") deleted_org = get_organization("sellnsmall")

View File

@ -21,8 +21,20 @@ def test_duplicate_repository_different_kinds(initialized_db):
os.environ.get("TEST_DATABASE_URI", "").find("mysql") >= 0, os.environ.get("TEST_DATABASE_URI", "").find("mysql") >= 0,
reason="MySQL requires specialized indexing of newly created repos", reason="MySQL requires specialized indexing of newly created repos",
) )
@pytest.mark.parametrize("query", [(""), ("e"),]) @pytest.mark.parametrize(
@pytest.mark.parametrize("authed_username", [(None), ("devtable"),]) "query",
[
(""),
("e"),
],
)
@pytest.mark.parametrize(
"authed_username",
[
(None),
("devtable"),
],
)
def test_search_pagination(query, authed_username, initialized_db): def test_search_pagination(query, authed_username, initialized_db):
# Create some public repos. # Create some public repos.
repo1 = create_repository( repo1 = create_repository(

View File

@ -30,7 +30,14 @@ def test_create_user_with_expiration(initialized_db):
assert user.removed_tag_expiration_s == 60 * 60 assert user.removed_tag_expiration_s == 60 * 60
@pytest.mark.parametrize("token_lifetime, time_since", [("1m", "2m"), ("2m", "1m"), ("1h", "1m"),]) @pytest.mark.parametrize(
"token_lifetime, time_since",
[
("1m", "2m"),
("2m", "1m"),
("1h", "1m"),
],
)
def test_validation_code(token_lifetime, time_since, initialized_db): def test_validation_code(token_lifetime, time_since, initialized_db):
user = create_user_noverify("foobar", "foo@example.com", email_required=False) user = create_user_noverify("foobar", "foo@example.com", email_required=False)
created = datetime.now() - convert_to_timedelta(time_since) created = datetime.now() - convert_to_timedelta(time_since)
@ -46,8 +53,20 @@ def test_validation_code(token_lifetime, time_since, initialized_db):
assert expect_success == (result is not None) assert expect_success == (result is not None)
@pytest.mark.parametrize("disabled", [(True), (False),]) @pytest.mark.parametrize(
@pytest.mark.parametrize("deleted", [(True), (False),]) "disabled",
[
(True),
(False),
],
)
@pytest.mark.parametrize(
"deleted",
[
(True),
(False),
],
)
def test_get_active_users(disabled, deleted, initialized_db): def test_get_active_users(disabled, deleted, initialized_db):
# Delete a user. # Delete a user.
deleted_user = model.user.get_user("public") deleted_user = model.user.get_user("public")

View File

@ -1203,7 +1203,12 @@ def mark_namespace_for_deletion(user, queues, namespace_gc_queue, force=False):
# Add a queueitem to delete the namespace. # Add a queueitem to delete the namespace.
marker.queue_id = namespace_gc_queue.put( marker.queue_id = namespace_gc_queue.put(
[str(user.id)], [str(user.id)],
json.dumps({"marker_id": marker.id, "original_username": original_username,}), json.dumps(
{
"marker_id": marker.id,
"original_username": original_username,
}
),
) )
marker.save() marker.save()
return marker.id return marker.id
@ -1227,7 +1232,7 @@ def delete_user(user, queues):
    Should *not* be called by any user-facing API. Instead, mark_namespace_for_deletion should be
    used, and the queue should call this method.
    Returns True on success and False otherwise.
    """
    # Ensure the user is disabled before beginning the deletion process.

View File

@ -50,7 +50,11 @@ class WorkQueue(object):
""" """
def __init__( def __init__(
self, queue_name, transaction_factory, canonical_name_match_list=None, has_namespace=False, self,
queue_name,
transaction_factory,
canonical_name_match_list=None,
has_namespace=False,
): ):
self._queue_name = queue_name self._queue_name = queue_name
self._transaction_factory = transaction_factory self._transaction_factory = transaction_factory

View File

@ -22,9 +22,9 @@ _FORCE_MASTER_COUNTER_ATTRIBUTE = "_force_master_nesting"
@contextmanager
def disallow_replica_use(db):
    """When used, any queries run under this context manager will hit the master
    node and be disallowed from using the read replica(s). NOTE: This means if
    the master node is unavailable, the underlying queries will *fail*.
    """
    database = db.obj
    counter = getattr(database._state, _FORCE_MASTER_COUNTER_ATTRIBUTE, 0)

View File

@ -62,7 +62,8 @@ class BlobTooLargeException(BlobUploadException):
BlobUploadSettings = namedtuple(
    "BlobUploadSettings",
    ["maximum_blob_size", "committed_blob_expiration"],
)

View File

@ -275,8 +275,8 @@ class Tag(
    @property
    def manifest_layers_size(self):
        """Returns the compressed size of the layers of the manifest for the Tag or
        None if none applicable or loaded.
        """
        return self.manifest.layers_compressed_size
@ -388,9 +388,9 @@ class Manifest(
        return legacy_id_handler
    def lookup_legacy_image(self, layer_index, retriever):
        """Looks up and returns the legacy image for index-th layer in this manifest
        or None if none. The indexes here are from leaf to root, with index 0 being
        the leaf.
        """
        # Retrieve the schema1 manifest. If none exists, legacy images are not supported.
        parsed = self.get_parsed_manifest()

View File

@ -55,7 +55,11 @@ class RegistryDataInterface(object):
    @abstractmethod
    def lookup_manifest_by_digest(
        self,
        repository_ref,
        manifest_digest,
        allow_dead=False,
        require_available=False,
    ):
        """
        Looks up the manifest with the given digest under the given repository and returns it or
@ -410,28 +414,28 @@ class RegistryDataInterface(object):
    @abstractmethod
    def populate_legacy_images_for_testing(self, manifest, storage):
        """
        Populates legacy images for the given manifest, for testing only. This call
        will fail if called under non-testing code.
        """
    @abstractmethod
    def find_manifests_for_sec_notification(self, manifest_digest):
        """
        Finds all manifests with the given digest that live in repositories that have
        registered security notifications.
        """
    @abstractmethod
    def lookup_secscan_notification_severities(self, repository):
        """
        Returns the security notification severities for security events within
        a repository or None if none.
        """
    @abstractmethod
    def tag_names_for_manifest(self, manifest, limit):
        """
        Returns the names of the tags that point to the given manifest, up to the given
        limit.
        """

View File

@ -71,9 +71,9 @@ class OCIModel(RegistryDataInterface):
        return (None, None)
    def _resolve_legacy_image_id(self, legacy_image_id):
        """Decodes the given legacy image ID and returns the manifest to which it points,
        as well as the layer index for the image. If invalid, or the manifest was not found,
        returns (None, None).
        """
        manifest, layer_index = self._resolve_legacy_image_id_to_manifest_row(legacy_image_id)
        if manifest is None:
@ -142,7 +142,11 @@ class OCIModel(RegistryDataInterface):
return tag.manifest return tag.manifest
def lookup_manifest_by_digest( def lookup_manifest_by_digest(
self, repository_ref, manifest_digest, allow_dead=False, require_available=False, self,
repository_ref,
manifest_digest,
allow_dead=False,
require_available=False,
): ):
""" """
Looks up the manifest with the given digest under the given repository and returns it or Looks up the manifest with the given digest under the given repository and returns it or
@ -169,7 +173,11 @@ class OCIModel(RegistryDataInterface):
# Create the label itself. # Create the label itself.
label = oci.label.create_manifest_label( label = oci.label.create_manifest_label(
manifest._db_id, key, value, source_type_name, media_type_name, manifest._db_id,
key,
value,
source_type_name,
media_type_name,
) )
if label is None: if label is None:
return None return None
@ -355,7 +363,9 @@ class OCIModel(RegistryDataInterface):
# Re-target the tag to it. # Re-target the tag to it.
tag = oci.tag.retarget_tag( tag = oci.tag.retarget_tag(
tag_name, created_manifest.manifest, raise_on_error=raise_on_error, tag_name,
created_manifest.manifest,
raise_on_error=raise_on_error,
) )
if tag is None: if tag is None:
return (None, None) return (None, None)
@ -407,13 +417,16 @@ class OCIModel(RegistryDataInterface):
parsed = manifest.get_parsed_manifest() parsed = manifest.get_parsed_manifest()
except ManifestException: except ManifestException:
logger.exception( logger.exception(
"Could not parse manifest `%s` in retarget_tag", manifest._db_id, "Could not parse manifest `%s` in retarget_tag",
manifest._db_id,
) )
return None return None
if parsed.tag != tag_name: if parsed.tag != tag_name:
logger.debug( logger.debug(
"Rewriting manifest `%s` for tag named `%s`", manifest._db_id, tag_name, "Rewriting manifest `%s` for tag named `%s`",
manifest._db_id,
tag_name,
) )
repository_id = repository_ref._db_id repository_id = repository_ref._db_id
@ -621,7 +634,10 @@ class OCIModel(RegistryDataInterface):
specified). specified).
""" """
return self._list_manifest_layers( return self._list_manifest_layers(
repository_ref._db_id, parsed_manifest, storage, include_placements=include_placements, repository_ref._db_id,
parsed_manifest,
storage,
include_placements=include_placements,
) )
def get_manifest_local_blobs(self, manifest, storage, include_placements=False): def get_manifest_local_blobs(self, manifest, storage, include_placements=False):
@ -898,7 +914,7 @@ class OCIModel(RegistryDataInterface):
        yield Manifest.for_manifest(manifest, self._legacy_image_id_handler)
    def lookup_secscan_notification_severities(self, repository):
        """
        Returns the security notification severities for security events within
        a repository or None if none.
        """
@ -916,8 +932,8 @@ class OCIModel(RegistryDataInterface):
        return model.oci.tag.tag_names_for_manifest(manifest._db_id, limit)
    def populate_legacy_images_for_testing(self, manifest, storage):
        """Populates legacy images for the given manifest, for testing only. This call
        will fail if called under non-testing code.
        """
        manifest_row = database.Manifest.get(id=manifest._db_id)
        oci.manifest.populate_legacy_images_for_testing(

View File

@ -27,8 +27,22 @@ def registry_model(initialized_db):
return OCIModel() return OCIModel()
@pytest.mark.parametrize("chunk_count", [0, 1, 2, 10,]) @pytest.mark.parametrize(
@pytest.mark.parametrize("subchunk", [True, False,]) "chunk_count",
[
0,
1,
2,
10,
],
)
@pytest.mark.parametrize(
"subchunk",
[
True,
False,
],
)
def test_basic_upload_blob(chunk_count, subchunk, registry_model): def test_basic_upload_blob(chunk_count, subchunk, registry_model):
repository_ref = registry_model.lookup_repository("devtable", "complex") repository_ref = registry_model.lookup_repository("devtable", "complex")
storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"]) storage = DistributedStorage({"local_us": FakeStorage(None)}, ["local_us"])

View File

@ -51,7 +51,9 @@ from test.fixtures import *
@pytest.fixture( @pytest.fixture(
params=[OCIModel(),] params=[
OCIModel(),
]
) )
def registry_model(request, initialized_db): def registry_model(request, initialized_db):
return request.param return request.param
@ -85,7 +87,10 @@ def test_find_matching_tag(names, expected, registry_model):
@pytest.mark.parametrize( @pytest.mark.parametrize(
"repo_namespace, repo_name, expected", "repo_namespace, repo_name, expected",
[("devtable", "simple", {"latest", "prod"}), ("buynlarge", "orgrepo", {"latest", "prod"}),], [
("devtable", "simple", {"latest", "prod"}),
("buynlarge", "orgrepo", {"latest", "prod"}),
],
) )
def test_get_most_recent_tag(repo_namespace, repo_name, expected, registry_model): def test_get_most_recent_tag(repo_namespace, repo_name, expected, registry_model):
repo = model.repository.get_repository(repo_namespace, repo_name) repo = model.repository.get_repository(repo_namespace, repo_name)
@ -114,7 +119,11 @@ def test_lookup_repository(repo_namespace, repo_name, expected, registry_model):
@pytest.mark.parametrize( @pytest.mark.parametrize(
"repo_namespace, repo_name", [("devtable", "simple"), ("buynlarge", "orgrepo"),] "repo_namespace, repo_name",
[
("devtable", "simple"),
("buynlarge", "orgrepo"),
],
) )
def test_lookup_manifests(repo_namespace, repo_name, registry_model): def test_lookup_manifests(repo_namespace, repo_name, registry_model):
repo = model.repository.get_repository(repo_namespace, repo_name) repo = model.repository.get_repository(repo_namespace, repo_name)
@ -237,7 +246,13 @@ def test_repository_tags(repo_namespace, repo_name, registry_model):
("public", "publicrepo", 1, False), ("public", "publicrepo", 1, False),
], ],
) )
@pytest.mark.parametrize("with_size_fallback", [False, True,]) @pytest.mark.parametrize(
"with_size_fallback",
[
False,
True,
],
)
def test_repository_tag_history( def test_repository_tag_history(
namespace, name, expected_tag_count, has_expired, registry_model, with_size_fallback namespace, name, expected_tag_count, has_expired, registry_model, with_size_fallback
): ):
@ -285,7 +300,10 @@ def test_repository_tag_history_future_expires(registry_model):
@pytest.mark.parametrize( @pytest.mark.parametrize(
"repositories, expected_tag_count", "repositories, expected_tag_count",
[([], 0), ([("devtable", "simple"), ("devtable", "building")], 1),], [
([], 0),
([("devtable", "simple"), ("devtable", "building")], 1),
],
) )
def test_get_most_recent_tag_lifetime_start(repositories, expected_tag_count, registry_model): def test_get_most_recent_tag_lifetime_start(repositories, expected_tag_count, registry_model):
last_modified_map = registry_model.get_most_recent_tag_lifetime_start( last_modified_map = registry_model.get_most_recent_tag_lifetime_start(
@ -307,7 +325,13 @@ def test_get_most_recent_tag_lifetime_start(repositories, expected_tag_count, re
("buynlarge", "orgrepo"), ("buynlarge", "orgrepo"),
], ],
) )
@pytest.mark.parametrize("via_manifest", [False, True,]) @pytest.mark.parametrize(
"via_manifest",
[
False,
True,
],
)
def test_delete_tags(repo_namespace, repo_name, via_manifest, registry_model): def test_delete_tags(repo_namespace, repo_name, via_manifest, registry_model):
repository_ref = registry_model.lookup_repository(repo_namespace, repo_name) repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
tags = registry_model.list_all_active_repository_tags(repository_ref) tags = registry_model.list_all_active_repository_tags(repository_ref)
@ -340,7 +364,13 @@ def test_delete_tags(repo_namespace, repo_name, via_manifest, registry_model):
assert len(history) == len(previous_history) assert len(history) == len(previous_history)
@pytest.mark.parametrize("use_manifest", [True, False,]) @pytest.mark.parametrize(
"use_manifest",
[
True,
False,
],
)
def test_retarget_tag_history(use_manifest, registry_model): def test_retarget_tag_history(use_manifest, registry_model):
repository_ref = registry_model.lookup_repository("devtable", "history") repository_ref = registry_model.lookup_repository("devtable", "history")
history, _ = registry_model.list_repository_tag_history(repository_ref) history, _ = registry_model.list_repository_tag_history(repository_ref)
@ -422,7 +452,12 @@ def clear_rows(initialized_db):
@pytest.mark.parametrize( @pytest.mark.parametrize(
"namespace, expect_enabled", [("devtable", True), ("buynlarge", True), ("disabled", False),] "namespace, expect_enabled",
[
("devtable", True),
("buynlarge", True),
("disabled", False),
],
) )
def test_is_namespace_enabled(namespace, expect_enabled, registry_model): def test_is_namespace_enabled(namespace, expect_enabled, registry_model):
assert registry_model.is_namespace_enabled(namespace) == expect_enabled assert registry_model.is_namespace_enabled(namespace) == expect_enabled
@ -480,7 +515,10 @@ def test_manifest_remote_layers(oci_model):
"config": {}, "config": {},
"rootfs": {"type": "layers", "diff_ids": []}, "rootfs": {"type": "layers", "diff_ids": []},
"history": [ "history": [
{"created": "2018-04-03T18:37:09.284840891Z", "created_by": "do something",}, {
"created": "2018-04-03T18:37:09.284840891Z",
"created_by": "do something",
},
], ],
} }
) )
@ -526,7 +564,12 @@ def test_blob_uploads(registry_model):
# Update and ensure the changes are saved. # Update and ensure the changes are saved.
assert registry_model.update_blob_upload( assert registry_model.update_blob_upload(
blob_upload, 1, {"new": "metadata"}, 2, 3, blob_upload.sha_state, blob_upload,
1,
{"new": "metadata"},
2,
3,
blob_upload.sha_state,
) )
updated = registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id) updated = registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
@ -610,7 +653,8 @@ def test_get_cached_repo_blob(registry_model):
raise SomeException("Not connected!") raise SomeException("Not connected!")
with patch( with patch(
"data.registry_model.registry_oci_model.model.oci.blob.get_repository_blob_by_digest", fail, "data.registry_model.registry_oci_model.model.oci.blob.get_repository_blob_by_digest",
fail,
): ):
# Make sure we can load again, which should hit the cache. # Make sure we can load again, which should hit the cache.
cached = registry_model.get_cached_repo_blob(model_cache, "devtable", "simple", blob.digest) cached = registry_model.get_cached_repo_blob(model_cache, "devtable", "simple", blob.digest)
@ -675,7 +719,11 @@ def test_create_manifest_and_retarget_tag_with_labels(registry_model):
json_metadata = { json_metadata = {
"id": "someid", "id": "someid",
"config": {"Labels": {"quay.expires-after": "2w",},}, "config": {
"Labels": {
"quay.expires-after": "2w",
},
},
} }
builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag") builder = DockerSchema1ManifestBuilder("devtable", "simple", "anothertag")
@ -739,7 +787,14 @@ def test_known_issue_schema1(registry_model):
def test_unicode_emoji(registry_model): def test_unicode_emoji(registry_model):
builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest") builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest")
builder.add_layer( builder.add_layer(
"sha256:abcde", json.dumps({"id": "someid", "author": "😱",}, ensure_ascii=False) "sha256:abcde",
json.dumps(
{
"id": "someid",
"author": "😱",
},
ensure_ascii=False,
),
) )
manifest = builder.build(ensure_ascii=False) manifest = builder.build(ensure_ascii=False)
@@ -769,7 +824,13 @@ def test_unicode_emoji(registry_model):
assert found.get_parsed_manifest().digest == manifest.digest assert found.get_parsed_manifest().digest == manifest.digest
@pytest.mark.parametrize("test_cached", [False, True,]) @pytest.mark.parametrize(
"test_cached",
[
False,
True,
],
)
def test_lookup_active_repository_tags(test_cached, oci_model): def test_lookup_active_repository_tags(test_cached, oci_model):
repository_ref = oci_model.lookup_repository("devtable", "simple") repository_ref = oci_model.lookup_repository("devtable", "simple")
latest_tag = oci_model.get_repo_tag(repository_ref, "latest") latest_tag = oci_model.get_repo_tag(repository_ref, "latest")
@@ -819,7 +880,14 @@ def test_lookup_active_repository_tags(test_cached, oci_model):
def test_create_manifest_with_temp_tag(initialized_db, registry_model): def test_create_manifest_with_temp_tag(initialized_db, registry_model):
builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest") builder = DockerSchema1ManifestBuilder("devtable", "simple", "latest")
builder.add_layer( builder.add_layer(
"sha256:abcde", json.dumps({"id": "someid", "author": "some user",}, ensure_ascii=False) "sha256:abcde",
json.dumps(
{
"id": "someid",
"author": "some user",
},
ensure_ascii=False,
),
) )
manifest = builder.build(ensure_ascii=False) manifest = builder.build(ensure_ascii=False)
@@ -860,7 +928,11 @@ def test_find_manifests_for_sec_notification(initialized_db, registry_model):
"vulnerability_found", "vulnerability_found",
"webhook", "webhook",
{}, {},
{"vulnerability": {"priority": "Critical",},}, {
"vulnerability": {
"priority": "Critical",
},
},
) )
# Now ensure the manifests are found. # Now ensure the manifests are found.
@@ -879,7 +951,11 @@ def test_lookup_secscan_notification_severities(initialized_db, registry_model):
"vulnerability_found", "vulnerability_found",
"webhook", "webhook",
{}, {},
{"vulnerability": {"priority": "Critical",},}, {
"vulnerability": {
"priority": "Critical",
},
},
) )
model.notification.create_repo_notification( model.notification.create_repo_notification(
@@ -887,7 +963,11 @@ def test_lookup_secscan_notification_severities(initialized_db, registry_model):
"vulnerability_found", "vulnerability_found",
"webhook", "webhook",
{}, {},
{"vulnerability": {"priority": "Low",},}, {
"vulnerability": {
"priority": "Low",
},
},
) )
assert set(registry_model.lookup_secscan_notification_severities(repository_ref)) == { assert set(registry_model.lookup_secscan_notification_severities(repository_ref)) == {
@@ -4,7 +4,15 @@ from data.registry_model.shared import SyntheticIDHandler
@pytest.mark.parametrize("manifest_id", [1, 1000, 10000, 60000]) @pytest.mark.parametrize("manifest_id", [1, 1000, 10000, 60000])
@pytest.mark.parametrize("hash_salt", [None, "", "testing1234", "foobarbaz",]) @pytest.mark.parametrize(
"hash_salt",
[
None,
"",
"testing1234",
"foobarbaz",
],
)
def test_handler(manifest_id, hash_salt): def test_handler(manifest_id, hash_salt):
handler = SyntheticIDHandler(hash_salt) handler = SyntheticIDHandler(hash_salt)
for index in range(0, 10): for index in range(0, 10):
@@ -195,14 +195,14 @@ class PaginatedNotificationStatus(IntEnum):
class PaginatedNotificationResult( class PaginatedNotificationResult(
namedtuple("PaginatedNotificationResult", ["status", "data", "next_page_index"]) namedtuple("PaginatedNotificationResult", ["status", "data", "next_page_index"])
): ):
""" """
Named tuple that contains the result of a paginated notification lookup in the security scanner. Named tuple that contains the result of a paginated notification lookup in the security scanner.
""" """
class UpdatedVulnerability( class UpdatedVulnerability(
namedtuple("UpdatedVulnerability", ["manifest_digest", "vulnerability"]) namedtuple("UpdatedVulnerability", ["manifest_digest", "vulnerability"])
): ):
""" """
Named tuple that represents an updated vulnerability for a manifest. Named tuple that represents an updated vulnerability for a manifest.
""" """
@@ -73,6 +73,6 @@ class SecurityScannerInterface(object):
@abstractmethod @abstractmethod
def mark_notification_handled(self, notification_id): def mark_notification_handled(self, notification_id):
""" """
Marks that a security notification from the scanner has been handled. Marks that a security notification from the scanner has been handled.
""" """
@@ -199,8 +199,20 @@ class V4SecurityScanner(SecurityScannerInterface):
# TODO(alecmerdler): We want to index newer manifests first, while backfilling older manifests... # TODO(alecmerdler): We want to index newer manifests first, while backfilling older manifests...
iterator = itertools.chain( iterator = itertools.chain(
yield_random_entries(not_indexed_query, Manifest.id, batch_size, max_id, min_id,), yield_random_entries(
yield_random_entries(index_error_query, Manifest.id, batch_size, max_id, min_id,), not_indexed_query,
Manifest.id,
batch_size,
max_id,
min_id,
),
yield_random_entries(
index_error_query,
Manifest.id,
batch_size,
max_id,
min_id,
),
yield_random_entries( yield_random_entries(
lambda: needs_reindexing_query(indexer_state.get("state", "")), lambda: needs_reindexing_query(indexer_state.get("state", "")),
Manifest.id, Manifest.id,
@@ -358,7 +370,7 @@ class V4SecurityScanner(SecurityScannerInterface):
def features_for(report): def features_for(report):
""" """
Transforms a Clair v4 `VulnerabilityReport` dict into the standard shape of a Transforms a Clair v4 `VulnerabilityReport` dict into the standard shape of a
Quay Security scanner response. Quay Security scanner response.
""" """
@@ -529,14 +529,20 @@ def test_process_notification_page(initialized_db, set_secscan_config):
results = list( results = list(
secscan.process_notification_page( secscan.process_notification_page(
[ [
{"reason": "removed",}, {
"reason": "removed",
},
{ {
"reason": "added", "reason": "added",
"manifest": "sha256:abcd", "manifest": "sha256:abcd",
"vulnerability": { "vulnerability": {
"normalized_severity": "s", "normalized_severity": "s",
"description": "d", "description": "d",
"package": {"id": "42", "name": "p", "version": "v0.0.1",}, "package": {
"id": "42",
"name": "p",
"version": "v0.0.1",
},
"name": "n", "name": "n",
"fixed_in_version": "f", "fixed_in_version": "f",
"links": "l", "links": "l",
@@ -32,7 +32,13 @@ from data.encryption import FieldEncrypter, _VERSIONS, DecryptionFailureExceptio
"107383705745765174750346070528443780244192102846031525796571939503548634055845", "107383705745765174750346070528443780244192102846031525796571939503548634055845",
], ],
) )
@pytest.mark.parametrize("use_valid_key", [True, False,]) @pytest.mark.parametrize(
"use_valid_key",
[
True,
False,
],
)
def test_encryption(test_data, version, secret_key, use_valid_key): def test_encryption(test_data, version, secret_key, use_valid_key):
encrypter = FieldEncrypter(secret_key, version) encrypter = FieldEncrypter(secret_key, version)
encrypted = encrypter.encrypt_value(test_data, field_max_length=255) encrypted = encrypter.encrypt_value(test_data, field_max_length=255)
@@ -22,8 +22,13 @@ def test_readreplica(init_db_path, tmpdir_factory):
db_config = { db_config = {
"DB_URI": "sqlite:///{0}".format(primary_file), "DB_URI": "sqlite:///{0}".format(primary_file),
"DB_READ_REPLICAS": [{"DB_URI": "sqlite:///{0}".format(replica_file)},], "DB_READ_REPLICAS": [
"DB_CONNECTION_ARGS": {"threadlocals": True, "autorollback": True,}, {"DB_URI": "sqlite:///{0}".format(replica_file)},
],
"DB_CONNECTION_ARGS": {
"threadlocals": True,
"autorollback": True,
},
"DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(), "DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(),
"FOR_TESTING": True, "FOR_TESTING": True,
"DATABASE_SECRET_KEY": "anothercrazykey!", "DATABASE_SECRET_KEY": "anothercrazykey!",
@@ -98,7 +103,10 @@ def test_readreplica(init_db_path, tmpdir_factory):
configure( configure(
{ {
"DB_URI": "sqlite:///{0}".format(primary_file), "DB_URI": "sqlite:///{0}".format(primary_file),
"DB_CONNECTION_ARGS": {"threadlocals": True, "autorollback": True,}, "DB_CONNECTION_ARGS": {
"threadlocals": True,
"autorollback": True,
},
"DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(), "DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(),
"DATABASE_SECRET_KEY": "anothercrazykey!", "DATABASE_SECRET_KEY": "anothercrazykey!",
} }
@@ -6,7 +6,13 @@ from test.fixtures import *
@pytest.mark.parametrize( @pytest.mark.parametrize(
"input", [("hello world"), ("hello ' world"), ('hello " world'), ("hello ` world"),] "input",
[
("hello world"),
("hello ' world"),
('hello " world'),
("hello ` world"),
],
) )
def test_mysql_text_escaping(input): def test_mysql_text_escaping(input):
query, values = Repository.select().where(match_mysql(Repository.description, input)).sql() query, values = Repository.select().where(match_mysql(Repository.description, input)).sql()
@@ -55,20 +55,34 @@ def blacklisted_emails(request):
"starting_membership,group_membership,expected_membership", "starting_membership,group_membership,expected_membership",
[ [
# Empty team + single member in group => Single member in team. # Empty team + single member in group => Single member in team.
([], [UserInformation("someuser", "someuser", "someuser@devtable.com"),], ["someuser"]), (
[],
[
UserInformation("someuser", "someuser", "someuser@devtable.com"),
],
["someuser"],
),
# Team with a Quay user + empty group => empty team. # Team with a Quay user + empty group => empty team.
([("someuser", None)], [], []), ([("someuser", None)], [], []),
# Team with an existing external user + user is in the group => no changes. # Team with an existing external user + user is in the group => no changes.
( (
[("someuser", "someuser"),], [
[UserInformation("someuser", "someuser", "someuser@devtable.com"),], ("someuser", "someuser"),
],
[
UserInformation("someuser", "someuser", "someuser@devtable.com"),
],
["someuser"], ["someuser"],
), ),
# Team with an existing external user (with a different Quay username) + user is in the group. # Team with an existing external user (with a different Quay username) + user is in the group.
# => no changes # => no changes
( (
[("anotherquayname", "someuser"),], [
[UserInformation("someuser", "someuser", "someuser@devtable.com"),], ("anotherquayname", "someuser"),
],
[
UserInformation("someuser", "someuser", "someuser@devtable.com"),
],
["someuser"], ["someuser"],
), ),
# Team missing a few members that are in the group => members added. # Team missing a few members that are in the group => members added.
@@ -89,12 +103,18 @@ def blacklisted_emails(request):
("thirduser", "thirduser"), ("thirduser", "thirduser"),
("nontestuser", None), ("nontestuser", None),
], ],
[UserInformation("thirduser", "thirduser", "thirduser@devtable.com"),], [
UserInformation("thirduser", "thirduser", "thirduser@devtable.com"),
],
["thirduser"], ["thirduser"],
), ),
# Team has different membership than the group => members added and removed. # Team has different membership than the group => members added and removed.
( (
[("anotheruser", "anotheruser"), ("someuser", "someuser"), ("nontestuser", None),], [
("anotheruser", "anotheruser"),
("someuser", "someuser"),
("nontestuser", None),
],
[ [
UserInformation("anotheruser", "anotheruser", "anotheruser@devtable.com"), UserInformation("anotheruser", "anotheruser", "anotheruser@devtable.com"),
UserInformation("missinguser", "missinguser", "missinguser@devtable.com"), UserInformation("missinguser", "missinguser", "missinguser@devtable.com"),
@@ -108,7 +128,9 @@ def blacklisted_emails(request):
("buynlarge+anotherbot", None), ("buynlarge+anotherbot", None),
("buynlarge+somerobot", None), ("buynlarge+somerobot", None),
], ],
[UserInformation("someuser", "someuser", "someuser@devtable.com"),], [
UserInformation("someuser", "someuser", "someuser@devtable.com"),
],
["someuser", "buynlarge+somerobot", "buynlarge+anotherbot"], ["someuser", "buynlarge+somerobot", "buynlarge+anotherbot"],
), ),
# Team has an extra member and some robots => member removed and robots remain. # Team has an extra member and some robots => member removed and robots remain.
@@ -130,14 +152,21 @@ def blacklisted_emails(request):
("buynlarge+anotherbot", None), ("buynlarge+anotherbot", None),
("buynlarge+somerobot", None), ("buynlarge+somerobot", None),
], ],
[UserInformation("anotheruser", "anotheruser", "anotheruser@devtable.com"),], [
UserInformation("anotheruser", "anotheruser", "anotheruser@devtable.com"),
],
["anotheruser", "buynlarge+somerobot", "buynlarge+anotherbot"], ["anotheruser", "buynlarge+somerobot", "buynlarge+anotherbot"],
), ),
# Team with an existing external user (with a different Quay username) + user is in the group. # Team with an existing external user (with a different Quay username) + user is in the group.
# => no changes and robots remain. # => no changes and robots remain.
( (
[("anotherquayname", "someuser"), ("buynlarge+anotherbot", None),], [
[UserInformation("someuser", "someuser", "someuser@devtable.com"),], ("anotherquayname", "someuser"),
("buynlarge+anotherbot", None),
],
[
UserInformation("someuser", "someuser", "someuser@devtable.com"),
],
["someuser", "buynlarge+anotherbot"], ["someuser", "buynlarge+anotherbot"],
), ),
# Team which returns the same member twice, as pagination in some engines (like LDAP) is not # Team which returns the same member twice, as pagination in some engines (like LDAP) is not
@@ -263,7 +292,10 @@ def test_sync_teams_to_groups(user_creation, invite_only_user_creation, blacklis
@pytest.mark.parametrize( @pytest.mark.parametrize(
"auth_system_builder,config", "auth_system_builder,config",
[(mock_ldap, {"group_dn": "cn=AwesomeFolk"}), (fake_keystone, {"group_id": "somegroupid"}),], [
(mock_ldap, {"group_dn": "cn=AwesomeFolk"}),
(fake_keystone, {"group_id": "somegroupid"}),
],
) )
def test_teamsync_end_to_end( def test_teamsync_end_to_end(
user_creation, invite_only_user_creation, auth_system_builder, config, blacklisted_emails, app user_creation, invite_only_user_creation, auth_system_builder, config, blacklisted_emails, app
@@ -306,7 +338,10 @@ def test_teamsync_end_to_end(
@pytest.mark.parametrize( @pytest.mark.parametrize(
"auth_system_builder,config", "auth_system_builder,config",
[(mock_ldap, {"group_dn": "cn=AwesomeFolk"}), (fake_keystone, {"group_id": "somegroupid"}),], [
(mock_ldap, {"group_dn": "cn=AwesomeFolk"}),
(fake_keystone, {"group_id": "somegroupid"}),
],
) )
def test_teamsync_existing_email( def test_teamsync_existing_email(
user_creation, invite_only_user_creation, auth_system_builder, blacklisted_emails, config, app user_creation, invite_only_user_creation, auth_system_builder, blacklisted_emails, config, app
Some files were not shown because too many files have changed in this diff.