diff --git a/auth/auth_context.py b/auth/auth_context.py
index 375d3d62a..b42292c7e 100644
--- a/auth/auth_context.py
+++ b/auth/auth_context.py
@@ -2,24 +2,32 @@ from flask import _request_ctx_stack
 
 
 def get_authenticated_context():
-    """ Returns the auth context for the current request context, if any. """
+    """
+    Returns the auth context for the current request context, if any.
+    """
     return getattr(_request_ctx_stack.top, "authenticated_context", None)
 
 
 def get_authenticated_user():
-    """ Returns the authenticated user, if any, or None if none. """
+    """
+    Returns the authenticated user, if any, or None if none.
+    """
     context = get_authenticated_context()
     return context.authed_user if context else None
 
 
 def get_validated_oauth_token():
-    """ Returns the authenticated and validated OAuth access token, if any, or None if none. """
+    """
+    Returns the authenticated and validated OAuth access token, if any, or None if none.
+    """
     context = get_authenticated_context()
     return context.authed_oauth_token if context else None
 
 
 def set_authenticated_context(auth_context):
-    """ Sets the auth context for the current request context to that given. """
+    """
+    Sets the auth context for the current request context to that given.
+    """
     ctx = _request_ctx_stack.top
     ctx.authenticated_context = auth_context
     return auth_context
diff --git a/auth/auth_context_type.py b/auth/auth_context_type.py
index 878955dc0..cb425e0e0 100644
--- a/auth/auth_context_type.py
+++ b/auth/auth_context_type.py
@@ -20,92 +20,116 @@ logger = logging.getLogger(__name__)
 @add_metaclass(ABCMeta)
 class AuthContext(object):
     """
-  Interface that represents the current context of authentication.
-  """
+    Interface that represents the current context of authentication.
+    """
 
     @property
     @abstractmethod
     def entity_kind(self):
-        """ Returns the kind of the entity in this auth context. """
+        """
+        Returns the kind of the entity in this auth context.
+        """
        pass
 
     @property
     @abstractmethod
     def is_anonymous(self):
-        """ Returns true if this is an anonymous context. """
+        """
+        Returns true if this is an anonymous context.
+        """
        pass
 
     @property
     @abstractmethod
     def authed_oauth_token(self):
-        """ Returns the authenticated OAuth token, if any. """
+        """
+        Returns the authenticated OAuth token, if any.
+        """
        pass
 
     @property
     @abstractmethod
     def authed_user(self):
-        """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that
-        this property will also return robot accounts.
-        """
+        """
+        Returns the authenticated user, whether directly, or via an OAuth or access token.
+
+        Note that this property will also return robot accounts.
+        """
        pass
 
     @property
     @abstractmethod
     def has_nonrobot_user(self):
-        """ Returns whether a user (not a robot) was authenticated successfully. """
+        """
+        Returns whether a user (not a robot) was authenticated successfully.
+        """
        pass
 
     @property
     @abstractmethod
     def identity(self):
-        """ Returns the identity for the auth context. """
+        """
+        Returns the identity for the auth context.
+        """
        pass
 
     @property
     @abstractmethod
     def description(self):
-        """ Returns a human-readable and *public* description of the current auth context. """
+        """
+        Returns a human-readable and *public* description of the current auth context.
+        """
        pass
 
     @property
     @abstractmethod
     def credential_username(self):
-        """ Returns the username to create credentials for this context's entity, if any. """
+        """
+        Returns the username to create credentials for this context's entity, if any.
+        """
        pass
 
     @abstractmethod
     def analytics_id_and_public_metadata(self):
-        """ Returns the analytics ID and public log metadata for this auth context. """
+        """
+        Returns the analytics ID and public log metadata for this auth context.
+        """
        pass
 
     @abstractmethod
     def apply_to_request_context(self):
-        """ Applies this auth result to the auth context and Flask-Principal. """
+        """
+        Applies this auth result to the auth context and Flask-Principal.
+        """
        pass
 
     @abstractmethod
     def to_signed_dict(self):
-        """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
-        form of signed serialization.
-        """
+        """
+        Serializes the auth context into a dictionary suitable for inclusion in a JWT or other form
+        of signed serialization.
+        """
        pass
 
     @property
     @abstractmethod
     def unique_key(self):
-        """ Returns a key that is unique to this auth context type and its data. For example, an
-        instance of the auth context type for the user might be a string of the form
-        `user-{user-uuid}`. Callers should treat this key as opaque and not rely on the contents
-        for anything besides uniqueness. This is typically used by callers when they'd like to
-        check cache but not hit the database to get a fully validated auth context.
-        """
+        """
+        Returns a key that is unique to this auth context type and its data.
+
+        For example, an instance of the auth context type for the user might be a string of the form
+        `user-{user-uuid}`. Callers should treat this key as opaque and not rely on the contents for
+        anything besides uniqueness. This is typically used by callers when they'd like to check
+        cache but not hit the database to get a fully validated auth context.
+        """
        pass
 
 
 class ValidatedAuthContext(AuthContext):
-    """ ValidatedAuthContext represents the loaded, authenticated and validated auth information
-    for the current request context.
-    """
+    """
+    ValidatedAuthContext represents the loaded, authenticated and validated auth information for the
+    current request context.
+    """
 
     def __init__(
         self,
@@ -133,7 +157,9 @@ class ValidatedAuthContext(AuthContext):
     @property
     def entity_kind(self):
-        """ Returns the kind of the entity in this auth context. """
+        """
+        Returns the kind of the entity in this auth context.
+        """
         for kind in ContextEntityKind:
             if hasattr(self, kind.value) and getattr(self, kind.value):
                 return kind
@@ -142,9 +168,11 @@ class ValidatedAuthContext(AuthContext):
     @property
     def authed_user(self):
-        """ Returns the authenticated user, whether directly, or via an OAuth token. Note that this
-        will also return robot accounts.
-        """
+        """
+        Returns the authenticated user, whether directly, or via an OAuth token.
+
+        Note that this will also return robot accounts.
+        """
         authed_user = self._authed_user()
         if authed_user is not None and not authed_user.enabled:
             logger.warning("Attempt to reference a disabled user/robot: %s", authed_user.username)
@@ -170,17 +198,23 @@ class ValidatedAuthContext(AuthContext):
     @property
     def is_anonymous(self):
-        """ Returns true if this is an anonymous context. """
+        """
+        Returns true if this is an anonymous context.
+        """
         return not self.authed_user and not self.token and not self.signed_data
 
     @property
     def has_nonrobot_user(self):
-        """ Returns whether a user (not a robot) was authenticated successfully. """
+        """
+        Returns whether a user (not a robot) was authenticated successfully.
+        """
         return bool(self.authed_user and not self.robot)
 
     @property
     def identity(self):
-        """ Returns the identity for the auth context.
-    """
+        """
+        Returns the identity for the auth context.
+        """
         if self.oauthtoken:
             scope_set = scopes_from_scope_string(self.oauthtoken.scope)
             return QuayDeferredPermissionUser.for_user(self.oauthtoken.authorized_user, scope_set)
@@ -200,7 +234,9 @@ class ValidatedAuthContext(AuthContext):
     @property
     def entity_reference(self):
-        """ Returns the DB object reference for this context's entity. """
+        """
+        Returns the DB object reference for this context's entity.
+        """
         if self.entity_kind == ContextEntityKind.anonymous:
             return None
 
@@ -208,23 +244,31 @@ class ValidatedAuthContext(AuthContext):
     @property
     def description(self):
-        """ Returns a human-readable and *public* description of the current auth context. """
+        """
+        Returns a human-readable and *public* description of the current auth context.
+        """
         handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
         return handler.description(self.entity_reference)
 
     @property
     def credential_username(self):
-        """ Returns the username to create credentials for this context's entity, if any. """
+        """
+        Returns the username to create credentials for this context's entity, if any.
+        """
         handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
         return handler.credential_username(self.entity_reference)
 
     def analytics_id_and_public_metadata(self):
-        """ Returns the analytics ID and public log metadata for this auth context. """
+        """
+        Returns the analytics ID and public log metadata for this auth context.
+        """
         handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
         return handler.analytics_id_and_public_metadata(self.entity_reference)
 
     def apply_to_request_context(self):
-        """ Applies this auth result to the auth context and Flask-Principal. """
+        """
+        Applies this auth result to the auth context and Flask-Principal.
+        """
         # Save to the request context.
         set_authenticated_context(self)
 
@@ -238,9 +282,10 @@ class ValidatedAuthContext(AuthContext):
         return "%s-%s" % (signed_dict["entity_kind"], signed_dict.get("entity_reference", "(anon)"))
 
     def to_signed_dict(self):
-        """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
-        form of signed serialization.
-        """
+        """
+        Serializes the auth context into a dictionary suitable for inclusion in a JWT or other form
+        of signed serialization.
+        """
         dict_data = {
             "version": 2,
             "entity_kind": self.entity_kind.value,
@@ -288,11 +333,14 @@ class ValidatedAuthContext(AuthContext):
 
 
 class SignedAuthContext(AuthContext):
-    """ SignedAuthContext represents an auth context loaded from a signed token of some kind,
-    such as a JWT. Unlike ValidatedAuthContext, SignedAuthContext operates lazily, only loading
-    the actual {user, robot, token, etc} when requested. This allows registry operations that
-    only need to check if *some* entity is present to do so, without hitting the database.
-    """
+    """
+    SignedAuthContext represents an auth context loaded from a signed token of some kind, such as a
+    JWT.
+
+    Unlike ValidatedAuthContext, SignedAuthContext operates lazily, only loading the actual {user,
+    robot, token, etc} when requested. This allows registry operations that only need to check if
+    *some* entity is present to do so, without hitting the database.
+    """
 
     def __init__(self, kind, signed_data, v1_dict_format):
         self.kind = kind
@@ -325,9 +373,10 @@ class SignedAuthContext(AuthContext):
     @lru_cache(maxsize=1)
     def _get_validated(self):
-        """ Returns a ValidatedAuthContext for this signed context, resolving all the necessary
+        """
+        Returns a ValidatedAuthContext for this signed context, resolving all the necessary
         references.
-    """
+        """
         if not self.v1_dict_format:
             if self.kind == ContextEntityKind.anonymous:
                 return ValidatedAuthContext()
@@ -390,19 +439,25 @@ class SignedAuthContext(AuthContext):
     @property
     def entity_kind(self):
-        """ Returns the kind of the entity in this auth context. """
+        """
+        Returns the kind of the entity in this auth context.
+        """
         return self.kind
 
     @property
     def is_anonymous(self):
-        """ Returns true if this is an anonymous context. """
+        """
+        Returns true if this is an anonymous context.
+        """
         return self.kind == ContextEntityKind.anonymous
 
     @property
     def authed_user(self):
-        """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that
-        this property will also return robot accounts.
-        """
+        """
+        Returns the authenticated user, whether directly, or via an OAuth or access token.
+
+        Note that this property will also return robot accounts.
+        """
         if self.kind == ContextEntityKind.anonymous:
             return None
 
@@ -417,7 +472,9 @@ class SignedAuthContext(AuthContext):
     @property
     def has_nonrobot_user(self):
-        """ Returns whether a user (not a robot) was authenticated successfully. """
+        """
+        Returns whether a user (not a robot) was authenticated successfully.
+        """
         if self.kind == ContextEntityKind.anonymous:
             return False
 
@@ -425,29 +482,40 @@ class SignedAuthContext(AuthContext):
     @property
     def identity(self):
-        """ Returns the identity for the auth context. """
+        """
+        Returns the identity for the auth context.
+        """
         return self._get_validated().identity
 
     @property
     def description(self):
-        """ Returns a human-readable and *public* description of the current auth context. """
+        """
+        Returns a human-readable and *public* description of the current auth context.
+        """
         return self._get_validated().description
 
     @property
     def credential_username(self):
-        """ Returns the username to create credentials for this context's entity, if any. """
+        """
+        Returns the username to create credentials for this context's entity, if any.
+        """
         return self._get_validated().credential_username
 
     def analytics_id_and_public_metadata(self):
-        """ Returns the analytics ID and public log metadata for this auth context. """
+        """
+        Returns the analytics ID and public log metadata for this auth context.
+        """
         return self._get_validated().analytics_id_and_public_metadata()
 
     def apply_to_request_context(self):
-        """ Applies this auth result to the auth context and Flask-Principal. """
+        """
+        Applies this auth result to the auth context and Flask-Principal.
+        """
         return self._get_validated().apply_to_request_context()
 
     def to_signed_dict(self):
-        """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
-        form of signed serialization.
-        """
+        """
+        Serializes the auth context into a dictionary suitable for inclusion in a JWT or other form
+        of signed serialization.
+        """
         return self.signed_data
diff --git a/auth/basic.py b/auth/basic.py
index 2d2d22e4e..f99f8a81e 100644
--- a/auth/basic.py
+++ b/auth/basic.py
@@ -10,19 +10,23 @@ logger = logging.getLogger(__name__)
 
 
 def has_basic_auth(username):
-    """ Returns true if a basic auth header exists with a username and password pair that validates
-    against the internal authentication system. Returns True on full success and False on any
-    failure (missing header, invalid header, invalid credentials, etc).
-    """
+    """
+    Returns true if a basic auth header exists with a username and password pair that validates
+    against the internal authentication system.
+
+    Returns True on full success and False on any failure (missing header, invalid header, invalid
+    credentials, etc).
+    """
     auth_header = request.headers.get("authorization", "")
     result = validate_basic_auth(auth_header)
     return result.has_nonrobot_user and result.context.user.username == username
 
 
 def validate_basic_auth(auth_header):
-    """ Validates the specified basic auth header, returning whether its credentials point
-    to a valid user or token.
-    """
+    """
+    Validates the specified basic auth header, returning whether its credentials point to a valid
+    user or token.
+    """
     if not auth_header:
         return ValidateResult(AuthKind.basic, missing=True)
 
@@ -41,8 +45,9 @@ def validate_basic_auth(auth_header):
 
 
 def _parse_basic_auth_header(auth):
-    """ Parses the given basic auth header, returning the credentials found inside.
-    """
+    """
+    Parses the given basic auth header, returning the credentials found inside.
+    """
     normalized = [part.strip() for part in auth.split(" ") if part]
     if normalized[0].lower() != "basic" or len(normalized) != 2:
         return None, "Invalid basic auth header"
diff --git a/auth/context_entity.py b/auth/context_entity.py
index c1bc272f7..abe5c285c 100644
--- a/auth/context_entity.py
+++ b/auth/context_entity.py
@@ -12,10 +12,12 @@ from auth.credential_consts import (
 
 
 class ContextEntityKind(Enum):
-    """ Defines the various kinds of entities in an auth context. Note that the string values of
-    these fields *must* match the names of the fields in the ValidatedAuthContext class, as
-    we fill them in directly based on the string names here.
-    """
+    """
+    Defines the various kinds of entities in an auth context.
+
+    Note that the string values of these fields *must* match the names of the fields in the
+    ValidatedAuthContext class, as we fill them in directly based on the string names here.
+    """
 
     anonymous = "anonymous"
     user = "user"
@@ -29,35 +31,45 @@ class ContextEntityKind(Enum):
 @add_metaclass(ABCMeta)
 class ContextEntityHandler(object):
     """
-  Interface that represents handling specific kinds of entities under an auth context.
-  """
+    Interface that represents handling specific kinds of entities under an auth context.
+    """
 
     @abstractmethod
     def credential_username(self, entity_reference):
-        """ Returns the username to create credentials for this entity, if any. """
+        """
+        Returns the username to create credentials for this entity, if any.
+        """
        pass
 
     @abstractmethod
     def get_serialized_entity_reference(self, entity_reference):
-        """ Returns the entity reference for this kind of auth context, serialized into a form that can
-        be placed into a JSON object and put into a JWT. This is typically a DB UUID or another
-        unique identifier for the object in the DB.
-        """
+        """
+        Returns the entity reference for this kind of auth context, serialized into a form that can
+        be placed into a JSON object and put into a JWT.
+
+        This is typically a DB UUID or another unique identifier for the object in the DB.
+        """
        pass
 
     @abstractmethod
     def deserialize_entity_reference(self, serialized_entity_reference):
-        """ Returns the deserialized reference to the entity in the database, or None if none. """
+        """
+        Returns the deserialized reference to the entity in the database, or None if none.
+        """
        pass
 
     @abstractmethod
     def description(self, entity_reference):
-        """ Returns a human-readable and *public* description of the current entity. """
+        """
+        Returns a human-readable and *public* description of the current entity.
+        """
        pass
 
     @abstractmethod
     def analytics_id_and_public_metadata(self, entity_reference):
-        """ Returns the analyitics ID and a dict of public metadata for the current entity. """
+        """
+        Returns the analytics ID and a dict of public metadata for the current entity.
+        """
        pass
diff --git a/auth/cookie.py b/auth/cookie.py
index 84baa26c5..acb096954 100644
--- a/auth/cookie.py
+++ b/auth/cookie.py
@@ -9,7 +9,9 @@ logger = logging.getLogger(__name__)
 
 
 def validate_session_cookie(auth_header_unusued=None):
-    """ Attempts to load a user from a session cookie. """
+    """
+    Attempts to load a user from a session cookie.
+    """
     if current_user.is_anonymous:
         return ValidateResult(AuthKind.cookie, missing=True)
diff --git a/auth/credentials.py b/auth/credentials.py
index 314298b81..d7f554e78 100644
--- a/auth/credentials.py
+++ b/auth/credentials.py
@@ -27,7 +27,9 @@ class CredentialKind(Enum):
 
 
 def validate_credentials(auth_username, auth_password_or_token):
-    """ Validates a pair of auth username and password/token credentials. """
+    """
+    Validates a pair of auth username and password/token credentials.
+    """
     # Check for access tokens.
     if auth_username == ACCESS_TOKEN_USERNAME:
         logger.debug("Found credentials for access token")
diff --git a/auth/decorators.py b/auth/decorators.py
index f7f61f99b..e663e6c5e 100644
--- a/auth/decorators.py
+++ b/auth/decorators.py
@@ -23,9 +23,11 @@ authentication_count = Counter(
 
 
 def _auth_decorator(pass_result=False, handlers=None):
-    """ Builds an auth decorator that runs the given handlers and, if any return successfully,
-    sets up the auth context. The wrapped function will be invoked *regardless of success or
-    failure of the auth handler(s)*
+    """
+    Builds an auth decorator that runs the given handlers and, if any return successfully, sets up
+    the auth context.
+
+    The wrapped function will be invoked *regardless of success or failure of the auth handler(s)*
     """
 
     def processor(func):
@@ -75,8 +77,10 @@ process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth])
 
 
 def require_session_login(func):
-    """ Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If
-    a valid session cookie does exist, the authenticated user and identity are also set.
+    """
+    Decorates a function and ensures that a valid session cookie exists or a 401 is raised.
+
+    If a valid session cookie does exist, the authenticated user and identity are also set.
     """
 
     @wraps(func)
@@ -95,9 +99,11 @@
 
 
 def extract_namespace_repo_from_session(func):
-    """ Extracts the namespace and repository name from the current session (which must exist)
-    and passes them into the decorated function as the first and second arguments. If the
-    session doesn't exist or does not contain these arugments, a 400 error is raised.
+    """
+    Extracts the namespace and repository name from the current session (which must exist) and
+    passes them into the decorated function as the first and second arguments.
+
+    If the session doesn't exist or does not contain these arguments, a 400 error is raised.
     """
 
     @wraps(func)
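Reviewer note (not part of the patch): for anyone unfamiliar with how the decorators above are consumed, here is a minimal usage sketch. The Flask app and `/whoami` endpoint are hypothetical; `process_basic_auth_no_pass` and `get_authenticated_user` are real names from this diff.

```python
# Illustrative sketch only: wiring an auth decorator from auth/decorators.py
# into a standalone Flask view.
from flask import Flask, jsonify

from auth.auth_context import get_authenticated_user
from auth.decorators import process_basic_auth_no_pass

app = Flask(__name__)


@app.route("/whoami")
@process_basic_auth_no_pass
def whoami():
    # The view runs whether or not auth succeeded; the request's auth context
    # tells us which. A robot or OAuth-authed user is also returned here.
    user = get_authenticated_user()
    return jsonify({"username": user.username if user else None})
```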
+ """ + Extracts the namespace and repository name from the current session (which must exist) and + passes them into the decorated function as the first and second arguments. + + If the session doesn't exist or does not contain these arugments, a 400 error is raised. """ @wraps(func) diff --git a/auth/oauth.py b/auth/oauth.py index b41b56748..2331bc86c 100644 --- a/auth/oauth.py +++ b/auth/oauth.py @@ -10,9 +10,10 @@ logger = logging.getLogger(__name__) def validate_bearer_auth(auth_header): - """ Validates an OAuth token found inside a basic auth `Bearer` token, returning whether it - points to a valid OAuth token. - """ + """ + Validates an OAuth token found inside a basic auth `Bearer` token, returning whether it points + to a valid OAuth token. + """ if not auth_header: return ValidateResult(AuthKind.oauth, missing=True) @@ -26,8 +27,9 @@ def validate_bearer_auth(auth_header): def validate_oauth_token(token): - """ Validates the specified OAuth token, returning whether it points to a valid OAuth token. - """ + """ + Validates the specified OAuth token, returning whether it points to a valid OAuth token. + """ validated = model.oauth.validate_access_token(token) if not validated: logger.warning("OAuth access token could not be validated: %s", token) diff --git a/auth/permissions.py b/auth/permissions.py index 65bea44b9..e4fb1bcb3 100644 --- a/auth/permissions.py +++ b/auth/permissions.py @@ -112,9 +112,10 @@ class QuayDeferredPermissionUser(Identity): return self._translate_role_for_scopes(USER_ROLES, SCOPE_MAX_USER_ROLES, role) def _populate_user_provides(self, user_object): - """ Populates the provides that naturally apply to a user, such as being the admin of - their own namespace. - """ + """ + Populates the provides that naturally apply to a user, such as being the admin of their own + namespace. + """ # Add the user specific permissions, only for non-oauth permission user_grant = _UserNeed(user_object.username, self._user_role_for_scopes("admin")) @@ -142,9 +143,11 @@ class QuayDeferredPermissionUser(Identity): self.provides.add(_SuperUserNeed()) def _populate_namespace_wide_provides(self, user_object, namespace_filter): - """ Populates the namespace-wide provides for a particular user under a particular namespace. + """ + Populates the namespace-wide provides for a particular user under a particular namespace. + This method does *not* add any provides for specific repositories. - """ + """ for team in model.permission.get_org_wide_permissions( user_object, org_filter=namespace_filter @@ -169,7 +172,9 @@ class QuayDeferredPermissionUser(Identity): self.provides.add(team_grant) def _populate_repository_provides(self, user_object, namespace_filter, repository_name): - """ Populates the repository-specific provides for a particular user and repository. """ + """ + Populates the repository-specific provides for a particular user and repository. + """ if namespace_filter and repository_name: permissions = model.permission.get_user_repository_permissions( @@ -232,7 +237,9 @@ class QuayDeferredPermissionUser(Identity): class QuayPermission(Permission): - """ Base for all permissions in Quay. """ + """ + Base for all permissions in Quay. 
+ """ namespace = None repo_name = None diff --git a/auth/registry_jwt_auth.py b/auth/registry_jwt_auth.py index d08738269..62244e40f 100644 --- a/auth/registry_jwt_auth.py +++ b/auth/registry_jwt_auth.py @@ -57,7 +57,9 @@ class InvalidJWTException(Exception): def get_auth_headers(repository=None, scopes=None): - """ Returns a dictionary of headers for auth responses. """ + """ + Returns a dictionary of headers for auth responses. + """ headers = {} realm_auth_path = url_for("v2.generate_registry_jwt") authenticate = 'Bearer realm="{0}{1}",service="{2}"'.format( @@ -76,10 +78,13 @@ def get_auth_headers(repository=None, scopes=None): def identity_from_bearer_token(bearer_header): - """ Process a bearer header and return the loaded identity, or raise InvalidJWTException if an - identity could not be loaded. Expects tokens and grants in the format of the Docker registry - v2 auth spec: https://docs.docker.com/registry/spec/auth/token/ - """ + """ + Process a bearer header and return the loaded identity, or raise InvalidJWTException if an + identity could not be loaded. + + Expects tokens and grants in the format of the Docker registry v2 auth spec: + https://docs.docker.com/registry/spec/auth/token/ + """ logger.debug("Validating auth header: %s", bearer_header) try: @@ -121,9 +126,11 @@ def identity_from_bearer_token(bearer_header): def process_registry_jwt_auth(scopes=None): - """ Processes the registry JWT auth token found in the authorization header. If none found, - no error is returned. If an invalid token is found, raises a 401. - """ + """ + Processes the registry JWT auth token found in the authorization header. + + If none found, no error is returned. If an invalid token is found, raises a 401. + """ def inner(func): @wraps(func) diff --git a/auth/scopes.py b/auth/scopes.py index c16a3dbf3..c631e3e07 100644 --- a/auth/scopes.py +++ b/auth/scopes.py @@ -166,9 +166,9 @@ def validate_scope_string(scopes): def is_subset_string(full_string, expected_string): - """ Returns true if the scopes found in expected_string are also found - in full_string. - """ + """ + Returns true if the scopes found in expected_string are also found in full_string. + """ full_scopes = scopes_from_scope_string(full_string) if not full_scopes: return False diff --git a/auth/signedgrant.py b/auth/signedgrant.py index f6e5e269d..baf7f88f0 100644 --- a/auth/signedgrant.py +++ b/auth/signedgrant.py @@ -12,7 +12,9 @@ SIGNATURE_PREFIX = "sigv2=" def generate_signed_token(grants, user_context): - """ Generates a signed session token with the given grants and user context. """ + """ + Generates a signed session token with the given grants and user context. + """ ser = SecureCookieSessionInterface().get_signing_serializer(app) data_to_sign = { "grants": grants, @@ -24,9 +26,10 @@ def generate_signed_token(grants, user_context): def validate_signed_grant(auth_header): - """ Validates a signed grant as found inside an auth header and returns whether it points to - a valid grant. - """ + """ + Validates a signed grant as found inside an auth header and returns whether it points to a valid + grant. + """ if not auth_header: return ValidateResult(AuthKind.signed_grant, missing=True) diff --git a/auth/validateresult.py b/auth/validateresult.py index 89915185d..1784f000e 100644 --- a/auth/validateresult.py +++ b/auth/validateresult.py @@ -14,7 +14,9 @@ class AuthKind(Enum): class ValidateResult(object): - """ A result of validating auth in one form or another. """ + """ + A result of validating auth in one form or another. 
+ """ def __init__( self, @@ -47,11 +49,15 @@ class ValidateResult(object): return self.tuple() == other.tuple() def apply_to_context(self): - """ Applies this auth result to the auth context and Flask-Principal. """ + """ + Applies this auth result to the auth context and Flask-Principal. + """ self.context.apply_to_request_context() def with_kind(self, kind): - """ Returns a copy of this result, but with the kind replaced. """ + """ + Returns a copy of this result, but with the kind replaced. + """ result = ValidateResult(kind, missing=self.missing, error_message=self.error_message) result.context = self.context return result @@ -65,15 +71,21 @@ class ValidateResult(object): @property def authed_user(self): - """ Returns the authenticated user, whether directly, or via an OAuth token. """ + """ + Returns the authenticated user, whether directly, or via an OAuth token. + """ return self.context.authed_user @property def has_nonrobot_user(self): - """ Returns whether a user (not a robot) was authenticated successfully. """ + """ + Returns whether a user (not a robot) was authenticated successfully. + """ return self.context.has_nonrobot_user @property def auth_valid(self): - """ Returns whether authentication successfully occurred. """ + """ + Returns whether authentication successfully occurred. + """ return self.context.entity_kind != ContextEntityKind.anonymous diff --git a/avatars/avatars.py b/avatars/avatars.py index 0de0e1c6f..ca7e323fd 100644 --- a/avatars/avatars.py +++ b/avatars/avatars.py @@ -24,7 +24,9 @@ class Avatar(object): class BaseAvatar(object): - """ Base class for all avatar implementations. """ + """ + Base class for all avatar implementations. + """ def __init__(self, preferred_url_scheme, colors, http_client): self.preferred_url_scheme = preferred_url_scheme @@ -32,9 +34,10 @@ class BaseAvatar(object): self.http_client = http_client def get_mail_html(self, name, email_or_id, size=16, kind="user"): - """ Returns the full HTML and CSS for viewing the avatar of the given name and email address, + """ + Returns the full HTML and CSS for viewing the avatar of the given name and email address, with an optional size. - """ + """ data = self.get_data(name, email_or_id, kind) url = self._get_url(data["hash"], size) if kind != "team" else None font_size = size - 6 @@ -110,12 +113,16 @@ class BaseAvatar(object): return {"name": name, "hash": hash_value, "color": hash_color, "kind": kind} def _get_url(self, hash_value, size): - """ Returns the URL for displaying the overlay avatar. """ + """ + Returns the URL for displaying the overlay avatar. + """ return None class GravatarAvatar(BaseAvatar): - """ Avatar system that uses gravatar for generating avatars. """ + """ + Avatar system that uses gravatar for generating avatars. + """ def _get_url(self, hash_value, size=16): return "%s://www.gravatar.com/avatar/%s?d=404&size=%s" % ( @@ -126,7 +133,9 @@ class GravatarAvatar(BaseAvatar): class LocalAvatar(BaseAvatar): - """ Avatar system that uses the local system for generating avatars. """ + """ + Avatar system that uses the local system for generating avatars. + """ pass diff --git a/boot.py b/boot.py index 247b12487..3d5ccc2e5 100755 --- a/boot.py +++ b/boot.py @@ -69,8 +69,8 @@ def _verify_service_key(): def setup_jwt_proxy(): """ - Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration. - """ + Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration. 
+ """ if os.path.exists(os.path.join(CONF_DIR, "jwtproxy_conf.yaml")): # Proxy is already setup. Make sure the service key is still valid. quay_key_id = _verify_service_key() diff --git a/buildman/asyncutil.py b/buildman/asyncutil.py index f913072c4..39a5b8c73 100644 --- a/buildman/asyncutil.py +++ b/buildman/asyncutil.py @@ -6,16 +6,17 @@ from trollius import get_event_loop, coroutine def wrap_with_threadpool(obj, worker_threads=1): """ - Wraps a class in an async executor so that it can be safely used in an event loop like trollius. - """ + Wraps a class in an async executor so that it can be safely used in an event loop like trollius. + """ async_executor = ThreadPoolExecutor(worker_threads) return AsyncWrapper(obj, executor=async_executor), async_executor class AsyncWrapper(object): - """ Wrapper class which will transform a syncronous library to one that can be used with - trollius coroutines. - """ + """ + Wrapper class which will transform a syncronous library to one that can be used with trollius + coroutines. + """ def __init__(self, delegate, loop=None, executor=None): self._loop = loop if loop is not None else get_event_loop() @@ -29,9 +30,10 @@ class AsyncWrapper(object): return delegate_attr def wrapper(*args, **kwargs): - """ Wraps the delegate_attr with primitives that will transform sync calls to ones shelled - out to a thread pool. - """ + """ + Wraps the delegate_attr with primitives that will transform sync calls to ones shelled + out to a thread pool. + """ callable_delegate_attr = partial(delegate_attr, *args, **kwargs) return self._loop.run_in_executor(self._executor, callable_delegate_attr) diff --git a/buildman/component/basecomponent.py b/buildman/component/basecomponent.py index 8806b5629..b68bdccc0 100644 --- a/buildman/component/basecomponent.py +++ b/buildman/component/basecomponent.py @@ -2,7 +2,9 @@ from autobahn.asyncio.wamp import ApplicationSession class BaseComponent(ApplicationSession): - """ Base class for all registered component sessions in the server. """ + """ + Base class for all registered component sessions in the server. + """ def __init__(self, config, **kwargs): ApplicationSession.__init__(self, config) diff --git a/buildman/component/buildcomponent.py b/buildman/component/buildcomponent.py index 12be8464c..76b575deb 100644 --- a/buildman/component/buildcomponent.py +++ b/buildman/component/buildcomponent.py @@ -35,7 +35,9 @@ logger = logging.getLogger(__name__) class ComponentStatus(object): - """ ComponentStatus represents the possible states of a component. """ + """ + ComponentStatus represents the possible states of a component. + """ JOINING = "joining" WAITING = "waiting" @@ -45,7 +47,9 @@ class ComponentStatus(object): class BuildComponent(BaseComponent): - """ An application session component which conducts one (or more) builds. """ + """ + An application session component which conducts one (or more) builds. + """ def __init__(self, config, realm=None, token=None, **kwargs): self.expected_token = token @@ -85,7 +89,9 @@ class BuildComponent(BaseComponent): @trollius.coroutine def start_build(self, build_job): - """ Starts a build. """ + """ + Starts a build. 
+ """ if self._component_status not in (ComponentStatus.WAITING, ComponentStatus.RUNNING): logger.debug( "Could not start build for component %s (build %s, worker version: %s): %s", @@ -191,7 +197,9 @@ class BuildComponent(BaseComponent): logger.debug("With Arguments: %s", build_arguments) def build_complete_callback(result): - """ This function is used to execute a coroutine as the callback. """ + """ + This function is used to execute a coroutine as the callback. + """ trollius.ensure_future(self._build_complete(result)) self.call("io.quay.builder.build", **build_arguments).add_done_callback( @@ -218,14 +226,18 @@ class BuildComponent(BaseComponent): @staticmethod def _commit_sha(build_config): - """ Determines whether the metadata is using an old schema or not and returns the commit. """ + """ + Determines whether the metadata is using an old schema or not and returns the commit. + """ commit_sha = build_config["trigger_metadata"].get("commit", "") old_commit_sha = build_config["trigger_metadata"].get("commit_sha", "") return commit_sha or old_commit_sha @staticmethod def name_and_path(subdir): - """ Returns the dockerfile path and name """ + """ + Returns the dockerfile path and name. + """ if subdir.endswith("/"): subdir += "Dockerfile" elif not subdir.endswith("Dockerfile"): @@ -234,7 +246,9 @@ class BuildComponent(BaseComponent): @staticmethod def _total_completion(statuses, total_images): - """ Returns the current amount completion relative to the total completion of a build. """ + """ + Returns the current amount completion relative to the total completion of a build. + """ percentage_with_sizes = float(len(statuses.values())) / total_images sent_bytes = sum([status["current"] for status in statuses.values()]) total_bytes = sum([status["total"] for status in statuses.values()]) @@ -242,7 +256,9 @@ class BuildComponent(BaseComponent): @staticmethod def _process_pushpull_status(status_dict, current_phase, docker_data, images): - """ Processes the status of a push or pull by updating the provided status_dict and images. """ + """ + Processes the status of a push or pull by updating the provided status_dict and images. + """ if not docker_data: return @@ -271,7 +287,9 @@ class BuildComponent(BaseComponent): @trollius.coroutine def _on_log_message(self, phase, json_data): - """ Tails log messages and updates the build status. """ + """ + Tails log messages and updates the build status. + """ # Update the heartbeat. self._last_heartbeat = datetime.datetime.utcnow() @@ -355,7 +373,9 @@ class BuildComponent(BaseComponent): @trollius.coroutine def _build_failure(self, error_message, exception=None): - """ Handles and logs a failed build. """ + """ + Handles and logs a failed build. + """ yield From( self._build_status.set_error( error_message, {"internal_error": str(exception) if exception else None} @@ -370,7 +390,11 @@ class BuildComponent(BaseComponent): @trollius.coroutine def _build_complete(self, result): - """ Wraps up a completed build. Handles any errors and calls self._build_finished. """ + """ + Wraps up a completed build. + + Handles any errors and calls self._build_finished. + """ build_id = self._current_job.repo_build.uuid try: @@ -451,7 +475,9 @@ class BuildComponent(BaseComponent): @trollius.coroutine def _build_finished(self, job_status): - """ Alerts the parent that a build has completed and sets the status back to running. """ + """ + Alerts the parent that a build has completed and sets the status back to running. 
+ """ yield From(self.parent_manager.job_completed(self._current_job, job_status, self)) # Set the component back to a running state. @@ -459,7 +485,9 @@ class BuildComponent(BaseComponent): @staticmethod def _ping(): - """ Ping pong. """ + """ + Ping pong. + """ return "pong" @trollius.coroutine @@ -499,7 +527,9 @@ class BuildComponent(BaseComponent): self._component_status = phase def _on_heartbeat(self): - """ Updates the last known heartbeat. """ + """ + Updates the last known heartbeat. + """ if self._component_status == ComponentStatus.TIMED_OUT: return @@ -508,10 +538,12 @@ class BuildComponent(BaseComponent): @trollius.coroutine def _heartbeat(self): - """ Coroutine that runs every HEARTBEAT_TIMEOUT seconds, both checking the worker's heartbeat - and updating the heartbeat in the build status dictionary (if applicable). This allows - the build system to catch crashes from either end. - """ + """ + Coroutine that runs every HEARTBEAT_TIMEOUT seconds, both checking the worker's heartbeat + and updating the heartbeat in the build status dictionary (if applicable). + + This allows the build system to catch crashes from either end. + """ yield From(trollius.sleep(INITIAL_TIMEOUT)) while True: diff --git a/buildman/component/buildparse.py b/buildman/component/buildparse.py index 18d678cae..85a3d8d19 100644 --- a/buildman/component/buildparse.py +++ b/buildman/component/buildparse.py @@ -2,9 +2,11 @@ import re def extract_current_step(current_status_string): - """ Attempts to extract the current step numeric identifier from the given status string. Returns the step - number or None if none. - """ + """ + Attempts to extract the current step numeric identifier from the given status string. + + Returns the step number or None if none. + """ # Older format: `Step 12 :` # Newer format: `Step 4/13 :` step_increment = re.search(r"Step ([0-9]+)/([0-9]+) :", current_status_string) diff --git a/buildman/enums.py b/buildman/enums.py index a7fe7bb99..055c42358 100644 --- a/buildman/enums.py +++ b/buildman/enums.py @@ -2,7 +2,9 @@ from data.database import BUILD_PHASE class BuildJobResult(object): - """ Build job result enum """ + """ + Build job result enum. + """ INCOMPLETE = "incomplete" COMPLETE = "complete" @@ -10,7 +12,9 @@ class BuildJobResult(object): class BuildServerStatus(object): - """ Build server status enum """ + """ + Build server status enum. + """ STARTING = "starting" RUNNING = "running" diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index 674195567..c7310fd8e 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -14,13 +14,17 @@ logger = logging.getLogger(__name__) class BuildJobLoadException(Exception): - """ Exception raised if a build job could not be instantiated for some reason. """ + """ + Exception raised if a build job could not be instantiated for some reason. + """ pass class BuildJob(object): - """ Represents a single in-progress build job. """ + """ + Represents a single in-progress build job. + """ def __init__(self, job_item): self.job_item = job_item @@ -56,17 +60,23 @@ class BuildJob(object): @property def build_uuid(self): - """ Returns the unique UUID for this build job. """ + """ + Returns the unique UUID for this build job. + """ return self.job_details["build_uuid"] @property def namespace(self): - """ Returns the namespace under which this build is running. """ + """ + Returns the namespace under which this build is running. 
+ """ return self.repo_build.repository.namespace_user.username @property def repo_name(self): - """ Returns the name of the repository under which this build is running. """ + """ + Returns the name of the repository under which this build is running. + """ return self.repo_build.repository.name @property @@ -74,7 +84,9 @@ class BuildJob(object): return self._load_repo_build() def get_build_package_url(self, user_files): - """ Returns the URL of the build package for this build, if any or empty string if none. """ + """ + Returns the URL of the build package for this build, if any or empty string if none. + """ archive_url = self.build_config.get("archive_url", None) if archive_url: return archive_url @@ -88,7 +100,9 @@ class BuildJob(object): @property def pull_credentials(self): - """ Returns the pull credentials for this job, or None if none. """ + """ + Returns the pull credentials for this job, or None if none. + """ return self.job_details.get("pull_credentials") @property @@ -102,7 +116,9 @@ class BuildJob(object): ) def determine_cached_tag(self, base_image_id=None, cache_comments=None): - """ Returns the tag to pull to prime the cache or None if none. """ + """ + Returns the tag to pull to prime the cache or None if none. + """ cached_tag = self._determine_cached_tag_by_tag() logger.debug( "Determined cached tag %s for %s: %s", cached_tag, base_image_id, cache_comments @@ -110,9 +126,12 @@ class BuildJob(object): return cached_tag def _determine_cached_tag_by_tag(self): - """ Determines the cached tag by looking for one of the tags being built, and seeing if it - exists in the repository. This is a fallback for when no comment information is available. - """ + """ + Determines the cached tag by looking for one of the tags being built, and seeing if it + exists in the repository. + + This is a fallback for when no comment information is available. + """ with UseThenDisconnect(app.config): tags = self.build_config.get("docker_tags", ["latest"]) repository = RepositoryReference.for_repo_obj(self.repo_build.repository) @@ -128,7 +147,9 @@ class BuildJob(object): class BuildJobNotifier(object): - """ A class for sending notifications to a job that only relies on the build_uuid """ + """ + A class for sending notifications to a job that only relies on the build_uuid. + """ def __init__(self, build_uuid): self.build_uuid = build_uuid diff --git a/buildman/jobutil/buildstatus.py b/buildman/jobutil/buildstatus.py index ad2bc9417..416495cff 100644 --- a/buildman/jobutil/buildstatus.py +++ b/buildman/jobutil/buildstatus.py @@ -13,7 +13,9 @@ logger = logging.getLogger(__name__) class StatusHandler(object): - """ Context wrapper for writing status to build logs. """ + """ + Context wrapper for writing status to build logs. + """ def __init__(self, build_logs, repository_build_uuid): self._current_phase = None diff --git a/buildman/jobutil/workererror.py b/buildman/jobutil/workererror.py index d34f6d8ee..81a626266 100644 --- a/buildman/jobutil/workererror.py +++ b/buildman/jobutil/workererror.py @@ -1,5 +1,7 @@ class WorkerError(object): - """ Helper class which represents errors raised by a build worker. """ + """ + Helper class which represents errors raised by a build worker. 
+ """ def __init__(self, error_code, base_message=None): self._error_code = error_code diff --git a/buildman/manager/basemanager.py b/buildman/manager/basemanager.py index 996a4eacc..eea969797 100644 --- a/buildman/manager/basemanager.py +++ b/buildman/manager/basemanager.py @@ -2,7 +2,9 @@ from trollius import coroutine class BaseManager(object): - """ Base for all worker managers. """ + """ + Base for all worker managers. + """ def __init__( self, @@ -22,59 +24,77 @@ class BaseManager(object): @coroutine def job_heartbeat(self, build_job): - """ Method invoked to tell the manager that a job is still running. This method will be called - every few minutes. """ + """ + Method invoked to tell the manager that a job is still running. + + This method will be called every few minutes. + """ self.job_heartbeat_callback(build_job) def overall_setup_time(self): - """ Returns the number of seconds that the build system should wait before allowing the job - to be picked up again after called 'schedule'. - """ + """ + Returns the number of seconds that the build system should wait before allowing the job to + be picked up again after called 'schedule'. + """ raise NotImplementedError def shutdown(self): - """ Indicates that the build controller server is in a shutdown state and that no new jobs - or workers should be performed. Existing workers should be cleaned up once their jobs - have completed - """ + """ + Indicates that the build controller server is in a shutdown state and that no new jobs or + workers should be performed. + + Existing workers should be cleaned up once their jobs have completed + """ raise NotImplementedError @coroutine def schedule(self, build_job): - """ Schedules a queue item to be built. Returns a 2-tuple with (True, None) if the item was - properly scheduled and (False, a retry timeout in seconds) if all workers are busy or an - error occurs. - """ + """ + Schedules a queue item to be built. + + Returns a 2-tuple with (True, None) if the item was properly scheduled and (False, a retry + timeout in seconds) if all workers are busy or an error occurs. + """ raise NotImplementedError def initialize(self, manager_config): - """ Runs any initialization code for the manager. Called once the server is in a ready state. - """ + """ + Runs any initialization code for the manager. + + Called once the server is in a ready state. + """ raise NotImplementedError @coroutine def build_component_ready(self, build_component): - """ Method invoked whenever a build component announces itself as ready. - """ + """ + Method invoked whenever a build component announces itself as ready. + """ raise NotImplementedError def build_component_disposed(self, build_component, timed_out): - """ Method invoked whenever a build component has been disposed. The timed_out boolean indicates - whether the component's heartbeat timed out. - """ + """ + Method invoked whenever a build component has been disposed. + + The timed_out boolean indicates whether the component's heartbeat timed out. + """ raise NotImplementedError @coroutine def job_completed(self, build_job, job_status, build_component): - """ Method invoked once a job_item has completed, in some manner. The job_status will be - one of: incomplete, error, complete. Implementations of this method should call coroutine - self.job_complete_callback with a status of Incomplete if they wish for the job to be - automatically requeued. - """ + """ + Method invoked once a job_item has completed, in some manner. 
diff --git a/buildman/manager/buildcanceller.py b/buildman/manager/buildcanceller.py
index c2ab2d9ad..8439cab7f 100644
--- a/buildman/manager/buildcanceller.py
+++ b/buildman/manager/buildcanceller.py
@@ -9,7 +9,9 @@ CANCELLERS = {"ephemeral": OrchestratorCanceller}
 
 
 class BuildCanceller(object):
-    """ A class to manage cancelling a build """
+    """
+    A class to manage cancelling a build.
+    """
 
     def __init__(self, app=None):
         self.build_manager_config = app.config.get("BUILD_MANAGER")
@@ -19,7 +21,9 @@ class BuildCanceller(object):
         self.handler = None
 
     def try_cancel_build(self, uuid):
-        """ A method to kill a running build """
+        """
+        A method to kill a running build.
+        """
         if self.handler is None:
             canceller = CANCELLERS.get(self.build_manager_config[0], NoopCanceller)
             self.handler = canceller(self.build_manager_config[1])
diff --git a/buildman/manager/enterprise.py b/buildman/manager/enterprise.py
index 975b53cae..efd73db47 100644
--- a/buildman/manager/enterprise.py
+++ b/buildman/manager/enterprise.py
@@ -13,7 +13,9 @@ logger = logging.getLogger(__name__)
 
 
 class DynamicRegistrationComponent(BaseComponent):
-    """ Component session that handles dynamic registration of the builder components. """
+    """
+    Component session that handles dynamic registration of the builder components.
+    """
 
     def onConnect(self):
         self.join(REGISTRATION_REALM)
@@ -32,7 +34,9 @@ class DynamicRegistrationComponent(BaseComponent):
 
 
 class EnterpriseManager(BaseManager):
-    """ Build manager implementation for the Enterprise Registry. """
+    """
+    Build manager implementation for the Enterprise Registry.
+    """
 
     def __init__(self, *args, **kwargs):
         self.ready_components = set()
@@ -52,7 +56,9 @@ class EnterpriseManager(BaseManager):
         return 60
 
     def add_build_component(self):
-        """ Adds a new build component for an Enterprise Registry. """
+        """
+        Adds a new build component for an Enterprise Registry.
+        """
         # Generate a new unique realm ID for the build worker.
         realm = str(uuid.uuid4())
         new_component = self.register_component(realm, BuildComponent, token="")
@@ -61,7 +67,9 @@ class EnterpriseManager(BaseManager):
 
     @coroutine
     def schedule(self, build_job):
-        """ Schedules a build for an Enterprise Registry. """
+        """
+        Schedules a build for an Enterprise Registry.
+        """
         if self.shutting_down or not self.ready_components:
             raise Return(False, RETRY_TIMEOUT)
diff --git a/buildman/manager/ephemeral.py b/buildman/manager/ephemeral.py
index aae6f9a78..9135033ab 100644
--- a/buildman/manager/ephemeral.py
+++ b/buildman/manager/ephemeral.py
@@ -65,7 +65,9 @@ BuildInfo = namedtuple("BuildInfo", ["component", "build_job", "execution_id", "
 
 
 class EphemeralBuilderManager(BaseManager):
-    """ Build manager implementation for the Enterprise Registry. """
+    """
+    Build manager implementation for the Enterprise Registry.
+ """ EXECUTORS = { "popen": PopenExecutor, @@ -98,7 +100,9 @@ class EphemeralBuilderManager(BaseManager): @coroutine def _mark_job_incomplete(self, build_job, build_info): - """ Marks a job as incomplete, in response to a failure to start or a timeout. """ + """ + Marks a job as incomplete, in response to a failure to start or a timeout. + """ executor_name = build_info.executor_name execution_id = build_info.execution_id @@ -137,13 +141,13 @@ class EphemeralBuilderManager(BaseManager): @coroutine def _job_callback(self, key_change): """ - This is the callback invoked when keys related to jobs are changed. - It ignores all events related to the creation of new jobs. - Deletes or expirations cause checks to ensure they've been properly marked as completed. + This is the callback invoked when keys related to jobs are changed. It ignores all events + related to the creation of new jobs. Deletes or expirations cause checks to ensure they've + been properly marked as completed. - :param key_change: the event and value produced by a key changing in the orchestrator - :type key_change: :class:`KeyChange` - """ + :param key_change: the event and value produced by a key changing in the orchestrator + :type key_change: :class:`KeyChange` + """ if key_change.event in (KeyEvent.CREATE, KeyEvent.SET): raise Return() @@ -339,35 +343,35 @@ class EphemeralBuilderManager(BaseManager): def _metric_key(self, realm): """ - Create a key which is used to track a job in the Orchestrator. + Create a key which is used to track a job in the Orchestrator. - :param realm: realm for the build - :type realm: str - :returns: key used to track jobs - :rtype: str - """ + :param realm: realm for the build + :type realm: str + :returns: key used to track jobs + :rtype: str + """ return slash_join(self._metric_prefix, realm) def _job_key(self, build_job): """ - Creates a key which is used to track a job in the Orchestrator. + Creates a key which is used to track a job in the Orchestrator. - :param build_job: unique job identifier for a build - :type build_job: str - :returns: key used to track the job - :rtype: str - """ + :param build_job: unique job identifier for a build + :type build_job: str + :returns: key used to track the job + :rtype: str + """ return slash_join(self._job_prefix, build_job.job_details["build_uuid"]) def _realm_key(self, realm): """ - Create a key which is used to track an incoming connection on a realm. + Create a key which is used to track an incoming connection on a realm. - :param realm: realm for the build - :type realm: str - :returns: key used to track the connection to the realm - :rtype: str - """ + :param realm: realm for the build + :type realm: str + :returns: key used to track the connection to the realm + :rtype: str + """ return slash_join(self._realm_prefix, realm) def initialize(self, manager_config): @@ -787,10 +791,11 @@ class EphemeralBuilderManager(BaseManager): logger.exception("Could not write metric for realm %s", realm) def num_workers(self): - """ The number of workers we're managing locally. + """ + The number of workers we're managing locally. 
-        :returns: the number of the workers locally managed
-        :rtype: int
+        :returns: the number of workers locally managed
+        :rtype: int
         """
         return len(self._component_to_job)

diff --git a/buildman/manager/etcd_canceller.py b/buildman/manager/etcd_canceller.py
index d4b129e52..c5db79d10 100644
--- a/buildman/manager/etcd_canceller.py
+++ b/buildman/manager/etcd_canceller.py
@@ -5,7 +5,9 @@ logger = logging.getLogger(__name__)
 
 
 class EtcdCanceller(object):
-    """ A class that sends a message to etcd to cancel a build """
+    """
+    A class that sends a message to etcd to cancel a build.
+    """
 
     def __init__(self, config):
         etcd_host = config.get("ETCD_HOST", "127.0.0.1")
@@ -28,7 +30,9 @@ class EtcdCanceller(object):
         )
 
     def try_cancel_build(self, build_uuid):
-        """ Writes etcd message to cancel build_uuid. """
+        """
+        Writes etcd message to cancel build_uuid.
+        """
         logger.info("Cancelling build %s", build_uuid)
         try:
             self._etcd_client.write(
diff --git a/buildman/manager/executor.py b/buildman/manager/executor.py
index 6694388fc..133a0b1f1 100644
--- a/buildman/manager/executor.py
+++ b/buildman/manager/executor.py
@@ -65,15 +65,19 @@ def async_observe(metric, *labels):
 
 
 class ExecutorException(Exception):
-    """ Exception raised when there is a problem starting or stopping a builder. """
+    """
+    Exception raised when there is a problem starting or stopping a builder.
+    """
 
     pass
 
 
 class BuilderExecutor(object):
    def __init__(self, executor_config, manager_hostname):
-        """ Interface which can be plugged into the EphemeralNodeManager to provide a strategy for
-        starting and stopping builders. """
+        """
+        Interface which can be plugged into the EphemeralNodeManager to provide a strategy for
+        starting and stopping builders.
+        """
         self.executor_config = executor_config
         self.manager_hostname = manager_hostname
 
@@ -82,28 +86,40 @@ class BuilderExecutor(object):
 
     @property
     def name(self):
-        """ Name returns the unique name for this executor. """
+        """
+        Returns the unique name for this executor.
+        """
         return self.executor_config.get("NAME") or self.__class__.__name__
 
     @property
     def setup_time(self):
-        """ Returns the amount of time (in seconds) to wait for the execution to start for the build.
-        If None, the manager's default will be used. """
+        """
+        Returns the amount of time (in seconds) to wait for the execution to start for the build.
+
+        If None, the manager's default will be used.
+        """
         return self.executor_config.get("SETUP_TIME")
 
     @coroutine
     def start_builder(self, realm, token, build_uuid):
-        """ Create a builder with the specified config. Returns a unique id which can be used to manage
-        the builder. """
+        """
+        Create a builder with the specified config.
+
+        Returns a unique id which can be used to manage the builder.
+        """
         raise NotImplementedError
 
     @coroutine
     def stop_builder(self, builder_id):
-        """ Stop a builder which is currently running. """
+        """
+        Stop a builder which is currently running.
+        """
         raise NotImplementedError
 
     def allowed_for_namespace(self, namespace):
-        """ Returns true if this executor can be used for builds in the given namespace. """
+        """
+        Returns true if this executor can be used for builds in the given namespace.
+        """
 
         # Check for an explicit namespace whitelist.
         namespace_whitelist = self.executor_config.get("NAMESPACE_WHITELIST")
 
@@ -122,8 +138,9 @@ class BuilderExecutor(object):
 
     @property
     def minimum_retry_threshold(self):
-        """ Returns the minimum number of retries required for this executor to be used or 0 if
-            none. """
+        """
+        Returns the minimum number of retries required for this executor to be used or 0 if none.
+        """
         return self.executor_config.get("MINIMUM_RETRY_THRESHOLD", 0)
 
     def generate_cloud_config(
@@ -163,8 +180,10 @@ class BuilderExecutor(object):
 
 
 class EC2Executor(BuilderExecutor):
-    """ Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud
-    providers. """
+    """
+    Implementation of BuilderExecutor which uses boto to start machines on Amazon EC2.
+    """
 
     COREOS_STACK_URL = (
         "http://%s.release.core-os.net/amd64-usr/current/coreos_production_ami_hvm.txt"
@@ -175,7 +194,9 @@ class EC2Executor(BuilderExecutor):
         super(EC2Executor, self).__init__(*args, **kwargs)
 
     def _get_conn(self):
-        """ Creates an ec2 connection which can be used to manage instances. """
+        """
+        Creates an ec2 connection which can be used to manage instances.
+        """
         return AsyncWrapper(
             boto.ec2.connect_to_region(
                 self.executor_config["EC2_REGION"],
@@ -187,7 +208,9 @@ class EC2Executor(BuilderExecutor):
     @classmethod
     @cachetools.func.ttl_cache(ttl=ONE_HOUR)
     def _get_coreos_ami(cls, ec2_region, coreos_channel):
-        """ Retrieve the CoreOS AMI id from the canonical listing. """
+        """
+        Retrieve the CoreOS AMI id from the canonical listing.
+        """
         stack_list_string = requests.get(EC2Executor.COREOS_STACK_URL % coreos_channel).text
         stack_amis = dict([stack.split("=") for stack in stack_list_string.split("|")])
         return stack_amis[ec2_region]
@@ -303,7 +326,9 @@ class EC2Executor(BuilderExecutor):
 
 
 class PopenExecutor(BuilderExecutor):
-    """ Implementation of BuilderExecutor which uses Popen to fork a quay-builder process. """
+    """
+    Implementation of BuilderExecutor which uses Popen to fork a quay-builder process.
+    """
 
     def __init__(self, executor_config, manager_hostname):
         self._jobs = {}
@@ -354,8 +379,9 @@ class PopenExecutor(BuilderExecutor):
 
 
 class KubernetesExecutor(BuilderExecutor):
-    """ Executes build jobs by creating Kubernetes jobs which run a qemu-kvm virtual
-    machine in a pod """
+    """
+    Executes build jobs by creating Kubernetes jobs which run a qemu-kvm virtual machine in a pod.
+    """
 
     def __init__(self, *args, **kwargs):
         super(KubernetesExecutor, self).__init__(*args, **kwargs)
@@ -585,10 +611,14 @@ class KubernetesExecutor(BuilderExecutor):
 
 
 class LogPipe(threading.Thread):
-    """ Adapted from http://codereview.stackexchange.com/a/17959 """
+    """
+    Adapted from http://codereview.stackexchange.com/a/17959.
+    """
 
     def __init__(self, level):
-        """ Setup the object with a logger and a loglevel and start the thread """
+        """
+        Set up the object with a logger and a log level, and start the thread.
+        """
         threading.Thread.__init__(self)
         self.daemon = False
         self.level = level
@@ -597,16 +627,22 @@ class LogPipe(threading.Thread):
         self.start()
 
     def fileno(self):
-        """ Return the write file descriptor of the pipe """
+        """
+        Return the write file descriptor of the pipe.
+        """
         return self.fd_write
 
     def run(self):
-        """ Run the thread, logging everything. """
+        """
+        Run the thread, logging everything.
+        """
         for line in iter(self.pipe_reader.readline, ""):
             logging.log(self.level, line.strip("\n"))
         self.pipe_reader.close()
 
     def close(self):
-        """ Close the write end of the pipe. """
+        """
+        Close the write end of the pipe.
+ """ os.close(self.fd_write) diff --git a/buildman/manager/noop_canceller.py b/buildman/manager/noop_canceller.py index 51c023fcc..e4d297155 100644 --- a/buildman/manager/noop_canceller.py +++ b/buildman/manager/noop_canceller.py @@ -1,9 +1,13 @@ class NoopCanceller(object): - """ A class that can not cancel a build """ + """ + A class that can not cancel a build. + """ def __init__(self, config=None): pass def try_cancel_build(self, uuid): - """ Does nothing and fails to cancel build. """ + """ + Does nothing and fails to cancel build. + """ return False diff --git a/buildman/manager/orchestrator_canceller.py b/buildman/manager/orchestrator_canceller.py index 323ecd09b..56708ceff 100644 --- a/buildman/manager/orchestrator_canceller.py +++ b/buildman/manager/orchestrator_canceller.py @@ -11,7 +11,9 @@ CANCEL_PREFIX = "cancel/" class OrchestratorCanceller(object): - """ An asynchronous way to cancel a build with any Orchestrator. """ + """ + An asynchronous way to cancel a build with any Orchestrator. + """ def __init__(self, config): self._orchestrator = orchestrator_from_config(config, canceller_only=True) diff --git a/buildman/orchestrator.py b/buildman/orchestrator.py index c09d09df1..6b1b25f65 100644 --- a/buildman/orchestrator.py +++ b/buildman/orchestrator.py @@ -46,13 +46,13 @@ REDIS_EXPIRED_KEYSPACE_REGEX = re.compile(REDIS_EXPIRED_KEYSPACE_PATTERN % (r"(\ def orchestrator_from_config(manager_config, canceller_only=False): """ - Allocates a new Orchestrator from the 'ORCHESTRATOR' block from provided manager config. - Checks for legacy configuration prefixed with 'ETCD_' when the 'ORCHESTRATOR' is not present. + Allocates a new Orchestrator from the 'ORCHESTRATOR' block from provided manager config. Checks + for legacy configuration prefixed with 'ETCD_' when the 'ORCHESTRATOR' is not present. - :param manager_config: the configuration for the orchestrator - :type manager_config: dict - :rtype: :class: Orchestrator - """ + :param manager_config: the configuration for the orchestrator + :type manager_config: dict + :rtype: :class: Orchestrator + """ # Legacy codepath only knows how to configure etcd. if manager_config.get("ORCHESTRATOR") is None: manager_config["ORCHESTRATOR"] = { @@ -130,25 +130,24 @@ class KeyChange(namedtuple("KeyChange", ["event", "key", "value"])): @add_metaclass(ABCMeta) class Orchestrator(object): """ - Orchestrator is the interface that is used to synchronize the build states - across build managers. + Orchestrator is the interface that is used to synchronize the build states across build + managers. - This interface assumes that storage is being done by a key-value store - that supports watching for events on keys. + This interface assumes that storage is being done by a key-value store + that supports watching for events on keys. - Missing keys should return KeyError; otherwise, errors should raise an - OrchestratorError. + Missing keys should return KeyError; otherwise, errors should raise an + OrchestratorError. - :param key_prefix: the prefix of keys being watched - :type key_prefix: str - """ + :param key_prefix: the prefix of keys being watched + :type key_prefix: str + """ @abstractmethod def on_key_change(self, key, callback, restarter=None): """ - - The callback parameter takes in a KeyChange object as a parameter. - """ + The callback parameter takes in a KeyChange object as a parameter. 
+ """ pass @abstractmethod @@ -187,47 +186,47 @@ class Orchestrator(object): @abstractmethod def set_key_sync(self, key, value, overwrite=False, expiration=None): """ - set_key, but without trollius coroutines. - """ + set_key, but without trollius coroutines. + """ pass @abstractmethod def delete_key(self, key): """ - Deletes a key that has been set in the orchestrator. + Deletes a key that has been set in the orchestrator. - :param key: the identifier for the key - :type key: str - """ + :param key: the identifier for the key + :type key: str + """ pass @abstractmethod def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION): """ - Takes a lock for synchronizing exclusive operations cluster-wide. + Takes a lock for synchronizing exclusive operations cluster-wide. - :param key: the identifier for the lock - :type key: str - :param expiration: the duration until the lock expires - :type expiration: :class:`datetime.timedelta` or int (seconds) - :returns: whether or not the lock was acquired - :rtype: bool - """ + :param key: the identifier for the lock + :type key: str + :param expiration: the duration until the lock expires + :type expiration: :class:`datetime.timedelta` or int (seconds) + :returns: whether or not the lock was acquired + :rtype: bool + """ pass @abstractmethod def shutdown(): """ - This function should shutdown any final resources allocated by the Orchestrator. - """ + This function should shutdown any final resources allocated by the Orchestrator. + """ pass def _sleep_orchestrator(): """ - This function blocks the trollius event loop by sleeping in order to backoff if a failure - such as a ConnectionError has occurred. - """ + This function blocks the trollius event loop by sleeping in order to backoff if a failure such + as a ConnectionError has occurred. + """ logger.exception( "Connecting to etcd failed; sleeping for %s and then trying again", ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION, @@ -240,7 +239,9 @@ def _sleep_orchestrator(): class EtcdAction(object): - """ Enumeration of the various kinds of etcd actions we can observe via a watch. """ + """ + Enumeration of the various kinds of etcd actions we can observe via a watch. + """ GET = "get" SET = "set" @@ -287,9 +288,10 @@ class Etcd2Orchestrator(Orchestrator): @staticmethod def _sanity_check_ttl(ttl): """ - A TTL of < 0 in etcd results in the key *never being expired*. - We use a max here to ensure that if the TTL is < 0, the key will expire immediately. - """ + A TTL of < 0 in etcd results in the key *never being expired*. + + We use a max here to ensure that if the TTL is < 0, the key will expire immediately. + """ return max(ttl, 0) def _watch_etcd(self, key, callback, restarter=None, start_index=None): @@ -511,8 +513,8 @@ class MemoryOrchestrator(Orchestrator): def set_key_sync(self, key, value, overwrite=False, expiration=None): """ - set_key, but without trollius coroutines. - """ + set_key, but without trollius coroutines. + """ preexisting_key = "key" in self.state if preexisting_key and not overwrite: raise KeyError @@ -683,9 +685,10 @@ class RedisOrchestrator(Orchestrator): @staticmethod def _is_expired_keyspace_event(event_result): """ - Sanity check that this isn't an unrelated keyspace event. - There could be a more efficient keyspace event config to avoid this client-side filter. - """ + Sanity check that this isn't an unrelated keyspace event. + + There could be a more efficient keyspace event config to avoid this client-side filter. 
+ """ if event_result is None: return False diff --git a/buildman/server.py b/buildman/server.py index 714b23d88..9a66efdd9 100644 --- a/buildman/server.py +++ b/buildman/server.py @@ -32,8 +32,10 @@ HEARTBEAT_PERIOD_SEC = 30 class BuilderServer(object): - """ Server which handles both HTTP and WAMP requests, managing the full state of the build - controller. """ + """ + Server which handles both HTTP and WAMP requests, managing the full state of the build + controller. + """ def __init__( self, @@ -131,8 +133,11 @@ class BuilderServer(object): logger.debug("Shutting down server") def _register_component(self, realm, component_klass, **kwargs): - """ Registers a component with the server. The component_klass must derive from - BaseComponent. """ + """ + Registers a component with the server. + + The component_klass must derive from BaseComponent. + """ logger.debug("Registering component with realm %s", realm) if realm in self._realm_map: logger.debug("Component with realm %s already registered", realm) diff --git a/buildman/test/test_buildman.py b/buildman/test/test_buildman.py index ea4f20fe1..c5a9976f3 100644 --- a/buildman/test/test_buildman.py +++ b/buildman/test/test_buildman.py @@ -88,7 +88,9 @@ class EphemeralBuilderTestCase(unittest.TestCase): class TestEphemeralLifecycle(EphemeralBuilderTestCase): - """ Tests the various lifecycles of the ephemeral builder and its interaction with etcd. """ + """ + Tests the various lifecycles of the ephemeral builder and its interaction with etcd. + """ def __init__(self, *args, **kwargs): super(TestEphemeralLifecycle, self).__init__(*args, **kwargs) @@ -420,9 +422,10 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase): class TestEphemeral(EphemeralBuilderTestCase): - """ Simple unit tests for the ephemeral builder around config management, starting and stopping - jobs. - """ + """ + Simple unit tests for the ephemeral builder around config management, starting and stopping + jobs. + """ def setUp(self): super(TestEphemeral, self).setUp() diff --git a/buildtrigger/basehandler.py b/buildtrigger/basehandler.py index 30d919c10..5d9872813 100644 --- a/buildtrigger/basehandler.py +++ b/buildtrigger/basehandler.py @@ -164,7 +164,9 @@ class BuildTriggerHandler(object): @property def auth_token(self): - """ Returns the auth token for the trigger. """ + """ + Returns the auth token for the trigger. + """ # NOTE: This check is for testing. if hasattr(self.trigger, "auth_token"): return self.trigger.auth_token @@ -177,98 +179,111 @@ class BuildTriggerHandler(object): @abstractmethod def load_dockerfile_contents(self): """ - Loads the Dockerfile found for the trigger's config and returns them or None if none could - be found/loaded. - """ + Loads the Dockerfile found for the trigger's config and returns them or None if none could + be found/loaded. + """ pass @abstractmethod def list_build_source_namespaces(self): """ - Take the auth information for the specific trigger type and load the - list of namespaces that can contain build sources. - """ + Take the auth information for the specific trigger type and load the list of namespaces that + can contain build sources. + """ pass @abstractmethod def list_build_sources_for_namespace(self, namespace): """ - Take the auth information for the specific trigger type and load the - list of repositories under the given namespace. - """ + Take the auth information for the specific trigger type and load the list of repositories + under the given namespace. 
+ """ pass @abstractmethod def list_build_subdirs(self): """ - Take the auth information and the specified config so far and list all of - the possible subdirs containing dockerfiles. - """ + Take the auth information and the specified config so far and list all of the possible + subdirs containing dockerfiles. + """ pass @abstractmethod def handle_trigger_request(self, request): """ - Transform the incoming request data into a set of actions. Returns a PreparedBuild. - """ + Transform the incoming request data into a set of actions. + + Returns a PreparedBuild. + """ pass @abstractmethod def is_active(self): """ - Returns True if the current build trigger is active. Inactive means further - setup is needed. - """ + Returns True if the current build trigger is active. + + Inactive means further setup is needed. + """ pass @abstractmethod def activate(self, standard_webhook_url): """ - Activates the trigger for the service, with the given new configuration. - Returns new public and private config that should be stored if successful. - """ + Activates the trigger for the service, with the given new configuration. + + Returns new public and private config that should be stored if successful. + """ pass @abstractmethod def deactivate(self): """ - Deactivates the trigger for the service, removing any hooks installed in - the remote service. Returns the new config that should be stored if this - trigger is going to be re-activated. - """ + Deactivates the trigger for the service, removing any hooks installed in the remote service. + + Returns the new config that should be stored if this trigger is going to be re-activated. + """ pass @abstractmethod def manual_start(self, run_parameters=None): """ - Manually creates a repository build for this trigger. Returns a PreparedBuild. - """ + Manually creates a repository build for this trigger. + + Returns a PreparedBuild. + """ pass @abstractmethod def list_field_values(self, field_name, limit=None): """ - Lists all values for the given custom trigger field. For example, a trigger might have a - field named "branches", and this method would return all branches. - """ + Lists all values for the given custom trigger field. + + For example, a trigger might have a field named "branches", and this method would return all + branches. + """ pass @abstractmethod def get_repository_url(self): - """ Returns the URL of the current trigger's repository. Note that this operation - can be called in a loop, so it should be as fast as possible. """ + """ + Returns the URL of the current trigger's repository. + + Note that this operation can be called in a loop, so it should be as fast as possible. + """ pass @classmethod def filename_is_dockerfile(cls, file_name): - """ Returns whether the file is named Dockerfile or follows the convention .Dockerfile""" + """ + Returns whether the file is named Dockerfile or follows the convention .Dockerfile. + """ return file_name.endswith(".Dockerfile") or u"Dockerfile" == file_name @classmethod def service_name(cls): """ - Particular service implemented by subclasses. - """ + Particular service implemented by subclasses. + """ raise NotImplementedError @classmethod @@ -280,17 +295,22 @@ class BuildTriggerHandler(object): raise InvalidServiceException("Unable to find service: %s" % trigger.service.name) def put_config_key(self, key, value): - """ Updates a config key in the trigger, saving it to the DB. """ + """ + Updates a config key in the trigger, saving it to the DB. 
+ """ self.config[key] = value model.build.update_build_trigger(self.trigger, self.config) def set_auth_token(self, auth_token): - """ Sets the auth token for the trigger, saving it to the DB. """ + """ + Sets the auth token for the trigger, saving it to the DB. + """ model.build.update_build_trigger(self.trigger, self.config, auth_token=auth_token) def get_dockerfile_path(self): - """ Returns the normalized path to the Dockerfile found in the subdirectory - in the config. """ + """ + Returns the normalized path to the Dockerfile found in the subdirectory in the config. + """ dockerfile_path = self.config.get("dockerfile_path") or "Dockerfile" if dockerfile_path[0] == "/": dockerfile_path = dockerfile_path[1:] @@ -395,7 +415,9 @@ class BuildTriggerHandler(object): @classmethod def get_parent_directory_mappings(cls, dockerfile_path, current_paths=None): - """ Returns a map of dockerfile_paths to it's possible contexts. """ + """ + Returns a map of dockerfile_paths to it's possible contexts. + """ if dockerfile_path == "": return {} diff --git a/buildtrigger/bitbuckethandler.py b/buildtrigger/bitbuckethandler.py index 3f0630003..49a5e4a66 100644 --- a/buildtrigger/bitbuckethandler.py +++ b/buildtrigger/bitbuckethandler.py @@ -128,9 +128,9 @@ BITBUCKET_COMMIT_INFO_SCHEMA = { def get_transformed_commit_info(bb_commit, ref, default_branch, repository_name, lookup_author): - """ Returns the BitBucket commit information transformed into our own - payload format. - """ + """ + Returns the BitBucket commit information transformed into our own payload format. + """ try: validate(bb_commit, BITBUCKET_COMMIT_INFO_SCHEMA) except Exception as exc: @@ -165,9 +165,11 @@ def get_transformed_commit_info(bb_commit, ref, default_branch, repository_name, def get_transformed_webhook_payload(bb_payload, default_branch=None): - """ Returns the BitBucket webhook JSON payload transformed into our own payload - format. If the bb_payload is not valid, returns None. - """ + """ + Returns the BitBucket webhook JSON payload transformed into our own payload format. + + If the bb_payload is not valid, returns None. + """ try: validate(bb_payload, BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA) except Exception as exc: @@ -209,15 +211,17 @@ def get_transformed_webhook_payload(bb_payload, default_branch=None): class BitbucketBuildTrigger(BuildTriggerHandler): """ - BuildTrigger for Bitbucket. - """ + BuildTrigger for Bitbucket. + """ @classmethod def service_name(cls): return "bitbucket" def _get_client(self): - """ Returns a BitBucket API client for this trigger's config. """ + """ + Returns a BitBucket API client for this trigger's config. + """ key = app.config.get("BITBUCKET_TRIGGER_CONFIG", {}).get("CONSUMER_KEY", "") secret = app.config.get("BITBUCKET_TRIGGER_CONFIG", {}).get("CONSUMER_SECRET", "") @@ -227,7 +231,9 @@ class BitbucketBuildTrigger(BuildTriggerHandler): return BitBucket(key, secret, callback_url, timeout=15) def _get_authorized_client(self): - """ Returns an authorized API client. """ + """ + Returns an authorized API client. + """ base_client = self._get_client() auth_token = self.auth_token or "invalid:invalid" token_parts = auth_token.split(":") @@ -238,14 +244,18 @@ class BitbucketBuildTrigger(BuildTriggerHandler): return base_client.get_authorized_client(access_token, access_token_secret) def _get_repository_client(self): - """ Returns an API client for working with this config's BB repository. """ + """ + Returns an API client for working with this config's BB repository. 
+ """ source = self.config["build_source"] (namespace, name) = source.split("/") bitbucket_client = self._get_authorized_client() return bitbucket_client.for_namespace(namespace).repositories().get(name) def _get_default_branch(self, repository, default_value="master"): - """ Returns the default branch for the repository or the value given. """ + """ + Returns the default branch for the repository or the value given. + """ (result, data, _) = repository.get_main_branch() if result: return data["name"] @@ -253,7 +263,9 @@ class BitbucketBuildTrigger(BuildTriggerHandler): return default_value def get_oauth_url(self): - """ Returns the OAuth URL to authorize Bitbucket. """ + """ + Returns the OAuth URL to authorize Bitbucket. + """ bitbucket_client = self._get_client() (result, data, err_msg) = bitbucket_client.get_authorization_url() if not result: @@ -262,7 +274,9 @@ class BitbucketBuildTrigger(BuildTriggerHandler): return data def exchange_verifier(self, verifier): - """ Exchanges the given verifier token to setup this trigger. """ + """ + Exchanges the given verifier token to setup this trigger. + """ bitbucket_client = self._get_client() access_token = self.config.get("access_token", "") access_token_secret = self.auth_token diff --git a/buildtrigger/githubhandler.py b/buildtrigger/githubhandler.py index ebc712266..85f483902 100644 --- a/buildtrigger/githubhandler.py +++ b/buildtrigger/githubhandler.py @@ -78,9 +78,11 @@ GITHUB_WEBHOOK_PAYLOAD_SCHEMA = { def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user=None): - """ Returns the GitHub webhook JSON payload transformed into our own payload - format. If the gh_payload is not valid, returns None. - """ + """ + Returns the GitHub webhook JSON payload transformed into our own payload format. + + If the gh_payload is not valid, returns None. + """ try: validate(gh_payload, GITHUB_WEBHOOK_PAYLOAD_SCHEMA) except Exception as exc: @@ -149,11 +151,13 @@ def _catch_ssl_errors(func): class GithubBuildTrigger(BuildTriggerHandler): """ - BuildTrigger for GitHub that uses the archive API and buildpacks. - """ + BuildTrigger for GitHub that uses the archive API and buildpacks. + """ def _get_client(self): - """ Returns an authenticated client for talking to the GitHub API. """ + """ + Returns an authenticated client for talking to the GitHub API. + """ return Github( self.auth_token, base_url=github_trigger.api_endpoint(), diff --git a/buildtrigger/gitlabhandler.py b/buildtrigger/gitlabhandler.py index fd2412577..a333dd20a 100644 --- a/buildtrigger/gitlabhandler.py +++ b/buildtrigger/gitlabhandler.py @@ -91,9 +91,9 @@ def _catch_timeouts_and_errors(func): def _paginated_iterator(func, exc, **kwargs): - """ Returns an iterator over invocations of the given function, automatically handling - pagination. - """ + """ + Returns an iterator over invocations of the given function, automatically handling pagination. + """ page = 1 while True: result = func(page=page, per_page=_PER_PAGE_COUNT, **kwargs) @@ -114,9 +114,11 @@ def _paginated_iterator(func, exc, **kwargs): def get_transformed_webhook_payload( gl_payload, default_branch=None, lookup_user=None, lookup_commit=None ): - """ Returns the Gitlab webhook JSON payload transformed into our own payload - format. If the gl_payload is not valid, returns None. - """ + """ + Returns the Gitlab webhook JSON payload transformed into our own payload format. + + If the gl_payload is not valid, returns None. 
+ """ try: validate(gl_payload, GITLAB_WEBHOOK_PAYLOAD_SCHEMA) except Exception as exc: @@ -182,8 +184,8 @@ def get_transformed_webhook_payload( class GitLabBuildTrigger(BuildTriggerHandler): """ - BuildTrigger for GitLab. - """ + BuildTrigger for GitLab. + """ @classmethod def service_name(cls): diff --git a/buildtrigger/triggerutil.py b/buildtrigger/triggerutil.py index 9bf5997ee..34e924f2d 100644 --- a/buildtrigger/triggerutil.py +++ b/buildtrigger/triggerutil.py @@ -99,7 +99,9 @@ def should_skip_commit(metadata): def raise_if_skipped_build(prepared_build, config): - """ Raises a SkipRequestException if the given build should be skipped. """ + """ + Raises a SkipRequestException if the given build should be skipped. + """ # Check to ensure we have metadata. if not prepared_build.metadata: logger.debug("Skipping request due to missing metadata for prepared build") diff --git a/conf/init/nginx_conf_create.py b/conf/init/nginx_conf_create.py index 26967b1d9..9522c389b 100644 --- a/conf/init/nginx_conf_create.py +++ b/conf/init/nginx_conf_create.py @@ -66,8 +66,8 @@ def write_config(filename, **kwargs): def generate_nginx_config(config): """ - Generates nginx config from the app config - """ + Generates nginx config from the app config. + """ config = config or {} use_https = os.path.exists(os.path.join(QUAYCONF_DIR, "stack/ssl.key")) use_old_certs = os.path.exists(os.path.join(QUAYCONF_DIR, "stack/ssl.old.key")) @@ -89,8 +89,8 @@ def generate_nginx_config(config): def generate_server_config(config): """ - Generates server config from the app config - """ + Generates server config from the app config. + """ config = config or {} tuf_server = config.get("TUF_SERVER", None) tuf_host = config.get("TUF_HOST", None) @@ -111,8 +111,8 @@ def generate_server_config(config): def generate_rate_limiting_config(config): """ - Generates rate limiting config from the app config - """ + Generates rate limiting config from the app config. + """ config = config or {} non_rate_limited_namespaces = config.get("NON_RATE_LIMITED_NAMESPACES") or set() enable_rate_limits = config.get("FEATURE_RATE_LIMITS", False) @@ -126,8 +126,8 @@ def generate_rate_limiting_config(config): def generate_hosted_http_base_config(config): """ - Generates hosted http base config from the app config - """ + Generates hosted http base config from the app config. + """ config = config or {} feature_proxy_protocol = config.get("FEATURE_PROXY_PROTOCOL", False) diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py index 0620fed63..722e8066b 100644 --- a/config_app/config_endpoints/api/__init__.py +++ b/config_app/config_endpoints/api/__init__.py @@ -44,7 +44,9 @@ def log_action(kind, user_or_orgname, metadata=None, repo=None, repo_name=None): def format_date(date): - """ Output an RFC822 date format. """ + """ + Output an RFC822 date format. + """ if date is None: return None return formatdate(timegm(date.utctimetuple())) @@ -147,7 +149,9 @@ def validate_json_request(schema_name, optional=False): def kubernetes_only(f): - """ Aborts the request with a 400 if the app is not running on kubernetes """ + """ + Aborts the request with a 400 if the app is not running on kubernetes. 
+ """ @wraps(f) def abort_if_not_kube(*args, **kwargs): diff --git a/config_app/config_endpoints/api/kube_endpoints.py b/config_app/config_endpoints/api/kube_endpoints.py index 3709cd787..e621af521 100644 --- a/config_app/config_endpoints/api/kube_endpoints.py +++ b/config_app/config_endpoints/api/kube_endpoints.py @@ -20,7 +20,9 @@ logger = logging.getLogger(__name__) @resource("/v1/kubernetes/deployments/") class SuperUserKubernetesDeployment(ApiResource): - """ Resource for the getting the status of Red Hat Quay deployments and cycling them """ + """ + Resource for the getting the status of Red Hat Quay deployments and cycling them. + """ schemas = { "ValidateDeploymentNames": { @@ -65,7 +67,9 @@ class QEDeploymentRolloutStatus(ApiResource): @resource("/v1/kubernetes/deployments/rollback") class QEDeploymentRollback(ApiResource): - """ Resource for rolling back deployments """ + """ + Resource for rolling back deployments. + """ schemas = { "ValidateDeploymentNames": { @@ -111,7 +115,9 @@ class QEDeploymentRollback(ApiResource): @resource("/v1/kubernetes/config") class SuperUserKubernetesConfiguration(ApiResource): - """ Resource for saving the config files to kubernetes secrets. """ + """ + Resource for saving the config files to kubernetes secrets. + """ @kubernetes_only @nickname("scDeployConfiguration") @@ -128,7 +134,9 @@ class SuperUserKubernetesConfiguration(ApiResource): @resource("/v1/kubernetes/config/populate") class KubernetesConfigurationPopulator(ApiResource): - """ Resource for populating the local configuration from the cluster's kubernetes secrets. """ + """ + Resource for populating the local configuration from the cluster's kubernetes secrets. + """ @kubernetes_only @nickname("scKubePopulateConfig") diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py index 5c1abc292..6b8f3cfc2 100644 --- a/config_app/config_endpoints/api/suconfig.py +++ b/config_app/config_endpoints/api/suconfig.py @@ -26,18 +26,24 @@ logger = logging.getLogger(__name__) def database_is_valid(): - """ Returns whether the database, as configured, is valid. """ + """ + Returns whether the database, as configured, is valid. + """ return model.is_valid() def database_has_users(): - """ Returns whether the database has any users defined. """ + """ + Returns whether the database has any users defined. + """ return model.has_users() @resource("/v1/superuser/config") class SuperUserConfig(ApiResource): - """ Resource for fetching and updating the current configuration, if any. """ + """ + Resource for fetching and updating the current configuration, if any. + """ schemas = { "UpdateConfig": { @@ -50,14 +56,18 @@ class SuperUserConfig(ApiResource): @nickname("scGetConfig") def get(self): - """ Returns the currently defined configuration, if any. """ + """ + Returns the currently defined configuration, if any. + """ config_object = config_provider.get_config() return {"config": config_object} @nickname("scUpdateConfig") @validate_json_request("UpdateConfig") def put(self): - """ Updates the config override file. """ + """ + Updates the config override file. + """ # Note: This method is called to set the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. 
        config_object = request.get_json()["config"]
 
@@ -78,13 +88,16 @@ class SuperUserConfig(ApiResource):
 
 @resource("/v1/superuser/registrystatus")
 class SuperUserRegistryStatus(ApiResource):
-    """ Resource for determining the status of the registry, such as if config exists,
-    if a database is configured, and if it has any defined users.
-    """
+    """
+    Resource for determining the status of the registry, such as whether config exists, whether a
+    database is configured, and whether it has any defined users.
+    """
 
     @nickname("scRegistryStatus")
     def get(self):
-        """ Returns the status of the registry. """
+        """
+        Returns the status of the registry.
+        """
         # If there is no config file, we need to setup the database.
         if not config_provider.config_exists():
             return {"status": "config-db"}
@@ -118,11 +131,15 @@ def _reload_config():
 
 @resource("/v1/superuser/setupdb")
 class SuperUserSetupDatabase(ApiResource):
-    """ Resource for invoking alembic to setup the database. """
+    """
+    Resource for invoking alembic to set up the database.
+    """
 
     @nickname("scSetupDatabase")
     def get(self):
-        """ Invokes the alembic upgrade process. """
+        """
+        Invokes the alembic upgrade process.
+        """
         # Note: This method is called after the database configured is saved, but before the
         # database has any tables. Therefore, we only allow it to be run in that unique case.
         if config_provider.config_exists() and not database_is_valid():
@@ -146,7 +163,9 @@ class SuperUserSetupDatabase(ApiResource):
 
 @resource("/v1/superuser/config/createsuperuser")
 class SuperUserCreateInitialSuperUser(ApiResource):
-    """ Resource for creating the initial super user. """
+    """
+    Resource for creating the initial super user.
+    """
 
     schemas = {
         "CreateSuperUser": {
@@ -164,8 +183,10 @@ class SuperUserCreateInitialSuperUser(ApiResource):
     @nickname("scCreateInitialSuperuser")
     @validate_json_request("CreateSuperUser")
     def post(self):
-        """ Creates the initial super user, updates the underlying configuration and
-        sets the current session to have that super user. """
+        """
+        Creates the initial super user, updates the underlying configuration and sets the current
+        session to have that super user.
+        """
 
         _reload_config()
 
@@ -199,7 +220,9 @@ class SuperUserCreateInitialSuperUser(ApiResource):
 
 @resource("/v1/superuser/config/validate/<service>")
 class SuperUserConfigValidate(ApiResource):
-    """ Resource for validating a block of configuration against an external service. """
+    """
+    Resource for validating a block of configuration against an external service.
+    """
 
     schemas = {
         "ValidateConfig": {
@@ -219,7 +242,9 @@ class SuperUserConfigValidate(ApiResource):
 
     @nickname("scValidateConfig")
     @validate_json_request("ValidateConfig")
     def post(self, service):
-        """ Validates the given config for the given service. """
+        """
+        Validates the given config for the given service.
+        """
         # Note: This method is called to validate the database configuration before super users exists,
         # so we also allow it to be called if there is no valid registry configuration setup. Note that
         # this is also safe since this method does not access any information not given in the request.
@@ -239,11 +264,15 @@ class SuperUserConfigValidate(ApiResource):
 
 @resource("/v1/superuser/config/file/<filename>")
 class SuperUserConfigFile(ApiResource):
-    """ Resource for fetching the status of config files and overriding them. """
+    """
+    Resource for fetching the status of config files and overriding them.
+ """ @nickname("scConfigFileExists") def get(self, filename): - """ Returns whether the configuration file with the given name exists. """ + """ + Returns whether the configuration file with the given name exists. + """ if not is_valid_config_upload_filename(filename): abort(404) @@ -251,7 +280,9 @@ class SuperUserConfigFile(ApiResource): @nickname("scUpdateConfigFile") def post(self, filename): - """ Updates the configuration file with the given name. """ + """ + Updates the configuration file with the given name. + """ if not is_valid_config_upload_filename(filename): abort(404) diff --git a/config_app/config_endpoints/api/suconfig_models_interface.py b/config_app/config_endpoints/api/suconfig_models_interface.py index d41a97d11..a1eae5ec2 100644 --- a/config_app/config_endpoints/api/suconfig_models_interface.py +++ b/config_app/config_endpoints/api/suconfig_models_interface.py @@ -5,35 +5,37 @@ from six import add_metaclass @add_metaclass(ABCMeta) class SuperuserConfigDataInterface(object): """ - Interface that represents all data store interactions required by the superuser config API. - """ + Interface that represents all data store interactions required by the superuser config API. + """ @abstractmethod def is_valid(self): """ - Returns true if the configured database is valid. - """ + Returns true if the configured database is valid. + """ @abstractmethod def has_users(self): """ - Returns true if there are any users defined. - """ + Returns true if there are any users defined. + """ @abstractmethod def create_superuser(self, username, password, email): """ - Creates a new superuser with the given username, password and email. Returns the user's UUID. - """ + Creates a new superuser with the given username, password and email. + + Returns the user's UUID. + """ @abstractmethod def has_federated_login(self, username, service_name): """ - Returns true if the matching user has a federated login under the matching service. - """ + Returns true if the matching user has a federated login under the matching service. + """ @abstractmethod def attach_federated_login(self, username, service_name, federated_username): """ - Attaches a federatated login to the matching user, under the given service. - """ + Attaches a federatated login to the matching user, under the given service. + """ diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py index 5fcc7e765..fa24b561b 100644 --- a/config_app/config_endpoints/api/superuser.py +++ b/config_app/config_endpoints/api/superuser.py @@ -32,7 +32,9 @@ logger = logging.getLogger(__name__) @resource("/v1/superuser/customcerts/") class SuperUserCustomCertificate(ApiResource): - """ Resource for managing a custom certificate. """ + """ + Resource for managing a custom certificate. + """ @nickname("uploadCustomCertificate") def post(self, certpath): @@ -85,7 +87,9 @@ class SuperUserCustomCertificate(ApiResource): @resource("/v1/superuser/customcerts") class SuperUserCustomCertificates(ApiResource): - """ Resource for managing custom certificates. """ + """ + Resource for managing custom certificates. + """ @nickname("getCustomCertificates") def get(self): @@ -128,7 +132,9 @@ class SuperUserCustomCertificates(ApiResource): @resource("/v1/superuser/keys") class SuperUserServiceKeyManagement(ApiResource): - """ Resource for managing service keys.""" + """ + Resource for managing service keys. 
+ """ schemas = { "CreateServiceKey": { @@ -221,7 +227,9 @@ class SuperUserServiceKeyManagement(ApiResource): @resource("/v1/superuser/approvedkeys/") class SuperUserServiceKeyApproval(ApiResource): - """ Resource for approving service keys. """ + """ + Resource for approving service keys. + """ schemas = { "ApproveServiceKey": { diff --git a/config_app/config_endpoints/api/superuser_models_interface.py b/config_app/config_endpoints/api/superuser_models_interface.py index 15d1e7bfd..915ecc636 100644 --- a/config_app/config_endpoints/api/superuser_models_interface.py +++ b/config_app/config_endpoints/api/superuser_models_interface.py @@ -37,24 +37,25 @@ class RepositoryBuild( ) ): """ - RepositoryBuild represents a build associated with a repostiory - :type uuid: string - :type logs_archived: boolean - :type repository_namespace_user_username: string - :type repository_name: string - :type can_write: boolean - :type can_write: boolean - :type pull_robot: User - :type resource_key: string - :type trigger: Trigger - :type display_name: string - :type started: boolean - :type job_config: {Any -> Any} - :type phase: string - :type status: string - :type error: string - :type archive_url: string - """ + RepositoryBuild represents a build associated with a repostiory. + + :type uuid: string + :type logs_archived: boolean + :type repository_namespace_user_username: string + :type repository_name: string + :type can_write: boolean + :type can_write: boolean + :type pull_robot: User + :type resource_key: string + :type trigger: Trigger + :type display_name: string + :type started: boolean + :type job_config: {Any -> Any} + :type phase: string + :type status: string + :type error: string + :type archive_url: string + """ def to_dict(self): @@ -94,12 +95,13 @@ class RepositoryBuild( class Approval(namedtuple("Approval", ["approver", "approval_type", "approved_date", "notes"])): """ - Approval represents whether a key has been approved or not - :type approver: User - :type approval_type: string - :type approved_date: Date - :type notes: string - """ + Approval represents whether a key has been approved or not. + + :type approver: User + :type approval_type: string + :type approved_date: Date + :type notes: string + """ def to_dict(self): return { @@ -127,18 +129,18 @@ class ServiceKey( ) ): """ - ServiceKey is an apostille signing key - :type name: string - :type kid: int - :type service: string - :type jwk: string - :type metadata: string - :type created_date: Date - :type expiration_date: Date - :type rotation_duration: Date - :type approval: Approval + ServiceKey is an apostille signing key. - """ + :type name: string + :type kid: int + :type service: string + :type jwk: string + :type metadata: string + :type created_date: Date + :type expiration_date: Date + :type rotation_duration: Date + :type approval: Approval + """ def to_dict(self): return { @@ -156,13 +158,14 @@ class ServiceKey( class User(namedtuple("User", ["username", "email", "verified", "enabled", "robot"])): """ - User represents a single user. - :type username: string - :type email: string - :type verified: boolean - :type enabled: boolean - :type robot: User - """ + User represents a single user. 
+ + :type username: string + :type email: string + :type verified: boolean + :type enabled: boolean + :type robot: User + """ def to_dict(self): user_data = { @@ -179,10 +182,11 @@ class User(namedtuple("User", ["username", "email", "verified", "enabled", "robo class Organization(namedtuple("Organization", ["username", "email"])): """ - Organization represents a single org. - :type username: string - :type email: string - """ + Organization represents a single org. + + :type username: string + :type email: string + """ def to_dict(self): return { @@ -194,11 +198,11 @@ class Organization(namedtuple("Organization", ["username", "email"])): @add_metaclass(ABCMeta) class SuperuserDataInterface(object): """ - Interface that represents all data store interactions required by a superuser api. - """ + Interface that represents all data store interactions required by a superuser api. + """ @abstractmethod def list_all_service_keys(self): """ - Returns a list of service keys - """ + Returns a list of service keys. + """ diff --git a/config_app/config_endpoints/api/superuser_models_pre_oci.py b/config_app/config_endpoints/api/superuser_models_pre_oci.py index 744549442..6a9ec4afb 100644 --- a/config_app/config_endpoints/api/superuser_models_pre_oci.py +++ b/config_app/config_endpoints/api/superuser_models_pre_oci.py @@ -47,9 +47,9 @@ class ServiceKeyAlreadyApproved(Exception): class PreOCIModel(SuperuserDataInterface): """ - PreOCIModel implements the data model for the SuperUser using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for the SuperUser using a database schema before it was + changed to support the OCI specification. + """ def list_all_service_keys(self): keys = model.service_keys.list_all_keys() diff --git a/config_app/config_endpoints/api/tar_config_loader.py b/config_app/config_endpoints/api/tar_config_loader.py index b0e8f42be..48ce26034 100644 --- a/config_app/config_endpoints/api/tar_config_loader.py +++ b/config_app/config_endpoints/api/tar_config_loader.py @@ -19,8 +19,8 @@ from config_app.config_util.tar import ( @resource("/v1/configapp/initialization") class ConfigInitialization(ApiResource): """ - Resource for dealing with any initialization logic for the config app - """ + Resource for dealing with any initialization logic for the config app. + """ @nickname("scStartNewConfig") def post(self): @@ -31,9 +31,9 @@ class ConfigInitialization(ApiResource): @resource("/v1/configapp/tarconfig") class TarConfigLoader(ApiResource): """ - Resource for dealing with configuration as a tarball, - including loading and generating functions - """ + Resource for dealing with configuration as a tarball, including loading and generating + functions. + """ @nickname("scGetConfigTarball") def get(self): @@ -50,7 +50,9 @@ class TarConfigLoader(ApiResource): @nickname("scUploadTarballConfig") def put(self): - """ Loads tarball config into the config provider """ + """ + Loads tarball config into the config provider. 
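+
+        A client-side sketch (the URL prefix and filename are illustrative):
+
+            requests.put("http://localhost:8080/api/v1/configapp/tarconfig",
+                         data=open("quay-config.tar", "rb"))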
+ """ # Generate a new empty dir to load the config into config_provider.new_config_dir() input_stream = request.stream diff --git a/config_app/config_endpoints/api/user.py b/config_app/config_endpoints/api/user.py index 9ab787a47..f5c0057a9 100644 --- a/config_app/config_endpoints/api/user.py +++ b/config_app/config_endpoints/api/user.py @@ -5,10 +5,14 @@ from config_app.config_endpoints.api.superuser_models_interface import user_view @resource("/v1/user/") class User(ApiResource): - """ Operations related to users. """ + """ + Operations related to users. + """ @nickname("getLoggedInUser") def get(self): - """ Get user information for the authenticated user. """ + """ + Get user information for the authenticated user. + """ user = get_authenticated_user() return user_view(user) diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py index c7f56dd3d..fc29e9053 100644 --- a/config_app/config_endpoints/common.py +++ b/config_app/config_endpoints/common.py @@ -30,7 +30,9 @@ TYPE_CONVERTER = { def _list_files(path, extension, contains=""): - """ Returns a list of all the files with the given extension found under the given path. """ + """ + Returns a list of all the files with the given extension found under the given path. + """ def matches(f): return os.path.splitext(f)[1] == "." + extension and contains in os.path.splitext(f)[0] @@ -47,7 +49,9 @@ FONT_AWESOME_4 = "netdna.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.cs def render_page_template(name, route_data=None, js_bundle_name=DEFAULT_JS_BUNDLE_NAME, **kwargs): - """ Renders the page template with the given name as the response and returns its contents. """ + """ + Renders the page template with the given name as the response and returns its contents. + """ main_scripts = _list_files("build", "js", js_bundle_name) use_cdn = os.getenv("TESTING") == "true" diff --git a/config_app/config_endpoints/exception.py b/config_app/config_endpoints/exception.py index 00274ac5e..bc04a0369 100644 --- a/config_app/config_endpoints/exception.py +++ b/config_app/config_endpoints/exception.py @@ -10,26 +10,26 @@ class ApiErrorType(Enum): class ApiException(HTTPException): """ - Represents an error in the application/problem+json format. + Represents an error in the application/problem+json format. - See: https://tools.ietf.org/html/rfc7807 + See: https://tools.ietf.org/html/rfc7807 - - "type" (string) - A URI reference that identifies the - problem type. + - "type" (string) - A URI reference that identifies the + problem type. - - "title" (string) - A short, human-readable summary of the problem - type. It SHOULD NOT change from occurrence to occurrence of the - problem, except for purposes of localization + - "title" (string) - A short, human-readable summary of the problem + type. It SHOULD NOT change from occurrence to occurrence of the + problem, except for purposes of localization - - "status" (number) - The HTTP status code + - "status" (number) - The HTTP status code - - "detail" (string) - A human-readable explanation specific to this - occurrence of the problem. + - "detail" (string) - A human-readable explanation specific to this + occurrence of the problem. - - "instance" (string) - A URI reference that identifies the specific - occurrence of the problem. It may or may not yield further - information if dereferenced. - """ + - "instance" (string) - A URI reference that identifies the specific + occurrence of the problem. It may or may not yield further + information if dereferenced. 
+ """ def __init__(self, error_type, status_code, error_description, payload=None): Exception.__init__(self) diff --git a/config_app/config_util/config/TransientDirectoryProvider.py b/config_app/config_util/config/TransientDirectoryProvider.py index 119178050..500d02b49 100644 --- a/config_app/config_util/config/TransientDirectoryProvider.py +++ b/config_app/config_util/config/TransientDirectoryProvider.py @@ -9,10 +9,10 @@ OLD_CONFIG_SUBDIR = "old/" class TransientDirectoryProvider(FileConfigProvider): - """ Implementation of the config provider that reads and writes the data - from/to the file system, only using temporary directories, - deleting old dirs and creating new ones as requested. - """ + """ + Implementation of the config provider that reads and writes the data from/to the file system, + only using temporary directories, deleting old dirs and creating new ones as requested. + """ def __init__(self, config_volume, yaml_filename, py_filename): # Create a temp directory that will be cleaned up when we change the config path @@ -29,8 +29,8 @@ class TransientDirectoryProvider(FileConfigProvider): def new_config_dir(self): """ - Update the path with a new temporary directory, deleting the old one in the process - """ + Update the path with a new temporary directory, deleting the old one in the process. + """ self.temp_dir.cleanup() temp_dir = TemporaryDirectory() @@ -40,8 +40,8 @@ class TransientDirectoryProvider(FileConfigProvider): def create_copy_of_config_dir(self): """ - Create a directory to store loaded/populated configuration (for rollback if necessary) - """ + Create a directory to store loaded/populated configuration (for rollback if necessary) + """ if self.old_config_dir is not None: self.old_config_dir.cleanup() diff --git a/config_app/config_util/config/__init__.py b/config_app/config_util/config/__init__.py index c86d98141..ddee86803 100644 --- a/config_app/config_util/config/__init__.py +++ b/config_app/config_util/config/__init__.py @@ -8,7 +8,9 @@ from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX def get_config_provider(config_volume, yaml_filename, py_filename, testing=False): - """ Loads and returns the config provider for the current environment. """ + """ + Loads and returns the config provider for the current environment. + """ if testing: return TestConfigProvider() diff --git a/config_app/config_util/config/basefileprovider.py b/config_app/config_util/config/basefileprovider.py index ac78000d9..d030264d0 100644 --- a/config_app/config_util/config/basefileprovider.py +++ b/config_app/config_util/config/basefileprovider.py @@ -12,7 +12,9 @@ logger = logging.getLogger(__name__) class BaseFileProvider(BaseProvider): - """ Base implementation of the config provider that reads the data from the file system. """ + """ + Base implementation of the config provider that reads the data from the file system. + """ def __init__(self, config_volume, yaml_filename, py_filename): self.config_volume = config_volume diff --git a/config_app/config_util/config/baseprovider.py b/config_app/config_util/config/baseprovider.py index e6705809d..5c85e8fb0 100644 --- a/config_app/config_util/config/baseprovider.py +++ b/config_app/config_util/config/baseprovider.py @@ -12,13 +12,17 @@ logger = logging.getLogger(__name__) class CannotWriteConfigException(Exception): - """ Exception raised when the config cannot be written. """ + """ + Exception raised when the config cannot be written. 
+ """ pass class SetupIncompleteException(Exception): - """ Exception raised when attempting to verify config that has not yet been setup. """ + """ + Exception raised when attempting to verify config that has not yet been setup. + """ pass @@ -63,8 +67,9 @@ def export_yaml(config_obj, config_file): @add_metaclass(ABCMeta) class BaseProvider(object): - """ A configuration provider helps to load, save, and handle config override in the application. - """ + """ + A configuration provider helps to load, save, and handle config override in the application. + """ @property def provider_id(self): @@ -72,59 +77,84 @@ class BaseProvider(object): @abstractmethod def update_app_config(self, app_config): - """ Updates the given application config object with the loaded override config. """ + """ + Updates the given application config object with the loaded override config. + """ @abstractmethod def get_config(self): - """ Returns the contents of the config override file, or None if none. """ + """ + Returns the contents of the config override file, or None if none. + """ @abstractmethod def save_config(self, config_object): - """ Updates the contents of the config override file to those given. """ + """ + Updates the contents of the config override file to those given. + """ @abstractmethod def config_exists(self): - """ Returns true if a config override file exists in the config volume. """ + """ + Returns true if a config override file exists in the config volume. + """ @abstractmethod def volume_exists(self): - """ Returns whether the config override volume exists. """ + """ + Returns whether the config override volume exists. + """ @abstractmethod def volume_file_exists(self, filename): - """ Returns whether the file with the given name exists under the config override volume. """ + """ + Returns whether the file with the given name exists under the config override volume. + """ @abstractmethod def get_volume_file(self, filename, mode="r"): - """ Returns a Python file referring to the given name under the config override volume. """ + """ + Returns a Python file referring to the given name under the config override volume. + """ @abstractmethod def write_volume_file(self, filename, contents): - """ Writes the given contents to the config override volumne, with the given filename. """ + """ + Writes the given contents to the config override volumne, with the given filename. + """ @abstractmethod def remove_volume_file(self, filename): - """ Removes the config override volume file with the given filename. """ + """ + Removes the config override volume file with the given filename. + """ @abstractmethod def list_volume_directory(self, path): - """ Returns a list of strings representing the names of the files found in the config override - directory under the given path. If the path doesn't exist, returns None. - """ + """ + Returns a list of strings representing the names of the files found in the config override + directory under the given path. + + If the path doesn't exist, returns None. + """ @abstractmethod def save_volume_file(self, filename, flask_file): - """ Saves the given flask file to the config override volume, with the given - filename. - """ + """ + Saves the given flask file to the config override volume, with the given filename. 
+ """ @abstractmethod def requires_restart(self, app_config): - """ If true, the configuration loaded into memory for the app does not match that on disk, + """ + If true, the configuration loaded into memory for the app does not match that on disk, indicating that this container requires a restart. - """ + """ @abstractmethod def get_volume_path(self, directory, filename): - """ Helper for constructing file paths, which may differ between providers. For example, - kubernetes can't have subfolders in configmaps """ + """ + Helper for constructing file paths, which may differ between providers. + + For example, kubernetes can't have subfolders in configmaps + """ diff --git a/config_app/config_util/config/fileprovider.py b/config_app/config_util/config/fileprovider.py index d3e9d43f9..6937eef36 100644 --- a/config_app/config_util/config/fileprovider.py +++ b/config_app/config_util/config/fileprovider.py @@ -8,7 +8,9 @@ logger = logging.getLogger(__name__) def _ensure_parent_dir(filepath): - """ Ensures that the parent directory of the given file path exists. """ + """ + Ensures that the parent directory of the given file path exists. + """ try: parentpath = os.path.abspath(os.path.join(filepath, os.pardir)) if not os.path.isdir(parentpath): @@ -18,8 +20,9 @@ def _ensure_parent_dir(filepath): class FileConfigProvider(BaseFileProvider): - """ Implementation of the config provider that reads and writes the data - from/to the file system. """ + """ + Implementation of the config provider that reads and writes the data from/to the file system. + """ def __init__(self, config_volume, yaml_filename, py_filename): super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename) diff --git a/config_app/config_util/config/testprovider.py b/config_app/config_util/config/testprovider.py index fdbbccc8c..3f5804856 100644 --- a/config_app/config_util/config/testprovider.py +++ b/config_app/config_util/config/testprovider.py @@ -8,8 +8,11 @@ REAL_FILES = ["test/data/signing-private.gpg", "test/data/signing-public.gpg", " class TestConfigProvider(BaseProvider): - """ Implementation of the config provider for testing. Everything is kept in-memory instead on - the real file system. """ + """ + Implementation of the config provider for testing. + + Everything is kept in-memory instead on the real file system. + """ def __init__(self): self.clear() diff --git a/config_app/config_util/k8saccessor.py b/config_app/config_util/k8saccessor.py index 62db3a502..193914c0f 100644 --- a/config_app/config_util/k8saccessor.py +++ b/config_app/config_util/k8saccessor.py @@ -28,11 +28,12 @@ class K8sApiException(Exception): def _deployment_rollout_status_message(deployment, deployment_name): """ - Gets the friendly human readable message of the current state of the deployment rollout - :param deployment: python dict matching: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#deployment-v1-apps - :param deployment_name: string - :return: DeploymentRolloutStatus - """ + Gets the friendly human readable message of the current state of the deployment rollout. 
+ + :param deployment: python dict matching: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#deployment-v1-apps + :param deployment_name: string + :return: DeploymentRolloutStatus + """ # Logic for rollout status pulled from the `kubectl rollout status` command: # https://github.com/kubernetes/kubernetes/blob/d9ba19c751709c8608e09a0537eea98973f3a796/pkg/kubectl/rollout_status.go#L62 if deployment["metadata"]["generation"] <= deployment["status"]["observedGeneration"]: @@ -88,7 +89,9 @@ def _deployment_rollout_status_message(deployment, deployment_name): class KubernetesAccessorSingleton(object): - """ Singleton allowing access to kubernetes operations """ + """ + Singleton allowing access to kubernetes operations. + """ _instance = None @@ -102,10 +105,11 @@ class KubernetesAccessorSingleton(object): @classmethod def get_instance(cls, kube_config=None): """ - Singleton getter implementation, returns the instance if one exists, otherwise creates the - instance and ties it to the class. - :return: KubernetesAccessorSingleton - """ + Singleton getter implementation, returns the instance if one exists, otherwise creates the + instance and ties it to the class. + + :return: KubernetesAccessorSingleton + """ if cls._instance is None: return cls(kube_config) @@ -113,9 +117,10 @@ class KubernetesAccessorSingleton(object): def save_secret_to_directory(self, dir_path): """ - Saves all files in the kubernetes secret to a local directory. - Assumes the directory is empty. - """ + Saves all files in the kubernetes secret to a local directory. + + Assumes the directory is empty. + """ secret = self._lookup_secret() secret_data = secret.get("data", {}) @@ -143,8 +148,8 @@ class KubernetesAccessorSingleton(object): def replace_qe_secret(self, new_secret_data): """ - Removes the old config and replaces it with the new_secret_data as one action - """ + Removes the old config and replaces it with the new_secret_data as one action. + """ # Check first that the namespace for Red Hat Quay exists. If it does not, report that # as an error, as it seems to be a common issue. namespace_url = "namespaces/%s" % (self.kube_config.qe_namespace) @@ -183,10 +188,11 @@ class KubernetesAccessorSingleton(object): self._assert_success(self._execute_k8s_api("PUT", secret_url, secret)) def get_deployment_rollout_status(self, deployment_name): - """" - Returns the status of a rollout of a given deployment - :return _DeploymentRolloutStatus - """ + """ + " Returns the status of a rollout of a given deployment. + + :return _DeploymentRolloutStatus + """ deployment_selector_url = "namespaces/%s/deployments/%s" % ( self.kube_config.qe_namespace, deployment_name, @@ -203,9 +209,9 @@ class KubernetesAccessorSingleton(object): return _deployment_rollout_status_message(deployment, deployment_name) def get_qe_deployments(self): - """" - Returns all deployments matching the label selector provided in the KubeConfig - """ + """ + " Returns all deployments matching the label selector provided in the KubeConfig. + """ deployment_selector_url = "namespaces/%s/deployments?labelSelector=%s%%3D%s" % ( self.kube_config.qe_namespace, QE_DEPLOYMENT_LABEL, @@ -220,9 +226,9 @@ class KubernetesAccessorSingleton(object): return json.loads(response.text) def cycle_qe_deployments(self, deployment_names): - """" - Triggers a rollout of all desired deployments in the qe namespace - """ + """ + " Triggers a rollout of all desired deployments in the qe namespace. 
+ """ for name in deployment_names: logger.debug("Cycling deployment %s", name) diff --git a/config_app/config_util/log.py b/config_app/config_util/log.py index 65504debc..4c80e2b13 100644 --- a/config_app/config_util/log.py +++ b/config_app/config_util/log.py @@ -5,12 +5,13 @@ from config_app._init_config import CONF_DIR def logfile_path(jsonfmt=False, debug=False): """ Returns the a logfileconf path following this rules: + - conf/logging_debug_json.conf # jsonfmt=true, debug=true - conf/logging_json.conf # jsonfmt=true, debug=false - conf/logging_debug.conf # jsonfmt=false, debug=true - conf/logging.conf # jsonfmt=false, debug=false Can be parametrized via envvars: JSONLOG=true, DEBUGLOG=true - """ + """ _json = "" _debug = "" diff --git a/config_app/config_util/ssl.py b/config_app/config_util/ssl.py index 547646525..cce441686 100644 --- a/config_app/config_util/ssl.py +++ b/config_app/config_util/ssl.py @@ -4,21 +4,26 @@ import OpenSSL class CertInvalidException(Exception): - """ Exception raised when a certificate could not be parsed/loaded. """ + """ + Exception raised when a certificate could not be parsed/loaded. + """ pass class KeyInvalidException(Exception): - """ Exception raised when a key could not be parsed/loaded or successfully applied to a cert. """ + """ + Exception raised when a key could not be parsed/loaded or successfully applied to a cert. + """ pass def load_certificate(cert_contents): - """ Loads the certificate from the given contents and returns it or raises a CertInvalidException - on failure. - """ + """ + Loads the certificate from the given contents and returns it or raises a CertInvalidException on + failure. + """ try: cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_contents) return SSLCertificate(cert) @@ -30,15 +35,19 @@ _SUBJECT_ALT_NAME = "subjectAltName" class SSLCertificate(object): - """ Helper class for easier working with SSL certificates. """ + """ + Helper class for easier working with SSL certificates. + """ def __init__(self, openssl_cert): self.openssl_cert = openssl_cert def validate_private_key(self, private_key_path): - """ Validates that the private key found at the given file path applies to this certificate. + """ + Validates that the private key found at the given file path applies to this certificate. + Raises a KeyInvalidException on failure. - """ + """ context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD) context.use_certificate(self.openssl_cert) @@ -49,7 +58,9 @@ class SSLCertificate(object): raise KeyInvalidException(ex.message[0][2]) def matches_name(self, check_name): - """ Returns true if this SSL certificate matches the given DNS hostname. """ + """ + Returns true if this SSL certificate matches the given DNS hostname. + """ for dns_name in self.names: if fnmatch(check_name, dns_name): return True @@ -58,17 +69,25 @@ class SSLCertificate(object): @property def expired(self): - """ Returns whether the SSL certificate has expired. """ + """ + Returns whether the SSL certificate has expired. + """ return self.openssl_cert.has_expired() @property def common_name(self): - """ Returns the defined common name for the certificate, if any. """ + """ + Returns the defined common name for the certificate, if any. + """ return self.openssl_cert.get_subject().commonName @property def names(self): - """ Returns all the DNS named to which the certificate applies. May be empty. """ + """ + Returns all the DNS named to which the certificate applies. + + May be empty. 
+ """ dns_names = set() common_name = self.common_name if common_name is not None: diff --git a/config_app/config_util/tar.py b/config_app/config_util/tar.py index bdff6143e..3345e1a4a 100644 --- a/config_app/config_util/tar.py +++ b/config_app/config_util/tar.py @@ -3,8 +3,8 @@ from util.config.validator import EXTRA_CA_DIRECTORY def strip_absolute_path_and_add_trailing_dir(path): """ - Removes the initial trailing / from the prefix path, and add the last dir one - """ + Removes the initial trailing / from the prefix path, and add the last dir one. + """ return path[1:] + "/" diff --git a/data/appr_model/blob.py b/data/appr_model/blob.py index 588c0cfd1..ba855f7a8 100644 --- a/data/appr_model/blob.py +++ b/data/appr_model/blob.py @@ -14,13 +14,17 @@ def _ensure_sha256_header(digest): def get_blob(digest, models_ref): - """ Find a blob by its digest. """ + """ + Find a blob by its digest. + """ Blob = models_ref.Blob return Blob.select().where(Blob.digest == _ensure_sha256_header(digest)).get() def get_or_create_blob(digest, size, media_type_name, locations, models_ref): - """ Try to find a blob by its digest or create it. """ + """ + Try to find a blob by its digest or create it. + """ Blob = models_ref.Blob BlobPlacement = models_ref.BlobPlacement @@ -48,7 +52,9 @@ def get_or_create_blob(digest, size, media_type_name, locations, models_ref): def get_blob_locations(digest, models_ref): - """ Find all locations names for a blob. """ + """ + Find all locations names for a blob. + """ Blob = models_ref.Blob BlobPlacement = models_ref.BlobPlacement BlobPlacementLocation = models_ref.BlobPlacementLocation diff --git a/data/appr_model/channel.py b/data/appr_model/channel.py index 297cf144e..881ef7824 100644 --- a/data/appr_model/channel.py +++ b/data/appr_model/channel.py @@ -2,9 +2,11 @@ from data.appr_model import tag as tag_model def get_channel_releases(repo, channel, models_ref): - """ Return all previously linked tags. - This works based upon Tag lifetimes. - """ + """ + Return all previously linked tags. + + This works based upon Tag lifetimes. + """ Channel = models_ref.Channel Tag = models_ref.Tag @@ -24,13 +26,17 @@ def get_channel_releases(repo, channel, models_ref): def get_channel(repo, channel_name, models_ref): - """ Find a Channel by name. """ + """ + Find a Channel by name. + """ channel = tag_model.get_tag(repo, channel_name, models_ref, "channel") return channel def get_tag_channels(repo, tag_name, models_ref, active=True): - """ Find the Channels associated with a Tag. """ + """ + Find the Channels associated with a Tag. + """ Tag = models_ref.Tag tag = tag_model.get_tag(repo, tag_name, models_ref, "release") @@ -43,12 +49,16 @@ def get_tag_channels(repo, tag_name, models_ref, active=True): def delete_channel(repo, channel_name, models_ref): - """ Delete a channel by name. """ + """ + Delete a channel by name. + """ return tag_model.delete_tag(repo, channel_name, models_ref, "channel") def create_or_update_channel(repo, channel_name, tag_name, models_ref): - """ Creates or updates a channel to include a particular tag. """ + """ + Creates or updates a channel to include a particular tag. + """ tag = tag_model.get_tag(repo, tag_name, models_ref, "release") return tag_model.create_or_update_tag( repo, channel_name, models_ref, linked_tag=tag, tag_kind="channel" @@ -56,7 +66,9 @@ def create_or_update_channel(repo, channel_name, tag_name, models_ref): def get_repo_channels(repo, models_ref): - """ Creates or updates a channel to include a particular tag. 
""" + """ + Creates or updates a channel to include a particular tag. + """ Channel = models_ref.Channel Tag = models_ref.Tag diff --git a/data/appr_model/manifest.py b/data/appr_model/manifest.py index 8311f20d1..3c61ce540 100644 --- a/data/appr_model/manifest.py +++ b/data/appr_model/manifest.py @@ -53,7 +53,9 @@ def get_or_create_manifest(manifest_json, media_type_name, models_ref): def get_manifest_types(repo, models_ref, release=None): - """ Returns an array of MediaTypes.name for a repo, can filter by tag """ + """ + Returns an array of MediaTypes.name for a repo, can filter by tag. + """ Tag = models_ref.Tag ManifestListManifest = models_ref.ManifestListManifest diff --git a/data/appr_model/manifest_list.py b/data/appr_model/manifest_list.py index 885a00b3c..950ccb9ad 100644 --- a/data/appr_model/manifest_list.py +++ b/data/appr_model/manifest_list.py @@ -45,8 +45,10 @@ def get_or_create_manifest_list(manifest_list_json, media_type_name, schema_vers def create_manifestlistmanifest(manifestlist, manifest_ids, manifest_list_json, models_ref): - """ From a manifestlist, manifests, and the manifest list blob, - create if doesn't exist the manfiestlistmanifest for each manifest """ + """ + From a manifestlist, manifests, and the manifest list blob, create if doesn't exist the + manfiestlistmanifest for each manifest. + """ for pos in xrange(len(manifest_ids)): manifest_id = manifest_ids[pos] manifest_json = manifest_list_json[pos] diff --git a/data/appr_model/package.py b/data/appr_model/package.py index 86c32f4c9..77dd72d25 100644 --- a/data/appr_model/package.py +++ b/data/appr_model/package.py @@ -10,7 +10,9 @@ from data.appr_model import tag as tag_model def list_packages_query( models_ref, namespace=None, media_type=None, search_query=None, username=None, limit=50, ): - """ List and filter repository by search query. """ + """ + List and filter repository by search query. + """ Tag = models_ref.Tag if username and not search_query: diff --git a/data/appr_model/release.py b/data/appr_model/release.py index bf145ba03..5cb4d35f4 100644 --- a/data/appr_model/release.py +++ b/data/appr_model/release.py @@ -23,7 +23,9 @@ def _ensure_sha256_header(digest): def get_app_release(repo, tag_name, media_type, models_ref): - """ Returns (tag, manifest, blob) given a repo object, tag_name, and media_type). """ + """ + Returns (tag, manifest, blob) given a repo object, tag_name, and media_type). + """ ManifestListManifest = models_ref.ManifestListManifest Manifest = models_ref.Manifest Blob = models_ref.Blob @@ -88,12 +90,13 @@ def delete_app_release(repo, tag_name, media_type, models_ref): def create_app_release(repo, tag_name, manifest_data, digest, models_ref, force=False): - """ Create a new application release, it includes creating a new Tag, ManifestList, - ManifestListManifests, Manifest, ManifestBlob. + """ + Create a new application release, it includes creating a new Tag, ManifestList, + ManifestListManifests, Manifest, ManifestBlob. - To deduplicate the ManifestList, the manifestlist_json is kept ordered by the manifest.id. - To find the insert point in the ManifestList it uses bisect on the manifest-ids list. - """ + To deduplicate the ManifestList, the manifestlist_json is kept ordered by the manifest.id. To + find the insert point in the ManifestList it uses bisect on the manifest-ids list. 
+ """ ManifestList = models_ref.ManifestList ManifestListManifest = models_ref.ManifestListManifest Blob = models_ref.Blob @@ -160,7 +163,9 @@ def create_app_release(repo, tag_name, manifest_data, digest, models_ref, force= def get_release_objs(repo, models_ref, media_type=None): - """ Returns an array of Tag for a repo, with optional filtering by media_type. """ + """ + Returns an array of Tag for a repo, with optional filtering by media_type. + """ Tag = models_ref.Tag release_query = Tag.select().where( @@ -173,5 +178,7 @@ def get_release_objs(repo, models_ref, media_type=None): def get_releases(repo, model_refs, media_type=None): - """ Returns an array of Tag.name for a repo, can filter by media_type. """ + """ + Returns an array of Tag.name for a repo, can filter by media_type. + """ return [t.name for t in get_release_objs(repo, model_refs, media_type)] diff --git a/data/appr_model/tag.py b/data/appr_model/tag.py index b329fe234..5d2492b85 100644 --- a/data/appr_model/tag.py +++ b/data/appr_model/tag.py @@ -114,7 +114,9 @@ def tag_exists(repo, tag_name, models_ref, tag_kind="release"): def filter_tags_by_media_type(tag_query, media_type, models_ref): - """ Return only available tag for a media_type. """ + """ + Return only available tag for a media_type. + """ ManifestListManifest = models_ref.ManifestListManifest Tag = models_ref.Tag media_type = manifest_media_type(media_type) diff --git a/data/billing.py b/data/billing.py index ac950e2b1..f033020af 100644 --- a/data/billing.py +++ b/data/billing.py @@ -325,7 +325,9 @@ PLANS = [ def get_plan(plan_id): - """ Returns the plan with the given ID or None if none. """ + """ + Returns the plan with the given ID or None if none. + """ for plan in PLANS: if plan["stripeId"] == plan_id: return plan diff --git a/data/buildlogs.py b/data/buildlogs.py index fff5251a0..a896656f4 100644 --- a/data/buildlogs.py +++ b/data/buildlogs.py @@ -44,9 +44,9 @@ class RedisBuildLogs(object): def append_log_entry(self, build_id, log_obj): """ - Appends the serialized form of log_obj to the end of the log entry list - and returns the new length of the list. - """ + Appends the serialized form of log_obj to the end of the log entry list and returns the new + length of the list. + """ pipeline = self._redis.pipeline(transaction=False) pipeline.expire(self._logs_key(build_id), SEVEN_DAYS) pipeline.rpush(self._logs_key(build_id), json.dumps(log_obj)) @@ -55,9 +55,9 @@ class RedisBuildLogs(object): def append_log_message(self, build_id, log_message, log_type=None, log_data=None): """ - Wraps the message in an envelope and push it to the end of the log entry - list and returns the index at which it was inserted. - """ + Wraps the message in an envelope and push it to the end of the log entry list and returns + the index at which it was inserted. + """ log_obj = {"message": log_message} if log_type: @@ -70,9 +70,9 @@ class RedisBuildLogs(object): def get_log_entries(self, build_id, start_index): """ - Returns a tuple of the current length of the list and an iterable of the - requested log entries. - """ + Returns a tuple of the current length of the list and an iterable of the requested log + entries. + """ try: llen = self._redis.llen(self._logs_key(build_id)) log_entries = self._redis.lrange(self._logs_key(build_id), start_index, -1) @@ -82,20 +82,20 @@ class RedisBuildLogs(object): def expire_status(self, build_id): """ - Sets the status entry to expire in 1 day. - """ + Sets the status entry to expire in 1 day. 
+ """ self._redis.expire(self._status_key(build_id), ONE_DAY) def expire_log_entries(self, build_id): """ - Sets the log entry to expire in 1 day. - """ + Sets the log entry to expire in 1 day. + """ self._redis.expire(self._logs_key(build_id), ONE_DAY) def delete_log_entries(self, build_id): """ - Delete the log entry - """ + Delete the log entry. + """ self._redis.delete(self._logs_key(build_id)) @staticmethod @@ -104,15 +104,14 @@ class RedisBuildLogs(object): def set_status(self, build_id, status_obj): """ - Sets the status key for this build to json serialized form of the supplied - obj. - """ + Sets the status key for this build to json serialized form of the supplied obj. + """ self._redis.set(self._status_key(build_id), json.dumps(status_obj), ex=SEVEN_DAYS) def get_status(self, build_id): """ - Loads the status information for the specified build id. - """ + Loads the status information for the specified build id. + """ try: fetched = self._redis.get(self._status_key(build_id)) except redis.RedisError as re: diff --git a/data/cache/__init__.py b/data/cache/__init__.py index 045b501cc..13485eedb 100644 --- a/data/cache/__init__.py +++ b/data/cache/__init__.py @@ -2,7 +2,9 @@ from data.cache.impl import NoopDataModelCache, InMemoryDataModelCache, Memcache def get_model_cache(config): - """ Returns a data model cache matching the given configuration. """ + """ + Returns a data model cache matching the given configuration. + """ cache_config = config.get("DATA_MODEL_CACHE_CONFIG", {}) engine = cache_config.get("engine", "noop") diff --git a/data/cache/cache_key.py b/data/cache/cache_key.py index 911406b3f..1fb84f856 100644 --- a/data/cache/cache_key.py +++ b/data/cache/cache_key.py @@ -2,29 +2,39 @@ from collections import namedtuple class CacheKey(namedtuple("CacheKey", ["key", "expiration"])): - """ Defines a key into the data model cache. """ + """ + Defines a key into the data model cache. + """ pass def for_repository_blob(namespace_name, repo_name, digest, version): - """ Returns a cache key for a blob in a repository. """ + """ + Returns a cache key for a blob in a repository. + """ return CacheKey("repo_blob__%s_%s_%s_%s" % (namespace_name, repo_name, digest, version), "60s") def for_catalog_page(auth_context_key, start_id, limit): - """ Returns a cache key for a single page of a catalog lookup for an authed context. """ + """ + Returns a cache key for a single page of a catalog lookup for an authed context. + """ params = (auth_context_key or "(anon)", start_id or 0, limit or 0) return CacheKey("catalog_page__%s_%s_%s" % params, "60s") def for_namespace_geo_restrictions(namespace_name): - """ Returns a cache key for the geo restrictions for a namespace. """ + """ + Returns a cache key for the geo restrictions for a namespace. + """ return CacheKey("geo_restrictions__%s" % (namespace_name), "240s") def for_active_repo_tags(repository_id, start_pagination_id, limit): - """ Returns a cache key for the active tags in a repository. """ + """ + Returns a cache key for the active tags in a repository. + """ return CacheKey( "repo_active_tags__%s_%s_%s" % (repository_id, start_pagination_id, limit), "120s" ) diff --git a/data/cache/impl.py b/data/cache/impl.py index 679bc0ccf..a2c2cee8e 100644 --- a/data/cache/impl.py +++ b/data/cache/impl.py @@ -20,25 +20,33 @@ def is_not_none(value): @add_metaclass(ABCMeta) class DataModelCache(object): - """ Defines an interface for cache storing and returning tuple data model objects. 
""" + """ + Defines an interface for cache storing and returning tuple data model objects. + """ @abstractmethod def retrieve(self, cache_key, loader, should_cache=is_not_none): - """ Checks the cache for the specified cache key and returns the value found (if any). If none - found, the loader is called to get a result and populate the cache. - """ + """ + Checks the cache for the specified cache key and returns the value found (if any). + + If none found, the loader is called to get a result and populate the cache. + """ pass class NoopDataModelCache(DataModelCache): - """ Implementation of the data model cache which does nothing. """ + """ + Implementation of the data model cache which does nothing. + """ def retrieve(self, cache_key, loader, should_cache=is_not_none): return loader() class InMemoryDataModelCache(DataModelCache): - """ Implementation of the data model cache backed by an in-memory dictionary. """ + """ + Implementation of the data model cache backed by an in-memory dictionary. + """ def __init__(self): self.cache = ExpiresDict() @@ -83,7 +91,9 @@ _JSON_TYPE = 2 class MemcachedModelCache(DataModelCache): - """ Implementation of the data model cache backed by a memcached. """ + """ + Implementation of the data model cache backed by a memcached. + """ def __init__( self, diff --git a/data/database.py b/data/database.py index fd743c5f4..80051c5ee 100644 --- a/data/database.py +++ b/data/database.py @@ -95,9 +95,11 @@ _EXTRA_ARGS = { def pipes_concat(arg1, arg2, *extra_args): - """ Concat function for sqlite, since it doesn't support fn.Concat. - Concatenates clauses with || characters. - """ + """ + Concat function for sqlite, since it doesn't support fn.Concat. + + Concatenates clauses with || characters. + """ reduced = arg1.concat(arg2) for arg in extra_args: reduced = reduced.concat(arg) @@ -105,9 +107,11 @@ def pipes_concat(arg1, arg2, *extra_args): def function_concat(arg1, arg2, *extra_args): - """ Default implementation of concat which uses fn.Concat(). Used by all - database engines except sqlite. - """ + """ + Default implementation of concat which uses fn.Concat(). + + Used by all database engines except sqlite. + """ return fn.Concat(arg1, arg2, *extra_args) @@ -125,16 +129,17 @@ def null_for_update(query): def delete_instance_filtered(instance, model_class, delete_nullable, skip_transitive_deletes): - """ Deletes the DB instance recursively, skipping any models in the skip_transitive_deletes set. + """ + Deletes the DB instance recursively, skipping any models in the skip_transitive_deletes set. - Callers *must* ensure that any models listed in the skip_transitive_deletes must be capable - of being directly deleted when the instance is deleted (with automatic sorting handling - dependency order). + Callers *must* ensure that any models listed in the skip_transitive_deletes must be capable + of being directly deleted when the instance is deleted (with automatic sorting handling + dependency order). - For example, the RepositoryTag and Image tables for Repository will always refer to the - *same* repository when RepositoryTag references Image, so we can safely skip - transitive deletion for the RepositoryTag table. - """ + For example, the RepositoryTag and Image tables for Repository will always refer to the + *same* repository when RepositoryTag references Image, so we can safely skip + transitive deletion for the RepositoryTag table. 
+ """ # We need to sort the ops so that models get cleaned in order of their dependencies ops = reversed(list(instance.dependencies(delete_nullable))) filtered_ops = [] @@ -206,9 +211,10 @@ class RetryOperationalError(object): class CloseForLongOperation(object): - """ Helper object which disconnects the database then reconnects after the nested operation - completes. - """ + """ + Helper object which disconnects the database then reconnects after the nested operation + completes. + """ def __init__(self, config_object): self.config_object = config_object @@ -225,7 +231,9 @@ class CloseForLongOperation(object): class UseThenDisconnect(object): - """ Helper object for conducting work with a database and then tearing it down. """ + """ + Helper object for conducting work with a database and then tearing it down. + """ def __init__(self, config_object): self.config_object = config_object @@ -241,9 +249,10 @@ class UseThenDisconnect(object): class TupleSelector(object): - """ Helper class for selecting tuples from a peewee query and easily accessing - them as if they were objects. - """ + """ + Helper class for selecting tuples from a peewee query and easily accessing them as if they were + objects. + """ class _TupleWrapper(object): def __init__(self, data, fields): @@ -255,7 +264,9 @@ class TupleSelector(object): @classmethod def tuple_reference_key(cls, field): - """ Returns a string key for referencing a field in a TupleSelector. """ + """ + Returns a string key for referencing a field in a TupleSelector. + """ if isinstance(field, Function): return field.name + ",".join([cls.tuple_reference_key(arg) for arg in field.arguments]) @@ -288,8 +299,11 @@ ensure_under_transaction = CallableProxy() def validate_database_url(url, db_kwargs, connect_timeout=5): - """ Validates that we can connect to the given database URL, with the given kwargs. Raises - an exception if the validation fails. """ + """ + Validates that we can connect to the given database URL, with the given kwargs. + + Raises an exception if the validation fails. + """ db_kwargs = db_kwargs.copy() try: @@ -305,8 +319,11 @@ def validate_database_url(url, db_kwargs, connect_timeout=5): def validate_database_precondition(url, db_kwargs, connect_timeout=5): - """ Validates that we can connect to the given database URL and the database meets our - precondition. Raises an exception if the validation fails. """ + """ + Validates that we can connect to the given database URL and the database meets our precondition. + + Raises an exception if the validation fails. + """ db_kwargs = db_kwargs.copy() try: driver = _db_from_url( @@ -473,19 +490,23 @@ def _get_enum_field_values(enum_field): class EnumField(ForeignKeyField): - """ Create a cached python Enum from an EnumTable """ + """ + Create a cached python Enum from an EnumTable. + """ def __init__(self, model, enum_key_field="name", *args, **kwargs): """ - model is the EnumTable model-class (see ForeignKeyField) - enum_key_field is the field from the EnumTable to use as the enum name - """ + model is the EnumTable model-class (see ForeignKeyField) enum_key_field is the field from + the EnumTable to use as the enum name. + """ self.enum_key_field = enum_key_field super(EnumField, self).__init__(model, *args, **kwargs) @property def enum(self): - """ Returns a python enun.Enum generated from the associated EnumTable """ + """ + Returns a python enun.Enum generated from the associated EnumTable. 
+ """ return _get_enum_field_values(self) def get_id(self, name): @@ -512,10 +533,12 @@ class EnumField(ForeignKeyField): def deprecated_field(field, flag): - """ Marks a field as deprecated and removes it from the peewee model if the - flag is not set. A flag is defined in the active_migration module and will - be associated with one or more migration phases. - """ + """ + Marks a field as deprecated and removes it from the peewee model if the flag is not set. + + A flag is defined in the active_migration module and will be associated with one or more + migration phases. + """ if ActiveDataMigration.has_flag(flag): return field @@ -529,9 +552,10 @@ class BaseModel(ReadReplicaSupportedModel): read_only_config = read_only_config def __getattribute__(self, name): - """ Adds _id accessors so that foreign key field IDs can be looked up without making - a database roundtrip. - """ + """ + Adds _id accessors so that foreign key field IDs can be looked up without making a database + roundtrip. + """ if name.endswith("_id"): field_name = name[0 : len(name) - 3] if field_name in self._meta.fields: @@ -762,13 +786,14 @@ class RepositoryKind(BaseModel): @unique class RepositoryState(IntEnum): """ - Possible states of a repository. - NORMAL: Regular repo where all actions are possible - READ_ONLY: Only read actions, such as pull, are allowed regardless of specific user permissions - MIRROR: Equivalent to READ_ONLY except that mirror robot has write permission - MARKED_FOR_DELETION: Indicates the repository has been marked for deletion and should be hidden - and un-usable. - """ + Possible states of a repository. + + NORMAL: Regular repo where all actions are possible + READ_ONLY: Only read actions, such as pull, are allowed regardless of specific user permissions + MIRROR: Equivalent to READ_ONLY except that mirror robot has write permission + MARKED_FOR_DELETION: Indicates the repository has been marked for deletion and should be hidden + and un-usable. + """ NORMAL = 0 READ_ONLY = 1 @@ -1038,9 +1063,9 @@ class Image(BaseModel): ) def ancestor_id_list(self): - """ Returns an integer list of ancestor ids, ordered chronologically from - root to direct parent. - """ + """ + Returns an integer list of ancestor ids, ordered chronologically from root to direct parent. + """ return map(int, self.ancestors.split("/")[1:-1]) @@ -1078,7 +1103,9 @@ class RepositoryTag(BaseModel): class BUILD_PHASE(object): - """ Build phases enum """ + """ + Build phases enum. + """ ERROR = "error" INTERNAL_ERROR = "internalerror" @@ -1102,7 +1129,9 @@ class BUILD_PHASE(object): class TRIGGER_DISABLE_REASON(object): - """ Build trigger disable reason enum """ + """ + Build trigger disable reason enum. + """ BUILD_FALURES = "successive_build_failures" INTERNAL_ERRORS = "successive_build_internal_errors" @@ -1195,7 +1224,11 @@ class LogEntry(BaseModel): class LogEntry2(BaseModel): - """ TEMP FOR QUAY.IO ONLY. DO NOT RELEASE INTO QUAY ENTERPRISE. """ + """ + TEMP FOR QUAY.IO ONLY. + + DO NOT RELEASE INTO QUAY ENTERPRISE. + """ kind = ForeignKeyField(LogEntryKind) account = IntegerField(index=True, db_column="account_id") @@ -1423,8 +1456,9 @@ class ServiceKey(BaseModel): class MediaType(BaseModel): - """ MediaType is an enumeration of the possible formats of various objects in the data model. - """ + """ + MediaType is an enumeration of the possible formats of various objects in the data model. 
+ """ name = CharField(index=True, unique=True) @@ -1437,17 +1471,19 @@ class Messages(BaseModel): class LabelSourceType(BaseModel): - """ LabelSourceType is an enumeration of the possible sources for a label. - """ + """ + LabelSourceType is an enumeration of the possible sources for a label. + """ name = CharField(index=True, unique=True) mutable = BooleanField(default=False) class Label(BaseModel): - """ Label represents user-facing metadata associated with another entry in the database (e.g. a - Manifest). - """ + """ + Label represents user-facing metadata associated with another entry in the database (e.g. a + Manifest). + """ uuid = CharField(default=uuid_generator, index=True, unique=True) key = CharField(index=True) @@ -1457,8 +1493,9 @@ class Label(BaseModel): class ApprBlob(BaseModel): - """ ApprBlob represents a content-addressable object stored outside of the database. - """ + """ + ApprBlob represents a content-addressable object stored outside of the database. + """ digest = CharField(index=True, unique=True) media_type = EnumField(MediaType) @@ -1467,15 +1504,17 @@ class ApprBlob(BaseModel): class ApprBlobPlacementLocation(BaseModel): - """ ApprBlobPlacementLocation is an enumeration of the possible storage locations for ApprBlobs. - """ + """ + ApprBlobPlacementLocation is an enumeration of the possible storage locations for ApprBlobs. + """ name = CharField(index=True, unique=True) class ApprBlobPlacement(BaseModel): - """ ApprBlobPlacement represents the location of a Blob. - """ + """ + ApprBlobPlacement represents the location of a Blob. + """ blob = ForeignKeyField(ApprBlob) location = EnumField(ApprBlobPlacementLocation) @@ -1487,8 +1526,9 @@ class ApprBlobPlacement(BaseModel): class ApprManifest(BaseModel): - """ ApprManifest represents the metadata and collection of blobs that comprise an Appr image. - """ + """ + ApprManifest represents the metadata and collection of blobs that comprise an Appr image. + """ digest = CharField(index=True, unique=True) media_type = EnumField(MediaType) @@ -1496,8 +1536,9 @@ class ApprManifest(BaseModel): class ApprManifestBlob(BaseModel): - """ ApprManifestBlob is a many-to-many relation table linking ApprManifests and ApprBlobs. - """ + """ + ApprManifestBlob is a many-to-many relation table linking ApprManifests and ApprBlobs. + """ manifest = ForeignKeyField(ApprManifest, index=True) blob = ForeignKeyField(ApprBlob, index=True) @@ -1509,8 +1550,9 @@ class ApprManifestBlob(BaseModel): class ApprManifestList(BaseModel): - """ ApprManifestList represents all of the various Appr manifests that compose an ApprTag. - """ + """ + ApprManifestList represents all of the various Appr manifests that compose an ApprTag. + """ digest = CharField(index=True, unique=True) manifest_list_json = JSONField() @@ -1519,15 +1561,17 @@ class ApprManifestList(BaseModel): class ApprTagKind(BaseModel): - """ ApprTagKind is a enumtable to reference tag kinds. - """ + """ + ApprTagKind is a enumtable to reference tag kinds. + """ name = CharField(index=True, unique=True) class ApprTag(BaseModel): - """ ApprTag represents a user-facing alias for referencing an ApprManifestList. - """ + """ + ApprTag represents a user-facing alias for referencing an ApprManifestList. + """ name = CharField() repository = ForeignKeyField(Repository) @@ -1555,9 +1599,10 @@ ApprChannel = ApprTag.alias() class ApprManifestListManifest(BaseModel): - """ ApprManifestListManifest is a many-to-many relation table linking ApprManifestLists and - ApprManifests. 
- """ + """ + ApprManifestListManifest is a many-to-many relation table linking ApprManifestLists and + ApprManifests. + """ manifest_list = ForeignKeyField(ApprManifestList, index=True) manifest = ForeignKeyField(ApprManifest, index=True) @@ -1573,9 +1618,10 @@ class ApprManifestListManifest(BaseModel): class AppSpecificAuthToken(BaseModel): - """ AppSpecificAuthToken represents a token generated by a user for use with an external - application where putting the user's credentials, even encrypted, is deemed too risky. - """ + """ + AppSpecificAuthToken represents a token generated by a user for use with an external application + where putting the user's credentials, even encrypted, is deemed too risky. + """ user = QuayUserField() uuid = CharField(default=uuid_generator, max_length=36, index=True) @@ -1594,9 +1640,11 @@ class AppSpecificAuthToken(BaseModel): class Manifest(BaseModel): - """ Manifest represents a single manifest under a repository. Within a repository, - there can only be one manifest with the same digest. - """ + """ + Manifest represents a single manifest under a repository. + + Within a repository, there can only be one manifest with the same digest. + """ repository = ForeignKeyField(Repository) digest = CharField(index=True) @@ -1613,15 +1661,17 @@ class Manifest(BaseModel): class TagKind(BaseModel): - """ TagKind describes the various kinds of tags that can be found in the registry. - """ + """ + TagKind describes the various kinds of tags that can be found in the registry. + """ name = CharField(index=True, unique=True) class Tag(BaseModel): - """ Tag represents a user-facing alias for referencing a Manifest or as an alias to another tag. - """ + """ + Tag represents a user-facing alias for referencing a Manifest or as an alias to another tag. + """ name = CharField() repository = ForeignKeyField(Repository) @@ -1648,10 +1698,12 @@ class Tag(BaseModel): class ManifestChild(BaseModel): - """ ManifestChild represents a relationship between a manifest and its child manifest(s). - Multiple manifests can share the same children. Note that since Manifests are stored - per-repository, the repository here is a bit redundant, but we do so to make cleanup easier. - """ + """ + ManifestChild represents a relationship between a manifest and its child manifest(s). + + Multiple manifests can share the same children. Note that since Manifests are stored per- + repository, the repository here is a bit redundant, but we do so to make cleanup easier. + """ repository = ForeignKeyField(Repository) manifest = ForeignKeyField(Manifest) @@ -1669,10 +1721,12 @@ class ManifestChild(BaseModel): class ManifestLabel(BaseModel): - """ ManifestLabel represents a label applied to a Manifest, within a repository. - Note that since Manifests are stored per-repository, the repository here is - a bit redundant, but we do so to make cleanup easier. - """ + """ + ManifestLabel represents a label applied to a Manifest, within a repository. + + Note that since Manifests are stored per-repository, the repository here is a bit redundant, but + we do so to make cleanup easier. + """ repository = ForeignKeyField(Repository, index=True) manifest = ForeignKeyField(Manifest) @@ -1685,7 +1739,9 @@ class ManifestLabel(BaseModel): class ManifestBlob(BaseModel): - """ ManifestBlob represents a blob that is used by a manifest. """ + """ + ManifestBlob represents a blob that is used by a manifest. 
+ """ repository = ForeignKeyField(Repository, index=True) manifest = ForeignKeyField(Manifest) @@ -1698,9 +1754,10 @@ class ManifestBlob(BaseModel): class ManifestLegacyImage(BaseModel): - """ For V1-compatible manifests only, this table maps from the manifest to its associated - Docker image. - """ + """ + For V1-compatible manifests only, this table maps from the manifest to its associated Docker + image. + """ repository = ForeignKeyField(Repository, index=True) manifest = ForeignKeyField(Manifest, unique=True) @@ -1708,7 +1765,9 @@ class ManifestLegacyImage(BaseModel): class TagManifest(BaseModel): - """ TO BE DEPRECATED: The manifest for a tag. """ + """ + TO BE DEPRECATED: The manifest for a tag. + """ tag = ForeignKeyField(RepositoryTag, unique=True) digest = CharField(index=True) @@ -1724,8 +1783,9 @@ class TagManifestToManifest(BaseModel): class TagManifestLabel(BaseModel): - """ TO BE DEPRECATED: Mapping from a tag manifest to a label. - """ + """ + TO BE DEPRECATED: Mapping from a tag manifest to a label. + """ repository = ForeignKeyField(Repository, index=True) annotated = ForeignKeyField(TagManifest, index=True) @@ -1762,17 +1822,18 @@ class TagToRepositoryTag(BaseModel): @unique class RepoMirrorRuleType(IntEnum): """ - Types of mirroring rules. - TAG_GLOB_CSV: Comma separated glob values (eg. "7.6,7.6-1.*") - """ + Types of mirroring rules. + + TAG_GLOB_CSV: Comma separated glob values (eg. "7.6,7.6-1.*") + """ TAG_GLOB_CSV = 1 class RepoMirrorRule(BaseModel): """ - Determines how a given Repository should be mirrored. - """ + Determines how a given Repository should be mirrored. + """ uuid = CharField(default=uuid_generator, max_length=36, index=True) repository = ForeignKeyField(Repository, index=True) @@ -1789,8 +1850,8 @@ class RepoMirrorRule(BaseModel): @unique class RepoMirrorType(IntEnum): """ - Types of repository mirrors. - """ + Types of repository mirrors. + """ PULL = 1 # Pull images from the external repo @@ -1798,8 +1859,8 @@ class RepoMirrorType(IntEnum): @unique class RepoMirrorStatus(IntEnum): """ - Possible statuses of repository mirroring. - """ + Possible statuses of repository mirroring. + """ FAIL = -1 NEVER_RUN = 0 @@ -1810,9 +1871,9 @@ class RepoMirrorStatus(IntEnum): class RepoMirrorConfig(BaseModel): """ - Represents a repository to be mirrored and any additional configuration - required to perform the mirroring. - """ + Represents a repository to be mirrored and any additional configuration required to perform the + mirroring. + """ repository = ForeignKeyField(Repository, index=True, unique=True, backref="mirror") creation_date = DateTimeField(default=datetime.utcnow) diff --git a/data/encryption.py b/data/encryption.py index 21fe0e26a..b71996096 100644 --- a/data/encryption.py +++ b/data/encryption.py @@ -9,7 +9,9 @@ from util.security.secret import convert_secret_key class DecryptionFailureException(Exception): - """ Exception raised if a field could not be decrypted. """ + """ + Exception raised if a field could not be decrypted. + """ EncryptionVersion = namedtuple("EncryptionVersion", ["prefix", "encrypt", "decrypt"]) @@ -56,9 +58,10 @@ _RESERVED_FIELD_SPACE = len(_SEPARATOR) + max([len(k) for k in _VERSIONS.keys()] class FieldEncrypter(object): - """ Helper object for defining how fields are encrypted and decrypted between the database - and the application. - """ + """ + Helper object for defining how fields are encrypted and decrypted between the database and the + application. 
+ """ def __init__(self, secret_key, version="v0"): # NOTE: secret_key will be None when the system is being first initialized, so we allow that @@ -68,7 +71,9 @@ class FieldEncrypter(object): self._encryption_version = _VERSIONS[version] def encrypt_value(self, value, field_max_length=None): - """ Encrypts the value using the current version of encryption. """ + """ + Encrypts the value using the current version of encryption. + """ assert self._secret_key is not None encrypted_value = self._encryption_version.encrypt( self._secret_key, value, field_max_length @@ -76,9 +81,11 @@ class FieldEncrypter(object): return "%s%s%s" % (self._encryption_version.prefix, _SEPARATOR, encrypted_value) def decrypt_value(self, value): - """ Decrypts the value, returning it. If the value cannot be decrypted - raises a DecryptionFailureException. - """ + """ + Decrypts the value, returning it. + + If the value cannot be decrypted raises a DecryptionFailureException. + """ assert self._secret_key is not None if _SEPARATOR not in value: raise DecryptionFailureException("Invalid encrypted value") diff --git a/data/fields.py b/data/fields.py index 9ef3bcde5..c3a638fcb 100644 --- a/data/fields.py +++ b/data/fields.py @@ -78,7 +78,9 @@ class Base64BinaryField(TextField): class DecryptedValue(object): - """ Wrapper around an already decrypted value to be placed into an encrypted field. """ + """ + Wrapper around an already decrypted value to be placed into an encrypted field. + """ def __init__(self, decrypted_value): assert decrypted_value is not None @@ -89,24 +91,34 @@ class DecryptedValue(object): return self.value def matches(self, unencrypted_value): - """ Returns whether the value of this field matches the unencrypted_value. """ + """ + Returns whether the value of this field matches the unencrypted_value. + """ return self.decrypt() == unencrypted_value class LazyEncryptedValue(object): - """ Wrapper around an encrypted value in an encrypted field. Will decrypt lazily. """ + """ + Wrapper around an encrypted value in an encrypted field. + + Will decrypt lazily. + """ def __init__(self, encrypted_value, field): self.encrypted_value = encrypted_value self._field = field def decrypt(self, encrypter=None): - """ Decrypts the value. """ + """ + Decrypts the value. + """ encrypter = encrypter or self._field.model._meta.encrypter return encrypter.decrypt_value(self.encrypted_value) def matches(self, unencrypted_value): - """ Returns whether the value of this field matches the unencrypted_value. """ + """ + Returns whether the value of this field matches the unencrypted_value. + """ return self.decrypt() == unencrypted_value def __eq__(self, _): @@ -132,7 +144,9 @@ class LazyEncryptedValue(object): def _add_encryption(field_class, requires_length_check=True): - """ Adds support for encryption and decryption to the given field class. """ + """ + Adds support for encryption and decryption to the given field class. + """ class indexed_class(field_class): def __init__(self, default_token_length=None, *args, **kwargs): @@ -202,11 +216,15 @@ class EnumField(SmallIntegerField): self.enum_type = enum_type def db_value(self, value): - """Convert the python value for storage in the database.""" + """ + Convert the python value for storage in the database. + """ return int(value.value) def python_value(self, value): - """Convert the database value to a pythonic value.""" + """ + Convert the database value to a pythonic value. 
+ """ return self.enum_type(value) if value is not None else None def clone_base(self, **kwargs): @@ -214,7 +232,9 @@ class EnumField(SmallIntegerField): def _add_fulltext(field_class): - """ Adds support for full text indexing and lookup to the given field class. """ + """ + Adds support for full text indexing and lookup to the given field class. + """ class indexed_class(field_class): # Marker used by SQLAlchemy translation layer to add the proper index for full text searching. @@ -256,32 +276,42 @@ FullIndexedTextField = _add_fulltext(TextField) class Credential(object): - """ Credential represents a hashed credential. """ + """ + Credential represents a hashed credential. + """ def __init__(self, hashed): self.hashed = hashed def matches(self, value): - """ Returns true if this credential matches the unhashed value given. """ + """ + Returns true if this credential matches the unhashed value given. + """ return bcrypt.hashpw(value.encode("utf-8"), self.hashed) == self.hashed @classmethod def from_string(cls, string_value): - """ Returns a Credential object from an unhashed string value. """ + """ + Returns a Credential object from an unhashed string value. + """ return Credential(bcrypt.hashpw(string_value.encode("utf-8"), bcrypt.gensalt())) @classmethod def generate(cls, length=20): - """ Generates a new credential and returns it, along with its unhashed form. """ + """ + Generates a new credential and returns it, along with its unhashed form. + """ token = random_string(length) return Credential.from_string(token), token class CredentialField(CharField): - """ A character field that stores crytographically hashed credentials that should never be - available to the user in plaintext after initial creation. This field automatically - provides verification. - """ + """ + A character field that stores crytographically hashed credentials that should never be available + to the user in plaintext after initial creation. + + This field automatically provides verification. + """ def __init__(self, *args, **kwargs): CharField.__init__(self, *args, **kwargs) diff --git a/data/logs_model/combined_model.py b/data/logs_model/combined_model.py index 33f6fc964..a359a7b6e 100644 --- a/data/logs_model/combined_model.py +++ b/data/logs_model/combined_model.py @@ -9,13 +9,16 @@ logger = logging.getLogger(__name__) def _merge_aggregated_log_counts(*args): - """ Merge two lists of AggregatedLogCount based on the value of their kind_id and datetime. - """ + """ + Merge two lists of AggregatedLogCount based on the value of their kind_id and datetime. + """ matching_keys = {} aggregated_log_counts_list = itertools.chain.from_iterable(args) def canonical_key_from_kind_date_tuple(kind_id, dt): - """ Return a comma separated key from an AggregatedLogCount's kind_id and datetime. """ + """ + Return a comma separated key from an AggregatedLogCount's kind_id and datetime. + """ return str(kind_id) + "," + str(dt) for kind_id, count, dt in aggregated_log_counts_list: @@ -33,9 +36,9 @@ def _merge_aggregated_log_counts(*args): class CombinedLogsModel(SharedModel, ActionLogsDataInterface): """ - CombinedLogsModel implements the data model that logs to the first logs model and reads from - both. - """ + CombinedLogsModel implements the data model that logs to the first logs model and reads from + both. 
+ """ def __init__(self, read_write_logs_model, read_only_logs_model): self.read_write_logs_model = read_write_logs_model diff --git a/data/logs_model/datatypes.py b/data/logs_model/datatypes.py index 5a2b7dcb3..4decf1fdd 100644 --- a/data/logs_model/datatypes.py +++ b/data/logs_model/datatypes.py @@ -11,7 +11,9 @@ from util.morecollections import AttrDict def _format_date(date): - """ Output an RFC822 date format. """ + """ + Output an RFC822 date format. + """ if date is None: return None @@ -24,10 +26,12 @@ def _kinds(): class LogEntriesPage(namedtuple("LogEntriesPage", ["logs", "next_page_token"])): - """ Represents a page returned by the lookup_logs call. The `logs` contains the logs - found for the page and `next_page_token`, if not None, contains the token to be - encoded and returned for the followup call. - """ + """ + Represents a page returned by the lookup_logs call. + + The `logs` contains the logs found for the page and `next_page_token`, if not None, contains the + token to be encoded and returned for the followup call. + """ class Log( @@ -48,7 +52,9 @@ class Log( ], ) ): - """ Represents a single log entry returned by the logs model. """ + """ + Represents a single log entry returned by the logs model. + """ @classmethod def for_logentry(cls, log): @@ -181,7 +187,9 @@ class Log( class AggregatedLogCount(namedtuple("AggregatedLogCount", ["kind_id", "count", "datetime"])): - """ Represents the aggregated count of the number of logs, of a particular kind, on a day. """ + """ + Represents the aggregated count of the number of logs, of a particular kind, on a day. + """ def to_dict(self): view = { diff --git a/data/logs_model/document_logs_model.py b/data/logs_model/document_logs_model.py index f463efd5c..41379069f 100644 --- a/data/logs_model/document_logs_model.py +++ b/data/logs_model/document_logs_model.py @@ -50,11 +50,12 @@ COUNT_REPOSITORY_ACTION_TIMEOUT = 30 def _date_range_descending(start_datetime, end_datetime, includes_end_datetime=False): - """ Generate the dates between `end_datetime` and `start_datetime`. + """ + Generate the dates between `end_datetime` and `start_datetime`. - If `includes_end_datetime` is set, the generator starts at `end_datetime`, - otherwise, starts the generator at `end_datetime` minus 1 second. - """ + If `includes_end_datetime` is set, the generator starts at `end_datetime`, otherwise, starts the + generator at `end_datetime` minus 1 second. + """ assert end_datetime >= start_datetime start_date = start_datetime.date() @@ -69,12 +70,13 @@ def _date_range_descending(start_datetime, end_datetime, includes_end_datetime=F def _date_range_in_single_index(dt1, dt2): - """ Determine whether a single index can be searched given a range - of dates or datetimes. If date instances are given, difference should be 1 day. + """ + Determine whether a single index can be searched given a range of dates or datetimes. If date + instances are given, difference should be 1 day. - NOTE: dt2 is exclusive to the search result set. - i.e. The date range is larger or equal to dt1 and strictly smaller than dt2 - """ + NOTE: dt2 is exclusive to the search result set. + i.e. The date range is larger or equal to dt1 and strictly smaller than dt2 + """ assert isinstance(dt1, date) and isinstance(dt2, date) dt = dt2 - dt1 @@ -106,34 +108,40 @@ def _for_elasticsearch_logs(logs, repository_id=None, namespace_id=None): def _random_id(): - """ Generates a unique uuid4 string for the random_id field in LogEntry. 
- It is used as tie-breaker for sorting logs based on datetime: - https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-after.html - """ + """ + Generates a unique uuid4 string for the random_id field in LogEntry. + + It is used as tie-breaker for sorting logs based on datetime: + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-after.html + """ return str(uuid.uuid4()) @add_metaclass(ABCMeta) class ElasticsearchLogsModelInterface(object): """ - Interface for Elasticsearch specific operations with the logs model. - These operations are usually index based. - """ + Interface for Elasticsearch specific operations with the logs model. + + These operations are usually index based. + """ @abstractmethod def can_delete_index(self, index, cutoff_date): - """ Return whether the given index is older than the given cutoff date. """ + """ + Return whether the given index is older than the given cutoff date. + """ @abstractmethod def list_indices(self): - """ List the logs model's indices. """ + """ + List the logs model's indices. + """ class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsModelInterface): """ - DocumentLogsModel implements the data model for the logs API backed by an - elasticsearch service. - """ + DocumentLogsModel implements the data model for the logs API backed by an elasticsearch service. + """ def __init__( self, should_skip_logging=None, elasticsearch_config=None, producer=None, **kwargs @@ -155,11 +163,12 @@ class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsM @staticmethod def _get_ids_by_names(repository_name, namespace_name, performer_name): - """ Retrieve repository/namespace/performer ids based on their names. - throws DataModelException when the namespace_name does not match any - user in the database. + """ + Retrieve repository/namespace/performer ids based on their names. + + throws DataModelException when the namespace_name does not match any user in the database. returns database ID or None if not exists. - """ + """ repository_id = None account_id = None performer_id = None @@ -269,10 +278,11 @@ class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsM return search.execute() def _load_latest_logs(self, performer_id, repository_id, account_id, filter_kinds, size): - """ Return the latest logs from Elasticsearch. + """ + Return the latest logs from Elasticsearch. - Look at indices up to theset logrotateworker threshold, or up to 30 days if not defined. - """ + Look at indices up to theset logrotateworker threshold, or up to 30 days if not defined. + """ # Set the last index to check to be the logrotateworker threshold, or 30 days end_datetime = datetime.now() start_datetime = end_datetime - timedelta(days=DATE_RANGE_LIMIT) @@ -563,7 +573,9 @@ class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsM return self._es_client.list_indices() def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation): - """ Yield a context manager for a group of outdated logs. """ + """ + Yield a context manager for a group of outdated logs. + """ all_indices = self.list_indices() for index in all_indices: if not self.can_delete_index(index, cutoff_date): @@ -575,11 +587,10 @@ class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsM class ElasticsearchLogRotationContext(LogRotationContextInterface): """ - ElasticsearchLogRotationContext yield batch of logs from an index. 
+ ElasticsearchLogRotationContext yield batch of logs from an index. - When completed without exceptions, this context will delete its associated - Elasticsearch index. - """ + When completed without exceptions, this context will delete its associated Elasticsearch index. + """ def __init__(self, index, min_logs_per_rotation, es_client): self._es_client = es_client @@ -623,7 +634,9 @@ class ElasticsearchLogRotationContext(LogRotationContextInterface): return search def _generate_filename(self): - """ Generate the filenames used to archive the action logs. """ + """ + Generate the filenames used to archive the action logs. + """ filename = "%s_%d-%d" % (self.index, self.start_pos, self.end_pos) filename = ".".join((filename, "txt.gz")) return filename diff --git a/data/logs_model/elastic_logs.py b/data/logs_model/elastic_logs.py index a5afae871..81e548966 100644 --- a/data/logs_model/elastic_logs.py +++ b/data/logs_model/elastic_logs.py @@ -58,8 +58,8 @@ class LogEntry(Document): @classmethod def init(cls, index_prefix, index_settings=None, skip_template_init=False): """ - Create the index template, and populate LogEntry's mapping and index settings. - """ + Create the index template, and populate LogEntry's mapping and index settings. + """ wildcard_index = Index(name=index_prefix + "*") wildcard_index.settings(**(index_settings or {})) wildcard_index.document(cls) @@ -93,8 +93,8 @@ class LogEntry(Document): class ElasticsearchLogs(object): """ - Model for logs operations stored in an Elasticsearch cluster. - """ + Model for logs operations stored in an Elasticsearch cluster. + """ def __init__( self, @@ -125,9 +125,8 @@ class ElasticsearchLogs(object): def _initialize(self): """ - Initialize a connection to an ES cluster and - creates an index template if it does not exist. - """ + Initialize a connection to an ES cluster and creates an index template if it does not exist. + """ if not self._initialized: http_auth = None if self._access_key and self._secret_key and self._aws_region: @@ -183,7 +182,9 @@ class ElasticsearchLogs(object): self._initialized = True def index_name(self, day): - """ Return an index name for the given day. """ + """ + Return an index name for the given day. + """ return self._index_prefix + day.strftime(INDEX_DATE_FORMAT) def index_exists(self, index): @@ -194,15 +195,17 @@ class ElasticsearchLogs(object): @staticmethod def _valid_index_prefix(prefix): - """ Check that the given index prefix is valid with the set of - indices used by this class. - """ + """ + Check that the given index prefix is valid with the set of indices used by this class. + """ return re.match(VALID_INDEX_PATTERN, prefix) is not None def _valid_index_name(self, index): - """ Check that the given index name is valid and follows the format: - YYYY-MM-DD - """ + """ + Check that the given index name is valid and follows the format: + + YYYY-MM-DD + """ if not ElasticsearchLogs._valid_index_prefix(index): return False @@ -218,7 +221,9 @@ class ElasticsearchLogs(object): return False def can_delete_index(self, index, cutoff_date): - """ Check if the given index can be deleted based on the given index's date and cutoff date. """ + """ + Check if the given index can be deleted based on the given index's date and cutoff date. 
+        """
         assert self._valid_index_name(index)
         index_dt = datetime.strptime(index[len(self._index_prefix) :], INDEX_DATE_FORMAT)
         return index_dt < cutoff_date and cutoff_date - index_dt >= timedelta(days=1)
@@ -260,11 +265,12 @@ def configure_es(
     index_settings=None,
 ):
     """
-    For options in index_settings, refer to:
-    https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html
-    some index settings are set at index creation time, and therefore, you should NOT
-    change those settings once the index is set.
-    """
+    For options in index_settings, refer to:
+
+    https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html
+
+    Some index settings are set at index creation time and therefore should NOT be changed
+    once the index has been created.
+    """
     es_client = ElasticsearchLogs(
         host=host,
         port=port,
diff --git a/data/logs_model/inmemory_model.py b/data/logs_model/inmemory_model.py
index 9b8d8e834..8a3fd8a99 100644
--- a/data/logs_model/inmemory_model.py
+++ b/data/logs_model/inmemory_model.py
@@ -26,8 +26,10 @@ StoredLog = namedtuple(

 class InMemoryModel(ActionLogsDataInterface):
     """
-    InMemoryModel implements the data model for logs in-memory. FOR TESTING ONLY.
-    """
+    InMemoryModel implements the data model for logs in-memory.
+
+    FOR TESTING ONLY.
+    """

     def __init__(self):
         self.logs = []
@@ -315,7 +317,9 @@ class InMemoryLogRotationContext(LogRotationContextInterface):
                 self.all_logs.remove(log)

     def yield_logs_batch(self):
-        """ Yield a batch of logs and a filename for that batch. """
+        """
+        Yield a batch of logs and a filename for that batch.
+        """
         filename = "inmemory_model_filename_placeholder"
         filename = ".".join((filename, "txt.gz"))
         yield [log_and_repo.stored_log for log_and_repo in self.expired_logs], filename
diff --git a/data/logs_model/interface.py b/data/logs_model/interface.py
index b7c254259..9007148e6 100644
--- a/data/logs_model/interface.py
+++ b/data/logs_model/interface.py
@@ -3,14 +3,18 @@ from six import add_metaclass


 class LogsIterationTimeout(Exception):
-    """ Exception raised if logs iteration times out. """
+    """
+    Exception raised if logs iteration times out.
+    """


 @add_metaclass(ABCMeta)
 class ActionLogsDataInterface(object):
-    """ Interface for code to work with the logs data model. The logs data model consists
-    of all access for reading and writing action logs.
-    """
+    """
+    Interface for code to work with the logs data model.
+
+    The logs data model consists of all access for reading and writing action logs.
+    """

     @abstractmethod
     def lookup_logs(
@@ -24,11 +28,13 @@ class ActionLogsDataInterface(object):
         page_token=None,
         max_page_count=None,
     ):
-        """ Looks up all logs between the start_datetime and end_datetime, filtered
-        by performer (a user), repository or namespace. Note that one (and only one) of the three
-        can be specified. Returns a LogEntriesPage. `filter_kinds`, if specified, is a set/list
-        of the kinds of logs to filter out.
-        """
+        """
+        Looks up all logs between the start_datetime and end_datetime, filtered by performer (a
+        user), repository or namespace.
+
+        Note that one (and only one) of the three can be specified. Returns a LogEntriesPage.
+        `filter_kinds`, if specified, is a set/list of the kinds of logs to filter out.
+        """

     @abstractmethod
     def lookup_latest_logs(
@@ -39,10 +45,12 @@ class ActionLogsDataInterface(object):
         self,
         performer_name=None,
         repository_name=None,
         namespace_name=None,
         filter_kinds=None,
         size=20,
     ):
-        """ Looks up latest logs of a specific kind, filtered by performer (a user),
-        repository or namespace. Note that one (and only one) of the three can be specified.
-        Returns a list of `Log`.
-        """
+        """
+        Looks up latest logs of a specific kind, filtered by performer (a user), repository or
+        namespace.
+
+        Note that one (and only one) of the three can be specified. Returns a list of `Log`.
+        """

     @abstractmethod
     def get_aggregated_log_counts(
@@ -54,16 +62,20 @@ class ActionLogsDataInterface(object):
         self,
         start_datetime,
         end_datetime,
         performer_name=None,
         repository_name=None,
         namespace_name=None,
         filter_kinds=None,
     ):
-        """ Returns the aggregated count of logs, by kind, between the start_datetime and end_datetime,
-        filtered by performer (a user), repository or namespace. Note that one (and only one) of
-        the three can be specified. Returns a list of AggregatedLogCount.
-        """
+        """
+        Returns the aggregated count of logs, by kind, between the start_datetime and end_datetime,
+        filtered by performer (a user), repository or namespace.
+
+        Note that one (and only one) of the three can be specified. Returns a list of
+        AggregatedLogCount.
+        """

     @abstractmethod
     def count_repository_actions(self, repository, day):
-        """ Returns the total number of repository actions over the given day, in the given repository
+        """
+        Returns the total number of repository actions over the given day, in the given repository
         or None on error.
-        """
+        """

     @abstractmethod
     def queue_logs_export(
@@ -77,10 +89,12 @@ class ActionLogsDataInterface(object):
         callback_email=None,
         filter_kinds=None,
     ):
-        """ Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
-        for export to the specified URL and/or email address. Returns the ID of the export job
-        queued or None if error.
-        """
+        """
+        Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
+        for export to the specified URL and/or email address.
+
+        Returns the ID of the queued export job, or None on error.
+        """

     @abstractmethod
     def log_action(
@@ -95,7 +109,9 @@ class ActionLogsDataInterface(object):
         timestamp=None,
         is_free_namespace=False,
     ):
-        """ Logs a single action as having taken place. """
+        """
+        Logs a single action as having taken place.
+        """

     @abstractmethod
     def yield_logs_for_export(
@@ -106,7 +122,8 @@ class ActionLogsDataInterface(object):
         namespace_id=None,
         max_query_time=None,
     ):
-        """ Returns an iterator that yields bundles of all logs found between the start_datetime and
+        """
+        Returns an iterator that yields bundles of all logs found between the start_datetime and
         end_datetime, optionally filtered by the repository or namespace. This function should be
         used for any bulk lookup operations, and should be implemented by implementors to put
         minimal strain on the backing storage for large operations. If there was an error in setting
@@ -115,30 +132,33 @@ class ActionLogsDataInterface(object):
         If max_query_time is specified, each iteration that yields a log bundle will have its
         queries run with a maximum timeout of that specified, and, if any exceed that threshold,
         LogsIterationTimeout will be raised instead of returning the logs bundle.
-        """
+        """

     @abstractmethod
     def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
         """
-        A generator that yields contexts implementing the LogRotationContextInterface.
-        Each context represents a set of logs to be archived and deleted once
-        the context completes without exceptions.
+        A generator that yields contexts implementing the LogRotationContextInterface. Each context
+        represents a set of logs to be archived and deleted once the context completes without
+        exceptions.

-        For database logs, the LogRotationContext abstracts over a set of rows. When the context
-        finishes, its associated rows get deleted.
+        For database logs, the LogRotationContext abstracts over a set of rows. When the context
+        finishes, its associated rows get deleted.

-        For Elasticsearch logs, the LogRotationContext abstracts over indices. When the context
-        finishes, its associated index gets deleted.
-        """
+        For Elasticsearch logs, the LogRotationContext abstracts over indices. When the context
+        finishes, its associated index gets deleted.
+        """


 @add_metaclass(ABCMeta)
 class LogRotationContextInterface(object):
-    """ Interface for iterating over a set of logs to be archived. """
+    """
+    Interface for iterating over a set of logs to be archived.
+    """

     @abstractmethod
     def yield_logs_batch(self):
         """
-        Generator yielding batch of logs and a filename for that batch.
-        A batch is a subset of the logs part of the context.
-        """
+        Generator yielding a batch of logs and a filename for that batch.
+
+        A batch is a subset of the logs that are part of the context.
+        """
diff --git a/data/logs_model/logs_producer/__init__.py b/data/logs_model/logs_producer/__init__.py
index a843658e2..b80323b90 100644
--- a/data/logs_model/logs_producer/__init__.py
+++ b/data/logs_model/logs_producer/__init__.py
@@ -5,9 +5,11 @@ logger = logging.getLogger(__name__)


 class LogSendException(Exception):
-    """ A generic error when sending the logs to its destination.
-    e.g. Kinesis, Kafka, Elasticsearch, ...
-    """
+    """
+    A generic error raised when sending logs to their destination.
+
+    e.g. Kinesis, Kafka, Elasticsearch, ...
+    """

     pass

diff --git a/data/logs_model/logs_producer/elasticsearch_logs_producer.py b/data/logs_model/logs_producer/elasticsearch_logs_producer.py
index 95cf425b9..f08d07870 100644
--- a/data/logs_model/logs_producer/elasticsearch_logs_producer.py
+++ b/data/logs_model/logs_producer/elasticsearch_logs_producer.py
@@ -10,10 +10,11 @@ logger = logging.getLogger(__name__)


 class ElasticsearchLogsProducer(LogProducerInterface):
-    """ Log producer writing log entries to Elasticsearch.
+    """
+    Log producer writing log entries to Elasticsearch.

-    This implementation writes directly to Elasticsearch without a streaming/queueing service.
-    """
+    This implementation writes directly to Elasticsearch without a streaming/queueing service.
+    """

     def send(self, logentry):
         try:
diff --git a/data/logs_model/logs_producer/interface.py b/data/logs_model/logs_producer/interface.py
index c9693725a..388bd4f50 100644
--- a/data/logs_model/logs_producer/interface.py
+++ b/data/logs_model/logs_producer/interface.py
@@ -6,4 +6,6 @@ from six import add_metaclass
 class LogProducerInterface(object):
     @abstractmethod
     def send(self, logentry):
-        """ Send a log entry to the configured log infrastructure. """
+        """
+        Send a log entry to the configured log infrastructure.
+        """
diff --git a/data/logs_model/logs_producer/kafka_logs_producer.py b/data/logs_model/logs_producer/kafka_logs_producer.py
index 739a2a4da..d0acff4f9 100644
--- a/data/logs_model/logs_producer/kafka_logs_producer.py
+++ b/data/logs_model/logs_producer/kafka_logs_producer.py
@@ -15,7 +15,9 @@ DEFAULT_MAX_BLOCK_SECONDS = 5


 class KafkaLogsProducer(LogProducerInterface):
-    """ Log producer writing log entries to a Kafka stream. """
+    """
+    Log producer writing log entries to a Kafka stream.
+    """

     def __init__(self, bootstrap_servers=None, topic=None, client_id=None, max_block_seconds=None):
         self.bootstrap_servers = bootstrap_servers
diff --git a/data/logs_model/logs_producer/kinesis_stream_logs_producer.py b/data/logs_model/logs_producer/kinesis_stream_logs_producer.py
index 3ec5cbc50..f656cf056 100644
--- a/data/logs_model/logs_producer/kinesis_stream_logs_producer.py
+++ b/data/logs_model/logs_producer/kinesis_stream_logs_producer.py
@@ -21,10 +21,12 @@ DEFAULT_MAX_POOL_CONNECTIONS = 10


 def _partition_key(number_of_shards=None):
-    """ Generate a partition key for AWS Kinesis stream.
-    If the number of shards is specified, generate keys where the size of the key space is
-    the number of shards.
-    """
+    """
+    Generate a partition key for an AWS Kinesis stream.
+
+    If the number of shards is specified, generate keys where the size of the key space is the
+    number of shards.
+    """
     key = None
     if number_of_shards is not None:
         shard_number = random.randrange(0, number_of_shards)
@@ -36,7 +38,9 @@ def _partition_key(number_of_shards=None):


 class KinesisStreamLogsProducer(LogProducerInterface):
-    """ Log producer writing log entries to an Amazon Kinesis Data Stream. """
+    """
+    Log producer writing log entries to an Amazon Kinesis Data Stream.
+    """

     def __init__(
         self,
diff --git a/data/logs_model/logs_producer/util.py b/data/logs_model/logs_producer/util.py
index 3ec1e9698..b55ed49c8 100644
--- a/data/logs_model/logs_producer/util.py
+++ b/data/logs_model/logs_producer/util.py
@@ -3,7 +3,9 @@ from datetime import datetime


 class LogEntryJSONEncoder(json.JSONEncoder):
-    """ JSON encoder to encode datetimes to ISO8601 format. """
+    """
+    JSON encoder to encode datetimes to ISO8601 format.
+    """

     def default(self, obj):
         if isinstance(obj, datetime):
@@ -13,7 +15,9 @@ class LogEntryJSONEncoder(json.JSONEncoder):


 def logs_json_serializer(logentry, sort_keys=False):
-    """ Serializes a LogEntry to json bytes. """
+    """
+    Serializes a LogEntry to JSON bytes.
+    """
     return json.dumps(
         logentry.to_dict(), cls=LogEntryJSONEncoder, ensure_ascii=True, sort_keys=sort_keys
     ).encode("ascii")
diff --git a/data/logs_model/shared.py b/data/logs_model/shared.py
index 8ac854d84..5cda67c60 100644
--- a/data/logs_model/shared.py
+++ b/data/logs_model/shared.py
@@ -18,10 +18,12 @@ class SharedModel:
         callback_email=None,
         filter_kinds=None,
     ):
-        """ Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
-        for export to the specified URL and/or email address. Returns the ID of the export job
-        queued or None if error.
-        """
+        """
+        Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
+        for export to the specified URL and/or email address.
+
+        Returns the ID of the queued export job, or None on error.
+        """
         export_id = str(uuid.uuid4())
         namespace = model.user.get_namespace_user(namespace_name)
         if namespace is None:
@@ -59,8 +61,11 @@ def epoch_ms(dt):


 def get_kinds_filter(kinds):
-    """ Given a list of kinds, return the set of kinds not that are not part of that list.
-    i.e Returns the list of kinds to be filtered out. """
+    """
+    Given a list of kinds, return the set of kinds that are not part of that list.
+
+    i.e. returns the list of kinds to be filtered out.
+    """
     kind_map = model.log.get_log_entry_kinds()
     kind_map = {key: kind_map[key] for key in kind_map if not isinstance(key, int)}
     return [kind_name for kind_name in kind_map if kind_name not in kinds]
diff --git a/data/logs_model/table_logs_model.py b/data/logs_model/table_logs_model.py
index 9f2146731..1230afa5d 100644
--- a/data/logs_model/table_logs_model.py
+++ b/data/logs_model/table_logs_model.py
@@ -31,9 +31,9 @@ LOG_MODELS = [LogEntry3, LogEntry2, LogEntry]

 class TableLogsModel(SharedModel, ActionLogsDataInterface):
     """
-    TableLogsModel implements the data model for the logs API backed by a single table
-    in the database.
-    """
+    TableLogsModel implements the data model for the logs API backed by a single table in the
+    database.
+    """

     def __init__(self, should_skip_logging=None, **kwargs):
         self._should_skip_logging = should_skip_logging
@@ -325,7 +325,9 @@ class TableLogsModel(SharedModel, ActionLogsDataInterface):
             current_batch_size = timedelta(seconds=seconds)

     def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
-        """ Yield a context manager for a group of outdated logs. """
+        """
+        Yield a context manager for a group of outdated logs.
+        """
         for log_model in LOG_MODELS:
             while True:
                 with UseThenDisconnect(config.app_config):
@@ -362,12 +364,12 @@ table_logs_model = TableLogsModel()

 class DatabaseLogRotationContext(LogRotationContextInterface):
     """
-    DatabaseLogRotationContext represents a batch of logs to be archived together.
-    i.e A set of logs to be archived in the same file (based on the number of logs per rotation).
+    DatabaseLogRotationContext represents a batch of logs to be archived together, i.e. a set of
+    logs to be archived in the same file (based on the number of logs per rotation).

-    When completed without exceptions, this context will delete the stale logs
-    from rows `start_id` to `end_id`.
-    """
+    When completed without exceptions, this context will delete the stale logs from rows `start_id`
+    to `end_id`.
+    """

     def __init__(self, logs, log_model, start_id, end_id):
         self.logs = logs
@@ -385,6 +387,8 @@ class DatabaseLogRotationContext(LogRotationContextInterface):
             delete_stale_logs(self.start_id, self.end_id, self.log_model)

     def yield_logs_batch(self):
-        """ Yield a batch of logs and a filename for that batch. """
+        """
+        Yield a batch of logs and a filename for that batch.
+        """
         filename = "%d-%d-%s.txt.gz" % (self.start_id, self.end_id, self.log_model.__name__.lower())
         yield self.logs, filename
diff --git a/data/logs_model/test/test_logs_interface.py b/data/logs_model/test/test_logs_interface.py
index 9fb53047e..44e8c9ad5 100644
--- a/data/logs_model/test/test_logs_interface.py
+++ b/data/logs_model/test/test_logs_interface.py
@@ -610,9 +610,9 @@ def test_date_range_in_single_index(dt1, dt2, expected_result):

 def test_pagination(logs_model, mock_page_size):
     """
-    Make sure that pagination does not stop if searching through multiple indices by day,
-    and the current log count matches the page size while there are still indices to be searched.
-    """
+    Make sure that pagination does not stop if searching through multiple indices by day, and the
+    current log count matches the page size while there are still indices to be searched.
+    """
     day1 = datetime.now()
     day2 = day1 + timedelta(days=1)
     day3 = day2 + timedelta(days=1)
diff --git a/data/migrations/env.py b/data/migrations/env.py
index cc713037a..e7ec1a67f 100644
--- a/data/migrations/env.py
+++ b/data/migrations/env.py
@@ -54,10 +54,12 @@ tables = AttrDict(target_metadata.tables)


 def get_tester():
-    """ Returns the tester to use. We only return the tester that populates data
-    if the TEST_MIGRATE env var is set to `true` AND we make sure we're not
-    connecting to a production database.
-    """
+    """
+    Returns the tester to use.
+
+    We only return the tester that populates data if the TEST_MIGRATE env var is set to `true` AND
+    we are not connecting to a production database.
+    """
     if os.environ.get("TEST_MIGRATE", "") == "true":
         url = unquote(DB_URI)
         if url.find("amazonaws.com") < 0:
@@ -92,17 +94,17 @@ def report_success(ctx=None, step=None, heads=None, run_args=None):


 def run_migrations_offline():
-    """Run migrations in 'offline' mode.
+    """
+    Run migrations in 'offline' mode.

-    This configures the context with just a URL
-    and not an Engine, though an Engine is acceptable
-    here as well. By skipping the Engine creation
-    we don't even need a DBAPI to be available.
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well. By skipping the Engine creation
+    we don't even need a DBAPI to be available.

-    Calls to context.execute() here emit the given string to the
-    script output.
-
-    """
+    Calls to context.execute() here emit the given string to the
+    script output.
+    """
     url = unquote(DB_URI)
     context.configure(url=url, target_metadata=target_metadata, transactional_ddl=True)

@@ -111,12 +113,11 @@


 def run_migrations_online():
-    """Run migrations in 'online' mode.
+    """
+    Run migrations in 'online' mode.

-    In this scenario we need to create an Engine
-    and associate a connection with the context.
-
-    """
+    In this scenario we need to create an Engine and associate a connection with the context.
+    """

     if (
         isinstance(db.obj, SqliteDatabase)
diff --git a/data/migrations/progress.py b/data/migrations/progress.py
index b8666ddcb..e14ed3f31 100644
--- a/data/migrations/progress.py
+++ b/data/migrations/progress.py
@@ -9,22 +9,29 @@ from util.abchelpers import nooper

 @add_metaclass(ABCMeta)
 class ProgressReporter(object):
-    """ Implements an interface for reporting progress with the migrations.
-    """
+    """
+    Implements an interface for reporting progress with the migrations.
+    """

     @abstractmethod
     def report_version_complete(self, success):
-        """ Called when an entire migration is complete. """
+        """
+        Called when an entire migration is complete.
+        """

     @abstractmethod
     def report_step_progress(self):
-        """ Called when a single step in the migration has been completed. """
+        """
+        Called when a single step in the migration has been completed.
+        """


 @nooper
 class NullReporter(ProgressReporter):
-    """ No-op version of the progress reporter, designed for use when no progress
-    reporting endpoint is provided. """
+    """
+    No-op version of the progress reporter, designed for use when no progress reporting endpoint is
+    provided.
+ """ class PrometheusReporter(ProgressReporter): diff --git a/data/migrations/test/test_db_config.py b/data/migrations/test/test_db_config.py index 4f524608d..650416460 100644 --- a/data/migrations/test/test_db_config.py +++ b/data/migrations/test/test_db_config.py @@ -16,7 +16,9 @@ from test.fixtures import * ], ) def test_alembic_db_uri(db_uri, is_valid): - """ Test if the given URI is escaped for string interpolation (Python's configparser). """ + """ + Test if the given URI is escaped for string interpolation (Python's configparser). + """ with patch("alembic.script.ScriptDirectory.run_env") as m: if is_valid: run_alembic_migration(db_uri) diff --git a/data/migrations/tester.py b/data/migrations/tester.py index 7fbbdc58a..afd82f305 100644 --- a/data/migrations/tester.py +++ b/data/migrations/tester.py @@ -91,28 +91,37 @@ class DataTypes(object): @add_metaclass(ABCMeta) class MigrationTester(object): - """ Implements an interface for adding testing capabilities to the - data model migration system in Alembic. - """ + """ + Implements an interface for adding testing capabilities to the data model migration system in + Alembic. + """ TestDataType = DataTypes @abstractmethod def is_testing(self): - """ Returns whether we are currently under a migration test. """ + """ + Returns whether we are currently under a migration test. + """ @abstractmethod def populate_table(self, table_name, fields): - """ Called to populate a table with the given fields filled in with testing data. """ + """ + Called to populate a table with the given fields filled in with testing data. + """ @abstractmethod def populate_column(self, table_name, col_name, field_type): - """ Called to populate a column in a table to be filled in with testing data. """ + """ + Called to populate a column in a table to be filled in with testing data. + """ @nooper class NoopTester(MigrationTester): - """ No-op version of the tester, designed for production workloads. """ + """ + No-op version of the tester, designed for production workloads. + """ class PopulateTestDataTester(MigrationTester): diff --git a/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py b/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py index 77fcee664..f98bb6a9a 100644 --- a/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py +++ b/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py @@ -1,9 +1,9 @@ -"""Add creation date to User table +""" +Add creation date to User table. Revision ID: 0cf50323c78b Revises: 87fbbc224f10 Create Date: 2018-03-09 13:19:41.903196 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py b/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py index dcbc77c45..5c0132ef0 100644 --- a/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py +++ b/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py @@ -1,9 +1,9 @@ -"""Add Tag, TagKind and ManifestChild tables +""" +Add Tag, TagKind and ManifestChild tables. Revision ID: 10f45ee2310b Revises: 13411de1c0ff Create Date: 2018-10-29 15:22:53.552216 - """ # revision identifiers, used by Alembic. 
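A note on the migration-tester gating shown in data/migrations/env.py above: get_tester() only hands back the data-populating tester when two independent conditions hold — an explicit TEST_MIGRATE=true opt-in, plus a URL check that rules out hosted production databases. A minimal sketch of that double guard under those assumptions (should_populate_test_data is a hypothetical helper for illustration; the real function returns a tester object, not a boolean):

    import os
    from urllib.parse import unquote  # assumption: Python 3; the codebase may import unquote differently

    def should_populate_test_data(db_uri):
        # Opt-in: the TEST_MIGRATE env var must be exactly "true".
        if os.environ.get("TEST_MIGRATE", "") != "true":
            return False
        # Safety net: refuse anything that looks like a hosted production
        # database (env.py checks the URL for "amazonaws.com").
        url = unquote(db_uri)
        return url.find("amazonaws.com") < 0

Because both checks must pass, an accidentally exported TEST_MIGRATE on its own can never cause test rows to be written to a production database.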
diff --git a/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py b/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py index 9a38cdd8a..3fd6146de 100644 --- a/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py +++ b/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py @@ -1,9 +1,9 @@ -"""Remove unique from TagManifestToManifest +""" +Remove unique from TagManifestToManifest. Revision ID: 13411de1c0ff Revises: 654e6df88b71 Create Date: 2018-08-19 23:30:24.969549 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py b/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py index 1829376c7..6add1de3e 100644 --- a/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py +++ b/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py @@ -1,9 +1,9 @@ -"""Add maximum build queue count setting to user table +""" +Add maximum build queue count setting to user table. Revision ID: 152bb29a1bb3 Revises: 7367229b38d9 Create Date: 2018-02-20 13:34:34.902415 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py b/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py index 0ae018604..83d91730f 100644 --- a/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py +++ b/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py @@ -1,9 +1,9 @@ -"""Make BlodUpload byte_count not nullable +""" +Make BlodUpload byte_count not nullable. Revision ID: 152edccba18c Revises: c91c564aad34 Create Date: 2018-02-23 12:41:25.571835 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py b/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py index a1b384c2e..50fde0497 100644 --- a/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py +++ b/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py @@ -1,9 +1,9 @@ -"""Add automatic disable of build triggers +""" +Add automatic disable of build triggers. Revision ID: 17aff2e1354e Revises: 61cadbacb9fc Create Date: 2017-10-18 15:58:03.971526 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py b/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py index 6208a6a6d..7cf741cd0 100644 --- a/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py +++ b/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py @@ -1,9 +1,9 @@ -"""Add last_accessed field to User table +""" +Add last_accessed field to User table. Revision ID: 224ce4c72c2f Revises: b547bc139ad8 Create Date: 2018-03-12 22:44:07.070490 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py b/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py index 85f8e6e74..8943f3b53 100644 --- a/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py +++ b/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py @@ -1,9 +1,9 @@ -"""repo mirror columns +""" +repo mirror columns. 
Revision ID: 34c8ef052ec9 Revises: c059b952ed76 Create Date: 2019-10-07 13:11:20.424715 - """ # revision identifiers, used by Alembic. @@ -41,9 +41,9 @@ BATCH_SIZE = 10 # Original model class RepoMirrorConfig(BaseModel): """ - Represents a repository to be mirrored and any additional configuration - required to perform the mirroring. - """ + Represents a repository to be mirrored and any additional configuration required to perform the + mirroring. + """ repository = ForeignKeyField(Repository, index=True, unique=True, backref="mirror") creation_date = DateTimeField(default=datetime.utcnow) diff --git a/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py b/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py index db51f6997..7790f88fb 100644 --- a/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py +++ b/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py @@ -1,9 +1,9 @@ -"""Add severity and media_type to global messages +""" +Add severity and media_type to global messages. Revision ID: 3e8cc74a1e7b Revises: fc47c1ec019f Create Date: 2017-01-17 16:22:28.584237 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/45fd8b9869d4_add_notification_type.py b/data/migrations/versions/45fd8b9869d4_add_notification_type.py index b4bac9f65..7e95b1ab8 100644 --- a/data/migrations/versions/45fd8b9869d4_add_notification_type.py +++ b/data/migrations/versions/45fd8b9869d4_add_notification_type.py @@ -1,9 +1,9 @@ -"""add_notification_type +""" +add_notification_type. Revision ID: 45fd8b9869d4 Revises: 94836b099894 Create Date: 2016-12-01 12:02:19.724528 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py b/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py index 9070c5612..01cda799c 100644 --- a/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py +++ b/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py @@ -1,9 +1,9 @@ -"""Add index on logs_archived on repositorybuild +""" +Add index on logs_archived on repositorybuild. Revision ID: 481623ba00ba Revises: b9045731c4de Create Date: 2019-02-15 16:09:47.326805 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/49e1138ed12d_change_token_column_types.py b/data/migrations/versions/49e1138ed12d_change_token_column_types.py index 1bcd578f9..5ad94da87 100644 --- a/data/migrations/versions/49e1138ed12d_change_token_column_types.py +++ b/data/migrations/versions/49e1138ed12d_change_token_column_types.py @@ -1,9 +1,9 @@ -""" Change token column types for encrypted columns +""" +Change token column types for encrypted columns. Revision ID: 49e1138ed12d Revises: 703298a825c2 Create Date: 2019-08-19 16:07:48.109889 - """ # revision identifiers, used by Alembic. revision = "49e1138ed12d" diff --git a/data/migrations/versions/4fd6b8463eb2_add_new_deletedrepository_tracking_table.py b/data/migrations/versions/4fd6b8463eb2_add_new_deletedrepository_tracking_table.py index 3b91ceb7e..196f00618 100644 --- a/data/migrations/versions/4fd6b8463eb2_add_new_deletedrepository_tracking_table.py +++ b/data/migrations/versions/4fd6b8463eb2_add_new_deletedrepository_tracking_table.py @@ -1,9 +1,9 @@ -"""Add new DeletedRepository tracking table +""" +Add new DeletedRepository tracking table. 
Revision ID: 4fd6b8463eb2 Revises: 34c8ef052ec9 Create Date: 2019-12-22 14:58:34.375692 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/5248ddf35167_repository_mirror.py b/data/migrations/versions/5248ddf35167_repository_mirror.py index 86d68061f..a25b4e696 100644 --- a/data/migrations/versions/5248ddf35167_repository_mirror.py +++ b/data/migrations/versions/5248ddf35167_repository_mirror.py @@ -1,9 +1,9 @@ -"""Repository Mirror +""" +Repository Mirror. Revision ID: 5248ddf35167 Revises: b918abdbee43 Create Date: 2019-06-25 16:22:36.310532 - """ revision = "5248ddf35167" diff --git a/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py b/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py index 79a904af5..3aebec0f0 100644 --- a/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py +++ b/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py @@ -1,9 +1,9 @@ -"""Remove reference to subdir +""" +Remove reference to subdir. Revision ID: 53e2ac668296 Revises: ed01e313d3cb Create Date: 2017-03-28 15:01:31.073382 - """ # revision identifiers, used by Alembic. @@ -46,7 +46,9 @@ def downgrade(tables, tester, progress_reporter): def delete_subdir(config): - """ Remove subdir from config """ + """ + Remove subdir from config. + """ if not config: return config if "subdir" in config: @@ -56,7 +58,9 @@ def delete_subdir(config): def add_subdir(config): - """ Add subdir back into config """ + """ + Add subdir back into config. + """ if not config: return config if "context" in config: diff --git a/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py b/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py index 2d7a16e5a..5c33dda3c 100644 --- a/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py +++ b/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py @@ -1,9 +1,9 @@ -"""Add NamespaceGeoRestriction table +""" +Add NamespaceGeoRestriction table. Revision ID: 54492a68a3cf Revises: c00a1f15968b Create Date: 2018-12-05 15:12:14.201116 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py b/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py index b5b6923bc..99c18f10e 100644 --- a/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py +++ b/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py @@ -1,9 +1,9 @@ -"""Cleanup old robots +""" +Cleanup old robots. Revision ID: 5b7503aada1b Revises: 224ce4c72c2f Create Date: 2018-05-09 17:18:52.230504 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py b/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py index 97d64f27a..e9ef90a8d 100644 --- a/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py +++ b/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py @@ -1,9 +1,9 @@ -"""Remove 'oci' tables not used by CNR. The rest will be migrated and renamed. +""" +Remove 'oci' tables not used by CNR. The rest will be migrated and renamed. Revision ID: 5cbbfc95bac7 Revises: 1783530bee68 Create Date: 2018-05-23 17:28:40.114433 - """ # revision identifiers, used by Alembic. 
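Migration 53e2ac668296 above pairs its schema change with two config transforms: delete_subdir strips the legacy key on upgrade, and add_subdir restores it on downgrade. The hunk shows only the guards, so the following is a hedged sketch of the shape such transforms typically take — the restore logic in add_subdir is an assumption, not the migration's actual body:

    def delete_subdir(config):
        # Remove subdir from config (upgrade direction).
        if not config:
            return config
        if "subdir" in config:
            config.pop("subdir")  # assumption: the elided body simply drops the key
        return config

    def add_subdir(config):
        # Add subdir back into config (downgrade direction).
        if not config:
            return config
        if "context" in config:
            # assumption: subdir is recovered from the retained context value
            config["subdir"] = config["context"].lstrip("/")
        return config

The early `if not config` return in both directions mirrors the guards visible in the hunk: empty or missing configs pass through untouched.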
diff --git a/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py b/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py index c357b9256..12febdd20 100644 --- a/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py +++ b/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py @@ -1,9 +1,9 @@ -"""Backfill new appr tables +""" +Backfill new appr tables. Revision ID: 5d463ea1e8a8 Revises: 610320e9dacf Create Date: 2018-07-08 10:01:19.756126 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py b/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py index bf30ea24c..16fb46ab3 100644 --- a/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py +++ b/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py @@ -1,9 +1,9 @@ -"""Add new Appr-specific tables +""" +Add new Appr-specific tables. Revision ID: 610320e9dacf Revises: 5cbbfc95bac7 Create Date: 2018-05-24 16:46:13.514562 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py b/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py index f93bd32b2..84244a525 100644 --- a/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py +++ b/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py @@ -1,9 +1,9 @@ -"""Add ability for build triggers to be disabled +""" +Add ability for build triggers to be disabled. Revision ID: 61cadbacb9fc Revises: b4c2d45bc132 Create Date: 2017-10-18 12:07:26.190901 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py b/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py index 6f2b496b2..2b239fc2d 100644 --- a/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py +++ b/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py @@ -1,9 +1,9 @@ -"""Change manifest_bytes to a UTF8 text field +""" +Change manifest_bytes to a UTF8 text field. Revision ID: 654e6df88b71 Revises: eafdeadcebc7 Create Date: 2018-08-15 09:58:46.109277 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py b/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py index cd61e2b47..cf7d849ff 100644 --- a/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py +++ b/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py @@ -1,9 +1,9 @@ -"""Add TagToRepositoryTag table +""" +Add TagToRepositoryTag table. Revision ID: 67f0abd172ae Revises: 10f45ee2310b Create Date: 2018-10-30 11:31:06.615488 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py b/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py index 53e0a497d..5fea6bb51 100644 --- a/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py +++ b/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py @@ -1,9 +1,9 @@ -"""Change LogEntry to use a BigInteger as its primary key +""" +Change LogEntry to use a BigInteger as its primary key. 
Revision ID: 6c21e2cfb8b6 Revises: d17c695859ea Create Date: 2018-07-27 16:30:02.877346 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py b/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py index 302584bf7..30bf1ec70 100644 --- a/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py +++ b/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py @@ -1,9 +1,9 @@ -"""Add user prompt support +""" +Add user prompt support. Revision ID: 6c7014e84a5e Revises: c156deb8845d Create Date: 2016-10-31 16:26:31.447705 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py b/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py index c9bf5127c..e16be19ce 100644 --- a/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py +++ b/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py @@ -1,9 +1,9 @@ -"""Add LogEntry3 table +""" +Add LogEntry3 table. Revision ID: 6ec8726c0ace Revises: 54492a68a3cf Create Date: 2019-01-03 13:41:02.897957 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py b/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py index 9bd2bf997..b4a303b8e 100644 --- a/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py +++ b/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py @@ -1,9 +1,9 @@ -"""Backfill new encrypted fields +""" +Backfill new encrypted fields. Revision ID: 703298a825c2 Revises: c13c8052f7a6 Create Date: 2019-08-19 16:07:48.109889 - """ # revision identifiers, used by Alembic. revision = "703298a825c2" diff --git a/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py b/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py index 4c387e4de..e3d7319dc 100644 --- a/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py +++ b/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py @@ -1,9 +1,9 @@ -"""Add support for app specific tokens +""" +Add support for app specific tokens. Revision ID: 7367229b38d9 Revises: d8989249f8f6 Create Date: 2017-12-12 13:15:42.419764 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/7a525c68eb13_add_oci_app_models.py b/data/migrations/versions/7a525c68eb13_add_oci_app_models.py index b6e60a2e0..818054a37 100644 --- a/data/migrations/versions/7a525c68eb13_add_oci_app_models.py +++ b/data/migrations/versions/7a525c68eb13_add_oci_app_models.py @@ -1,9 +1,9 @@ -"""Add OCI/App models +""" +Add OCI/App models. Revision ID: 7a525c68eb13 Revises: e2894a3a3c19 Create Date: 2017-01-24 16:25:52.170277 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py b/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py index cb2216963..ac52f9d05 100644 --- a/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py +++ b/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py @@ -1,9 +1,9 @@ -"""Add disabled datetime to trigger +""" +Add disabled datetime to trigger. Revision ID: 87fbbc224f10 Revises: 17aff2e1354e Create Date: 2017-10-24 14:06:37.658705 - """ # revision identifiers, used by Alembic. 
diff --git a/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py b/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py index dcf90201d..eef2d36fb 100644 --- a/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py +++ b/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py @@ -1,9 +1,9 @@ -"""Add V2_2 data models for Manifest, ManifestBlob and ManifestLegacyImage +""" +Add V2_2 data models for Manifest, ManifestBlob and ManifestLegacyImage. Revision ID: 9093adccc784 Revises: 6c21e2cfb8b6 Create Date: 2018-08-06 16:07:50.222749 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/94836b099894_create_new_notification_type.py b/data/migrations/versions/94836b099894_create_new_notification_type.py index 86bed4422..5aa8a1edb 100644 --- a/data/migrations/versions/94836b099894_create_new_notification_type.py +++ b/data/migrations/versions/94836b099894_create_new_notification_type.py @@ -1,9 +1,9 @@ -"""Create new notification type +""" +Create new notification type. Revision ID: 94836b099894 Revises: faf752bd2e0a Create Date: 2016-11-30 10:29:51.519278 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py b/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py index 927f6952b..298e05db1 100644 --- a/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py +++ b/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py @@ -1,9 +1,9 @@ -"""back fill build expand_config +""" +back fill build expand_config. Revision ID: a6c463dfb9fe Revises: b4df55dea4b3 Create Date: 2017-03-17 10:00:19.739858 - """ # revision identifiers, used by Alembic. @@ -71,7 +71,9 @@ def create_dockerfile_path(current_subdir): def get_config_expand(config): - """ A function to transform old records into new records """ + """ + A function to transform old records into new records. + """ if not config: return config @@ -89,7 +91,9 @@ def get_config_expand(config): def get_config_contract(config): - """ A function to delete context and dockerfile_path from config """ + """ + A function to delete context and dockerfile_path from config. + """ if not config: return config diff --git a/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py b/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py index d1c91362c..34e9806c8 100644 --- a/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py +++ b/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py @@ -1,9 +1,9 @@ -"""Add deleted namespace table +""" +Add deleted namespace table. Revision ID: b4c2d45bc132 Revises: 152edccba18c Create Date: 2018-02-27 11:43:02.329941 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/b4df55dea4b3_add_repository_kind.py b/data/migrations/versions/b4df55dea4b3_add_repository_kind.py index bd32fcd13..a5fd0fa27 100644 --- a/data/migrations/versions/b4df55dea4b3_add_repository_kind.py +++ b/data/migrations/versions/b4df55dea4b3_add_repository_kind.py @@ -1,9 +1,9 @@ -"""add repository kind +""" +add repository kind. Revision ID: b4df55dea4b3 Revises: 7a525c68eb13 Create Date: 2017-03-19 12:59:41.484430 - """ # revision identifiers, used by Alembic. 
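The a6c463dfb9fe backfill above maps every stored build-trigger config through get_config_expand on upgrade and get_config_contract on downgrade, with the create_dockerfile_path helper (defined earlier in that file) deriving the new fields from the legacy subdir. Only the guards survive in this hunk; a sketch of the likely expand/contract pair follows, with the derived-field logic an assumption for illustration:

    def get_config_expand(config):
        # A function to transform old records into new records.
        if not config:
            return config
        config = dict(config)
        subdir = config.get("subdir", "")
        # assumption: the new context/dockerfile_path fields are derived
        # from the legacy subdir value (via create_dockerfile_path upstream)
        config["context"] = "/"
        config["dockerfile_path"] = "/" + subdir + "/Dockerfile" if subdir else "/Dockerfile"
        return config

    def get_config_contract(config):
        # A function to delete context and dockerfile_path from config.
        if not config:
            return config
        config = dict(config)
        config.pop("context", None)
        config.pop("dockerfile_path", None)
        return config

The two functions are deliberate inverses so that running downgrade after upgrade leaves each record's config in its original shape.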
diff --git a/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py b/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py index 529a95cb3..0f0cbd3e5 100644 --- a/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py +++ b/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py @@ -1,9 +1,9 @@ -"""Add RobotAccountMetadata table +""" +Add RobotAccountMetadata table. Revision ID: b547bc139ad8 Revises: 0cf50323c78b Create Date: 2018-03-09 15:50:48.298880 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py b/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py index 3e19a1dba..ae8e0aeaf 100644 --- a/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py +++ b/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py @@ -1,9 +1,9 @@ -"""Change BlobUpload fields to BigIntegers to allow layers > 8GB +""" +Change BlobUpload fields to BigIntegers to allow layers > 8GB. Revision ID: b8ae68ad3e52 Revises: 7a525c68eb13 Create Date: 2017-02-27 11:26:49.182349 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py b/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py index 8e9755146..cd1c58880 100644 --- a/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py +++ b/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py @@ -1,9 +1,9 @@ -"""Add lifetime end indexes to tag tables +""" +Add lifetime end indexes to tag tables. Revision ID: b9045731c4de Revises: e184af42242d Create Date: 2019-02-14 17:18:40.474310 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py b/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py index 267301ff3..5a737966e 100644 --- a/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py +++ b/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py @@ -1,9 +1,9 @@ -"""Run full tag backfill +""" +Run full tag backfill. Revision ID: b918abdbee43 Revises: 481623ba00ba Create Date: 2019-03-14 13:38:03.411609 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py b/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py index 2268b664d..78561acab 100644 --- a/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py +++ b/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py @@ -1,9 +1,9 @@ -"""Add TeamSync table +""" +Add TeamSync table. Revision ID: be8d1c402ce0 Revises: a6c463dfb9fe Create Date: 2017-02-23 13:34:52.356812 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py b/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py index 56b39beea..d2af9890a 100644 --- a/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py +++ b/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py @@ -1,9 +1,9 @@ -"""Remove unencrypted fields and data +""" +Remove unencrypted fields and data. Revision ID: c059b952ed76 Revises: 49e1138ed12d Create Date: 2019-08-19 16:31:00.952773 - """ # revision identifiers, used by Alembic. 
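Migration b8ae68ad3e52 above exists because a signed 32-bit INTEGER column tops out near 2**31 bytes (~2GB), far too small to record the byte counts of layers larger than 8GB. The column statements fall outside the hunk shown; in Alembic such a widening is typically a pair of alter_column calls, sketched here with assumed table and column names (byte_count is named by the related migration 152edccba18c; uncompressed_byte_count is a guess):

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # Widen the BlobUpload counters from INTEGER to BIGINT.
        op.alter_column(
            "blobupload", "byte_count", existing_type=sa.Integer(), type_=sa.BigInteger()
        )
        op.alter_column(
            "blobupload",
            "uncompressed_byte_count",
            existing_type=sa.Integer(),
            type_=sa.BigInteger(),
        )

A matching downgrade would reverse the types, at the cost of truncating any values that no longer fit in 32 bits.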
diff --git a/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py b/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py index 9909d402b..b8cb75789 100644 --- a/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py +++ b/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py @@ -1,9 +1,9 @@ -"""Add new fields and tables for encrypted tokens +""" +Add new fields and tables for encrypted tokens. Revision ID: c13c8052f7a6 Revises: 5248ddf35167 Create Date: 2019-08-19 15:59:36.269155 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py b/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py index 37947088e..410ce556f 100644 --- a/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py +++ b/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py @@ -1,9 +1,9 @@ -"""Reset our migrations with a required update +""" +Reset our migrations with a required update. Revision ID: c156deb8845d Revises: None Create Date: 2016-11-08 11:58:11.110762 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py b/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py index a2f61ae44..2a6eebade 100644 --- a/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py +++ b/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py @@ -1,9 +1,9 @@ -"""Backfill RepositorySearchScore table +""" +Backfill RepositorySearchScore table. Revision ID: c3d4b7ebcdf7 Revises: f30984525c86 Create Date: 2017-04-13 12:01:59.572775 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py b/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py index 34d64b966..1a9b7fefb 100644 --- a/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py +++ b/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py @@ -1,9 +1,9 @@ -"""Drop checksum on ImageStorage +""" +Drop checksum on ImageStorage. Revision ID: c91c564aad34 Revises: 152bb29a1bb3 Create Date: 2018-02-21 12:17:52.405644 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/cbc8177760d9_add_user_location_field.py b/data/migrations/versions/cbc8177760d9_add_user_location_field.py index 25ac9d911..8b53a2d59 100644 --- a/data/migrations/versions/cbc8177760d9_add_user_location_field.py +++ b/data/migrations/versions/cbc8177760d9_add_user_location_field.py @@ -1,9 +1,9 @@ -"""Add user location field +""" +Add user location field. Revision ID: cbc8177760d9 Revises: 7367229b38d9 Create Date: 2018-02-02 17:39:16.589623 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py b/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py index 893a6bea0..fe04a0910 100644 --- a/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py +++ b/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py @@ -1,9 +1,9 @@ -"""repository mirror notification +""" +repository mirror notification. Revision ID: cc6778199cdb Revises: c059b952ed76 Create Date: 2019-10-03 17:41:23.316914 - """ # revision identifiers, used by Alembic. 
diff --git a/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py b/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py index 8f111e3fb..03f21b071 100644 --- a/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py +++ b/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py @@ -1,9 +1,9 @@ -"""Delete old Appr tables +""" +Delete old Appr tables. Revision ID: d17c695859ea Revises: 5d463ea1e8a8 Create Date: 2018-07-16 15:21:11.593040 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py b/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py index a31f1acb3..251a20f55 100644 --- a/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py +++ b/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py @@ -1,9 +1,9 @@ -"""Backfill state_id and make it unique +""" +Backfill state_id and make it unique. Revision ID: d42c175b439a Revises: 3e8cc74a1e7b Create Date: 2017-01-18 15:11:01.635632 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py b/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py index df40def07..b0a8c657b 100644 --- a/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py +++ b/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py @@ -1,9 +1,9 @@ -"""Add change_tag_expiration log type +""" +Add change_tag_expiration log type. Revision ID: d8989249f8f6 Revises: dc4af11a5f90 Create Date: 2017-06-21 21:18:25.948689 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py b/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py index 368c27183..903f7346a 100644 --- a/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py +++ b/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py @@ -1,9 +1,9 @@ -"""add notification number of failures column +""" +add notification number of failures column. Revision ID: dc4af11a5f90 Revises: 53e2ac668296 Create Date: 2017-05-16 17:24:02.630365 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py b/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py index 46f5cfbf8..060f9e47e 100644 --- a/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py +++ b/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py @@ -1,9 +1,9 @@ -"""Add missing index on UUID fields +""" +Add missing index on UUID fields. Revision ID: e184af42242d Revises: 6ec8726c0ace Create Date: 2019-02-14 16:35:47.768086 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py b/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py index f858e1a33..e9e5ba8bd 100644 --- a/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py +++ b/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py @@ -1,9 +1,9 @@ -"""Add full text search indexing for repo name and description +""" +Add full text search indexing for repo name and description. 
Revision ID: e2894a3a3c19 Revises: d42c175b439a Create Date: 2017-01-11 13:55:54.890774 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py b/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py index 50770a815..ba584dd9c 100644 --- a/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py +++ b/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py @@ -1,9 +1,9 @@ -"""Remove blob_index from ManifestBlob table +""" +Remove blob_index from ManifestBlob table. Revision ID: eafdeadcebc7 Revises: 9093adccc784 Create Date: 2018-08-07 15:57:54.001225 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py b/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py index c29f9f069..a9335f7b4 100644 --- a/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py +++ b/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py @@ -1,9 +1,9 @@ -"""Add trust_enabled to repository +""" +Add trust_enabled to repository. Revision ID: ed01e313d3cb Revises: c3d4b7ebcdf7 Create Date: 2017-04-14 17:38:03.319695 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py b/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py index 41d0f130e..19968cd6e 100644 --- a/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py +++ b/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py @@ -1,9 +1,9 @@ -"""Add RepositorySearchScore table +""" +Add RepositorySearchScore table. Revision ID: f30984525c86 Revises: be8d1c402ce0 Create Date: 2017-04-04 14:30:13.270728 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py b/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py index 1c8430a32..a8af5d564 100644 --- a/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py +++ b/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py @@ -1,9 +1,9 @@ -"""update queue item table indices +""" +update queue item table indices. Revision ID: f5167870dd66 Revises: 45fd8b9869d4 Create Date: 2016-12-08 17:26:20.333846 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py b/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py index e29705ebe..6418cd33b 100644 --- a/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py +++ b/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py @@ -1,9 +1,9 @@ -"""Add user metadata fields +""" +Add user metadata fields. Revision ID: faf752bd2e0a Revises: 6c7014e84a5e Create Date: 2016-11-14 17:29:03.984665 - """ # revision identifiers, used by Alembic. diff --git a/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py b/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py index 6b9dac5ff..d34c72d5b 100644 --- a/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py +++ b/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py @@ -1,9 +1,9 @@ -"""Add state_id field to QueueItem +""" +Add state_id field to QueueItem. 
Revision ID: fc47c1ec019f Revises: f5167870dd66 Create Date: 2017-01-12 15:44:23.643016 - """ # revision identifiers, used by Alembic. diff --git a/data/migrationutil.py b/data/migrationutil.py index b68223624..5d64e44c3 100644 --- a/data/migrationutil.py +++ b/data/migrationutil.py @@ -11,12 +11,15 @@ MigrationPhase = namedtuple("MigrationPhase", ["name", "alembic_revision", "flag class DataMigration(object): @abstractproperty def alembic_migration_revision(self): - """ Returns the alembic migration revision corresponding to the currently configured phase. - """ + """ + Returns the alembic migration revision corresponding to the currently configured phase. + """ @abstractmethod def has_flag(self, flag): - """ Returns true if the data migration's current phase has the given flag set. """ + """ + Returns true if the data migration's current phase has the given flag set. + """ class NullDataMigration(DataMigration): diff --git a/data/model/_basequery.py b/data/model/_basequery.py index 19c8ee368..71cecde8d 100644 --- a/data/model/_basequery.py +++ b/data/model/_basequery.py @@ -27,11 +27,13 @@ logger = logging.getLogger(__name__) def reduce_as_tree(queries_to_reduce): - """ This method will split a list of queries into halves recursively until we reach individual - queries, at which point it will start unioning the queries, or the already unioned subqueries. - This works around a bug in peewee SQL generation where reducing linearly generates a chain - of queries that will exceed the recursion depth limit when it has around 80 queries. - """ + """ + This method will split a list of queries into halves recursively until we reach individual + queries, at which point it will start unioning the queries, or the already unioned subqueries. + + This works around a bug in peewee SQL generation where reducing linearly generates a chain of + queries that will exceed the recursion depth limit when it has around 80 queries. + """ mid = len(queries_to_reduce) / 2 left = queries_to_reduce[:mid] right = queries_to_reduce[mid:] @@ -179,8 +181,11 @@ def calculate_image_aggregate_size(ancestors_str, image_size, parent_image): def update_last_accessed(token_or_user): - """ Updates the `last_accessed` field on the given token or user. If the existing field's value - is within the configured threshold, the update is skipped. """ + """ + Updates the `last_accessed` field on the given token or user. + + If the existing field's value is within the configured threshold, the update is skipped. + """ if not config.app_config.get("FEATURE_USER_LAST_ACCESSED"): return diff --git a/data/model/appspecifictoken.py b/data/model/appspecifictoken.py index 429ed5582..842fb3cd8 100644 --- a/data/model/appspecifictoken.py +++ b/data/model/appspecifictoken.py @@ -26,8 +26,11 @@ _default_expiration_duration_opt = "__deo" def create_token(user, title, expiration=_default_expiration_duration_opt): - """ Creates and returns an app specific token for the given user. If no expiration is specified - (including `None`), then the default from config is used. """ + """ + Creates and returns an app specific token for the given user. + + If no expiration is specified (including `None`), then the default from config is used. + """ if expiration == _default_expiration_duration_opt: duration = _default_expiration_duration() expiration = duration + datetime.now() if duration else None @@ -49,17 +52,23 @@ def create_token(user, title, expiration=_default_expiration_duration_opt): def list_tokens(user): - """ Lists all tokens for the given user. 
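Reviewer note: the halving strategy in reduce_as_tree above is worth illustrating, since the linear chains produced by a plain reduce() are exactly what trips peewee's recursion limit. A minimal standalone sketch (plain Python values and a combine callable stand in for peewee queries; not part of this change):

    def reduce_as_tree_sketch(items, combine):
        # Split recursively so the combined expression tree has depth
        # O(log n) rather than the O(n) chain a linear reduce would build.
        if len(items) == 1:
            return items[0]
        mid = len(items) // 2  # integer division; bare `/` returns a float on Python 3
        return combine(
            reduce_as_tree_sketch(items[:mid], combine),
            reduce_as_tree_sketch(items[mid:], combine),
        )

    # Combining 80 values stays well under any recursion depth limit.
    assert reduce_as_tree_sketch(list(range(80)), lambda a, b: a + b) == sum(range(80))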
""" + """ + Lists all tokens for the given user. + """ return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user) def revoke_token(token): - """ Revokes an app specific token by deleting it. """ + """ + Revokes an app specific token by deleting it. + """ token.delete_instance() def revoke_token_by_uuid(uuid, owner): - """ Revokes an app specific token by deleting it. """ + """ + Revokes an app specific token by deleting it. + """ try: token = AppSpecificAuthToken.get(uuid=uuid, user=owner) except AppSpecificAuthToken.DoesNotExist: @@ -70,9 +79,10 @@ def revoke_token_by_uuid(uuid, owner): def get_expiring_tokens(user, soon): - """ Returns all tokens owned by the given user that will be expiring "soon", where soon is defined - by the soon parameter (a timedelta from now). - """ + """ + Returns all tokens owned by the given user that will be expiring "soon", where soon is defined + by the soon parameter (a timedelta from now). + """ soon_datetime = datetime.now() + soon return AppSpecificAuthToken.select().where( AppSpecificAuthToken.user == user, @@ -82,7 +92,9 @@ def get_expiring_tokens(user, soon): def gc_expired_tokens(expiration_window): - """ Deletes all expired tokens outside of the expiration window. """ + """ + Deletes all expired tokens outside of the expiration window. + """ ( AppSpecificAuthToken.delete() .where(AppSpecificAuthToken.expiration < (datetime.now() - expiration_window)) @@ -91,10 +103,12 @@ def gc_expired_tokens(expiration_window): def get_token_by_uuid(uuid, owner=None): - """ Looks up an unexpired app specific token with the given uuid. Returns it if found or - None if none. If owner is specified, only tokens owned by the owner user will be - returned. - """ + """ + Looks up an unexpired app specific token with the given uuid. + + Returns it if found or None if none. If owner is specified, only tokens owned by the owner user + will be returned. + """ try: query = AppSpecificAuthToken.select().where( AppSpecificAuthToken.uuid == uuid, @@ -112,9 +126,12 @@ def get_token_by_uuid(uuid, owner=None): def access_valid_token(token_code): - """ Looks up an unexpired app specific token with the given token code. If found, the token's - last_accessed field is set to now and the token is returned. If not found, returns None. - """ + """ + Looks up an unexpired app specific token with the given token code. + + If found, the token's last_accessed field is set to now and the token is returned. If not found, + returns None. + """ token_code = remove_unicode(token_code) prefix = token_code[:TOKEN_NAME_PREFIX_LENGTH] diff --git a/data/model/blob.py b/data/model/blob.py index 179876073..9a1f7ce06 100644 --- a/data/model/blob.py +++ b/data/model/blob.py @@ -28,8 +28,9 @@ logger = logging.getLogger(__name__) def get_repository_blob_by_digest(repository, blob_digest): - """ Find the content-addressable blob linked to the specified repository. - """ + """ + Find the content-addressable blob linked to the specified repository. + """ assert blob_digest try: storage = ( @@ -49,8 +50,9 @@ def get_repository_blob_by_digest(repository, blob_digest): def get_repo_blob_by_digest(namespace, repo_name, blob_digest): - """ Find the content-addressable blob linked to the specified repository. - """ + """ + Find the content-addressable blob linked to the specified repository. 
+ """ assert blob_digest try: storage = ( @@ -97,8 +99,9 @@ def store_blob_record_and_temp_link_in_repo( link_expiration_s, uncompressed_byte_count=None, ): - """ Store a record of the blob and temporarily link it to the specified repository. - """ + """ + Store a record of the blob and temporarily link it to the specified repository. + """ assert blob_digest assert byte_count is not None @@ -135,9 +138,11 @@ def store_blob_record_and_temp_link_in_repo( def temp_link_blob(repository_id, blob_digest, link_expiration_s): - """ Temporarily links to the blob record from the given repository. If the blob record is not - found, return None. - """ + """ + Temporarily links to the blob record from the given repository. + + If the blob record is not found, return None. + """ assert blob_digest with db_transaction(): @@ -163,7 +168,9 @@ def _temp_link_blob(repository_id, storage, link_expiration_s): def get_stale_blob_upload(stale_timespan): - """ Returns a random blob upload which was created before the stale timespan. """ + """ + Returns a random blob upload which was created before the stale timespan. + """ stale_threshold = datetime.now() - stale_timespan try: @@ -192,7 +199,9 @@ def get_stale_blob_upload(stale_timespan): def get_blob_upload_by_uuid(upload_uuid): - """ Loads the upload with the given UUID, if any. """ + """ + Loads the upload with the given UUID, if any. + """ try: return BlobUpload.select().where(BlobUpload.uuid == upload_uuid).get() except BlobUpload.DoesNotExist: @@ -200,8 +209,9 @@ def get_blob_upload_by_uuid(upload_uuid): def get_blob_upload(namespace, repo_name, upload_uuid): - """ Load the upload which is already in progress. - """ + """ + Load the upload which is already in progress. + """ try: return ( BlobUpload.select(BlobUpload, ImageStorageLocation) @@ -221,14 +231,18 @@ def get_blob_upload(namespace, repo_name, upload_uuid): def initiate_upload(namespace, repo_name, uuid, location_name, storage_metadata): - """ Initiates a blob upload for the repository with the given namespace and name, - in a specific location. """ + """ + Initiates a blob upload for the repository with the given namespace and name, in a specific + location. + """ repo = _basequery.get_existing_repository(namespace, repo_name) return initiate_upload_for_repo(repo, uuid, location_name, storage_metadata) def initiate_upload_for_repo(repo, uuid, location_name, storage_metadata): - """ Initiates a blob upload for a specific repository object, in a specific location. """ + """ + Initiates a blob upload for a specific repository object, in a specific location. + """ location = storage_model.get_image_location_for_name(location_name) return BlobUpload.create( repository=repo, location=location.id, uuid=uuid, storage_metadata=storage_metadata @@ -236,11 +250,12 @@ def initiate_upload_for_repo(repo, uuid, location_name, storage_metadata): def get_shared_blob(digest): - """ Returns the ImageStorage blob with the given digest or, if not present, - returns None. This method is *only* to be used for shared blobs that are - globally accessible, such as the special empty gzipped tar layer that Docker - no longer pushes to us. - """ + """ + Returns the ImageStorage blob with the given digest or, if not present, returns None. + + This method is *only* to be used for shared blobs that are globally accessible, such as the + special empty gzipped tar layer that Docker no longer pushes to us. 
+ """ assert digest try: return ImageStorage.get(content_checksum=digest, uploading=False) @@ -249,12 +264,13 @@ def get_shared_blob(digest): def get_or_create_shared_blob(digest, byte_data, storage): - """ Returns the ImageStorage blob with the given digest or, if not present, - adds a row and writes the given byte data to the storage engine. - This method is *only* to be used for shared blobs that are globally - accessible, such as the special empty gzipped tar layer that Docker - no longer pushes to us. - """ + """ + Returns the ImageStorage blob with the given digest or, if not present, adds a row and writes + the given byte data to the storage engine. + + This method is *only* to be used for shared blobs that are globally accessible, such as the + special empty gzipped tar layer that Docker no longer pushes to us. + """ assert digest assert byte_data is not None assert storage diff --git a/data/model/build.py b/data/model/build.py index a31cc399c..e370aee79 100644 --- a/data/model/build.py +++ b/data/model/build.py @@ -209,7 +209,9 @@ def _get_build_row(build_uuid): def update_phase_then_close(build_uuid, phase): - """ A function to change the phase of a build """ + """ + A function to change the phase of a build. + """ with UseThenDisconnect(config.app_config): try: build = _get_build_row(build_uuid) @@ -230,7 +232,9 @@ def update_phase_then_close(build_uuid, phase): def create_cancel_build_in_queue(build_phase, build_queue_id, build_queue): - """ A function to cancel a build before it leaves the queue """ + """ + A function to cancel a build before it leaves the queue. + """ def cancel_build(): cancelled = False @@ -247,7 +251,9 @@ def create_cancel_build_in_queue(build_phase, build_queue_id, build_queue): def create_cancel_build_in_manager(build_phase, build_uuid, build_canceller): - """ A function to cancel the build before it starts to push """ + """ + A function to cancel the build before it starts to push. + """ def cancel_build(): if build_phase in PHASES_NOT_ALLOWED_TO_CANCEL_FROM: @@ -259,8 +265,10 @@ def create_cancel_build_in_manager(build_phase, build_uuid, build_canceller): def cancel_repository_build(build, build_queue): - """ This tries to cancel the build returns true if request is successful false - if it can't be cancelled """ + """ + This tries to cancel the build returns true if request is successful false if it can't be + cancelled. + """ from app import build_canceller from buildman.jobutil.buildjob import BuildJobNotifier @@ -306,8 +314,9 @@ def get_archivable_build(): def mark_build_archived(build_uuid): - """ Mark a build as archived, and return True if we were the ones who actually - updated the row. """ + """ + Mark a build as archived, and return True if we were the ones who actually updated the row. + """ return ( RepositoryBuild.update(logs_archived=True) .where(RepositoryBuild.uuid == build_uuid, RepositoryBuild.logs_archived == False) @@ -316,7 +325,9 @@ def mark_build_archived(build_uuid): def toggle_build_trigger(trigger, enabled, reason=TRIGGER_DISABLE_REASON.USER_TOGGLED): - """ Toggles the enabled status of a build trigger. """ + """ + Toggles the enabled status of a build trigger. + """ trigger.enabled = enabled if not enabled: @@ -327,11 +338,14 @@ def toggle_build_trigger(trigger, enabled, reason=TRIGGER_DISABLE_REASON.USER_TO def update_trigger_disable_status(trigger, final_phase): - """ Updates the disable status of the given build trigger. 
If the build trigger had a - failure, then the counter is increased and, if we've reached the limit, the trigger is - automatically disabled. Otherwise, if the trigger succeeded, it's counter is reset. This - ensures that triggers that continue to error are eventually automatically disabled. - """ + """ + Updates the disable status of the given build trigger. + + If the build trigger had a failure, then the counter is increased and, if we've reached the + limit, the trigger is automatically disabled. Otherwise, if the trigger succeeded, its counter + is reset. This ensures that triggers that continue to error are eventually automatically + disabled. + """ with db_transaction(): try: trigger = RepositoryBuildTrigger.get(id=trigger.id) diff --git a/data/model/gc.py b/data/model/gc.py index 0a9533013..fab7c13d8 100644 --- a/data/model/gc.py +++ b/data/model/gc.py @@ -56,11 +56,13 @@ class _GarbageCollectorContext(object): def purge_repository(repo, force=False): - """ Completely delete all traces of the repository. Will return True upon - complete success, and False upon partial or total failure. Garbage - collection is incremental and repeatable, so this return value does - not need to be checked or responded to. - """ + """ + Completely delete all traces of the repository. + + Will return True upon complete success, and False upon partial or total failure. Garbage + collection is incremental and repeatable, so this return value does not need to be checked or + responded to. + """ assert repo.state == RepositoryState.MARKED_FOR_DELETION or force # Delete the repository of all Appr-referenced entries. @@ -96,10 +98,12 @@ def purge_repository(repo, force=False): def _chunk_iterate_for_deletion(query, chunk_size=10): - """ Returns an iterator that loads the rows returned by the given query in chunks. Note that - order is not guaranteed here, so this will only work (i.e. not return duplicates) if - the rows returned are being deleted between calls. - """ + """ + Returns an iterator that loads the rows returned by the given query in chunks. + + Note that order is not guaranteed here, so this will only work (i.e. not return duplicates) if + the rows returned are being deleted between calls. + """ while True: results = list(query.limit(chunk_size)) if not results: @@ -109,9 +113,9 @@ def _chunk_iterate_for_deletion(query, chunk_size=10): def _purge_repository_contents(repo): - """ Purges all the contents of a repository, removing all of its tags, - manifests and images. - """ + """ + Purges all the contents of a repository, removing all of its tags, manifests and images. + """ logger.debug("Purging repository %s", repo) # Purge via all the tags. @@ -168,7 +172,9 @@ def _purge_repository_contents(repo): def garbage_collect_repo(repo): - """ Performs garbage collection over the contents of a repository. """ + """ + Performs garbage collection over the contents of a repository. + """ # Purge expired tags. had_changes = False @@ -200,9 +206,10 @@ def garbage_collect_repo(repo): def _run_garbage_collection(context): - """ Runs the garbage collection loop, deleting manifests, images, labels and blobs - in an iterative fashion. - """ + """ + Runs the garbage collection loop, deleting manifests, images, labels and blobs in an iterative + fashion.
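_chunk_iterate_for_deletion's contract above ("only works if the rows are being deleted between calls") can be made concrete with a small standalone sketch (plain lists instead of peewee queries; illustrative only):

    def chunked_for_deletion(fetch_chunk):
        # Repeatedly fetch the first N matching rows; since the consumer deletes
        # each yielded chunk, the same fetch eventually comes back empty.
        while True:
            results = fetch_chunk()
            if not results:
                return
            yield results

    rows = list(range(25))
    sizes = []
    for chunk in chunked_for_deletion(lambda: rows[:10]):
        sizes.append(len(chunk))
        del rows[: len(chunk)]  # the "deletion between calls" the docstring requires
    assert sizes == [10, 10, 5]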
+ """ has_changes = True while has_changes: diff --git a/data/model/image.py b/data/model/image.py index 72978b4af..48d74f02d 100644 --- a/data/model/image.py +++ b/data/model/image.py @@ -41,8 +41,9 @@ def _namespace_id_for_username(username): def get_image_with_storage(docker_image_id, storage_uuid): - """ Returns the image with the given docker image ID and storage uuid or None if none. - """ + """ + Returns the image with the given docker image ID and storage uuid or None if none. + """ try: return ( Image.select(Image, ImageStorage) @@ -55,9 +56,12 @@ def get_image_with_storage(docker_image_id, storage_uuid): def get_parent_images(namespace_name, repository_name, image_obj): - """ Returns a list of parent Image objects starting with the most recent parent - and ending with the base layer. The images in this query will include the storage. - """ + """ + Returns a list of parent Image objects starting with the most recent parent and ending with the + base layer. + + The images in this query will include the storage. + """ parents = image_obj.ancestors # Ancestors are in the format ///...//, with each path section @@ -81,7 +85,9 @@ def get_parent_images(namespace_name, repository_name, image_obj): def get_placements_for_images(images): - """ Returns the placements for the given images, as a map from image storage ID to placements. """ + """ + Returns the placements for the given images, as a map from image storage ID to placements. + """ if not images: return {} @@ -101,9 +107,10 @@ def get_placements_for_images(images): def get_image_and_placements(namespace_name, repo_name, docker_image_id): - """ Returns the repo image (with a storage object) and storage placements for the image - or (None, None) if non found. - """ + """ + Returns the repo image (with a storage object) and storage placements for the image or (None, + None) if non found. + """ repo_image = get_repo_image_and_storage(namespace_name, repo_name, docker_image_id) if repo_image is None: return (None, None) @@ -120,9 +127,11 @@ def get_image_and_placements(namespace_name, repo_name, docker_image_id): def get_repo_image(namespace_name, repository_name, docker_image_id): - """ Returns the repository image with the given Docker image ID or None if none. - Does not include the storage object. - """ + """ + Returns the repository image with the given Docker image ID or None if none. + + Does not include the storage object. + """ def limit_to_image_id(query): return query.where(Image.docker_image_id == docker_image_id).limit(1) @@ -135,9 +144,11 @@ def get_repo_image(namespace_name, repository_name, docker_image_id): def get_repo_image_and_storage(namespace_name, repository_name, docker_image_id): - """ Returns the repository image with the given Docker image ID or None if none. - Includes the storage object. - """ + """ + Returns the repository image with the given Docker image ID or None if none. + + Includes the storage object. + """ def limit_to_image_id(query): return query.where(Image.docker_image_id == docker_image_id) @@ -150,9 +161,11 @@ def get_repo_image_and_storage(namespace_name, repository_name, docker_image_id) def get_image_by_id(namespace_name, repository_name, docker_image_id): - """ Returns the repository image with the given Docker image ID or raises if not found. - Includes the storage object. - """ + """ + Returns the repository image with the given Docker image ID or raises if not found. + + Includes the storage object. 
+ """ image = get_repo_image_and_storage(namespace_name, repository_name, docker_image_id) if not image: raise InvalidImageException( @@ -209,7 +222,11 @@ def get_repository_images_without_placements(repo_obj, with_ancestor=None): def get_repository_images(namespace_name, repository_name): - """ Returns all the repository images in the repository. Does not include storage objects. """ + """ + Returns all the repository images in the repository. + + Does not include storage objects. + """ return _get_repository_images(namespace_name, repository_name, lambda q: q) @@ -351,8 +368,9 @@ def set_image_metadata( v1_json_metadata, parent=None, ): - """ Sets metadata that is specific to how a binary piece of storage fits into the layer tree. - """ + """ + Sets metadata that is specific to how a binary piece of storage fits into the layer tree. + """ with db_transaction(): try: fetched = ( @@ -423,9 +441,10 @@ def synthesize_v1_image( v1_json_metadata, parent_image=None, ): - """ Find an existing image with this docker image id, and if none exists, write one with the - specified metadata. - """ + """ + Find an existing image with this docker image id, and if none exists, write one with the + specified metadata. + """ ancestors = "/" if parent_image is not None: ancestors = "{0}{1}/".format(parent_image.ancestors, parent_image.id) @@ -477,27 +496,37 @@ def ensure_image_locations(*names): def get_max_id_for_sec_scan(): - """ Gets the maximum id for a clair sec scan """ + """ + Gets the maximum id for a clair sec scan. + """ return Image.select(fn.Max(Image.id)).scalar() def get_min_id_for_sec_scan(version): - """ Gets the minimum id for a clair sec scan """ + """ + Gets the minimum id for a clair sec scan. + """ return Image.select(fn.Min(Image.id)).where(Image.security_indexed_engine < version).scalar() def total_image_count(): - """ Returns the total number of images in DB """ + """ + Returns the total number of images in DB. + """ return Image.select().count() def get_image_pk_field(): - """ Returns the primary key for Image DB model """ + """ + Returns the primary key for Image DB model. + """ return Image.id def get_images_eligible_for_scan(clair_version): - """ Returns a query that gives all images eligible for a clair scan """ + """ + Returns a query that gives all images eligible for a clair scan. + """ return ( get_image_with_storage_and_parent_base() .where(Image.security_indexed_engine < clair_version) diff --git a/data/model/label.py b/data/model/label.py index e574a8723..4610787a5 100644 --- a/data/model/label.py +++ b/data/model/label.py @@ -51,7 +51,9 @@ def _get_media_type_id(name): def create_manifest_label(tag_manifest, key, value, source_type_name, media_type_name=None): - """ Creates a new manifest label on a specific tag manifest. """ + """ + Creates a new manifest label on a specific tag manifest. + """ if not key: raise InvalidLabelKeyException("Missing key on label") @@ -101,7 +103,9 @@ def create_manifest_label(tag_manifest, key, value, source_type_name, media_type def list_manifest_labels(tag_manifest, prefix_filter=None): - """ Lists all labels found on the given tag manifest. """ + """ + Lists all labels found on the given tag manifest. + """ query = ( Label.select(Label, MediaType) .join(MediaType) @@ -119,7 +123,9 @@ def list_manifest_labels(tag_manifest, prefix_filter=None): def get_manifest_label(label_uuid, tag_manifest): - """ Retrieves the manifest label on the tag manifest with the given ID. 
""" + """ + Retrieves the manifest label on the tag manifest with the given ID. + """ try: return ( Label.select(Label, LabelSourceType) @@ -135,7 +141,9 @@ def get_manifest_label(label_uuid, tag_manifest): def delete_manifest_label(label_uuid, tag_manifest): - """ Deletes the manifest label on the tag manifest with the given ID. """ + """ + Deletes the manifest label on the tag manifest with the given ID. + """ # Find the label itself. label = get_manifest_label(label_uuid, tag_manifest) diff --git a/data/model/log.py b/data/model/log.py index 54ff20ed0..93f19a2c4 100644 --- a/data/model/log.py +++ b/data/model/log.py @@ -27,7 +27,9 @@ def _logs_query( id_range=None, namespace_id=None, ): - """ Returns a query for selecting logs from the table, with various options and filters. """ + """ + Returns a query for selecting logs from the table, with various options and filters. + """ if namespace is not None: assert namespace_id is None @@ -75,8 +77,9 @@ def _latest_logs_query( model=LogEntry3, size=None, ): - """ Returns a query for selecting the latest logs from the table, with various options and - filters. """ + """ + Returns a query for selecting the latest logs from the table, with various options and filters. + """ query = model.select(*selections).switch(model) if repository: @@ -129,7 +132,9 @@ def get_aggregated_logs( ignore=None, model=LogEntry3, ): - """ Returns the count of logs, by kind and day, for the logs matching the given filters. """ + """ + Returns the count of logs, by kind and day, for the logs matching the given filters. + """ date = db.extract_date("day", model.datetime) selections = [model.kind, date.alias("day"), fn.Count(model.id).alias("count")] query = _logs_query( @@ -149,7 +154,9 @@ def get_logs_query( model=LogEntry3, id_range=None, ): - """ Returns the logs matching the given filters. """ + """ + Returns the logs matching the given filters. + """ Performer = User.alias() Account = User.alias() selections = [model, Performer] @@ -184,7 +191,9 @@ def get_logs_query( def get_latest_logs_query( performer=None, repository=None, namespace=None, ignore=None, model=LogEntry3, size=None ): - """ Returns the latest logs matching the given filters. """ + """ + Returns the latest logs matching the given filters. + """ Performer = User.alias() Account = User.alias() selections = [model, Performer] @@ -223,7 +232,9 @@ def log_action( metadata={}, timestamp=None, ): - """ Logs an entry in the LogEntry table. """ + """ + Logs an entry in the LogEntry table. + """ if not timestamp: timestamp = datetime.today() @@ -264,7 +275,9 @@ def log_action( def get_stale_logs_start_id(model): - """ Gets the oldest log entry. """ + """ + Gets the oldest log entry. + """ try: return (model.select(fn.Min(model.id)).tuples())[0][0] except IndexError: @@ -272,28 +285,35 @@ def get_stale_logs_start_id(model): def get_stale_logs(start_id, end_id, model, cutoff_date): - """ Returns all the logs with IDs between start_id and end_id inclusively. """ + """ + Returns all the logs with IDs between start_id and end_id inclusively. + """ return model.select().where( (model.id >= start_id), (model.id <= end_id), model.datetime <= cutoff_date ) def delete_stale_logs(start_id, end_id, model): - """ Deletes all the logs with IDs between start_id and end_id. """ + """ + Deletes all the logs with IDs between start_id and end_id. 
+ """ model.delete().where((model.id >= start_id), (model.id <= end_id)).execute() def get_repository_action_counts(repo, start_date): - """ Returns the daily aggregated action counts for the given repository, starting at the given - start date. - """ + """ + Returns the daily aggregated action counts for the given repository, starting at the given start + date. + """ return RepositoryActionCount.select().where( RepositoryActionCount.repository == repo, RepositoryActionCount.date >= start_date ) def get_repositories_action_sums(repository_ids): - """ Returns a map from repository ID to total actions within that repository in the last week. """ + """ + Returns a map from repository ID to total actions within that repository in the last week. + """ if not repository_ids: return {} @@ -317,9 +337,10 @@ def get_repositories_action_sums(repository_ids): def get_minimum_id_for_logs(start_time, repository_id=None, namespace_id=None, model=LogEntry3): - """ Returns the minimum ID for logs matching the given repository or namespace in - the logs table, starting at the given start time. - """ + """ + Returns the minimum ID for logs matching the given repository or namespace in the logs table, + starting at the given start time. + """ # First try bounded by a day. Most repositories will meet this criteria, and therefore # can make a much faster query. day_after = start_time + timedelta(days=1) @@ -340,9 +361,10 @@ def get_minimum_id_for_logs(start_time, repository_id=None, namespace_id=None, m def get_maximum_id_for_logs(end_time, repository_id=None, namespace_id=None, model=LogEntry3): - """ Returns the maximum ID for logs matching the given repository or namespace in - the logs table, ending at the given end time. - """ + """ + Returns the maximum ID for logs matching the given repository or namespace in the logs table, + ending at the given end time. + """ # First try bounded by a day. Most repositories will meet this criteria, and therefore # can make a much faster query. day_before = end_time - timedelta(days=1) diff --git a/data/model/message.py b/data/model/message.py index 8425f4f8d..b7eeffa48 100644 --- a/data/model/message.py +++ b/data/model/message.py @@ -2,12 +2,16 @@ from data.database import Messages, MediaType def get_messages(): - """Query the data base for messages and returns a container of database message objects""" + """ + Query the data base for messages and returns a container of database message objects. + """ return Messages.select(Messages, MediaType).join(MediaType) def create(messages): - """Insert messages into the database.""" + """ + Insert messages into the database. + """ inserted = [] for message in messages: severity = message["severity"] @@ -21,7 +25,9 @@ def create(messages): def delete_message(uuids): - """Delete message from the database""" + """ + Delete message from the database. + """ if not uuids: return Messages.delete().where(Messages.uuid << uuids).execute() diff --git a/data/model/modelutil.py b/data/model/modelutil.py index 52830fc39..e948ce791 100644 --- a/data/model/modelutil.py +++ b/data/model/modelutil.py @@ -15,11 +15,12 @@ def paginate( max_page=None, sort_field_name=None, ): - """ Paginates the given query using an field range, starting at the optional page_token. - Returns a *list* of matching results along with an unencrypted page_token for the - next page, if any. If descending is set to True, orders by the field descending rather - than ascending. 
- """ + """ + Paginates the given query using an field range, starting at the optional page_token. + + Returns a *list* of matching results along with an unencrypted page_token for the next page, if + any. If descending is set to True, orders by the field descending rather than ascending. + """ # Note: We use the sort_field_alias for the order_by, but not the where below. The alias is # necessary for certain queries that use unions in MySQL, as it gets confused on which field # to order by. The where clause, on the other hand, cannot use the alias because Postgres does @@ -55,7 +56,11 @@ def paginate( def pagination_start(page_token=None): - """ Returns the start index for pagination for the given page token. Will return None if None. """ + """ + Returns the start index for pagination for the given page token. + + Will return None if None. + """ if page_token is not None: start_index = page_token.get("start_index") if page_token.get("is_datetime"): @@ -65,9 +70,10 @@ def pagination_start(page_token=None): def paginate_query(query, limit=50, sort_field_name=None, page_number=None): - """ Executes the given query and returns a page's worth of results, as well as the page token - for the next page (if any). - """ + """ + Executes the given query and returns a page's worth of results, as well as the page token for + the next page (if any). + """ results = list(query) page_token = None if len(results) > limit: diff --git a/data/model/notification.py b/data/model/notification.py index dd826465e..95bd5aa94 100644 --- a/data/model/notification.py +++ b/data/model/notification.py @@ -144,7 +144,9 @@ def delete_matching_notifications(target, kind_name, **kwargs): def increment_notification_failure_count(uuid): - """ This increments the number of failures by one """ + """ + This increments the number of failures by one. + """ ( RepositoryNotification.update( number_of_failures=RepositoryNotification.number_of_failures + 1 @@ -155,7 +157,9 @@ def increment_notification_failure_count(uuid): def reset_notification_number_of_failures(namespace_name, repository_name, uuid): - """ This resets the number of failures for a repo notification to 0 """ + """ + This resets the number of failures for a repo notification to 0. + """ try: notification = ( RepositoryNotification.select().where(RepositoryNotification.uuid == uuid).get() @@ -174,7 +178,9 @@ def reset_notification_number_of_failures(namespace_name, repository_name, uuid) def reset_number_of_failures_to_zero(notification_id): - """ This resets the number of failures for a repo notification to 0 """ + """ + This resets the number of failures for a repo notification to 0. + """ RepositoryNotification.update(number_of_failures=0).where( RepositoryNotification.id == notification_id ).execute() @@ -197,7 +203,9 @@ def create_repo_notification( def _base_get_notification(uuid): - """ This is a base query for get statements """ + """ + This is a base query for get statements. + """ return ( RepositoryNotification.select(RepositoryNotification, Repository, Namespace) .join(Repository) @@ -207,7 +215,9 @@ def _base_get_notification(uuid): def get_enabled_notification(uuid): - """ This returns a notification with less than 3 failures """ + """ + This returns a notification with less than 3 failures. 
+ """ try: return ( _base_get_notification(uuid).where(RepositoryNotification.number_of_failures < 3).get() diff --git a/data/model/oauth.py b/data/model/oauth.py index e0cbc1adc..8b4c34be3 100644 --- a/data/model/oauth.py +++ b/data/model/oauth.py @@ -35,8 +35,9 @@ class DatabaseAuthorizationProvider(AuthorizationProvider): @property def token_expires_in(self): - """Property method to get the token expiration time in seconds. - """ + """ + Property method to get the token expiration time in seconds. + """ return int(60 * 60 * 24 * 365.25 * 10) # 10 Years def validate_client_id(self, client_id): diff --git a/data/model/oci/blob.py b/data/model/oci/blob.py index 6d04ff561..2d4f789ba 100644 --- a/data/model/oci/blob.py +++ b/data/model/oci/blob.py @@ -5,9 +5,10 @@ from data.model.blob import get_repository_blob_by_digest as legacy_get def get_repository_blob_by_digest(repository, blob_digest): - """ Find the content-addressable blob linked to the specified repository and - returns it or None if none. - """ + """ + Find the content-addressable blob linked to the specified repository and returns it or None if + none. + """ try: storage = ( ImageStorage.select(ImageStorage.uuid) diff --git a/data/model/oci/label.py b/data/model/oci/label.py index 515c86a3b..b27437a0c 100644 --- a/data/model/oci/label.py +++ b/data/model/oci/label.py @@ -23,7 +23,9 @@ logger = logging.getLogger(__name__) def list_manifest_labels(manifest_id, prefix_filter=None): - """ Lists all labels found on the given manifest, with an optional filter by key prefix. """ + """ + Lists all labels found on the given manifest, with an optional filter by key prefix. + """ query = ( Label.select(Label, MediaType) .join(MediaType) @@ -41,7 +43,9 @@ def list_manifest_labels(manifest_id, prefix_filter=None): def get_manifest_label(label_uuid, manifest): - """ Retrieves the manifest label on the manifest with the given UUID or None if none. """ + """ + Retrieves the manifest label on the manifest with the given UUID or None if none. + """ try: return ( Label.select(Label, LabelSourceType) @@ -59,7 +63,9 @@ def get_manifest_label(label_uuid, manifest): def create_manifest_label( manifest_id, key, value, source_type_name, media_type_name=None, adjust_old_model=True ): - """ Creates a new manifest label on a specific tag manifest. """ + """ + Creates a new manifest label on a specific tag manifest. + """ if not key: raise InvalidLabelKeyException("Missing key on label") @@ -134,9 +140,11 @@ def create_manifest_label( def delete_manifest_label(label_uuid, manifest): - """ Deletes the manifest label on the tag manifest with the given ID. Returns the label deleted - or None if none. - """ + """ + Deletes the manifest label on the tag manifest with the given ID. + + Returns the label deleted or None if none. + """ # Find the label itself. label = get_manifest_label(label_uuid, manifest) if label is None: diff --git a/data/model/oci/manifest.py b/data/model/oci/manifest.py index 56fa10efb..155a5a25e 100644 --- a/data/model/oci/manifest.py +++ b/data/model/oci/manifest.py @@ -34,8 +34,9 @@ CreatedManifest = namedtuple("CreatedManifest", ["manifest", "newly_created", "l class CreateManifestException(Exception): - """ Exception raised when creating a manifest fails and explicit exception - raising is requested. """ + """ + Exception raised when creating a manifest fails and explicit exception raising is requested. 
+ """ def lookup_manifest( @@ -45,11 +46,13 @@ def lookup_manifest( require_available=False, temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC, ): - """ Returns the manifest with the specified digest under the specified repository - or None if none. If allow_dead is True, then manifests referenced by only - dead tags will also be returned. If require_available is True, the manifest - will be marked with a temporary tag to ensure it remains available. - """ + """ + Returns the manifest with the specified digest under the specified repository or None if none. + + If allow_dead is True, then manifests referenced by only dead tags will also be returned. If + require_available is True, the manifest will be marked with a temporary tag to ensure it remains + available. + """ if not require_available: return _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead) @@ -103,16 +106,17 @@ def get_or_create_manifest( raise_on_error=False, retriever=None, ): - """ Returns a CreatedManifest for the manifest in the specified repository with the matching - digest (if it already exists) or, if not yet created, creates and returns the manifest. + """ + Returns a CreatedManifest for the manifest in the specified repository with the matching digest + (if it already exists) or, if not yet created, creates and returns the manifest. - Returns None if there was an error creating the manifest, unless raise_on_error is specified, - in which case a CreateManifestException exception will be raised instead to provide more - context to the error. + Returns None if there was an error creating the manifest, unless raise_on_error is specified, + in which case a CreateManifestException exception will be raised instead to provide more + context to the error. - Note that *all* blobs referenced by the manifest must exist already in the repository or this - method will fail with a None. - """ + Note that *all* blobs referenced by the manifest must exist already in the repository or this + method will fail with a None. + """ existing = lookup_manifest( repository_id, manifest_interface_instance.digest, diff --git a/data/model/oci/retriever.py b/data/model/oci/retriever.py index c6daf23f4..5118ce02c 100644 --- a/data/model/oci/retriever.py +++ b/data/model/oci/retriever.py @@ -10,9 +10,10 @@ RETRY_DELAY = 0.3 # seconds class RepositoryContentRetriever(ContentRetriever): - """ Implementation of the ContentRetriever interface for manifests that retrieves - config blobs and child manifests for the specified repository. - """ + """ + Implementation of the ContentRetriever interface for manifests that retrieves config blobs and + child manifests for the specified repository. + """ def __init__(self, repository_id, storage): self.repository_id = repository_id @@ -23,7 +24,9 @@ class RepositoryContentRetriever(ContentRetriever): return RepositoryContentRetriever(repository_id, storage) def get_manifest_bytes_with_digest(self, digest): - """ Returns the bytes of the manifest with the given digest or None if none found. """ + """ + Returns the bytes of the manifest with the given digest or None if none found. + """ query = ( Manifest.select() .where(Manifest.repository == self.repository_id) @@ -36,7 +39,9 @@ class RepositoryContentRetriever(ContentRetriever): return None def get_blob_bytes_with_digest(self, digest): - """ Returns the bytes of the blob with the given digest or None if none found. """ + """ + Returns the bytes of the blob with the given digest or None if none found. 
+ """ blob = get_repository_blob_by_digest(self.repository_id, digest) if blob is None: return None diff --git a/data/model/oci/shared.py b/data/model/oci/shared.py index 6bf59c18b..19049f10c 100644 --- a/data/model/oci/shared.py +++ b/data/model/oci/shared.py @@ -2,7 +2,9 @@ from data.database import Manifest, ManifestLegacyImage, Image def get_legacy_image_for_manifest(manifest_id): - """ Returns the legacy image associated with the given manifest, if any, or None if none. """ + """ + Returns the legacy image associated with the given manifest, if any, or None if none. + """ try: query = ( ManifestLegacyImage.select(ManifestLegacyImage, Image) @@ -15,7 +17,9 @@ def get_legacy_image_for_manifest(manifest_id): def get_manifest_for_legacy_image(image_id): - """ Returns a manifest that is associated with the given image, if any, or None if none. """ + """ + Returns a manifest that is associated with the given image, if any, or None if none. + """ try: query = ( ManifestLegacyImage.select(ManifestLegacyImage, Manifest) diff --git a/data/model/oci/tag.py b/data/model/oci/tag.py index 4d4d9bfcf..3d3c14969 100644 --- a/data/model/oci/tag.py +++ b/data/model/oci/tag.py @@ -36,7 +36,9 @@ logger = logging.getLogger(__name__) def get_tag_by_id(tag_id): - """ Returns the tag with the given ID, joined with its manifest or None if none. """ + """ + Returns the tag with the given ID, joined with its manifest or None if none. + """ try: return Tag.select(Tag, Manifest).join(Manifest).where(Tag.id == tag_id).get() except Tag.DoesNotExist: @@ -44,9 +46,12 @@ def get_tag_by_id(tag_id): def get_tag(repository_id, tag_name): - """ Returns the alive, non-hidden tag with the given name under the specified repository or - None if none. The tag is returned joined with its manifest. - """ + """ + Returns the alive, non-hidden tag with the given name under the specified repository or None if + none. + + The tag is returned joined with its manifest. + """ query = ( Tag.select(Tag, Manifest) .join(Manifest) @@ -65,9 +70,11 @@ def get_tag(repository_id, tag_name): def lookup_alive_tags_shallow(repository_id, start_pagination_id=None, limit=None): - """ Returns a list of the tags alive in the specified repository. Note that the tags returned - *only* contain their ID and name. Also note that the Tags are returned ordered by ID. - """ + """ + Returns a list of the tags alive in the specified repository. Note that the tags returned. + + *only* contain their ID and name. Also note that the Tags are returned ordered by ID. + """ query = Tag.select(Tag.id, Tag.name).where(Tag.repository == repository_id).order_by(Tag.id) if start_pagination_id is not None: @@ -80,9 +87,11 @@ def lookup_alive_tags_shallow(repository_id, start_pagination_id=None, limit=Non def list_alive_tags(repository_id): - """ Returns a list of all the tags alive in the specified repository. - Tag's returned are joined with their manifest. - """ + """ + Returns a list of all the tags alive in the specified repository. + + Tag's returned are joined with their manifest. + """ query = Tag.select(Tag, Manifest).join(Manifest).where(Tag.repository == repository_id) return filter_to_alive_tags(query) @@ -96,13 +105,14 @@ def list_repository_tag_history( active_tags_only=False, since_time_ms=None, ): - """ Returns a tuple of the full set of tags found in the specified repository, including those - that are no longer alive (unless active_tags_only is True), and whether additional tags exist. - If specific_tag_name is given, the tags are further filtered by name. 
If since is given, tags - are further filtered to newer than that date. + """ + Returns a tuple of the full set of tags found in the specified repository, including those that + are no longer alive (unless active_tags_only is True), and whether additional tags exist. If + specific_tag_name is given, the tags are further filtered by name. If since is given, tags are + further filtered to newer than that date. - Note that the returned Manifest will not contain the manifest contents. - """ + Note that the returned Manifest will not contain the manifest contents. + """ query = ( Tag.select(Tag, Manifest.id, Manifest.digest, Manifest.media_type) .join(Manifest) @@ -130,7 +140,9 @@ def list_repository_tag_history( def get_legacy_images_for_tags(tags): - """ Returns a map from tag ID to the legacy image for the tag. """ + """ + Returns a map from tag ID to the legacy image for the tag. + """ if not tags: return {} @@ -146,9 +158,12 @@ def get_legacy_images_for_tags(tags): def find_matching_tag(repository_id, tag_names, tag_kinds=None): - """ Finds an alive tag in the specified repository with one of the specified tag names and - returns it or None if none. Tag's returned are joined with their manifest. - """ + """ + Finds an alive tag in the specified repository with one of the specified tag names and returns + it or None if none. + + Tags returned are joined with their manifest. + """ assert repository_id assert tag_names @@ -171,9 +186,10 @@ def find_matching_tag(repository_id, tag_names, tag_kinds=None): def get_most_recent_tag_lifetime_start(repository_ids): - """ Returns a map from repo ID to the timestamp of the most recently pushed alive tag - for each specified repository or None if none. - """ + """ + Returns a map from repo ID to the timestamp of the most recently pushed alive tag for each + specified repository or None if none. + """ assert len(repository_ids) > 0 and None not in repository_ids query = ( @@ -187,9 +203,10 @@ def get_most_recent_tag_lifetime_start(repository_ids): def get_most_recent_tag(repository_id): - """ Returns the most recently pushed alive tag in the specified repository or None if none. - The Tag returned is joined with its manifest. - """ + """ + Returns the most recently pushed alive tag in the specified repository or None if none. + + The Tag returned is joined with its manifest. + """ assert repository_id query = ( @@ -208,8 +226,9 @@ def get_most_recent_tag(repository_id): def get_expired_tag(repository_id, tag_name): - """ Returns a tag with the given name that is expired in the repository or None if none. - """ + """ + Returns a tag with the given name that is expired in the repository or None if none. + """ try: return ( Tag.select() @@ -223,9 +242,10 @@ def get_expired_tag(repository_id, tag_name): def create_temporary_tag_if_necessary(manifest, expiration_sec): - """ Creates a temporary tag pointing to the given manifest, with the given expiration in seconds, - unless there is an existing tag that will keep the manifest around. - """ + """ + Creates a temporary tag pointing to the given manifest, with the given expiration in seconds, + unless there is an existing tag that will keep the manifest around.
+ """ tag_name = "$temp-%s" % str(uuid.uuid4()) now_ms = get_epoch_timestamp_ms() end_ms = now_ms + (expiration_sec * 1000) @@ -259,10 +279,13 @@ def create_temporary_tag_if_necessary(manifest, expiration_sec): def retarget_tag(tag_name, manifest_id, is_reversion=False, now_ms=None, adjust_old_model=True): - """ Creates or updates a tag with the specified name to point to the given manifest under - its repository. If this action is a reversion to a previous manifest, is_reversion - should be set to True. Returns the newly created tag row or None on error. - """ + """ + Creates or updates a tag with the specified name to point to the given manifest under its + repository. + + If this action is a reversion to a previous manifest, is_reversion should be set to True. + Returns the newly created tag row or None on error. + """ try: manifest = ( Manifest.select(Manifest, MediaType) @@ -346,9 +369,12 @@ def retarget_tag(tag_name, manifest_id, is_reversion=False, now_ms=None, adjust_ def delete_tag(repository_id, tag_name): - """ Deletes the alive tag with the given name in the specified repository and returns the deleted - tag. If the tag did not exist, returns None. - """ + """ + Deletes the alive tag with the given name in the specified repository and returns the deleted + tag. + + If the tag did not exist, returns None. + """ tag = get_tag(repository_id, tag_name) if tag is None: return None @@ -357,7 +383,9 @@ def delete_tag(repository_id, tag_name): def _delete_tag(tag, now_ms): - """ Deletes the given tag by marking it as expired. """ + """ + Deletes the given tag by marking it as expired. + """ now_ts = int(now_ms / 1000) with db_transaction(): @@ -387,9 +415,11 @@ def _delete_tag(tag, now_ms): def delete_tags_for_manifest(manifest): - """ Deletes all tags pointing to the given manifest. Returns the list of tags - deleted. - """ + """ + Deletes all tags pointing to the given manifest. + + Returns the list of tags deleted. + """ query = Tag.select().where(Tag.manifest == manifest) query = filter_to_alive_tags(query) query = filter_to_visible_tags(query) @@ -405,16 +435,19 @@ def delete_tags_for_manifest(manifest): def filter_to_visible_tags(query): - """ Adjusts the specified Tag query to only return those tags that are visible. - """ + """ + Adjusts the specified Tag query to only return those tags that are visible. + """ return query.where(Tag.hidden == False) def filter_to_alive_tags(query, now_ms=None, model=Tag): - """ Adjusts the specified Tag query to only return those tags alive. If now_ms is specified, - the given timestamp (in MS) is used in place of the current timestamp for determining wherther - a tag is alive. - """ + """ + Adjusts the specified Tag query to only return those tags alive. + + If now_ms is specified, the given timestamp (in MS) is used in place of the current timestamp + for determining wherther a tag is alive. + """ if now_ms is None: now_ms = get_epoch_timestamp_ms() @@ -424,7 +457,9 @@ def filter_to_alive_tags(query, now_ms=None, model=Tag): def set_tag_expiration_sec_for_manifest(manifest_id, expiration_seconds): - """ Sets the tag expiration for any tags that point to the given manifest ID. """ + """ + Sets the tag expiration for any tags that point to the given manifest ID. 
+ """ query = Tag.select().where(Tag.manifest == manifest_id) query = filter_to_alive_tags(query) tags = list(query) @@ -436,7 +471,9 @@ def set_tag_expiration_sec_for_manifest(manifest_id, expiration_seconds): def set_tag_expiration_for_manifest(manifest_id, expiration_datetime): - """ Sets the tag expiration for any tags that point to the given manifest ID. """ + """ + Sets the tag expiration for any tags that point to the given manifest ID. + """ query = Tag.select().where(Tag.manifest == manifest_id) query = filter_to_alive_tags(query) tags = list(query) @@ -448,11 +485,12 @@ def set_tag_expiration_for_manifest(manifest_id, expiration_datetime): def change_tag_expiration(tag_id, expiration_datetime): - """ Changes the expiration of the specified tag to the given expiration datetime. If - the expiration datetime is None, then the tag is marked as not expiring. Returns - a tuple of the previous expiration timestamp in seconds (if any), and whether the - operation succeeded. - """ + """ + Changes the expiration of the specified tag to the given expiration datetime. + + If the expiration datetime is None, then the tag is marked as not expiring. Returns a tuple of + the previous expiration timestamp in seconds (if any), and whether the operation succeeded. + """ try: tag = Tag.get(id=tag_id) except Tag.DoesNotExist: @@ -478,8 +516,9 @@ def change_tag_expiration(tag_id, expiration_datetime): def lookup_unrecoverable_tags(repo): - """ Returns the tags in a repository that are expired and past their time machine recovery - period. """ + """ + Returns the tags in a repository that are expired and past their time machine recovery period. + """ expired_clause = get_epoch_timestamp_ms() - (Namespace.removed_tag_expiration_s * 1000) return ( Tag.select() @@ -491,9 +530,11 @@ def lookup_unrecoverable_tags(repo): def set_tag_end_ms(tag, end_ms): - """ Sets the end timestamp for a tag. Should only be called by change_tag_expiration - or tests. - """ + """ + Sets the end timestamp for a tag. + + Should only be called by change_tag_expiration or tests. + """ with db_transaction(): updated = ( @@ -523,9 +564,10 @@ def set_tag_end_ms(tag, end_ms): def tags_containing_legacy_image(image): - """ Yields all alive Tags containing the given image as a legacy image, somewhere in its - legacy image hierarchy. - """ + """ + Yields all alive Tags containing the given image as a legacy image, somewhere in its legacy + image hierarchy. + """ ancestors_str = "%s%s/%%" % (image.ancestors, image.id) tags = ( Tag.select() @@ -542,9 +584,10 @@ def tags_containing_legacy_image(image): def lookup_notifiable_tags_for_legacy_image(docker_image_id, storage_uuid, event_name): - """ Yields any alive Tags found in repositories with an event with the given name registered - and whose legacy Image has the given docker image ID and storage UUID. - """ + """ + Yields any alive Tags found in repositories with an event with the given name registered and + whose legacy Image has the given docker image ID and storage UUID. + """ event = ExternalNotificationEvent.get(name=event_name) images = ( Image.select() diff --git a/data/model/organization.py b/data/model/organization.py index d44bfb9ec..48c39ed6d 100644 --- a/data/model/organization.py +++ b/data/model/organization.py @@ -95,7 +95,9 @@ def __get_org_admin_users(org): def get_admin_users(org): - """ Returns the owner users for the organization. """ + """ + Returns the owner users for the organization. 
+ """ return __get_org_admin_users(org) @@ -127,9 +129,10 @@ def remove_organization_member(org, user_obj): def get_organization_member_set(org, include_robots=False, users_filter=None): - """ Returns the set of all member usernames under the given organization, with optional - filtering by robots and/or by a specific set of User objects. - """ + """ + Returns the set of all member usernames under the given organization, with optional filtering by + robots and/or by a specific set of User objects. + """ Org = User.alias() org_users = ( User.select(User.username) diff --git a/data/model/permission.py b/data/model/permission.py index 8ddf0d238..697e742f9 100644 --- a/data/model/permission.py +++ b/data/model/permission.py @@ -240,7 +240,9 @@ def __apply_permission_list(repo, proto_query, name_property, create_permission_ def __entity_permission_repo_query( entity_id, entity_table, entity_id_property, namespace_name, repository_name ): - """ This method works for both users and teams. """ + """ + This method works for both users and teams. + """ return ( RepositoryPermission.select(entity_table, Repository, Namespace, Role, RepositoryPermission) diff --git a/data/model/repo_mirror.py b/data/model/repo_mirror.py index 890a4e52b..ddd846ccf 100644 --- a/data/model/repo_mirror.py +++ b/data/model/repo_mirror.py @@ -27,11 +27,13 @@ MAX_SYNC_DURATION = 60 * 60 * 2 # 2 Hours def get_eligible_mirrors(): """ - Returns the RepoMirrorConfig that are ready to run now. This includes those that are: - 1. Not currently syncing but whose start time is in the past - 2. Status of "sync now" - 3. Currently marked as syncing but whose expiration time is in the past - """ + Returns the RepoMirrorConfig that are ready to run now. + + This includes those that are: + 1. Not currently syncing but whose start time is in the past + 2. Status of "sync now" + 3. Currently marked as syncing but whose expiration time is in the past + """ now = datetime.utcnow() immediate_candidates_filter = (RepoMirrorConfig.sync_status == RepoMirrorStatus.SYNC_NOW) & ( RepoMirrorConfig.sync_expiration_date >> None @@ -63,20 +65,25 @@ def get_eligible_mirrors(): def get_max_id_for_repo_mirror_config(): - """ Gets the maximum id for repository mirroring """ + """ + Gets the maximum id for repository mirroring. + """ return RepoMirrorConfig.select(fn.Max(RepoMirrorConfig.id)).scalar() def get_min_id_for_repo_mirror_config(): - """ Gets the minimum id for a repository mirroring """ + """ + Gets the minimum id for a repository mirroring. + """ return RepoMirrorConfig.select(fn.Min(RepoMirrorConfig.id)).scalar() def claim_mirror(mirror): """ - Attempt to create an exclusive lock on the RepoMirrorConfig and return it. - If unable to create the lock, `None` will be returned. - """ + Attempt to create an exclusive lock on the RepoMirrorConfig and return it. + + If unable to create the lock, `None` will be returned. + """ # Attempt to update the RepoMirrorConfig to mark it as "claimed" now = datetime.utcnow() @@ -99,14 +106,13 @@ def claim_mirror(mirror): def release_mirror(mirror, sync_status): """ - Return a mirror to the queue and update its status. + Return a mirror to the queue and update its status. - Upon success, move next sync to be at the next interval in the future. Failures remain with - current date to ensure they are picked up for repeat attempt. After MAX_SYNC_RETRIES, - the next sync will be moved ahead as if it were a success. This is to allow a daily sync, - for example, to retry the next day. 
Without this, users would need to manually run syncs
-    to clear failure state.
-    """
+    Upon success, move next sync to be at the next interval in the future. Failures remain with
+    current date to ensure they are picked up for repeat attempt. After MAX_SYNC_RETRIES, the next
+    sync will be moved ahead as if it were a success. This is to allow a daily sync, for example, to
+    retry the next day. Without this, users would need to manually run syncs to clear failure state.
+    """

     if sync_status == RepoMirrorStatus.FAIL:
         retries = max(0, mirror.sync_retries_remaining - 1)

@@ -141,8 +147,8 @@ def release_mirror(mirror, sync_status):

 def expire_mirror(mirror):
     """
-    Set the mirror to synchronize ASAP and reset its failure count.
-    """
+    Set the mirror to synchronize ASAP and reset its failure count.
+    """

     # Set the next-sync date to now
     # TODO: Verify the `where` conditions would not expire a currently syncing mirror.
@@ -166,8 +172,8 @@ def expire_mirror(mirror):

 def create_mirroring_rule(repository, rule_value, rule_type=RepoMirrorRuleType.TAG_GLOB_CSV):
     """
-    Create a RepoMirrorRule for a given Repository.
-    """
+    Create a RepoMirrorRule for a given Repository.
+    """
     if rule_type != RepoMirrorRuleType.TAG_GLOB_CSV:
         raise ValidationError("validation failed: rule_type must be TAG_GLOB_CSV")

@@ -194,8 +200,8 @@ def enable_mirroring_for_repository(
     sync_start_date=None,
 ):
     """
-    Create a RepoMirrorConfig and set the Repository to the MIRROR state.
-    """
+    Create a RepoMirrorConfig and set the Repository to the MIRROR state.
+    """
     assert internal_robot.robot

     namespace, _ = parse_robot_username(internal_robot.username)
@@ -239,8 +245,8 @@ def enable_mirroring_for_repository(

 def update_sync_status(mirror, sync_status):
     """
-    Update the sync status
-    """
+    Update the sync status.
+    """
     query = RepoMirrorConfig.update(
         sync_transaction_id=uuid_generator(), sync_status=sync_status
     ).where(
@@ -255,10 +261,12 @@ def update_sync_status(mirror, sync_status):

 def update_sync_status_to_sync_now(mirror):
     """
-    This will change the sync status to SYNC_NOW and set the retries remaining to one, if it is
-    less than one. None will be returned in cases where this is not possible, such as if the
-    mirror is in the SYNCING state.
-    """
+    This will change the sync status to SYNC_NOW and set the retries remaining to one, if it is less
+    than one.
+
+    None will be returned in cases where this is not possible, such as if the mirror is in the
+    SYNCING state.
+    """
     if mirror.sync_status == RepoMirrorStatus.SYNCING:
         return None

@@ -283,10 +291,12 @@ def update_sync_status_to_sync_now(mirror):

 def update_sync_status_to_cancel(mirror):
     """
-    If the mirror is SYNCING, it will be force-claimed (ignoring existing transaction id), and the
-    state will set to NEVER_RUN. None will be returned in cases where this is not possible, such
-    as if the mirror is not in the SYNCING state.
-    """
+    If the mirror is SYNCING, it will be force-claimed (ignoring existing transaction id), and the
+    state will be set to NEVER_RUN.
+
+    None will be returned in cases where this is not possible, such as if the mirror is not in the
+    SYNCING state.
+    """

     if (
         mirror.sync_status != RepoMirrorStatus.SYNCING
@@ -308,9 +318,9 @@ def update_sync_status_to_cancel(mirror):

 def update_with_transaction(mirror, **kwargs):
     """
-    Helper function which updates a Repository's RepoMirrorConfig while also rolling its
-    sync_transaction_id for locking purposes.
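Taken together, `get_eligible_mirrors`, `claim_mirror`, and `release_mirror` form an optimistic work queue. A sketch of one worker pass under that reading; `perform_sync` and the `SUCCESS` status name are assumptions not shown in this diff.

```python
from data.database import RepoMirrorStatus
from data.model.repo_mirror import claim_mirror, get_eligible_mirrors, release_mirror


def run_mirror_pass():
    for candidate in get_eligible_mirrors():
        mirror = claim_mirror(candidate)
        if mirror is None:
            continue  # another worker claimed it first; the lock is optimistic

        try:
            perform_sync(mirror)  # hypothetical: the actual image-copy routine
            release_mirror(mirror, RepoMirrorStatus.SUCCESS)
        except Exception:
            # Failures keep the current start date so the mirror is retried,
            # per the release_mirror docstring above.
            release_mirror(mirror, RepoMirrorStatus.FAIL)
```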
- """ + Helper function which updates a Repository's RepoMirrorConfig while also rolling its + sync_transaction_id for locking purposes. + """ # RepoMirrorConfig attributes which can be modified mutable_attributes = ( @@ -349,8 +359,8 @@ def update_with_transaction(mirror, **kwargs): def get_mirror(repository): """ - Return the RepoMirrorConfig associated with the given Repository, or None if it doesn't exist. - """ + Return the RepoMirrorConfig associated with the given Repository, or None if it doesn't exist. + """ try: return RepoMirrorConfig.get(repository=repository) except RepoMirrorConfig.DoesNotExist: @@ -359,31 +369,31 @@ def get_mirror(repository): def enable_mirror(repository): """ - Enables a RepoMirrorConfig. - """ + Enables a RepoMirrorConfig. + """ mirror = get_mirror(repository) return bool(update_with_transaction(mirror, is_enabled=True)) def disable_mirror(repository): """ - Disables a RepoMirrorConfig. - """ + Disables a RepoMirrorConfig. + """ mirror = get_mirror(repository) return bool(update_with_transaction(mirror, is_enabled=False)) def delete_mirror(repository): """ - Delete a Repository Mirroring configuration. - """ + Delete a Repository Mirroring configuration. + """ raise NotImplementedError("TODO: Not Implemented") def change_remote(repository, remote_repository): """ - Update the external repository for Repository Mirroring. - """ + Update the external repository for Repository Mirroring. + """ mirror = get_mirror(repository) updates = {"external_reference": remote_repository} return bool(update_with_transaction(mirror, **updates)) @@ -391,8 +401,8 @@ def change_remote(repository, remote_repository): def change_credentials(repository, username, password): """ - Update the credentials used to access the remote repository. - """ + Update the credentials used to access the remote repository. + """ mirror = get_mirror(repository) updates = { "external_registry_username": username, @@ -403,32 +413,32 @@ def change_credentials(repository, username, password): def change_username(repository, username): """ - Update the Username used to access the external repository. - """ + Update the Username used to access the external repository. + """ mirror = get_mirror(repository) return bool(update_with_transaction(mirror, external_registry_username=username)) def change_sync_interval(repository, interval): """ - Update the interval at which a repository will be synchronized. - """ + Update the interval at which a repository will be synchronized. + """ mirror = get_mirror(repository) return bool(update_with_transaction(mirror, sync_interval=interval)) def change_sync_start_date(repository, dt): """ - Specify when the repository should be synchronized next. - """ + Specify when the repository should be synchronized next. + """ mirror = get_mirror(repository) return bool(update_with_transaction(mirror, sync_start_date=dt)) def change_root_rule(repository, rule): """ - Specify which rule should be used for repository mirroring. - """ + Specify which rule should be used for repository mirroring. + """ assert rule.repository == repository mirror = get_mirror(repository) return bool(update_with_transaction(mirror, root_rule=rule)) @@ -436,26 +446,28 @@ def change_root_rule(repository, rule): def change_sync_status(repository, sync_status): """ - Change Repository's mirroring status. - """ + Change Repository's mirroring status. 
+ """ mirror = get_mirror(repository) return update_with_transaction(mirror, sync_status=sync_status) def change_retries_remaining(repository, retries_remaining): """ - Change the number of retries remaining for mirroring a repository. - """ + Change the number of retries remaining for mirroring a repository. + """ mirror = get_mirror(repository) return update_with_transaction(mirror, sync_retries_remaining=retries_remaining) def change_external_registry_config(repository, config_updates): """ - Update the 'external_registry_config' with the passed in fields. Config has: - verify_tls: True|False - proxy: JSON fields 'http_proxy', 'https_proxy', andn 'no_proxy' - """ + Update the 'external_registry_config' with the passed in fields. + + Config has: + verify_tls: True|False + proxy: JSON fields 'http_proxy', 'https_proxy', andn 'no_proxy' + """ mirror = get_mirror(repository) external_registry_config = mirror.external_registry_config @@ -476,9 +488,11 @@ def change_external_registry_config(repository, config_updates): def get_mirroring_robot(repository): """ - Return the robot used for mirroring. Returns None if the repository does not have an associated - RepoMirrorConfig or the robot does not exist. - """ + Return the robot used for mirroring. + + Returns None if the repository does not have an associated RepoMirrorConfig or the robot does + not exist. + """ mirror = get_mirror(repository) if mirror: return mirror.internal_robot @@ -488,8 +502,8 @@ def get_mirroring_robot(repository): def set_mirroring_robot(repository, robot): """ - Sets the mirroring robot for the repository. - """ + Sets the mirroring robot for the repository. + """ assert robot.robot namespace, _ = parse_robot_username(robot.username) if namespace != repository.namespace_user.username: @@ -521,8 +535,8 @@ def create_rule( right_child=None, ): """ - Create a new Rule for mirroring a Repository - """ + Create a new Rule for mirroring a Repository. + """ validate_rule(rule_type, rule_value) @@ -539,16 +553,16 @@ def create_rule( def list_rules(repository): """ - Returns all RepoMirrorRules associated with a Repository. - """ + Returns all RepoMirrorRules associated with a Repository. + """ rules = RepoMirrorRule.select().where(RepoMirrorRule.repository == repository).all() return rules def get_root_rule(repository): """ - Return the primary mirroring Rule - """ + Return the primary mirroring Rule. + """ mirror = get_mirror(repository) try: rule = RepoMirrorRule.get(repository=repository) @@ -559,8 +573,8 @@ def get_root_rule(repository): def change_rule(repository, rule_type, rule_value): """ - Update the value of an existing rule. - """ + Update the value of an existing rule. + """ validate_rule(rule_type, rule_value) diff --git a/data/model/repository.py b/data/model/repository.py index 348d4dee5..b9bfcb027 100644 --- a/data/model/repository.py +++ b/data/model/repository.py @@ -53,7 +53,9 @@ SEARCH_FIELDS = Enum("SearchFields", ["name", "description"]) class RepoStateConfigException(Exception): - """ Repository.state value requires further configuration to operate. """ + """ + Repository.state value requires further configuration to operate. + """ pass @@ -130,9 +132,10 @@ def _get_gc_expiration_policies(): def get_random_gc_policy(): - """ Return a single random policy from the database to use when garbage collecting or None if - none available. - """ + """ + Return a single random policy from the database to use when garbage collecting or None if none + available. 
+ """ policies = _get_gc_expiration_policies() if not policies: return None @@ -176,13 +179,17 @@ def find_repository_with_garbage(limit_to_gc_policy_s): def star_repository(user, repository): - """ Stars a repository. """ + """ + Stars a repository. + """ star = Star.create(user=user.id, repository=repository.id) star.save() def unstar_repository(user, repository): - """ Unstars a repository. """ + """ + Unstars a repository. + """ try: (Star.delete().where(Star.repository == repository.id, Star.user == user.id).execute()) except Star.DoesNotExist: @@ -200,7 +207,9 @@ def set_description(repo, description): def get_user_starred_repositories(user, kind_filter="image"): - """ Retrieves all of the repositories a user has starred. """ + """ + Retrieves all of the repositories a user has starred. + """ try: repo_kind = Repository.kind.get_id(kind_filter) except RepositoryKind.DoesNotExist: @@ -221,7 +230,9 @@ def get_user_starred_repositories(user, kind_filter="image"): def repository_is_starred(user, repository): - """ Determines whether a user has starred a repository or not. """ + """ + Determines whether a user has starred a repository or not. + """ try: (Star.select().where(Star.repository == repository.id, Star.user == user.id).get()) return True @@ -230,9 +241,10 @@ def repository_is_starred(user, repository): def get_stars(repository_ids): - """ Returns a map from repository ID to the number of stars for each repository in the - given repository IDs list. - """ + """ + Returns a map from repository ID to the number of stars for each repository in the given + repository IDs list. + """ if not repository_ids: return {} @@ -253,8 +265,9 @@ def get_stars(repository_ids): def get_visible_repositories( username, namespace=None, kind_filter="image", include_public=False, start_id=None, limit=None ): - """ Returns the repositories visible to the given user (if any). - """ + """ + Returns the repositories visible to the given user (if any). + """ if not include_public and not username: # Short circuit by returning a query that will find no repositories. We need to return a query # here, as it will be modified by other queries later on. @@ -295,7 +308,9 @@ def get_visible_repositories( def get_app_repository(namespace_name, repository_name): - """ Find an application repository. """ + """ + Find an application repository. + """ try: return _basequery.get_existing_repository( namespace_name, repository_name, kind_filter="application" @@ -328,10 +343,12 @@ def _get_namespace_user(username): def get_filtered_matching_repositories( lookup_value, filter_username=None, repo_kind="image", offset=0, limit=25, search_fields=None ): - """ Returns an iterator of all repositories matching the given lookup value, with optional - filtering to a specific user. If the user is unspecified, only public repositories will - be returned. - """ + """ + Returns an iterator of all repositories matching the given lookup value, with optional filtering + to a specific user. + + If the user is unspecified, only public repositories will be returned. + """ if search_fields is None: search_fields = set([SEARCH_FIELDS.description.name, SEARCH_FIELDS.name.name]) @@ -413,10 +430,12 @@ def _filter_repositories_visible_to_user(unfiltered_query, filter_user_id, limit def _get_sorted_matching_repositories( lookup_value, repo_kind="image", include_private=False, search_fields=None, ids_only=False ): - """ Returns a query of repositories matching the given lookup string, with optional inclusion of - private repositories. 
Note that this method does *not* filter results based on visibility - to users. - """ + """ + Returns a query of repositories matching the given lookup string, with optional inclusion of + private repositories. + + Note that this method does *not* filter results based on visibility to users. + """ select_fields = [Repository.id] if ids_only else [Repository, Namespace] if not lookup_value: @@ -555,9 +574,11 @@ def confirm_email_authorization_for_repo(code): def is_empty(namespace_name, repository_name): - """ Returns if the repository referenced by the given namespace and name is empty. If the repo - doesn't exist, returns True. - """ + """ + Returns if the repository referenced by the given namespace and name is empty. + + If the repo doesn't exist, returns True. + """ try: tag.list_repository_tags(namespace_name, repository_name).limit(1).get() return False @@ -566,7 +587,11 @@ def is_empty(namespace_name, repository_name): def get_repository_state(namespace_name, repository_name): - """ Return the Repository State if the Repository exists. Otherwise, returns None. """ + """ + Return the Repository State if the Repository exists. + + Otherwise, returns None. + """ repo = get_repository(namespace_name, repository_name) if repo: return repo.state @@ -580,8 +605,10 @@ def set_repository_state(repo, state): def mark_repository_for_deletion(namespace_name, repository_name, repository_gc_queue): - """ Marks a repository for future deletion in the background. The repository will be - renamed and hidden, and then deleted later by a worker. + """ + Marks a repository for future deletion in the background. + + The repository will be renamed and hidden, and then deleted later by a worker. """ repo = get_repository(namespace_name, repository_name) if not repo: diff --git a/data/model/repositoryactioncount.py b/data/model/repositoryactioncount.py index 40cb4474f..9cc715f25 100644 --- a/data/model/repositoryactioncount.py +++ b/data/model/repositoryactioncount.py @@ -33,9 +33,10 @@ SEARCH_BUCKETS = [ def find_uncounted_repository(): - """ Returns a repository that has not yet had an entry added into the RepositoryActionCount - table for yesterday. - """ + """ + Returns a repository that has not yet had an entry added into the RepositoryActionCount table + for yesterday. + """ try: # Get a random repository to count. today = date.today() @@ -56,9 +57,11 @@ def find_uncounted_repository(): def count_repository_actions(to_count, day): - """ Aggregates repository actions from the LogEntry table for the specified day. Returns the - count or None on error. - """ + """ + Aggregates repository actions from the LogEntry table for the specified day. + + Returns the count or None on error. + """ # TODO: Clean this up a bit. def lookup_action_count(model): return ( @@ -81,9 +84,11 @@ def count_repository_actions(to_count, day): def store_repository_action_count(repository, day, action_count): - """ Stores the action count for a repository for a specific day. Returns False if the - repository already has an entry for the specified day. - """ + """ + Stores the action count for a repository for a specific day. + + Returns False if the repository already has an entry for the specified day. 
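The four repositoryactioncount.py helpers above chain into a daily worker pass. This sketch only follows the documented contracts (a `None`/`False` return short-circuits); the orchestration itself is an assumption.

```python
from datetime import date, timedelta

from data.model.repositoryactioncount import (
    count_repository_actions,
    find_uncounted_repository,
    store_repository_action_count,
    update_repository_score,
)


def count_one_repository():
    repo = find_uncounted_repository()
    if repo is None:
        return False  # nothing left to count for yesterday

    yesterday = date.today() - timedelta(days=1)
    actions = count_repository_actions(repo, yesterday)
    if actions is None:
        return False  # aggregation failed; leave for a later pass

    # Per the docstrings above, the score must be recomputed only after the
    # day's count has been stored.
    if store_repository_action_count(repo, yesterday, actions):
        update_repository_score(repo)
    return True
```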
+ """ try: RepositoryActionCount.create(repository=repository, date=day, count=action_count) return True @@ -93,10 +98,13 @@ def store_repository_action_count(repository, day, action_count): def update_repository_score(repo): - """ Updates the repository score entry for the given table by retrieving information from - the RepositoryActionCount table. Note that count_repository_actions for the repo should - be called first. Returns True if the row was updated and False otherwise. - """ + """ + Updates the repository score entry for the given table by retrieving information from the + RepositoryActionCount table. + + Note that count_repository_actions for the repo should be called first. Returns True if the row + was updated and False otherwise. + """ today = date.today() # Retrieve the counts for each bucket and calculate the final score. diff --git a/data/model/storage.py b/data/model/storage.py index 1c96a9d20..e67470857 100644 --- a/data/model/storage.py +++ b/data/model/storage.py @@ -58,7 +58,9 @@ def get_image_location_for_id(location_id): def add_storage_placement(storage, location_name): - """ Adds a storage placement for the given storage at the given location. """ + """ + Adds a storage placement for the given storage at the given location. + """ location = get_image_location_for_name(location_name) try: ImageStoragePlacement.create(location=location.id, storage=storage) @@ -68,9 +70,10 @@ def add_storage_placement(storage, location_name): def _orphaned_storage_query(candidate_ids): - """ Returns the subset of the candidate ImageStorage IDs representing storages that are no - longer referenced by images. - """ + """ + Returns the subset of the candidate ImageStorage IDs representing storages that are no longer + referenced by images. + """ # Issue a union query to find all storages that are still referenced by a candidate storage. This # is much faster than the group_by and having call we used to use here. nonorphaned_queries = [] @@ -106,17 +109,20 @@ def _orphaned_storage_query(candidate_ids): def garbage_collect_storage(storage_id_whitelist): - """ Performs GC on a possible subset of the storage's with the IDs found in the - whitelist. The storages in the whitelist will be checked, and any orphaned will - be removed, with those IDs being returned. - """ + """ + Performs GC on a possible subset of the storage's with the IDs found in the whitelist. + + The storages in the whitelist will be checked, and any orphaned will be removed, with those IDs + being returned. + """ if len(storage_id_whitelist) == 0: return [] def placements_to_filtered_paths_set(placements_list): - """ Returns the list of paths to remove from storage, filtered from the given placements - query by removing any CAS paths that are still referenced by storage(s) in the database. - """ + """ + Returns the list of paths to remove from storage, filtered from the given placements query + by removing any CAS paths that are still referenced by storage(s) in the database. + """ with ensure_under_transaction(): if not placements_list: return set() @@ -305,7 +311,9 @@ def get_storage_by_uuid(storage_uuid): def get_layer_path(storage_record): - """ Returns the path in the storage engine to the layer data referenced by the storage row. """ + """ + Returns the path in the storage engine to the layer data referenced by the storage row. 
+ """ assert storage_record.cas_path is not None return get_layer_path_for_storage( storage_record.uuid, storage_record.cas_path, storage_record.content_checksum @@ -313,8 +321,9 @@ def get_layer_path(storage_record): def get_layer_path_for_storage(storage_uuid, cas_path, content_checksum): - """ Returns the path in the storage engine to the layer data referenced by the storage - information. """ + """ + Returns the path in the storage engine to the layer data referenced by the storage information. + """ store = config.store if not cas_path: logger.debug("Serving layer from legacy v1 path for storage %s", storage_uuid) @@ -324,8 +333,9 @@ def get_layer_path_for_storage(storage_uuid, cas_path, content_checksum): def lookup_repo_storages_by_content_checksum(repo, checksums, by_manifest=False): - """ Looks up repository storages (without placements) matching the given repository - and checksum. """ + """ + Looks up repository storages (without placements) matching the given repository and checksum. + """ if not checksums: return [] @@ -379,9 +389,10 @@ def lookup_repo_storages_by_content_checksum(repo, checksums, by_manifest=False) def set_image_storage_metadata( docker_image_id, namespace_name, repository_name, image_size, uncompressed_size ): - """ Sets metadata that is specific to the binary storage of the data, irrespective of how it - is used in the layer tree. - """ + """ + Sets metadata that is specific to the binary storage of the data, irrespective of how it is used + in the layer tree. + """ if image_size is None: raise DataModelException("Empty image size field") diff --git a/data/model/tag.py b/data/model/tag.py index 2fc11b01d..49ecdeb5f 100644 --- a/data/model/tag.py +++ b/data/model/tag.py @@ -45,12 +45,16 @@ logger = logging.getLogger(__name__) def get_max_id_for_sec_scan(): - """ Gets the maximum id for security scanning """ + """ + Gets the maximum id for security scanning. + """ return RepositoryTag.select(fn.Max(RepositoryTag.id)).scalar() def get_min_id_for_sec_scan(version): - """ Gets the minimum id for a security scanning """ + """ + Gets the minimum id for a security scanning. + """ return _tag_alive( RepositoryTag.select(fn.Min(RepositoryTag.id)) .join(Image) @@ -59,7 +63,9 @@ def get_min_id_for_sec_scan(version): def get_tag_pk_field(): - """ Returns the primary key for Image DB model """ + """ + Returns the primary key for Image DB model. + """ return RepositoryTag.id @@ -88,7 +94,9 @@ def _tag_alive(query, now_ts=None): def filter_has_repository_event(query, event): - """ Filters the query by ensuring the repositories returned have the given event. """ + """ + Filters the query by ensuring the repositories returned have the given event. + """ return ( query.join(Repository) .join(RepositoryNotification) @@ -97,10 +105,11 @@ def filter_has_repository_event(query, event): def filter_tags_have_repository_event(query, event): - """ Filters the query by ensuring the repository tags live in a repository that has the given - event. Also returns the image storage for the tag's image and orders the results by - lifetime_start_ts. - """ + """ + Filters the query by ensuring the repository tags live in a repository that has the given event. + + Also returns the image storage for the tag's image and orders the results by lifetime_start_ts. 
+ """ query = filter_has_repository_event(query, event) query = query.switch(RepositoryTag).join(Image).join(ImageStorage) query = query.switch(RepositoryTag).order_by(RepositoryTag.lifetime_start_ts.desc()) @@ -114,8 +123,10 @@ _MAX_IMAGE_LOOKUP_COUNT = 500 def get_matching_tags_for_images( image_pairs, filter_images=None, filter_tags=None, selections=None ): - """ Returns all tags that contain the images with the given docker_image_id and storage_uuid, - as specified as an iterable of pairs. """ + """ + Returns all tags that contain the images with the given docker_image_id and storage_uuid, as + specified as an iterable of pairs. + """ if not image_pairs: return [] @@ -200,8 +211,10 @@ def get_matching_tags_for_images( def get_matching_tags(docker_image_id, storage_uuid, *args): - """ Returns a query pointing to all tags that contain the image with the - given docker_image_id and storage_uuid. """ + """ + Returns a query pointing to all tags that contain the image with the given docker_image_id and + storage_uuid. + """ image_row = image.get_image_with_storage(docker_image_id, storage_uuid) if image_row is None: return RepositoryTag.select().where(RepositoryTag.id < 0) # Empty query. @@ -226,7 +239,9 @@ def get_tags_for_image(image_id, *args): def get_tag_manifest_digests(tags): - """ Returns a map from tag ID to its associated manifest digest, if any. """ + """ + Returns a map from tag ID to its associated manifest digest, if any. + """ if not tags: return dict() @@ -238,9 +253,10 @@ def get_tag_manifest_digests(tags): def list_active_repo_tags(repo, start_id=None, limit=None, include_images=True): - """ Returns all of the active, non-hidden tags in a repository, joined to they images - and (if present), their manifest. - """ + """ + Returns all of the active, non-hidden tags in a repository, joined to they images and (if + present), their manifest. + """ if include_images: query = _tag_alive( RepositoryTag.select(RepositoryTag, Image, ImageStorage, TagManifest.digest) @@ -377,8 +393,11 @@ def create_or_update_tag_for_repo( def create_temporary_hidden_tag(repo, image_obj, expiration_s): - """ Create a tag with a defined timeline, that will not appear in the UI or CLI. Returns the name - of the temporary tag. """ + """ + Create a tag with a defined timeline, that will not appear in the UI or CLI. + + Returns the name of the temporary tag. + """ now_ts = get_epoch_timestamp() expire_ts = now_ts + expiration_s tag_name = str(uuid4()) @@ -394,8 +413,9 @@ def create_temporary_hidden_tag(repo, image_obj, expiration_s): def lookup_unrecoverable_tags(repo): - """ Returns the tags in a repository that are expired and past their time machine recovery - period. """ + """ + Returns the tags in a repository that are expired and past their time machine recovery period. + """ expired_clause = get_epoch_timestamp() - Namespace.removed_tag_expiration_s return ( RepositoryTag.select() @@ -519,7 +539,9 @@ def list_repository_tag_history( def restore_tag_to_manifest(repo_obj, tag_name, manifest_digest): - """ Restores a tag to a specific manifest digest. """ + """ + Restores a tag to a specific manifest digest. + """ with db_transaction(): # Verify that the manifest digest already existed under this repository under the # tag. @@ -559,7 +581,9 @@ def restore_tag_to_manifest(repo_obj, tag_name, manifest_digest): def restore_tag_to_image(repo_obj, tag_name, docker_image_id): - """ Restores a tag to a specific image ID. """ + """ + Restores a tag to a specific image ID. 
+ """ with db_transaction(): # Verify that the image ID already existed under this repository under the # tag. @@ -588,9 +612,12 @@ def restore_tag_to_image(repo_obj, tag_name, docker_image_id): def store_tag_manifest_for_testing( namespace_name, repository_name, tag_name, manifest, leaf_layer_id, storage_id_map ): - """ Stores a tag manifest for a specific tag name in the database. Returns the TagManifest - object, as well as a boolean indicating whether the TagManifest was created. - """ + """ + Stores a tag manifest for a specific tag name in the database. + + Returns the TagManifest object, as well as a boolean indicating whether the TagManifest was + created. + """ try: repo = _basequery.get_existing_repository(namespace_name, repository_name) except Repository.DoesNotExist: @@ -602,9 +629,12 @@ def store_tag_manifest_for_testing( def store_tag_manifest_for_repo( repository_id, tag_name, manifest, leaf_layer_id, storage_id_map, reversion=False ): - """ Stores a tag manifest for a specific tag name in the database. Returns the TagManifest - object, as well as a boolean indicating whether the TagManifest was created. - """ + """ + Stores a tag manifest for a specific tag name in the database. + + Returns the TagManifest object, as well as a boolean indicating whether the TagManifest was + created. + """ # Create the new-style OCI manifest and its blobs. oci_manifest = _populate_manifest_and_blobs( repository_id, manifest, storage_id_map, leaf_layer_id=leaf_layer_id @@ -737,7 +767,9 @@ def _populate_manifest_and_blobs(repository, manifest, storage_id_map, leaf_laye def populate_manifest(repository, manifest, legacy_image, storage_ids): - """ Populates the rows for the manifest, including its blobs and legacy image. """ + """ + Populates the rows for the manifest, including its blobs and legacy image. + """ media_type = Manifest.media_type.get_id(manifest.media_type) # Check for an existing manifest. If present, return it. @@ -838,9 +870,11 @@ def _load_repo_manifests(namespace, repo_name, allow_dead=False): def change_repository_tag_expiration(namespace_name, repo_name, tag_name, expiration_date): - """ Changes the expiration of the tag with the given name to the given expiration datetime. If - the expiration datetime is None, then the tag is marked as not expiring. - """ + """ + Changes the expiration of the tag with the given name to the given expiration datetime. + + If the expiration datetime is None, then the tag is marked as not expiring. + """ try: tag = get_active_tag(namespace_name, repo_name, tag_name) return change_tag_expiration(tag, expiration_date) @@ -850,18 +884,20 @@ def change_repository_tag_expiration(namespace_name, repo_name, tag_name, expira def set_tag_expiration_for_manifest(tag_manifest, expiration_sec): """ - Changes the expiration of the tag that points to the given manifest to be its lifetime start + - the expiration seconds. - """ + Changes the expiration of the tag that points to the given manifest to be its lifetime start + + the expiration seconds. + """ expiration_time_ts = tag_manifest.tag.lifetime_start_ts + expiration_sec expiration_date = datetime.utcfromtimestamp(expiration_time_ts) return change_tag_expiration(tag_manifest.tag, expiration_date) def change_tag_expiration(tag, expiration_date): - """ Changes the expiration of the given tag to the given expiration datetime. If - the expiration datetime is None, then the tag is marked as not expiring. - """ + """ + Changes the expiration of the given tag to the given expiration datetime. 
+
+    If the expiration datetime is None, then the tag is marked as not expiring.
+    """
     end_ts = None
     min_expire_sec = convert_to_timedelta(config.app_config.get("LABELED_EXPIRATION_MINIMUM", "1h"))
     max_expire_sec = convert_to_timedelta(
@@ -880,9 +916,11 @@ def change_tag_expiration(tag, expiration_date):

 def set_tag_end_ts(tag, end_ts):
-    """ Sets the end timestamp for a tag. Should only be called by change_tag_expiration
-    or tests.
-    """
+    """
+    Sets the end timestamp for a tag.
+
+    Should only be called by change_tag_expiration or tests.
+    """
     end_ms = end_ts * 1000 if end_ts is not None else None

     with db_transaction():
@@ -915,9 +953,9 @@ def set_tag_end_ts(tag, end_ts):

 def find_matching_tag(repo_id, tag_names):
-    """ Finds the most recently pushed alive tag in the repository with one of the given names,
-    if any.
-    """
+    """
+    Finds the most recently pushed alive tag in the repository with one of the given names, if any.
+    """
     try:
         return _tag_alive(
             RepositoryTag.select()
@@ -929,7 +967,9 @@ def find_matching_tag(repo_id, tag_names):

 def get_most_recent_tag(repo_id):
-    """ Returns the most recently pushed alive tag in the repository, or None if none. """
+    """
+    Returns the most recently pushed alive tag in the repository, or None if none.
+    """
     try:
         return _tag_alive(
             RepositoryTag.select()
diff --git a/data/model/team.py b/data/model/team.py
index 1e446751e..853524cc1 100644
--- a/data/model/team.py
+++ b/data/model/team.py
@@ -234,9 +234,10 @@ def get_matching_teams(team_prefix, organization):

 def get_teams_within_org(organization, has_external_auth=False):
-    """ Returns a AttrDict of team info (id, name, description), its role under the org,
-    the number of repositories on which it has permission, and the number of members.
-    """
+    """
+    Returns an AttrDict of team info (id, name, description), its role under the org, the number of
+    repositories on which it has permission, and the number of members.
+    """
     query = Team.select().where(Team.organization == organization).join(TeamRole)

     def _team_view(team):
@@ -363,8 +364,10 @@ def delete_team_invite(code, user_obj=None):

 def find_matching_team_invite(code, user_obj):
-    """ Finds a team invite with the given code that applies to the given user and returns it or
-    raises a DataModelException if not found. """
+    """
+    Finds a team invite with the given code that applies to the given user and returns it or raises
+    a DataModelException if not found.
+    """
     found = lookup_team_invite(code)

     # If the invite is for a specific user, we have to confirm that here.
@@ -380,7 +383,9 @@ def find_matching_team_invite(code, user_obj):

 def find_organization_invites(organization, user_obj):
-    """ Finds all organization team invites for the given user under the given organization. """
+    """
+    Finds all organization team invites for the given user under the given organization.
+    """
     invite_check = TeamMemberInvite.user == user_obj
     if user_obj.verified:
         invite_check = invite_check | (TeamMemberInvite.email == user_obj.email)
@@ -392,10 +397,14 @@ def find_organization_invites(organization, user_obj):

 def confirm_team_invite(code, user_obj):
-    """ Confirms the given team invite code for the given user by adding the user to the team
-    and deleting the code. Raises a DataModelException if the code was not found or does
-    not apply to the given user. If the user is invited to two or more teams under the
-    same organization, they are automatically confirmed for all of them. """
+    """
+    Confirms the given team invite code for the given user by adding the user to the team and
+    deleting the code.
+
+    Raises a DataModelException if the code was not found or does not apply to the given user. If
+    the user is invited to two or more teams under the same organization, they are automatically
+    confirmed for all of them.
+    """
     found = find_matching_team_invite(code, user_obj)

     # Find all matching invitations for the user under the organization.
@@ -434,10 +443,12 @@ def confirm_team_invite(code, user_obj):

 def get_federated_team_member_mapping(team, login_service_name):
-    """ Returns a dict of all federated IDs for all team members in the team whose users are
-    bound to the login service within the given name. The dictionary is from federated service
-    identifier (username) to their Quay User table ID.
-    """
+    """
+    Returns a dict of all federated IDs for all team members in the team whose users are bound to
+    the login service within the given name.
+
+    The dictionary is from federated service identifier (username) to their Quay User table ID.
+    """
     login_service = LoginService.get(name=login_service_name)

     query = (
@@ -451,17 +462,27 @@ def get_federated_team_member_mapping(team, login_service_name):

 def list_team_users(team):
-    """ Returns an iterator of all the *users* found in a team. Does not include robots. """
+    """
+    Returns an iterator of all the *users* found in a team.
+
+    Does not include robots.
+    """
     return User.select().join(TeamMember).join(Team).where(Team.id == team, User.robot == False)


 def list_team_robots(team):
-    """ Returns an iterator of all the *robots* found in a team. Does not include users. """
+    """
+    Returns an iterator of all the *robots* found in a team.
+
+    Does not include users.
+    """
     return User.select().join(TeamMember).join(Team).where(Team.id == team, User.robot == True)


 def set_team_syncing(team, login_service_name, config):
-    """ Sets the given team to sync to the given service using the given config. """
+    """
+    Sets the given team to sync to the given service using the given config.
+    """
     login_service = LoginService.get(name=login_service_name)
     return TeamSync.create(
         team=team, transaction_id="", service=login_service, config=json.dumps(config)
@@ -469,16 +490,20 @@ def set_team_syncing(team, login_service_name, config):

 def remove_team_syncing(orgname, teamname):
-    """ Removes syncing on the team matching the given organization name and team name. """
+    """
+    Removes syncing on the team matching the given organization name and team name.
+    """
     existing = get_team_sync_information(orgname, teamname)
     if existing:
         existing.delete_instance()


 def get_stale_team(stale_timespan):
-    """ Returns a team that is setup to sync to an external group, and who has not been synced in
-    now - stale_timespan. Returns None if none found.
-    """
+    """
+    Returns a team that is set up to sync to an external group and has not been synced within
+    now - stale_timespan.
+    Returns None if none found.
+    """
     stale_at = datetime.now() - stale_timespan

     try:
@@ -500,9 +525,10 @@ def get_stale_team(stale_timespan):

 def get_team_sync_information(orgname, teamname):
-    """ Returns the team syncing information for the team with the given name under the organization
-    with the given name or None if none.
-    """
+    """
+    Returns the team syncing information for the team with the given name under the organization
+    with the given name or None if none.
+ """ query = ( TeamSync.select(TeamSync, LoginService) .join(Team) @@ -519,10 +545,12 @@ def get_team_sync_information(orgname, teamname): def update_sync_status(team_sync_info): - """ Attempts to update the transaction ID and last updated time on a TeamSync object. If the - transaction ID on the entry in the DB does not match that found on the object, this method - returns False, which indicates another caller updated it first. - """ + """ + Attempts to update the transaction ID and last updated time on a TeamSync object. + + If the transaction ID on the entry in the DB does not match that found on the object, this + method returns False, which indicates another caller updated it first. + """ new_transaction_id = str(uuid.uuid4()) query = TeamSync.update(transaction_id=new_transaction_id, last_updated=datetime.now()).where( TeamSync.id == team_sync_info.id, TeamSync.transaction_id == team_sync_info.transaction_id @@ -531,7 +559,9 @@ def update_sync_status(team_sync_info): def delete_members_not_present(team, member_id_set): - """ Deletes all members of the given team that are not found in the member ID set. """ + """ + Deletes all members of the given team that are not found in the member ID set. + """ with db_transaction(): user_ids = set([u.id for u in list_team_users(team)]) to_delete = list(user_ids - member_id_set) diff --git a/data/model/test/test_basequery.py b/data/model/test/test_basequery.py index f5d42d3c0..57d0142d4 100644 --- a/data/model/test/test_basequery.py +++ b/data/model/test/test_basequery.py @@ -21,9 +21,10 @@ def _is_team_member(team, user): def _get_visible_repositories_for_user( user, repo_kind="image", include_public=False, namespace=None ): - """ Returns all repositories directly visible to the given user, by either repo permission, - or the user being the admin of a namespace. - """ + """ + Returns all repositories directly visible to the given user, by either repo permission, or the + user being the admin of a namespace. + """ for repo in Repository.select(): if repo_kind is not None and repo.kind.name != repo_kind: continue diff --git a/data/model/test/test_gc.py b/data/model/test/test_gc.py index b65789a6f..33f441064 100644 --- a/data/model/test/test_gc.py +++ b/data/model/test/test_gc.py @@ -201,10 +201,10 @@ def _get_dangling_manifest_count(): @contextmanager def assert_gc_integrity(expect_storage_removed=True, check_oci_tags=True): - """ Specialized assertion for ensuring that GC cleans up all dangling storages - and labels, invokes the callback for images removed and doesn't invoke the - callback for images *not* removed. - """ + """ + Specialized assertion for ensuring that GC cleans up all dangling storages and labels, invokes + the callback for images removed and doesn't invoke the callback for images *not* removed. + """ # Add a callback for when images are removed. removed_image_storages = [] model.config.register_image_cleanup_callback(removed_image_storages.extend) @@ -277,9 +277,10 @@ def assert_gc_integrity(expect_storage_removed=True, check_oci_tags=True): def test_has_garbage(default_tag_policy, initialized_db): - """ Remove all existing repositories, then add one without garbage, check, then add one with - garbage, and check again. - """ + """ + Remove all existing repositories, then add one without garbage, check, then add one with + garbage, and check again. + """ # Delete all existing repos. 
for repo in database.Repository.select().order_by(database.Repository.id): assert model.gc.purge_repository(repo, force=True) @@ -330,8 +331,10 @@ def test_find_garbage_policy_functions(default_tag_policy, initialized_db): def test_one_tag(default_tag_policy, initialized_db): - """ Create a repository with a single tag, then remove that tag and verify that the repository - is now empty. """ + """ + Create a repository with a single tag, then remove that tag and verify that the repository is + now empty. + """ with assert_gc_integrity(): repository = create_repository(latest=["i1", "i2", "i3"]) delete_tag(repository, "latest") @@ -339,7 +342,9 @@ def test_one_tag(default_tag_policy, initialized_db): def test_two_tags_unshared_images(default_tag_policy, initialized_db): - """ Repository has two tags with no shared images between them. """ + """ + Repository has two tags with no shared images between them. + """ with assert_gc_integrity(): repository = create_repository(latest=["i1", "i2", "i3"], other=["f1", "f2"]) delete_tag(repository, "latest") @@ -348,9 +353,11 @@ def test_two_tags_unshared_images(default_tag_policy, initialized_db): def test_two_tags_shared_images(default_tag_policy, initialized_db): - """ Repository has two tags with shared images. Deleting the tag should only remove the - unshared images. - """ + """ + Repository has two tags with shared images. + + Deleting the tag should only remove the unshared images. + """ with assert_gc_integrity(): repository = create_repository(latest=["i1", "i2", "i3"], other=["i1", "f1"]) delete_tag(repository, "latest") @@ -359,9 +366,11 @@ def test_two_tags_shared_images(default_tag_policy, initialized_db): def test_unrelated_repositories(default_tag_policy, initialized_db): - """ Two repositories with different images. Removing the tag from one leaves the other's - images intact. - """ + """ + Two repositories with different images. + + Removing the tag from one leaves the other's images intact. + """ with assert_gc_integrity(): repository1 = create_repository(latest=["i1", "i2", "i3"], name="repo1") repository2 = create_repository(latest=["j1", "j2", "j3"], name="repo2") @@ -373,9 +382,11 @@ def test_unrelated_repositories(default_tag_policy, initialized_db): def test_related_repositories(default_tag_policy, initialized_db): - """ Two repositories with shared images. Removing the tag from one leaves the other's - images intact. - """ + """ + Two repositories with shared images. + + Removing the tag from one leaves the other's images intact. + """ with assert_gc_integrity(): repository1 = create_repository(latest=["i1", "i2", "i3"], name="repo1") repository2 = create_repository(latest=["i1", "i2", "j1"], name="repo2") @@ -387,9 +398,10 @@ def test_related_repositories(default_tag_policy, initialized_db): def test_inaccessible_repositories(default_tag_policy, initialized_db): - """ Two repositories under different namespaces should result in the images being deleted - but not completely removed from the database. - """ + """ + Two repositories under different namespaces should result in the images being deleted but not + completely removed from the database. 
+ """ with assert_gc_integrity(): repository1 = create_repository(namespace=ADMIN_ACCESS_USER, latest=["i1", "i2", "i3"]) repository2 = create_repository(namespace=PUBLIC_USER, latest=["i1", "i2", "i3"]) @@ -400,8 +412,11 @@ def test_inaccessible_repositories(default_tag_policy, initialized_db): def test_many_multiple_shared_images(default_tag_policy, initialized_db): - """ Repository has multiple tags with shared images. Delete all but one tag. - """ + """ + Repository has multiple tags with shared images. + + Delete all but one tag. + """ with assert_gc_integrity(): repository = create_repository( latest=["i1", "i2", "i3", "i4", "i5", "i6", "i7", "i8", "j0"], @@ -421,9 +436,11 @@ def test_many_multiple_shared_images(default_tag_policy, initialized_db): def test_multiple_shared_images(default_tag_policy, initialized_db): - """ Repository has multiple tags with shared images. Selectively deleting the tags, and - verifying at each step. - """ + """ + Repository has multiple tags with shared images. + + Selectively deleting the tags, and verifying at each step. + """ with assert_gc_integrity(): repository = create_repository( latest=["i1", "i2", "i3"], @@ -512,8 +529,11 @@ def test_empty_gc(default_tag_policy, initialized_db): def test_time_machine_no_gc(default_tag_policy, initialized_db): - """ Repository has two tags with shared images. Deleting the tag should not remove any images - """ + """ + Repository has two tags with shared images. + + Deleting the tag should not remove any images + """ with assert_gc_integrity(expect_storage_removed=False): repository = create_repository(latest=["i1", "i2", "i3"], other=["i1", "f1"]) _set_tag_expiration_policy(repository.namespace_user.username, 60 * 60 * 24) @@ -524,9 +544,11 @@ def test_time_machine_no_gc(default_tag_policy, initialized_db): def test_time_machine_gc(default_tag_policy, initialized_db): - """ Repository has two tags with shared images. Deleting the second tag should cause the images - for the first deleted tag to gc. - """ + """ + Repository has two tags with shared images. + + Deleting the second tag should cause the images for the first deleted tag to gc. + """ now = datetime.utcnow() with assert_gc_integrity(): @@ -547,9 +569,11 @@ def test_time_machine_gc(default_tag_policy, initialized_db): def test_images_shared_storage(default_tag_policy, initialized_db): - """ Repository with two tags, both with the same shared storage. Deleting the first - tag should delete the first image, but *not* its storage. - """ + """ + Repository with two tags, both with the same shared storage. + + Deleting the first tag should delete the first image, but *not* its storage. + """ with assert_gc_integrity(expect_storage_removed=False): repository = create_repository() @@ -585,9 +609,11 @@ def test_images_shared_storage(default_tag_policy, initialized_db): def test_image_with_cas(default_tag_policy, initialized_db): - """ A repository with a tag pointing to an image backed by CAS. Deleting and GCing the tag - should result in the storage and its CAS data being removed. - """ + """ + A repository with a tag pointing to an image backed by CAS. + + Deleting and GCing the tag should result in the storage and its CAS data being removed. 
+ """ with assert_gc_integrity(expect_storage_removed=True): repository = create_repository() @@ -627,10 +653,13 @@ def test_image_with_cas(default_tag_policy, initialized_db): def test_images_shared_cas(default_tag_policy, initialized_db): - """ A repository, each two tags, pointing to the same image, which has image storage - with the same *CAS path*, but *distinct records*. Deleting the first tag should delete the - first image, and its storage, but not the file in storage, as it shares its CAS path. - """ + """ + A repository, each two tags, pointing to the same image, which has image storage with the same + *CAS path*, but *distinct records*. + + Deleting the first tag should delete the first image, and its storage, but not the file in + storage, as it shares its CAS path. + """ with assert_gc_integrity(expect_storage_removed=True): repository = create_repository() @@ -686,10 +715,12 @@ def test_images_shared_cas(default_tag_policy, initialized_db): def test_images_shared_cas_with_new_blob_table(default_tag_policy, initialized_db): - """ A repository with a tag and image that shares its CAS path with a record in the new Blob - table. Deleting the first tag should delete the first image, and its storage, but not the - file in storage, as it shares its CAS path with the blob row. - """ + """ + A repository with a tag and image that shares its CAS path with a record in the new Blob table. + + Deleting the first tag should delete the first image, and its storage, but not the file in + storage, as it shares its CAS path with the blob row. + """ with assert_gc_integrity(expect_storage_removed=True): repository = create_repository() @@ -733,7 +764,9 @@ def test_images_shared_cas_with_new_blob_table(default_tag_policy, initialized_d def test_super_long_image_chain_gc(app, default_tag_policy): - """ Test that a super long chain of images all gets properly GCed. """ + """ + Test that a super long chain of images all gets properly GCed. + """ with assert_gc_integrity(): images = ["i%s" % i for i in range(0, 100)] repository = create_repository(latest=images) @@ -744,9 +777,10 @@ def test_super_long_image_chain_gc(app, default_tag_policy): def test_manifest_v2_shared_config_and_blobs(app, default_tag_policy): - """ Test that GCing a tag that refers to a V2 manifest with the same config and some shared - blobs as another manifest ensures that the config blob and shared blob are NOT GCed. - """ + """ + Test that GCing a tag that refers to a V2 manifest with the same config and some shared blobs as + another manifest ensures that the config blob and shared blob are NOT GCed. + """ repo = model.repository.create_repository("devtable", "newrepo", None) manifest1, built1 = create_manifest_for_testing( repo, differentiation_field="1", include_shared_blob=True diff --git a/data/model/test/test_image_sharing.py b/data/model/test/test_image_sharing.py index 1bc77d344..4e506c186 100644 --- a/data/model/test/test_image_sharing.py +++ b/data/model/test/test_image_sharing.py @@ -63,8 +63,11 @@ def assertDifferentStorage( def test_same_user(storage, initialized_db): - """ The same user creates two images, each which should be shared in the same repo. This is a - sanity check. """ + """ + The same user creates two images, each which should be shared in the same repo. + + This is a sanity check. + """ # Create a reference to a new docker ID => new image. 
first_storage_id = createStorage(storage, "first-image") @@ -83,7 +86,9 @@ def test_same_user(storage, initialized_db): def test_no_user_private_repo(storage, initialized_db): - """ If no user is specified (token case usually), then no sharing can occur on a private repo. """ + """ + If no user is specified (token case usually), then no sharing can occur on a private repo. + """ # Create a reference to a new docker ID => new image. first_storage = createStorage(storage, "the-image", username=None, repository=SHARED_REPO) @@ -94,7 +99,10 @@ def test_no_user_private_repo(storage, initialized_db): def test_no_user_public_repo(storage, initialized_db): - """ If no user is specified (token case usually), then no sharing can occur on a private repo except when the image is first public. """ + """ + If no user is specified (token case usually), then no sharing can occur on a private repo except + when the image is first public. + """ # Create a reference to a new docker ID => new image. first_storage = createStorage(storage, "the-image", username=None, repository=PUBLIC_REPO) @@ -103,7 +111,9 @@ def test_no_user_public_repo(storage, initialized_db): def test_different_user_same_repo(storage, initialized_db): - """ Two different users create the same image in the same repo. """ + """ + Two different users create the same image in the same repo. + """ # Create a reference to a new docker ID under the first user => new image. first_storage = createStorage( @@ -117,7 +127,9 @@ def test_different_user_same_repo(storage, initialized_db): def test_different_repo_no_shared_access(storage, initialized_db): - """ Neither user has access to the other user's repository. """ + """ + Neither user has access to the other user's repository. + """ # Create a reference to a new docker ID under the first user => new image. first_storage_id = createStorage( @@ -134,7 +146,9 @@ def test_different_repo_no_shared_access(storage, initialized_db): def test_public_than_private(storage, initialized_db): - """ An image is created publicly then used privately, so it should be shared. """ + """ + An image is created publicly then used privately, so it should be shared. + """ # Create a reference to a new docker ID under the first user => new image. first_storage = createStorage( @@ -148,7 +162,9 @@ def test_public_than_private(storage, initialized_db): def test_private_than_public(storage, initialized_db): - """ An image is created privately then used publicly, so it should *not* be shared. """ + """ + An image is created privately then used publicly, so it should *not* be shared. + """ # Create a reference to a new docker ID under the first user => new image. first_storage = createStorage(storage, "the-image", username=ADMIN_ACCESS_USER, repository=REPO) @@ -160,10 +176,12 @@ def test_private_than_public(storage, initialized_db): def test_different_repo_with_access(storage, initialized_db): - """ An image is created in one repo (SHARED_REPO) which the user (PUBLIC_USER) has access to. Later, the - image is created in another repo (PUBLIC_REPO) that the user also has access to. The image should - be shared since the user has access. - """ + """ + An image is created in one repo (SHARED_REPO) which the user (PUBLIC_USER) has access to. + + Later, the image is created in another repo (PUBLIC_REPO) that the user also has access to. The + image should be shared since the user has access. + """ # Create the image in the shared repo => new image. 
first_storage = createStorage( storage, "the-image", username=ADMIN_ACCESS_USER, repository=SHARED_REPO @@ -177,7 +195,9 @@ def test_different_repo_with_access(storage, initialized_db): def test_org_access(storage, initialized_db): - """ An image is accessible by being a member of the organization. """ + """ + An image is accessible by being a member of the organization. + """ # Create the new image under the org's repo => new image. first_storage = createStorage( @@ -196,7 +216,9 @@ def test_org_access(storage, initialized_db): def test_org_access_different_user(storage, initialized_db): - """ An image is accessible by being a member of the organization. """ + """ + An image is accessible by being a member of the organization. + """ # Create the new image under the org's repo => new image. first_storage = createStorage( @@ -215,7 +237,9 @@ def test_org_access_different_user(storage, initialized_db): def test_org_no_access(storage, initialized_db): - """ An image is not accessible if not a member of the organization. """ + """ + An image is not accessible if not a member of the organization. + """ # Create the new image under the org's repo => new image. first_storage = createStorage( @@ -229,7 +253,9 @@ def test_org_no_access(storage, initialized_db): def test_org_not_team_member_with_access(storage, initialized_db): - """ An image is accessible to a user specifically listed as having permission on the org repo. """ + """ + An image is accessible to a user specifically listed as having permission on the org repo. + """ # Create the new image under the org's repo => new image. first_storage = createStorage( @@ -243,7 +269,9 @@ def test_org_not_team_member_with_access(storage, initialized_db): def test_org_not_team_member_with_no_access(storage, initialized_db): - """ A user that has access to one org repo but not another and is not a team member. """ + """ + A user that has access to one org repo but not another and is not a team member. + """ # Create the new image under the org's repo => new image. first_storage = createStorage( diff --git a/data/model/test/test_log.py b/data/model/test/test_log.py index 53e751a4d..ecef49d04 100644 --- a/data/model/test/test_log.py +++ b/data/model/test/test_log.py @@ -39,7 +39,9 @@ def user(): @pytest.mark.parametrize("action_kind", [("pull"), ("oops")]) def test_log_action_unknown_action(action_kind): - """ test unknown action types throw an exception when logged """ + """ + test unknown action types throw an exception when logged. + """ with pytest.raises(Exception): log_action(action_kind, None) diff --git a/data/model/test/test_repo_mirroring.py b/data/model/test/test_repo_mirroring.py index eb880a5ba..4e6031966 100644 --- a/data/model/test/test_repo_mirroring.py +++ b/data/model/test/test_repo_mirroring.py @@ -56,8 +56,8 @@ def disable_existing_mirrors(): def test_eligible_oldest_first(initialized_db): """ - Eligible mirror candidates should be returned with the oldest (earliest created) first. - """ + Eligible mirror candidates should be returned with the oldest (earliest created) first. + """ disable_existing_mirrors() mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first") @@ -76,8 +76,9 @@ def test_eligible_oldest_first(initialized_db): def test_eligible_includes_expired_syncing(initialized_db): """ - Mirrors that have an end time in the past are eligible even if their state indicates still syncing. 
- """ + Mirrors that have an end time in the past are eligible even if their state indicates still + syncing. + """ disable_existing_mirrors() mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first") @@ -105,8 +106,8 @@ def test_eligible_includes_expired_syncing(initialized_db): def test_eligible_includes_immediate(initialized_db): """ - Mirrors that are SYNC_NOW, regardless of starting time - """ + Mirrors that are SYNC_NOW, regardless of starting time. + """ disable_existing_mirrors() mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first") @@ -155,8 +156,8 @@ def test_create_rule_validations(initialized_db): def test_long_registry_passwords(initialized_db): """ - Verify that long passwords, such as Base64 JWT used by Redhat's Registry, work as expected. - """ + Verify that long passwords, such as Base64 JWT used by Redhat's Registry, work as expected. + """ MAX_PASSWORD_LENGTH = 1024 username = "".join("a" for _ in range(MAX_PASSWORD_LENGTH)) @@ -181,8 +182,8 @@ def test_long_registry_passwords(initialized_db): def test_sync_status_to_cancel(initialized_db): """ - SYNCING and SYNC_NOW mirrors may be canceled, ending in NEVER_RUN - """ + SYNCING and SYNC_NOW mirrors may be canceled, ending in NEVER_RUN. + """ disable_existing_mirrors() mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="cancel") @@ -217,8 +218,8 @@ def test_sync_status_to_cancel(initialized_db): def test_release_mirror(initialized_db): """ - Mirrors that are SYNC_NOW, regardless of starting time - """ + Mirrors that are SYNC_NOW, regardless of starting time. + """ disable_existing_mirrors() mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="first") diff --git a/data/model/token.py b/data/model/token.py index e6a699900..71ca63b7b 100644 --- a/data/model/token.py +++ b/data/model/token.py @@ -44,7 +44,9 @@ def create_delegate_token(namespace_name, repository_name, friendly_name, role=" def load_token_data(code): - """ Load the permissions for any token by code. """ + """ + Load the permissions for any token by code. + """ token_name = code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH] token_code = code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:] @@ -77,7 +79,9 @@ def load_token_data(code): def get_full_token_string(token): - """ Returns the full string to use for this token to login. """ + """ + Returns the full string to use for this token to login. + """ assert token.token_name token_code = token.token_code.decrypt() assert len(token.token_name) == ACCESS_TOKEN_NAME_PREFIX_LENGTH diff --git a/data/model/user.py b/data/model/user.py index 95b049964..078f0b079 100644 --- a/data/model/user.py +++ b/data/model/user.py @@ -89,7 +89,9 @@ def create_user( prompts=tuple(), is_possible_abuser=False, ): - """ Creates a regular user, if allowed. """ + """ + Creates a regular user, if allowed. + """ if not validate_password(password): raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE) @@ -168,9 +170,10 @@ def create_user_noverify( def increase_maximum_build_count(user, maximum_queued_builds_count): - """ Increases the maximum number of allowed builds on the namespace, if greater than that - already present. - """ + """ + Increases the maximum number of allowed builds on the namespace, if greater than that already + present. 
+ """ if ( user.maximum_queued_builds_count is not None and maximum_queued_builds_count > user.maximum_queued_builds_count @@ -282,15 +285,19 @@ def change_send_invoice_email(user, invoice_email): def _convert_to_s(timespan_string): - """ Returns the given timespan string (e.g. `2w` or `45s`) into seconds. """ + """ + Returns the given timespan string (e.g. `2w` or `45s`) into seconds. + """ return convert_to_timedelta(timespan_string).total_seconds() def change_user_tag_expiration(user, tag_expiration_s): - """ Changes the tag expiration on the given user/org. Note that the specified expiration must - be within the configured TAG_EXPIRATION_OPTIONS or this method will raise a - DataModelException. - """ + """ + Changes the tag expiration on the given user/org. + + Note that the specified expiration must be within the configured TAG_EXPIRATION_OPTIONS or this + method will raise a DataModelException. + """ allowed_options = [_convert_to_s(o) for o in config.app_config["TAG_EXPIRATION_OPTIONS"]] if tag_expiration_s not in allowed_options: raise DataModelException("Invalid tag expiration option") @@ -357,8 +364,10 @@ def get_or_create_robot_metadata(robot): def update_robot_metadata(robot, description="", unstructured_json=None): - """ Updates the description and user-specified unstructured metadata associated - with a robot account to that specified. """ + """ + Updates the description and user-specified unstructured metadata associated with a robot account + to that specified. + """ metadata = get_or_create_robot_metadata(robot) metadata.description = description metadata.unstructured_json = unstructured_json or metadata.unstructured_json or {} @@ -366,13 +375,17 @@ def update_robot_metadata(robot, description="", unstructured_json=None): def retrieve_robot_token(robot): - """ Returns the decrypted token for the given robot. """ + """ + Returns the decrypted token for the given robot. + """ token = RobotAccountToken.get(robot_account=robot).token.decrypt() return token def get_robot_and_metadata(robot_shortname, parent): - """ Returns a tuple of the robot matching the given shortname, its token, and its metadata. """ + """ + Returns a tuple of the robot matching the given shortname, its token, and its metadata. + """ robot_username = format_robot_username(parent.username, robot_shortname) robot, metadata = lookup_robot_and_metadata(robot_username) token = retrieve_robot_token(robot) @@ -489,14 +502,18 @@ def delete_robot(robot_username): def list_namespace_robots(namespace): - """ Returns all the robots found under the given namespace. """ + """ + Returns all the robots found under the given namespace. + """ return _list_entity_robots(namespace) def _list_entity_robots(entity_name, include_metadata=True, include_token=True): - """ Return the list of robots for the specified entity. This MUST return a query, not a - materialized list so that callers can use db_for_update. - """ + """ + Return the list of robots for the specified entity. + + This MUST return a query, not a materialized list so that callers can use db_for_update. + """ if include_metadata or include_token: query = ( User.select(User, RobotAccountToken, RobotAccountMetadata) @@ -543,7 +560,9 @@ def list_entity_robot_permission_teams(entity_name, limit=None, include_permissi def update_user_metadata(user, metadata=None): - """ Updates the metadata associated with the user, including his/her name and company. """ + """ + Updates the metadata associated with the user, including his/her name and company. 
+ """ metadata = metadata if metadata is not None else {} with db_transaction(): @@ -826,7 +845,9 @@ def get_user_or_org_by_customer_id(customer_id): def invalidate_all_sessions(user): - """ Invalidates all existing user sessions by rotating the user's UUID. """ + """ + Invalidates all existing user sessions by rotating the user's UUID. + """ if not user: return @@ -924,11 +945,13 @@ def _get_matching_users( def verify_user(username_or_email, password): - """ Verifies that the given username/email + password pair is valid. If the username or e-mail - address is invalid, returns None. If the password specified does not match for the given user, - either returns None or raises TooManyLoginAttemptsException if there have been too many - invalid login attempts. Returns the user object if the login was valid. - """ + """ + Verifies that the given username/email + password pair is valid. + + If the username or e-mail address is invalid, returns None. If the password specified does not + match for the given user, either returns None or raises TooManyLoginAttemptsException if there + have been too many invalid login attempts. Returns the user object if the login was valid. + """ # Make sure we didn't get any unicode for the username. try: @@ -1080,7 +1103,9 @@ def detach_external_login(user, service_name): def get_solely_admined_organizations(user_obj): - """ Returns the organizations admined solely by the given user. """ + """ + Returns the organizations admined solely by the given user. + """ orgs = ( User.select() .where(User.organization == True) @@ -1115,10 +1140,12 @@ def get_solely_admined_organizations(user_obj): def mark_namespace_for_deletion(user, queues, namespace_gc_queue, force=False): - """ Marks a namespace (as referenced by the given user) for deletion. A queue item will be added - to delete the namespace's repositories and storage, while the namespace itself will be - renamed, disabled, and delinked from other tables. - """ + """ + Marks a namespace (as referenced by the given user) for deletion. + + A queue item will be added to delete the namespace's repositories and storage, while the + namespace itself will be renamed, disabled, and delinked from other tables. + """ if not user.enabled: return None @@ -1173,7 +1200,9 @@ def mark_namespace_for_deletion(user, queues, namespace_gc_queue, force=False): def delete_namespace_via_marker(marker_id, queues): - """ Deletes a namespace referenced by the given DeletedNamespace marker ID. """ + """ + Deletes a namespace referenced by the given DeletedNamespace marker ID. + """ try: marker = DeletedNamespace.get(id=marker_id) except DeletedNamespace.DoesNotExist: @@ -1183,9 +1212,12 @@ def delete_namespace_via_marker(marker_id, queues): def delete_user(user, queues): - """ Deletes a user/organization/robot. Should *not* be called by any user-facing API. Instead, - mark_namespace_for_deletion should be used, and the queue should call this method. - """ + """ + Deletes a user/organization/robot. + + Should *not* be called by any user-facing API. Instead, mark_namespace_for_deletion should be + used, and the queue should call this method. + """ # Delete all queue items for the user. for queue in queues: queue.delete_namespaced_items(user.username) @@ -1241,7 +1273,9 @@ def _delete_user_linked_data(user): def get_pull_credentials(robotname): - """ Returns the pull credentials for a robot with the given name. """ + """ + Returns the pull credentials for a robot with the given name. 
+ """ try: robot = lookup_robot(robotname) except InvalidRobotException: @@ -1258,13 +1292,17 @@ def get_pull_credentials(robotname): def get_region_locations(user): - """ Returns the locations defined as preferred storage for the given user. """ + """ + Returns the locations defined as preferred storage for the given user. + """ query = UserRegion.select().join(ImageStorageLocation).where(UserRegion.user == user) return set([region.location.name for region in query]) def get_federated_logins(user_ids, service_name): - """ Returns all federated logins for the given user ids under the given external service. """ + """ + Returns all federated logins for the given user ids under the given external service. + """ if not user_ids: return [] @@ -1278,7 +1316,9 @@ def get_federated_logins(user_ids, service_name): def list_namespace_geo_restrictions(namespace_name): - """ Returns all of the defined geographic restrictions for the given namespace. """ + """ + Returns all of the defined geographic restrictions for the given namespace. + """ return NamespaceGeoRestriction.select().join(User).where(User.username == namespace_name) diff --git a/data/queue.py b/data/queue.py index 846f3b665..92cf4093f 100644 --- a/data/queue.py +++ b/data/queue.py @@ -45,7 +45,9 @@ DEFAULT_BATCH_SIZE = 1000 class WorkQueue(object): - """ Work queue defines methods for interacting with a queue backed by the database. """ + """ + Work queue defines methods for interacting with a queue backed by the database. + """ def __init__( self, queue_name, transaction_factory, canonical_name_match_list=None, has_namespace=False, @@ -95,7 +97,9 @@ class WorkQueue(object): ) def num_alive_jobs(self, canonical_name_list): - """ Returns the number of alive queue items with a given prefix. """ + """ + Returns the number of alive queue items with a given prefix. + """ def strip_slash(name): return name.lstrip("/") @@ -113,7 +117,10 @@ class WorkQueue(object): def num_available_jobs_between( self, available_min_time, available_max_time, canonical_name_list ): - """ Returns the number of available queue items with a given prefix, between the two provided times. """ + """ + Returns the number of available queue items with a given prefix, between the two provided + times. + """ def strip_slash(name): return name.lstrip("/") @@ -159,8 +166,11 @@ class WorkQueue(object): queue_items_available_unlocked.labels(self._queue_name).set(available_not_running_count) def has_retries_remaining(self, item_id): - """ Returns whether the queue item with the given id has any retries remaining. If the - queue item does not exist, returns False. """ + """ + Returns whether the queue item with the given id has any retries remaining. + + If the queue item does not exist, returns False. + """ with self._transaction_factory(db): try: return QueueItem.get(id=item_id).retries_remaining > 0 @@ -168,7 +178,9 @@ class WorkQueue(object): return False def delete_namespaced_items(self, namespace, subpath=None): - """ Deletes all items in this queue that exist under the given namespace. """ + """ + Deletes all items in this queue that exist under the given namespace. + """ if not self._has_namespaced_items: return False @@ -178,9 +190,8 @@ class WorkQueue(object): def alive(self, canonical_name_list): """ - Returns True if a job matching the canonical name list is currently processing - or available. - """ + Returns True if a job matching the canonical name list is currently processing or available. 
+ """ canonical_name = self._canonical_name([self._queue_name] + canonical_name_list) try: select_query = QueueItem.select().where(QueueItem.queue_name == canonical_name) @@ -208,9 +219,11 @@ class WorkQueue(object): def batch_put(canonical_name_list, message, available_after=0, retries_remaining=5): """ - Put an item, if it shouldn't be processed for some number of seconds, - specify that amount as available_after. Returns the ID of the queue item added. - """ + Put an item, if it shouldn't be processed for some number of seconds, specify that + amount as available_after. + + Returns the ID of the queue item added. + """ items_to_insert.append( self._queue_dict(canonical_name_list, message, available_after, retries_remaining) ) @@ -226,8 +239,12 @@ class WorkQueue(object): remaining = remaining[batch_size:] def put(self, canonical_name_list, message, available_after=0, retries_remaining=5): - """ Put an item, if it shouldn't be processed for some number of seconds, - specify that amount as available_after. Returns the ID of the queue item added. """ + """ + Put an item, if it shouldn't be processed for some number of seconds, specify that amount as + available_after. + + Returns the ID of the queue item added. + """ item = QueueItem.create( **self._queue_dict(canonical_name_list, message, available_after, retries_remaining) ) @@ -235,8 +252,11 @@ class WorkQueue(object): return str(item.id) def _select_available_item(self, ordering_required, now): - """ Selects an available queue item from the queue table and returns it, if any. If none, - return None. """ + """ + Selects an available queue item from the queue table and returns it, if any. + + If none, return None. + """ name_match_query = self._name_match_query() try: @@ -267,10 +287,13 @@ class WorkQueue(object): return None def _attempt_to_claim_item(self, db_item, now, processing_time): - """ Attempts to claim the specified queue item for this instance. Returns True on success and False on failure. + """ + Attempts to claim the specified queue item for this instance. Returns True on success and + False on failure. - Note that the underlying QueueItem row in the database will be changed on success, but - the db_item object given as a parameter will *not* have its fields updated. """ + Note that the underlying QueueItem row in the database will be changed on success, but the + db_item object given as a parameter will *not* have its fields updated. + """ # Try to claim the item. We do so by updating the item's information only if its current # state ID matches that returned in the previous query. Since all updates to the QueueItem @@ -292,9 +315,12 @@ class WorkQueue(object): return changed == 1 def get(self, processing_time=300, ordering_required=False): - """ Get an available item and mark it as unavailable for the default of five - minutes. The result of this method must always be composed of simple - python objects which are JSON serializable for network portability reasons. """ + """ + Get an available item and mark it as unavailable for the default of five minutes. + + The result of this method must always be composed of simple python objects which are JSON + serializable for network portability reasons. + """ now = datetime.utcnow() # Select an available queue item. @@ -324,8 +350,11 @@ class WorkQueue(object): ) def cancel(self, item_id): - """ Attempts to cancel the queue item with the given ID from the queue. - Returns true on success and false if the queue item could not be canceled. 
""" + """ + Attempts to cancel the queue item with the given ID from the queue. + + Returns true on success and false if the queue item could not be canceled. + """ count_removed = QueueItem.delete().where(QueueItem.id == item_id).execute() return count_removed > 0 @@ -378,11 +407,11 @@ class WorkQueue(object): def delete_expired(expiration_threshold, deletion_threshold, batch_size): """ - Deletes all queue items that are older than the provided expiration threshold in batches of the - provided size. If there are less items than the deletion threshold, this method does nothing. + Deletes all queue items that are older than the provided expiration threshold in batches of the + provided size. If there are less items than the deletion threshold, this method does nothing. - Returns the number of items deleted. - """ + Returns the number of items deleted. + """ to_delete = list( QueueItem.select() .where(QueueItem.processing_expires <= expiration_threshold) diff --git a/data/readreplica.py b/data/readreplica.py index 1dfb6d26a..2bd15dab9 100644 --- a/data/readreplica.py +++ b/data/readreplica.py @@ -8,15 +8,18 @@ ReadOnlyConfig = namedtuple("ReadOnlyConfig", ["is_readonly", "read_replicas"]) class ReadOnlyModeException(Exception): - """ Exception raised if a write operation was attempted when in read only mode. - """ + """ + Exception raised if a write operation was attempted when in read only mode. + """ class AutomaticFailoverWrapper(object): - """ Class which wraps a peewee database driver and (optionally) a second driver. - When executing SQL, if an OperationalError occurs, if a second driver is given, - the query is attempted again on the fallback DB. Otherwise, the exception is raised. - """ + """ + Class which wraps a peewee database driver and (optionally) a second driver. + + When executing SQL, if an OperationalError occurs, if a second driver is given, the query is + attempted again on the fallback DB. Otherwise, the exception is raised. + """ def __init__(self, primary_db, fallback_db=None): self._primary_db = primary_db @@ -45,20 +48,20 @@ class AutomaticFailoverWrapper(object): class ReadReplicaSupportedModel(Model): - """ Base model for peewee data models that support using a read replica for SELECT - requests not under transactions, and automatic failover to the master if the - read replica fails. + """ + Base model for peewee data models that support using a read replica for SELECT requests not + under transactions, and automatic failover to the master if the read replica fails. - Read-only queries are initially attempted on one of the read replica databases - being used; if an OperationalError occurs when attempting to invoke the query, - then the failure is logged and the query is retried on the database master. + Read-only queries are initially attempted on one of the read replica databases + being used; if an OperationalError occurs when attempting to invoke the query, + then the failure is logged and the query is retried on the database master. - Queries that are non-SELECTs (or under transactions) are always tried on the - master. + Queries that are non-SELECTs (or under transactions) are always tried on the + master. - If the system is configured into read only mode, then all non-read-only queries - will raise a ReadOnlyModeException. - """ + If the system is configured into read only mode, then all non-read-only queries + will raise a ReadOnlyModeException. 
+ """ @classmethod def _read_only_config(cls): @@ -77,9 +80,11 @@ class ReadReplicaSupportedModel(Model): @classmethod def _select_database(cls): - """ Selects a read replica database if we're configured to support read replicas. + """ + Selects a read replica database if we're configured to support read replicas. + Otherwise, selects the master database. - """ + """ # Select the master DB if read replica support is not enabled. read_only_config = cls._read_only_config() if not read_only_config.read_replicas: diff --git a/data/registry_model/blobuploader.py b/data/registry_model/blobuploader.py index 4240d8e47..56719dce7 100644 --- a/data/registry_model/blobuploader.py +++ b/data/registry_model/blobuploader.py @@ -34,19 +34,27 @@ BLOB_CONTENT_TYPE = "application/octet-stream" class BlobUploadException(Exception): - """ Base for all exceptions raised when uploading blobs. """ + """ + Base for all exceptions raised when uploading blobs. + """ class BlobRangeMismatchException(BlobUploadException): - """ Exception raised if the range to be uploaded does not match. """ + """ + Exception raised if the range to be uploaded does not match. + """ class BlobDigestMismatchException(BlobUploadException): - """ Exception raised if the digest requested does not match that of the contents uploaded. """ + """ + Exception raised if the digest requested does not match that of the contents uploaded. + """ class BlobTooLargeException(BlobUploadException): - """ Exception raised if the data uploaded exceeds the maximum_blob_size. """ + """ + Exception raised if the data uploaded exceeds the maximum_blob_size. + """ def __init__(self, uploaded, max_allowed): super(BlobTooLargeException, self).__init__() @@ -61,9 +69,12 @@ BlobUploadSettings = namedtuple( def create_blob_upload(repository_ref, storage, settings, extra_blob_stream_handlers=None): - """ Creates a new blob upload in the specified repository and returns a manager for interacting - with that upload. Returns None if a new blob upload could not be started. - """ + """ + Creates a new blob upload in the specified repository and returns a manager for interacting with + that upload. + + Returns None if a new blob upload could not be started. + """ location_name = storage.preferred_locations[0] new_upload_uuid, upload_metadata = storage.initiate_chunked_upload(location_name) blob_upload = registry_model.create_blob_upload( @@ -78,9 +89,10 @@ def create_blob_upload(repository_ref, storage, settings, extra_blob_stream_hand def retrieve_blob_upload_manager(repository_ref, blob_upload_id, storage, settings): - """ Retrieves the manager for an in-progress blob upload with the specified ID under the given - repository or None if none. - """ + """ + Retrieves the manager for an in-progress blob upload with the specified ID under the given + repository or None if none. + """ blob_upload = registry_model.lookup_blob_upload(repository_ref, blob_upload_id) if blob_upload is None: return None @@ -90,9 +102,10 @@ def retrieve_blob_upload_manager(repository_ref, blob_upload_id, storage, settin @contextmanager def complete_when_uploaded(blob_upload): - """ Wraps the given blob upload in a context manager that completes the upload when the context - closes. - """ + """ + Wraps the given blob upload in a context manager that completes the upload when the context + closes. 
+ """ try: yield blob_upload except Exception as ex: @@ -106,10 +119,13 @@ def complete_when_uploaded(blob_upload): @contextmanager def upload_blob(repository_ref, storage, settings, extra_blob_stream_handlers=None): - """ Starts a new blob upload in the specified repository and yields a manager for interacting - with that upload. When the context manager completes, the blob upload is deleted, whether - committed to a blob or not. Yields None if a blob upload could not be started. - """ + """ + Starts a new blob upload in the specified repository and yields a manager for interacting with + that upload. + + When the context manager completes, the blob upload is deleted, whether committed to a blob or + not. Yields None if a blob upload could not be started. + """ created = create_blob_upload(repository_ref, storage, settings, extra_blob_stream_handlers) if not created: yield None @@ -127,8 +143,10 @@ def upload_blob(repository_ref, storage, settings, extra_blob_stream_handlers=No class _BlobUploadManager(object): - """ Defines a helper class for easily interacting with blob uploads in progress, including - handling of database and storage calls. """ + """ + Defines a helper class for easily interacting with blob uploads in progress, including handling + of database and storage calls. + """ def __init__( self, repository_ref, blob_upload, settings, storage, extra_blob_stream_handlers=None @@ -145,15 +163,19 @@ class _BlobUploadManager(object): @property def blob_upload_id(self): - """ Returns the unique ID for the blob upload. """ + """ + Returns the unique ID for the blob upload. + """ return self.blob_upload.upload_id def upload_chunk(self, app_config, input_fp, start_offset=0, length=-1): - """ Uploads a chunk of data found in the given input file-like interface. start_offset and - length are optional and should match a range header if any was given. + """ + Uploads a chunk of data found in the given input file-like interface. start_offset and + length are optional and should match a range header if any was given. - Returns the total number of bytes uploaded after this upload has completed. Raises - a BlobUploadException if the upload failed. """ + Returns the total number of bytes uploaded after this upload has completed. Raises a + BlobUploadException if the upload failed. + """ assert start_offset is not None assert length is not None @@ -271,7 +293,9 @@ class _BlobUploadManager(object): return new_blob_bytes def cancel_upload(self): - """ Cancels the blob upload, deleting any data uploaded and removing the upload itself. """ + """ + Cancels the blob upload, deleting any data uploaded and removing the upload itself. + """ if self.blob_upload is None: return @@ -286,12 +310,14 @@ class _BlobUploadManager(object): registry_model.delete_blob_upload(self.blob_upload) def commit_to_blob(self, app_config, expected_digest=None): - """ Commits the blob upload to a blob under the repository. The resulting blob will be marked - to not be GCed for some period of time (as configured by `committed_blob_expiration`). + """ + Commits the blob upload to a blob under the repository. The resulting blob will be marked to + not be GCed for some period of time (as configured by `committed_blob_expiration`). - If expected_digest is specified, the content digest of the data uploaded for the blob is - compared to that given and, if it does not match, a BlobDigestMismatchException is - raised. The digest given must be of type `Digest` and not a string. 
""" + If expected_digest is specified, the content digest of the data uploaded for the blob is + compared to that given and, if it does not match, a BlobDigestMismatchException is raised. + The digest given must be of type `Digest` and not a string. + """ # Compare the content digest. if expected_digest is not None: self._validate_digest(expected_digest) @@ -322,7 +348,9 @@ class _BlobUploadManager(object): return blob def _validate_digest(self, expected_digest): - """ Verifies that the digest's SHA matches that of the uploaded data. """ + """ + Verifies that the digest's SHA matches that of the uploaded data. + """ try: computed_digest = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state) if not digest_tools.digests_equal(computed_digest, expected_digest): @@ -337,9 +365,12 @@ class _BlobUploadManager(object): raise BlobDigestMismatchException() def _finalize_blob_storage(self, app_config): - """ When an upload is successful, this ends the uploading process from the storage's perspective. + """ + When an upload is successful, this ends the uploading process from the storage's + perspective. - Returns True if the blob already existed. """ + Returns True if the blob already existed. + """ computed_digest = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state) final_blob_location = digest_tools.content_path(computed_digest) diff --git a/data/registry_model/datatype.py b/data/registry_model/datatype.py index 01b7c3a6a..6525f8c5f 100644 --- a/data/registry_model/datatype.py +++ b/data/registry_model/datatype.py @@ -4,15 +4,16 @@ from functools import wraps, total_ordering class FromDictionaryException(Exception): - """ Exception raised if constructing a data type from a dictionary fails due to - missing data. - """ + """ + Exception raised if constructing a data type from a dictionary fails due to missing data. + """ def datatype(name, static_fields): - """ Defines a base class for a datatype that will represent a row from the database, - in an abstracted form. - """ + """ + Defines a base class for a datatype that will represent a row from the database, in an + abstracted form. + """ @total_ordering class DataType(object): @@ -60,7 +61,9 @@ def datatype(name, static_fields): def requiresinput(input_name): - """ Marks a property on the data type as requiring an input to be invoked. """ + """ + Marks a property on the data type as requiring an input to be invoked. + """ def inner(func): @wraps(func) @@ -78,7 +81,9 @@ def requiresinput(input_name): def optionalinput(input_name): - """ Marks a property on the data type as having an input be optional when invoked. """ + """ + Marks a property on the data type as having an input be optional when invoked. + """ def inner(func): @wraps(func) diff --git a/data/registry_model/datatypes.py b/data/registry_model/datatypes.py index e0aeddc29..1588542b0 100644 --- a/data/registry_model/datatypes.py +++ b/data/registry_model/datatypes.py @@ -16,7 +16,9 @@ from util.bytes import Bytes class RepositoryReference(datatype("Repository", [])): - """ RepositoryReference is a reference to a repository, passed to registry interface methods. """ + """ + RepositoryReference is a reference to a repository, passed to registry interface methods. + """ @classmethod def for_repo_obj( @@ -61,13 +63,17 @@ class RepositoryReference(datatype("Repository", [])): @property @optionalinput("kind") def kind(self, kind): - """ Returns the kind of the repository. """ + """ + Returns the kind of the repository. 
+ """ return kind or model.repository.get_repo_kind_name(self._repositry_obj) @property @optionalinput("is_public") def is_public(self, is_public): - """ Returns whether the repository is public. """ + """ + Returns whether the repository is public. + """ if is_public is not None: return is_public @@ -75,7 +81,9 @@ class RepositoryReference(datatype("Repository", [])): @property def trust_enabled(self): - """ Returns whether trust is enabled in this repository. """ + """ + Returns whether trust is enabled in this repository. + """ repository = self._repository_obj if repository is None: return None @@ -84,14 +92,17 @@ class RepositoryReference(datatype("Repository", [])): @property def id(self): - """ Returns the database ID of the repository. """ + """ + Returns the database ID of the repository. + """ return self._db_id @property @optionalinput("namespace_name") def namespace_name(self, namespace_name=None): - """ Returns the namespace name of this repository. - """ + """ + Returns the namespace name of this repository. + """ if namespace_name is not None: return namespace_name @@ -104,8 +115,9 @@ class RepositoryReference(datatype("Repository", [])): @property @optionalinput("is_free_namespace") def is_free_namespace(self, is_free_namespace=None): - """ Returns whether the namespace of the repository is on a free plan. - """ + """ + Returns whether the namespace of the repository is on a free plan. + """ if is_free_namespace is not None: return is_free_namespace @@ -118,8 +130,9 @@ class RepositoryReference(datatype("Repository", [])): @property @optionalinput("repo_name") def name(self, repo_name=None): - """ Returns the name of this repository. - """ + """ + Returns the name of this repository. + """ if repo_name is not None: return repo_name @@ -132,7 +145,9 @@ class RepositoryReference(datatype("Repository", [])): @property @optionalinput("state") def state(self, state=None): - """ Return the state of the Repository. """ + """ + Return the state of the Repository. + """ if state is not None: return state @@ -144,7 +159,9 @@ class RepositoryReference(datatype("Repository", [])): class Label(datatype("Label", ["key", "value", "uuid", "source_type_name", "media_type_name"])): - """ Label represents a label on a manifest. """ + """ + Label represents a label on a manifest. + """ @classmethod def for_label(cls, label): @@ -162,7 +179,9 @@ class Label(datatype("Label", ["key", "value", "uuid", "source_type_name", "medi class ShallowTag(datatype("ShallowTag", ["name"])): - """ ShallowTag represents a tag in a repository, but only contains basic information. """ + """ + ShallowTag represents a tag in a repository, but only contains basic information. + """ @classmethod def for_tag(cls, tag): @@ -180,7 +199,9 @@ class ShallowTag(datatype("ShallowTag", ["name"])): @property def id(self): - """ The ID of this tag for pagination purposes only. """ + """ + The ID of this tag for pagination purposes only. + """ return self._db_id @@ -198,7 +219,9 @@ class Tag( ], ) ): - """ Tag represents a tag in a repository, which points to a manifest or image. """ + """ + Tag represents a tag in a repository, which points to a manifest or image. + """ @classmethod def for_tag(cls, tag, legacy_image=None): @@ -246,47 +269,65 @@ class Tag( @property @requiresinput("manifest") def _manifest(self, manifest): - """ Returns the manifest for this tag. Will only apply to new-style OCI tags. """ + """ + Returns the manifest for this tag. + + Will only apply to new-style OCI tags. 
+ """ return manifest @property @optionalinput("manifest") def manifest(self, manifest): - """ Returns the manifest for this tag or None if none. Will only apply to new-style OCI tags. - """ + """ + Returns the manifest for this tag or None if none. + + Will only apply to new-style OCI tags. + """ return Manifest.for_manifest(manifest, self.legacy_image_if_present) @property @requiresinput("repository") def repository(self, repository): - """ Returns the repository under which this tag lives. - """ + """ + Returns the repository under which this tag lives. + """ return repository @property @requiresinput("legacy_image") def legacy_image(self, legacy_image): - """ Returns the legacy Docker V1-style image for this tag. Note that this - will be None for tags whose manifests point to other manifests instead of images. - """ + """ + Returns the legacy Docker V1-style image for this tag. + + Note that this will be None for tags whose manifests point to other manifests instead of + images. + """ return legacy_image @property @optionalinput("legacy_image") def legacy_image_if_present(self, legacy_image): - """ Returns the legacy Docker V1-style image for this tag. Note that this - will be None for tags whose manifests point to other manifests instead of images. - """ + """ + Returns the legacy Docker V1-style image for this tag. + + Note that this will be None for tags whose manifests point to other manifests instead of + images. + """ return legacy_image @property def id(self): - """ The ID of this tag for pagination purposes only. """ + """ + The ID of this tag for pagination purposes only. + """ return self._db_id class Manifest(datatype("Manifest", ["digest", "media_type", "internal_manifest_bytes"])): - """ Manifest represents a manifest in a repository. """ + """ + Manifest represents a manifest in a repository. + """ @classmethod def for_tag_manifest(cls, tag_manifest, legacy_image=None): @@ -328,20 +369,25 @@ class Manifest(datatype("Manifest", ["digest", "media_type", "internal_manifest_ @property @requiresinput("legacy_image") def legacy_image(self, legacy_image): - """ Returns the legacy Docker V1-style image for this manifest. - """ + """ + Returns the legacy Docker V1-style image for this manifest. + """ return legacy_image @property @optionalinput("legacy_image") def legacy_image_if_present(self, legacy_image): - """ Returns the legacy Docker V1-style image for this manifest. Note that this - will be None for manifests that point to other manifests instead of images. - """ + """ + Returns the legacy Docker V1-style image for this manifest. + + Note that this will be None for manifests that point to other manifests instead of images. + """ return legacy_image def get_parsed_manifest(self, validate=True): - """ Returns the parsed manifest for this manifest. """ + """ + Returns the parsed manifest for this manifest. + """ assert self.internal_manifest_bytes return parse_manifest_from_bytes( self.internal_manifest_bytes, self.media_type, validate=validate @@ -349,9 +395,10 @@ class Manifest(datatype("Manifest", ["digest", "media_type", "internal_manifest_ @property def layers_compressed_size(self): - """ Returns the total compressed size of the layers in the manifest or None if this could not - be computed. - """ + """ + Returns the total compressed size of the layers in the manifest or None if this could not be + computed. 
+ """ try: return self.get_parsed_manifest().layers_compressed_size except ManifestException: @@ -359,7 +406,9 @@ class Manifest(datatype("Manifest", ["digest", "media_type", "internal_manifest_ @property def is_manifest_list(self): - """ Returns True if this manifest points to a list (instead of an image). """ + """ + Returns True if this manifest points to a list (instead of an image). + """ return self.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE @@ -378,7 +427,9 @@ class LegacyImage( ], ) ): - """ LegacyImage represents a Docker V1-style image found in a repository. """ + """ + LegacyImage represents a Docker V1-style image found in a repository. + """ @classmethod def for_image(cls, image, images_map=None, tags_map=None, blob=None): @@ -405,17 +456,21 @@ class LegacyImage( @property def id(self): - """ Returns the database ID of the legacy image. """ + """ + Returns the database ID of the legacy image. + """ return self._db_id @property @requiresinput("images_map") @requiresinput("ancestor_id_list") def parents(self, images_map, ancestor_id_list): - """ Returns the parent images for this image. Raises an exception if the parents have - not been loaded before this property is invoked. Parents are returned starting at the - leaf image. - """ + """ + Returns the parent images for this image. + + Raises an exception if the parents have not been loaded before this property is invoked. + Parents are returned starting at the leaf image. + """ return [ LegacyImage.for_image(images_map[ancestor_id], images_map=images_map) for ancestor_id in reversed(ancestor_id_list) @@ -425,17 +480,21 @@ class LegacyImage( @property @requiresinput("blob") def blob(self, blob): - """ Returns the blob for this image. Raises an exception if the blob has - not been loaded before this property is invoked. - """ + """ + Returns the blob for this image. + + Raises an exception if the blob has not been loaded before this property is invoked. + """ return blob @property @requiresinput("tags_map") def tags(self, tags_map): - """ Returns the tags pointing to this image. Raises an exception if the tags have - not been loaded before this property is invoked. - """ + """ + Returns the tags pointing to this image. + + Raises an exception if the tags have not been loaded before this property is invoked. + """ tags = tags_map.get(self._db_id) if not tags: return [] @@ -445,7 +504,9 @@ class LegacyImage( @unique class SecurityScanStatus(Enum): - """ Security scan status enum """ + """ + Security scan status enum. + """ SCANNED = "scanned" FAILED = "failed" @@ -454,17 +515,21 @@ class SecurityScanStatus(Enum): class ManifestLayer(namedtuple("ManifestLayer", ["layer_info", "blob"])): - """ Represents a single layer in a manifest. The `layer_info` data will be manifest-type specific, - but will have a few expected fields (such as `digest`). The `blob` represents the associated - blob for this layer, optionally with placements. If the layer is a remote layer, the blob will - be None. - """ + """ + Represents a single layer in a manifest. + + The `layer_info` data will be manifest-type specific, but will have a few expected fields (such + as `digest`). The `blob` represents the associated blob for this layer, optionally with + placements. If the layer is a remote layer, the blob will be None. + """ def estimated_size(self, estimate_multiplier): - """ Returns the estimated size of this layer. If the layers' blob has an uncompressed size, - it is used. 
Otherwise, the compressed_size field in the layer is multiplied by the
-            multiplier.
-        """
+        """
+        Returns the estimated size of this layer.
+
+        If the layer's blob has an uncompressed size, it is used. Otherwise, the compressed_size
+        field in the layer is multiplied by the multiplier.
+        """
         if self.blob.uncompressed_size:
             return self.blob.uncompressed_size

@@ -474,7 +539,9 @@ class ManifestLayer(namedtuple("ManifestLayer", ["layer_info", "blob"])):
 class Blob(
     datatype("Blob", ["uuid", "digest", "compressed_size", "uncompressed_size", "uploading"])
 ):
-    """ Blob represents a content-addressable piece of storage. """
+    """
+    Blob represents a content-addressable piece of storage.
+    """

     @classmethod
     def for_image_storage(cls, image_storage, storage_path, placements=None):
@@ -494,19 +561,25 @@ class Blob(
     @property
     @requiresinput("storage_path")
     def storage_path(self, storage_path):
-        """ Returns the path of this blob in storage. """
+        """
+        Returns the path of this blob in storage.
+        """
         # TODO: change this to take in the storage engine?
         return storage_path

     @property
     @requiresinput("placements")
     def placements(self, placements):
-        """ Returns all the storage placements at which the Blob can be found. """
+        """
+        Returns all the storage placements at which the Blob can be found.
+        """
         return placements

 class DerivedImage(datatype("DerivedImage", ["verb", "varying_metadata", "blob"])):
-    """ DerivedImage represents an image derived from a manifest via some form of verb. """
+    """
+    DerivedImage represents an image derived from a manifest via some form of verb.
+    """

     @classmethod
     def for_derived_storage(cls, derived, verb, varying_metadata, blob):
@@ -516,14 +589,18 @@ class DerivedImage(datatype("DerivedImage", ["verb", "varying_metadata", "blob"]
     @property
     def unique_id(self):
-        """ Returns a unique ID for this derived image. This call will consistently produce the same
-            unique ID across calls in the same code base.
-        """
+        """
+        Returns a unique ID for this derived image.
+
+        This call will consistently produce the same unique ID across calls in the same code base.
+        """
         return hashlib.sha256("%s:%s" % (self.verb, self._db_id)).hexdigest()

 class TorrentInfo(datatype("TorrentInfo", ["pieces", "piece_length"])):
-    """ TorrentInfo represents information to pull a blob via torrent. """
+    """
+    TorrentInfo represents information to pull a blob via torrent.
+    """

     @classmethod
     def for_torrent_info(cls, torrent_info):
@@ -550,7 +627,9 @@ class BlobUpload(
     datatype(
         ],
     )
 ):
-    """ BlobUpload represents information about an in-progress upload to create a blob. """
+    """
+    BlobUpload represents information about an in-progress upload to create a blob.
+    """

     @classmethod
     def for_upload(cls, blob_upload, location_name=None):
@@ -569,9 +648,10 @@ class BlobUpload(

 class LikelyVulnerableTag(datatype("LikelyVulnerableTag", ["layer_id", "name"])):
-    """ LikelyVulnerableTag represents a tag in a repository that is likely vulnerable to a notified
-        vulnerability.
-    """
+    """
+    LikelyVulnerableTag represents a tag in a repository that is likely vulnerable to a notified
+    vulnerability.
+    """

     # TODO: Remove all of this once we're on the new security model exclusively.
     @classmethod
diff --git a/data/registry_model/interface.py b/data/registry_model/interface.py
index 3ead2f22c..293e67cf1 100644
--- a/data/registry_model/interface.py
+++ b/data/registry_model/interface.py
@@ -4,48 +4,63 @@ from six import add_metaclass

 @add_metaclass(ABCMeta)
 class RegistryDataInterface(object):
-    """ Interface for code to work with the registry data model. The registry data model consists
-        of all tables that store registry-specific information, such as Manifests, Blobs, Images,
-        and Labels.
-    """
+    """
+    Interface for code to work with the registry data model.
+
+    The registry data model consists of all tables that store registry-specific information, such as
+    Manifests, Blobs, Images, and Labels.
+    """

     @abstractmethod
     def supports_schema2(self, namespace_name):
-        """ Returns whether the implementation of the data interface supports schema 2 format
-            manifests. """
+        """
+        Returns whether the implementation of the data interface supports schema 2 format manifests.
+        """

     @abstractmethod
     def get_tag_legacy_image_id(self, repository_ref, tag_name, storage):
-        """ Returns the legacy image ID for the tag with a legacy images in
-            the repository. Returns None if None.
-        """
+        """
+        Returns the legacy image ID for the tag with a legacy image in the repository.
+
+        Returns None if none.
+        """

     @abstractmethod
     def get_legacy_tags_map(self, repository_ref, storage):
-        """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in
-            the repository. Note that this can be a *very* heavy operation.
-        """
+        """
+        Returns a map from tag name to its legacy image ID, for all tags with legacy images in the
+        repository.
+
+        Note that this can be a *very* heavy operation.
+        """

     @abstractmethod
     def find_matching_tag(self, repository_ref, tag_names):
-        """ Finds an alive tag in the repository matching one of the given tag names and returns it
-            or None if none.
-        """
+        """
+        Finds an alive tag in the repository matching one of the given tag names and returns it or
+        None if none.
+        """

     @abstractmethod
     def get_most_recent_tag(self, repository_ref):
-        """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
-            None.
-        """
+        """
+        Returns the most recently pushed alive tag in the repository, if any.
+
+        If none, returns None.
+        """

     @abstractmethod
     def lookup_repository(self, namespace_name, repo_name, kind_filter=None):
-        """ Looks up and returns a reference to the repository with the given namespace and name,
-            or None if none. """
+        """
+        Looks up and returns a reference to the repository with the given namespace and name, or
+        None if none.
+        """

     @abstractmethod
     def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):
-        """ Returns the manifest associated with the given tag. """
+        """
+        Returns the manifest associated with the given tag.
+        """

     @abstractmethod
     def lookup_manifest_by_digest(
         self,
@@ -56,18 +71,22 @@
         allow_dead=False,
         include_legacy_image=False,
         require_available=False,
     ):
-        """ Looks up the manifest with the given digest under the given repository and returns it
-            or None if none. If allow_dead is True, manifests pointed to by dead tags will also
-            be returned. If require_available is True, a temporary tag will be added onto the
-            returned manifest (before it is returned) to ensure it is available until another
-            tagging or manifest operation is taken.
-        """
+        """
+        Looks up the manifest with the given digest under the given repository and returns it or
+        None if none.
+
+        If allow_dead is True, manifests pointed to by dead tags will also be returned. If
+        require_available is True, a temporary tag will be added onto the returned manifest (before
+        it is returned) to ensure it is available until another tagging or manifest operation is
+        taken.
+        """
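A sketch of how a caller might drive this lookup through the concrete registry_model implementation (the repository names are illustrative):

    repo_ref = registry_model.lookup_repository("devtable", "simple")
    if repo_ref is not None:
        # digest: a "sha256:..." string; require_available behaves as documented above.
        manifest = registry_model.lookup_manifest_by_digest(repo_ref, digest, require_available=True)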
- """ + """ + Looks up the manifest with the given digest under the given repository and returns it or + None if none. + + If allow_dead is True, manifests pointed to by dead tags will also be returned. If + require_available is True, a temporary tag will be added onto the returned manifest (before + it is returned) to ensure it is available until another tagging or manifest operation is + taken. + """ @abstractmethod def create_manifest_and_retarget_tag( self, repository_ref, manifest_interface_instance, tag_name, storage, raise_on_error=False ): - """ Creates a manifest in a repository, adding all of the necessary data in the model. + """ + Creates a manifest in a repository, adding all of the necessary data in the model. The `manifest_interface_instance` parameter must be an instance of the manifest interface as returned by the image/docker package. @@ -76,79 +95,94 @@ class RegistryDataInterface(object): method will fail and return None. Returns a reference to the (created manifest, tag) or (None, None) on error. - """ + """ @abstractmethod def get_legacy_images(self, repository_ref): """ - Returns an iterator of all the LegacyImage's defined in the matching repository. - """ + Returns an iterator of all the LegacyImage's defined in the matching repository. + """ @abstractmethod def get_legacy_image( self, repository_ref, docker_image_id, include_parents=False, include_blob=False ): """ - Returns the matching LegacyImages under the matching repository, if any. If none, - returns None. - """ + Returns the matching LegacyImages under the matching repository, if any. + + If none, returns None. + """ @abstractmethod def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None): - """ Creates a label on the manifest with the given key and value. + """ + Creates a label on the manifest with the given key and value. - Can raise InvalidLabelKeyException or InvalidMediaTypeException depending - on the validation errors. - """ + Can raise InvalidLabelKeyException or InvalidMediaTypeException depending on the validation + errors. + """ @abstractmethod def batch_create_manifest_labels(self, manifest): - """ Returns a context manager for batch creation of labels on a manifest. + """ + Returns a context manager for batch creation of labels on a manifest. - Can raise InvalidLabelKeyException or InvalidMediaTypeException depending - on the validation errors. - """ + Can raise InvalidLabelKeyException or InvalidMediaTypeException depending on the validation + errors. + """ @abstractmethod def list_manifest_labels(self, manifest, key_prefix=None): - """ Returns all labels found on the manifest. If specified, the key_prefix will filter the - labels returned to those keys that start with the given prefix. - """ + """ + Returns all labels found on the manifest. + + If specified, the key_prefix will filter the labels returned to those keys that start with + the given prefix. + """ @abstractmethod def get_manifest_label(self, manifest, label_uuid): - """ Returns the label with the specified UUID on the manifest or None if none. """ + """ + Returns the label with the specified UUID on the manifest or None if none. + """ @abstractmethod def delete_manifest_label(self, manifest, label_uuid): - """ Delete the label with the specified UUID on the manifest. Returns the label deleted - or None if none. - """ + """ + Delete the label with the specified UUID on the manifest. + + Returns the label deleted or None if none. 
+ """ @abstractmethod def lookup_cached_active_repository_tags( self, model_cache, repository_ref, start_pagination_id, limit ): """ - Returns a page of active tags in a repository. Note that the tags returned by this method - are ShallowTag objects, which only contain the tag name. This method will automatically cache - the result and check the cache before making a call. - """ + Returns a page of active tags in a repository. + + Note that the tags returned by this method are ShallowTag objects, which only contain the + tag name. This method will automatically cache the result and check the cache before making + a call. + """ @abstractmethod def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit): """ - Returns a page of active tags in a repository. Note that the tags returned by this method - are ShallowTag objects, which only contain the tag name. - """ + Returns a page of active tags in a repository. + + Note that the tags returned by this method are ShallowTag objects, which only contain the + tag name. + """ @abstractmethod def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False): """ - Returns a list of all the active tags in the repository. Note that this is a *HEAVY* - operation on repositories with a lot of tags, and should only be used for testing or - where other more specific operations are not possible. - """ + Returns a list of all the active tags in the repository. + + Note that this is a *HEAVY* operation on repositories with a lot of tags, and should only be + used for testing or where other more specific operations are not possible. + """ @abstractmethod def list_repository_tag_history( @@ -161,29 +195,32 @@ class RegistryDataInterface(object): since_time_ms=None, ): """ - Returns the history of all tags in the repository (unless filtered). This includes tags that - have been made in-active due to newer versions of those tags coming into service. - """ + Returns the history of all tags in the repository (unless filtered). + + This includes tags that have been made in-active due to newer versions of those tags coming + into service. + """ @abstractmethod def get_most_recent_tag_lifetime_start(self, repository_refs): - """ - Returns a map from repository ID to the last modified time ( seconds from epoch, UTC) - for each repository in the given repository reference list. - """ + """ + Returns a map from repository ID to the last modified time ( seconds from epoch, UTC) for + each repository in the given repository reference list. + """ @abstractmethod def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False): """ - Returns the latest, *active* tag found in the repository, with the matching name - or None if none. - """ + Returns the latest, *active* tag found in the repository, with the matching name or None if + none. + """ @abstractmethod def has_expired_tag(self, repository_ref, tag_name): """ - Returns true if and only if the repository contains a tag with the given name that is expired. - """ + Returns true if and only if the repository contains a tag with the given name that is + expired. + """ @abstractmethod def retarget_tag( @@ -196,92 +233,115 @@ class RegistryDataInterface(object): is_reversion=False, ): """ - Creates, updates or moves a tag to a new entry in history, pointing to the manifest or - legacy image specified. If is_reversion is set to True, this operation is considered a - reversion over a previous tag move operation. Returns the updated Tag or None on error. 
- """ + Creates, updates or moves a tag to a new entry in history, pointing to the manifest or + legacy image specified. + + If is_reversion is set to True, this operation is considered a reversion over a previous tag + move operation. Returns the updated Tag or None on error. + """ @abstractmethod def delete_tag(self, repository_ref, tag_name): """ - Deletes the latest, *active* tag with the given name in the repository. - """ + Deletes the latest, *active* tag with the given name in the repository. + """ @abstractmethod def delete_tags_for_manifest(self, manifest): """ - Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling. - Returns the tags deleted, if any. Returns None on error. - """ + Deletes all tags pointing to the given manifest, making the manifest inaccessible for + pulling. + + Returns the tags deleted, if any. Returns None on error. + """ @abstractmethod def change_repository_tag_expiration(self, tag, expiration_date): - """ Sets the expiration date of the tag under the matching repository to that given. If the - expiration date is None, then the tag will not expire. Returns a tuple of the previous - expiration timestamp in seconds (if any), and whether the operation succeeded. - """ + """ + Sets the expiration date of the tag under the matching repository to that given. + + If the expiration date is None, then the tag will not expire. Returns a tuple of the + previous expiration timestamp in seconds (if any), and whether the operation succeeded. + """ @abstractmethod def get_legacy_images_owned_by_tag(self, tag): - """ Returns all legacy images *solely owned and used* by the given tag. """ + """ + Returns all legacy images *solely owned and used* by the given tag. + """ @abstractmethod def get_security_status(self, manifest_or_legacy_image): - """ Returns the security status for the given manifest or legacy image or None if none. """ + """ + Returns the security status for the given manifest or legacy image or None if none. + """ @abstractmethod def reset_security_status(self, manifest_or_legacy_image): - """ Resets the security status for the given manifest or legacy image, ensuring that it will - get re-indexed. - """ + """ + Resets the security status for the given manifest or legacy image, ensuring that it will get + re-indexed. + """ @abstractmethod def backfill_manifest_for_tag(self, tag): - """ Backfills a manifest for the V1 tag specified. - If a manifest already exists for the tag, returns that manifest. + """ + Backfills a manifest for the V1 tag specified. If a manifest already exists for the tag, + returns that manifest. NOTE: This method will only be necessary until we've completed the backfill, at which point it should be removed. - """ + """ @abstractmethod def is_existing_disabled_namespace(self, namespace_name): - """ Returns whether the given namespace exists and is disabled. """ + """ + Returns whether the given namespace exists and is disabled. + """ @abstractmethod def is_namespace_enabled(self, namespace_name): - """ Returns whether the given namespace exists and is enabled. """ + """ + Returns whether the given namespace exists and is enabled. + """ @abstractmethod def get_manifest_local_blobs(self, manifest, include_placements=False): - """ Returns the set of local blobs for the given manifest or None if none. """ + """ + Returns the set of local blobs for the given manifest or None if none. 
+ """ @abstractmethod def list_manifest_layers(self, manifest, storage, include_placements=False): - """ Returns an *ordered list* of the layers found in the manifest, starting at the base - and working towards the leaf, including the associated Blob and its placements - (if specified). The layer information in `layer_info` will be of type + """ + Returns an *ordered list* of the layers found in the manifest, starting at the base and + working towards the leaf, including the associated Blob and its placements (if specified). + + The layer information in `layer_info` will be of type `image.docker.types.ManifestImageLayer`. Should not be called for a manifest list. - """ + """ @abstractmethod def list_parsed_manifest_layers( self, repository_ref, parsed_manifest, storage, include_placements=False ): - """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base - and working towards the leaf, including the associated Blob and its placements - (if specified). The layer information in `layer_info` will be of type + """ + Returns an *ordered list* of the layers found in the parsed manifest, starting at the base + and working towards the leaf, including the associated Blob and its placements (if + specified). + + The layer information in `layer_info` will be of type `image.docker.types.ManifestImageLayer`. Should not be called for a manifest list. - """ + """ @abstractmethod def lookup_derived_image( self, manifest, verb, storage, varying_metadata=None, include_placements=False ): """ - Looks up the derived image for the given manifest, verb and optional varying metadata and - returns it or None if none. - """ + Looks up the derived image for the given manifest, verb and optional varying metadata and + returns it or None if none. + """ @abstractmethod def lookup_or_create_derived_image( @@ -294,64 +354,72 @@ class RegistryDataInterface(object): include_placements=False, ): """ - Looks up the derived image for the given maniest, verb and optional varying metadata - and returns it. If none exists, a new derived image is created. - """ + Looks up the derived image for the given maniest, verb and optional varying metadata and + returns it. + + If none exists, a new derived image is created. + """ @abstractmethod def get_derived_image_signature(self, derived_image, signer_name): """ - Returns the signature associated with the derived image and a specific signer or None if none. - """ + Returns the signature associated with the derived image and a specific signer or None if + none. + """ @abstractmethod def set_derived_image_signature(self, derived_image, signer_name, signature): """ - Sets the calculated signature for the given derived image and signer to that specified. - """ + Sets the calculated signature for the given derived image and signer to that specified. + """ @abstractmethod def delete_derived_image(self, derived_image): """ - Deletes a derived image and all of its storage. - """ + Deletes a derived image and all of its storage. + """ @abstractmethod def set_derived_image_size(self, derived_image, compressed_size): """ - Sets the compressed size on the given derived image. - """ + Sets the compressed size on the given derived image. + """ @abstractmethod def get_torrent_info(self, blob): """ - Returns the torrent information associated with the given blob or None if none. - """ + Returns the torrent information associated with the given blob or None if none. 
+ """ @abstractmethod def set_torrent_info(self, blob, piece_length, pieces): """ - Sets the torrent infomation associated with the given blob to that specified. - """ + Sets the torrent infomation associated with the given blob to that specified. + """ @abstractmethod def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False): """ - Returns the blob in the repository with the given digest, if any or None if none. Note that - there may be multiple records in the same repository for the same blob digest, so the return - value of this function may change. - """ + Returns the blob in the repository with the given digest, if any or None if none. + + Note that there may be multiple records in the same repository for the same blob digest, so + the return value of this function may change. + """ @abstractmethod def create_blob_upload(self, repository_ref, upload_id, location_name, storage_metadata): - """ Creates a new blob upload and returns a reference. If the blob upload could not be - created, returns None. """ + """ + Creates a new blob upload and returns a reference. + + If the blob upload could not be created, returns None. + """ @abstractmethod def lookup_blob_upload(self, repository_ref, blob_upload_id): - """ Looks up the blob upload with the given ID under the specified repository and returns it - or None if none. - """ + """ + Looks up the blob upload with the given ID under the specified repository and returns it or + None if none. + """ @abstractmethod def update_blob_upload( @@ -365,65 +433,81 @@ class RegistryDataInterface(object): chunk_count, sha_state, ): - """ Updates the fields of the blob upload to match those given. Returns the updated blob upload - or None if the record does not exists. - """ + """ + Updates the fields of the blob upload to match those given. + + Returns the updated blob upload or None if the record does not exists. + """ @abstractmethod def delete_blob_upload(self, blob_upload): - """ Deletes a blob upload record. """ + """ + Deletes a blob upload record. + """ @abstractmethod def commit_blob_upload(self, blob_upload, blob_digest_str, blob_expiration_seconds): - """ Commits the blob upload into a blob and sets an expiration before that blob will be GCed. - """ + """ + Commits the blob upload into a blob and sets an expiration before that blob will be GCed. + """ @abstractmethod def mount_blob_into_repository(self, blob, target_repository_ref, expiration_sec): """ - Mounts the blob from another repository into the specified target repository, and adds an - expiration before that blob is automatically GCed. This function is useful during push - operations if an existing blob from another repository is being pushed. Returns False if - the mounting fails. Note that this function does *not* check security for mounting the blob - and the caller is responsible for doing this check (an example can be found in - endpoints/v2/blob.py). - """ + Mounts the blob from another repository into the specified target repository, and adds an + expiration before that blob is automatically GCed. + + This function is useful during push operations if an existing blob from another repository + is being pushed. Returns False if the mounting fails. Note that this function does *not* + check security for mounting the blob and the caller is responsible for doing this check (an + example can be found in endpoints/v2/blob.py). 
+ """ @abstractmethod def set_tags_expiration_for_manifest(self, manifest, expiration_sec): """ - Sets the expiration on all tags that point to the given manifest to that specified. - """ + Sets the expiration on all tags that point to the given manifest to that specified. + """ @abstractmethod def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage): - """ Returns the schema 1 version of this manifest, or None if none. """ + """ + Returns the schema 1 version of this manifest, or None if none. + """ @abstractmethod def create_manifest_with_temp_tag( self, repository_ref, manifest_interface_instance, expiration_sec, storage ): - """ Creates a manifest under the repository and sets a temporary tag to point to it. + """ + Creates a manifest under the repository and sets a temporary tag to point to it. + Returns the manifest object created or None on error. - """ + """ @abstractmethod def get_cached_namespace_region_blacklist(self, model_cache, namespace_name): - """ Returns a cached set of ISO country codes blacklisted for pulls for the namespace - or None if the list could not be loaded. - """ + """ + Returns a cached set of ISO country codes blacklisted for pulls for the namespace or None if + the list could not be loaded. + """ @abstractmethod def convert_manifest( self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes, storage ): - """ Attempts to convert the specified into a parsed manifest with a media type - in the allowed_mediatypes set. If not possible, or an error occurs, returns None. - """ + """ + Attempts to convert the specified into a parsed manifest with a media type in the + allowed_mediatypes set. + + If not possible, or an error occurs, returns None. + """ @abstractmethod def yield_tags_for_vulnerability_notification(self, layer_id_pairs): - """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories - which have been registered for vulnerability_found notifications. Returns an iterator - of LikelyVulnerableTag instances. - """ + """ + Yields tags that contain one (or more) of the given layer ID pairs, in repositories which + have been registered for vulnerability_found notifications. + + Returns an iterator of LikelyVulnerableTag instances. + """ diff --git a/data/registry_model/label_handlers.py b/data/registry_model/label_handlers.py index 9ea580aea..bfda8b951 100644 --- a/data/registry_model/label_handlers.py +++ b/data/registry_model/label_handlers.py @@ -6,7 +6,9 @@ logger = logging.getLogger(__name__) def _expires_after(label_dict, manifest, model): - """ Sets the expiration of a manifest based on the quay.expires-in label. """ + """ + Sets the expiration of a manifest based on the quay.expires-in label. + """ try: timedelta = convert_to_timedelta(label_dict["value"]) except ValueError: @@ -24,7 +26,9 @@ _LABEL_HANDLERS = { def apply_label_to_manifest(label_dict, manifest, model): - """ Runs the handler defined, if any, for the given label. """ + """ + Runs the handler defined, if any, for the given label. 
+ """ handler = _LABEL_HANDLERS.get(label_dict["key"]) if handler is not None: handler(label_dict, manifest, model) diff --git a/data/registry_model/manifestbuilder.py b/data/registry_model/manifestbuilder.py index 8e2cb3cde..b7322933c 100644 --- a/data/registry_model/manifestbuilder.py +++ b/data/registry_model/manifestbuilder.py @@ -22,9 +22,12 @@ _SESSION_KEY = "__manifestbuilder" def create_manifest_builder(repository_ref, storage, legacy_signing_key): - """ Creates a new manifest builder for populating manifests under the specified repository - and returns it. Returns None if the builder could not be constructed. - """ + """ + Creates a new manifest builder for populating manifests under the specified repository and + returns it. + + Returns None if the builder could not be constructed. + """ builder_id = str(uuid.uuid4()) builder = _ManifestBuilder( repository_ref, _BuilderState(builder_id, {}, {}, {}, []), storage, legacy_signing_key @@ -34,9 +37,10 @@ def create_manifest_builder(repository_ref, storage, legacy_signing_key): def lookup_manifest_builder(repository_ref, builder_id, storage, legacy_signing_key): - """ Looks up the manifest builder with the given ID under the specified repository and returns - it or None if none. - """ + """ + Looks up the manifest builder with the given ID under the specified repository and returns it or + None if none. + """ builder_state_tuple = session.get(_SESSION_KEY) if builder_state_tuple is None: return None @@ -49,9 +53,10 @@ def lookup_manifest_builder(repository_ref, builder_id, storage, legacy_signing_ class _ManifestBuilder(object): - """ Helper class which provides an interface for bookkeeping the layers and configuration of - manifests being constructed. - """ + """ + Helper class which provides an interface for bookkeeping the layers and configuration of + manifests being constructed. + """ def __init__(self, repository_ref, builder_state, storage, legacy_signing_key): self._repository_ref = repository_ref @@ -61,12 +66,16 @@ class _ManifestBuilder(object): @property def builder_id(self): - """ Returns the unique ID for this builder. """ + """ + Returns the unique ID for this builder. + """ return self._builder_state.builder_id @property def committed_tags(self): - """ Returns the tags committed by this builder, if any. """ + """ + Returns the tags committed by this builder, if any. + """ return [ registry_model.get_repo_tag(self._repository_ref, tag_name, include_legacy_image=True) for tag_name in self._builder_state.tags.keys() @@ -75,9 +84,11 @@ class _ManifestBuilder(object): def start_layer( self, layer_id, v1_metadata_string, location_name, calling_user, temp_tag_expiration ): - """ Starts a new layer with the given ID to be placed into a manifest. Returns the layer - started or None if an error occurred. - """ + """ + Starts a new layer with the given ID to be placed into a manifest. + + Returns the layer started or None if an error occurred. + """ # Ensure the repository still exists. repository = model.repository.lookup_repository(self._repository_ref._db_id) if repository is None: @@ -148,7 +159,11 @@ class _ManifestBuilder(object): return ManifestLayer(layer_id, v1_metadata_string, created.id) def lookup_layer(self, layer_id): - """ Returns a layer with the given ID under this builder. If none exists, returns None. """ + """ + Returns a layer with the given ID under this builder. + + If none exists, returns None. 
+ """ if layer_id not in self._builder_state.images: return None @@ -159,7 +174,9 @@ class _ManifestBuilder(object): return ManifestLayer(layer_id, image.v1_json_metadata, image.id) def assign_layer_blob(self, layer, blob, computed_checksums): - """ Assigns a blob to a layer. """ + """ + Assigns a blob to a layer. + """ assert blob assert not blob.uploading @@ -180,25 +197,31 @@ class _ManifestBuilder(object): return True def validate_layer_checksum(self, layer, checksum): - """ Returns whether the checksum for a layer matches that specified. - """ + """ + Returns whether the checksum for a layer matches that specified. + """ return checksum in self.get_layer_checksums(layer) def get_layer_checksums(self, layer): - """ Returns the registered defined for the layer, if any. """ + """ + Returns the registered defined for the layer, if any. + """ return self._builder_state.checksums.get(layer.layer_id) or [] def save_precomputed_checksum(self, layer, checksum): - """ Saves a precomputed checksum for a layer. """ + """ + Saves a precomputed checksum for a layer. + """ checksums = self._builder_state.checksums.get(layer.layer_id) or [] checksums.append(checksum) self._builder_state.checksums[layer.layer_id] = checksums self._save_to_session() def commit_tag_and_manifest(self, tag_name, layer): - """ Commits a new tag + manifest for that tag to the repository with the given name, - pointing to the given layer. - """ + """ + Commits a new tag + manifest for that tag to the repository with the given name, pointing to + the given layer. + """ legacy_image = registry_model.get_legacy_image(self._repository_ref, layer.layer_id) if legacy_image is None: return None @@ -214,10 +237,12 @@ class _ManifestBuilder(object): return tag def done(self): - """ Marks the manifest builder as complete and disposes of any state. This call is optional - and it is expected manifest builders will eventually time out if unused for an - extended period of time. - """ + """ + Marks the manifest builder as complete and disposes of any state. + + This call is optional and it is expected manifest builders will eventually time out if + unused for an extended period of time. + """ temp_storages = self._builder_state.temp_storages for storage_id in temp_storages: try: diff --git a/data/registry_model/modelsplitter.py b/data/registry_model/modelsplitter.py index d2886cc9c..f0ea38812 100644 --- a/data/registry_model/modelsplitter.py +++ b/data/registry_model/modelsplitter.py @@ -24,8 +24,9 @@ class SplitModel(object): self.oci_only_mode = oci_only_mode def supports_schema2(self, namespace_name): - """ Returns whether the implementation of the data interface supports schema 2 format - manifests. """ + """ + Returns whether the implementation of the data interface supports schema 2 format manifests. + """ return namespace_name in self.v22_namespace_whitelist def _namespace_from_kwargs(self, args_dict): diff --git a/data/registry_model/registry_oci_model.py b/data/registry_model/registry_oci_model.py index 06d418f8c..b9d360b54 100644 --- a/data/registry_model/registry_oci_model.py +++ b/data/registry_model/registry_oci_model.py @@ -32,22 +32,25 @@ logger = logging.getLogger(__name__) class OCIModel(SharedModel, RegistryDataInterface): """ - OCIModel implements the data model for the registry API using a database schema - after it was changed to support the OCI specification. - """ + OCIModel implements the data model for the registry API using a database schema after it was + changed to support the OCI specification. 
+ """ def __init__(self, oci_model_only=True): self.oci_model_only = oci_model_only def supports_schema2(self, namespace_name): - """ Returns whether the implementation of the data interface supports schema 2 format - manifests. """ + """ + Returns whether the implementation of the data interface supports schema 2 format manifests. + """ return True def get_tag_legacy_image_id(self, repository_ref, tag_name, storage): - """ Returns the legacy image ID for the tag with a legacy images in - the repository. Returns None if None. - """ + """ + Returns the legacy image ID for the tag with a legacy images in the repository. + + Returns None if None. + """ tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True) if tag is None: return None @@ -64,9 +67,12 @@ class OCIModel(SharedModel, RegistryDataInterface): return None def get_legacy_tags_map(self, repository_ref, storage): - """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in - the repository. Note that this can be a *very* heavy operation. - """ + """ + Returns a map from tag name to its legacy image ID, for all tags with legacy images in the + repository. + + Note that this can be a *very* heavy operation. + """ tags = oci.tag.list_alive_tags(repository_ref._db_id) legacy_images_map = oci.tag.get_legacy_images_for_tags(tags) @@ -115,23 +121,28 @@ class OCIModel(SharedModel, RegistryDataInterface): return model.image.get_image(manifest_obj.repository_id, v1_id) def find_matching_tag(self, repository_ref, tag_names): - """ Finds an alive tag in the repository matching one of the given tag names and returns it - or None if none. - """ + """ + Finds an alive tag in the repository matching one of the given tag names and returns it or + None if none. + """ found_tag = oci.tag.find_matching_tag(repository_ref._db_id, tag_names) assert found_tag is None or not found_tag.hidden return Tag.for_tag(found_tag) def get_most_recent_tag(self, repository_ref): - """ Returns the most recently pushed alive tag in the repository, if any. If none, returns - None. - """ + """ + Returns the most recently pushed alive tag in the repository, if any. + + If none, returns None. + """ found_tag = oci.tag.get_most_recent_tag(repository_ref._db_id) assert found_tag is None or not found_tag.hidden return Tag.for_tag(found_tag) def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False): - """ Returns the manifest associated with the given tag. """ + """ + Returns the manifest associated with the given tag. + """ legacy_image = None if include_legacy_image: legacy_image = oci.shared.get_legacy_image_for_manifest(tag._manifest) @@ -146,8 +157,10 @@ class OCIModel(SharedModel, RegistryDataInterface): include_legacy_image=False, require_available=False, ): - """ Looks up the manifest with the given digest under the given repository and returns it - or None if none. """ + """ + Looks up the manifest with the given digest under the given repository and returns it or + None if none. + """ manifest = oci.manifest.lookup_manifest( repository_ref._db_id, manifest_digest, @@ -172,7 +185,9 @@ class OCIModel(SharedModel, RegistryDataInterface): return Manifest.for_manifest(manifest, legacy_image) def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None): - """ Creates a label on the manifest with the given key and value. """ + """ + Creates a label on the manifest with the given key and value. 
+ """ label_data = dict( key=key, value=value, source_type_name=source_type_name, media_type_name=media_type_name ) @@ -196,11 +211,12 @@ class OCIModel(SharedModel, RegistryDataInterface): @contextmanager def batch_create_manifest_labels(self, manifest): - """ Returns a context manager for batch creation of labels on a manifest. + """ + Returns a context manager for batch creation of labels on a manifest. - Can raise InvalidLabelKeyException or InvalidMediaTypeException depending - on the validation errors. - """ + Can raise InvalidLabelKeyException or InvalidMediaTypeException depending on the validation + errors. + """ labels_to_add = [] def add_label(key, value, source_type_name, media_type_name=None): @@ -226,36 +242,46 @@ class OCIModel(SharedModel, RegistryDataInterface): apply_label_to_manifest(label_data, manifest, self) def list_manifest_labels(self, manifest, key_prefix=None): - """ Returns all labels found on the manifest. If specified, the key_prefix will filter the - labels returned to those keys that start with the given prefix. - """ + """ + Returns all labels found on the manifest. + + If specified, the key_prefix will filter the labels returned to those keys that start with + the given prefix. + """ labels = oci.label.list_manifest_labels(manifest._db_id, prefix_filter=key_prefix) return [Label.for_label(l) for l in labels] def get_manifest_label(self, manifest, label_uuid): - """ Returns the label with the specified UUID on the manifest or None if none. """ + """ + Returns the label with the specified UUID on the manifest or None if none. + """ return Label.for_label(oci.label.get_manifest_label(label_uuid, manifest._db_id)) def delete_manifest_label(self, manifest, label_uuid): - """ Delete the label with the specified UUID on the manifest. Returns the label deleted - or None if none. - """ + """ + Delete the label with the specified UUID on the manifest. + + Returns the label deleted or None if none. + """ return Label.for_label(oci.label.delete_manifest_label(label_uuid, manifest._db_id)) def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit): """ - Returns a page of actvie tags in a repository. Note that the tags returned by this method - are ShallowTag objects, which only contain the tag name. - """ + Returns a page of actvie tags in a repository. + + Note that the tags returned by this method are ShallowTag objects, which only contain the + tag name. + """ tags = oci.tag.lookup_alive_tags_shallow(repository_ref._db_id, start_pagination_id, limit) return [ShallowTag.for_tag(tag) for tag in tags] def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False): """ - Returns a list of all the active tags in the repository. Note that this is a *HEAVY* - operation on repositories with a lot of tags, and should only be used for testing or - where other more specific operations are not possible. - """ + Returns a list of all the active tags in the repository. + + Note that this is a *HEAVY* operation on repositories with a lot of tags, and should only be + used for testing or where other more specific operations are not possible. + """ tags = list(oci.tag.list_alive_tags(repository_ref._db_id)) legacy_images_map = {} if include_legacy_images: @@ -276,9 +302,11 @@ class OCIModel(SharedModel, RegistryDataInterface): since_time_ms=None, ): """ - Returns the history of all tags in the repository (unless filtered). This includes tags that - have been made in-active due to newer versions of those tags coming into service. 
- """ + Returns the history of all tags in the repository (unless filtered). + + This includes tags that have been made in-active due to newer versions of those tags coming + into service. + """ tags, has_more = oci.tag.list_repository_tag_history( repository_ref._db_id, page, size, specific_tag_name, active_tags_only, since_time_ms ) @@ -295,15 +323,16 @@ class OCIModel(SharedModel, RegistryDataInterface): def has_expired_tag(self, repository_ref, tag_name): """ - Returns true if and only if the repository contains a tag with the given name that is expired. - """ + Returns true if and only if the repository contains a tag with the given name that is + expired. + """ return bool(oci.tag.get_expired_tag(repository_ref._db_id, tag_name)) def get_most_recent_tag_lifetime_start(self, repository_refs): - """ - Returns a map from repository ID to the last modified time (in s) for each repository in the - given repository reference list. - """ + """ + Returns a map from repository ID to the last modified time (in s) for each repository in the + given repository reference list. + """ if not repository_refs: return {} @@ -314,9 +343,9 @@ class OCIModel(SharedModel, RegistryDataInterface): def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False): """ - Returns the latest, *active* tag found in the repository, with the matching name - or None if none. - """ + Returns the latest, *active* tag found in the repository, with the matching name or None if + none. + """ assert isinstance(tag_name, basestring) tag = oci.tag.get_tag(repository_ref._db_id, tag_name) @@ -333,7 +362,8 @@ class OCIModel(SharedModel, RegistryDataInterface): def create_manifest_and_retarget_tag( self, repository_ref, manifest_interface_instance, tag_name, storage, raise_on_error=False ): - """ Creates a manifest in a repository, adding all of the necessary data in the model. + """ + Creates a manifest in a repository, adding all of the necessary data in the model. The `manifest_interface_instance` parameter must be an instance of the manifest interface as returned by the image/docker package. @@ -344,7 +374,7 @@ class OCIModel(SharedModel, RegistryDataInterface): Returns a reference to the (created manifest, tag) or (None, None) on error, unless raise_on_error is set to True, in which case a CreateManifestException may also be raised. - """ + """ # Get or create the manifest itself. created_manifest = oci.manifest.get_or_create_manifest( repository_ref._db_id, @@ -387,10 +417,12 @@ class OCIModel(SharedModel, RegistryDataInterface): is_reversion=False, ): """ - Creates, updates or moves a tag to a new entry in history, pointing to the manifest or - legacy image specified. If is_reversion is set to True, this operation is considered a - reversion over a previous tag move operation. Returns the updated Tag or None on error. - """ + Creates, updates or moves a tag to a new entry in history, pointing to the manifest or + legacy image specified. + + If is_reversion is set to True, this operation is considered a reversion over a previous tag + move operation. Returns the updated Tag or None on error. + """ assert legacy_manifest_key is not None manifest_id = manifest_or_legacy_image._db_id if isinstance(manifest_or_legacy_image, LegacyImage): @@ -447,8 +479,8 @@ class OCIModel(SharedModel, RegistryDataInterface): def delete_tag(self, repository_ref, tag_name): """ - Deletes the latest, *active* tag with the given name in the repository. - """ + Deletes the latest, *active* tag with the given name in the repository. 
+ """ deleted_tag = oci.tag.delete_tag(repository_ref._db_id, tag_name) if deleted_tag is None: # TODO: This is only needed because preoci raises an exception. Remove and fix @@ -460,21 +492,27 @@ class OCIModel(SharedModel, RegistryDataInterface): def delete_tags_for_manifest(self, manifest): """ - Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling. - Returns the tags deleted, if any. Returns None on error. - """ + Deletes all tags pointing to the given manifest, making the manifest inaccessible for + pulling. + + Returns the tags deleted, if any. Returns None on error. + """ deleted_tags = oci.tag.delete_tags_for_manifest(manifest._db_id) return [Tag.for_tag(tag) for tag in deleted_tags] def change_repository_tag_expiration(self, tag, expiration_date): - """ Sets the expiration date of the tag under the matching repository to that given. If the - expiration date is None, then the tag will not expire. Returns a tuple of the previous - expiration timestamp in seconds (if any), and whether the operation succeeded. - """ + """ + Sets the expiration date of the tag under the matching repository to that given. + + If the expiration date is None, then the tag will not expire. Returns a tuple of the + previous expiration timestamp in seconds (if any), and whether the operation succeeded. + """ return oci.tag.change_tag_expiration(tag._db_id, expiration_date) def get_legacy_images_owned_by_tag(self, tag): - """ Returns all legacy images *solely owned and used* by the given tag. """ + """ + Returns all legacy images *solely owned and used* by the given tag. + """ tag_obj = oci.tag.get_tag_by_id(tag._db_id) if tag_obj is None: return None @@ -526,7 +564,9 @@ class OCIModel(SharedModel, RegistryDataInterface): return [LegacyImage.for_image(image, images_map=images_map) for image in images] def get_security_status(self, manifest_or_legacy_image): - """ Returns the security status for the given manifest or legacy image or None if none. """ + """ + Returns the security status for the given manifest or legacy image or None if none. + """ image = None if isinstance(manifest_or_legacy_image, Manifest): @@ -547,9 +587,10 @@ class OCIModel(SharedModel, RegistryDataInterface): return SecurityScanStatus.QUEUED def reset_security_status(self, manifest_or_legacy_image): - """ Resets the security status for the given manifest or legacy image, ensuring that it will - get re-indexed. - """ + """ + Resets the security status for the given manifest or legacy image, ensuring that it will get + re-indexed. + """ image = None if isinstance(manifest_or_legacy_image, Manifest): @@ -568,12 +609,13 @@ class OCIModel(SharedModel, RegistryDataInterface): image.save() def backfill_manifest_for_tag(self, tag): - """ Backfills a manifest for the V1 tag specified. - If a manifest already exists for the tag, returns that manifest. + """ + Backfills a manifest for the V1 tag specified. If a manifest already exists for the tag, + returns that manifest. NOTE: This method will only be necessary until we've completed the backfill, at which point it should be removed. - """ + """ # Nothing to do for OCI tags. manifest = tag.manifest if manifest is None: @@ -603,9 +645,9 @@ class OCIModel(SharedModel, RegistryDataInterface): self, manifest, verb, storage, varying_metadata=None, include_placements=False ): """ - Looks up the derived image for the given manifest, verb and optional varying metadata and - returns it or None if none. 
- """ + Looks up the derived image for the given manifest, verb and optional varying metadata and + returns it or None if none. + """ legacy_image = self._get_legacy_compatible_image_for_manifest(manifest, storage) if legacy_image is None: return None @@ -623,9 +665,11 @@ class OCIModel(SharedModel, RegistryDataInterface): include_placements=False, ): """ - Looks up the derived image for the given maniest, verb and optional varying metadata - and returns it. If none exists, a new derived image is created. - """ + Looks up the derived image for the given maniest, verb and optional varying metadata and + returns it. + + If none exists, a new derived image is created. + """ legacy_image = self._get_legacy_compatible_image_for_manifest(manifest, storage) if legacy_image is None: return None @@ -637,12 +681,14 @@ class OCIModel(SharedModel, RegistryDataInterface): def set_tags_expiration_for_manifest(self, manifest, expiration_sec): """ - Sets the expiration on all tags that point to the given manifest to that specified. - """ + Sets the expiration on all tags that point to the given manifest to that specified. + """ oci.tag.set_tag_expiration_sec_for_manifest(manifest._db_id, expiration_sec) def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage): - """ Returns the schema 1 manifest for this manifest, or None if none. """ + """ + Returns the schema 1 manifest for this manifest, or None if none. + """ try: parsed = manifest.get_parsed_manifest() except ManifestException: @@ -677,9 +723,11 @@ class OCIModel(SharedModel, RegistryDataInterface): def create_manifest_with_temp_tag( self, repository_ref, manifest_interface_instance, expiration_sec, storage ): - """ Creates a manifest under the repository and sets a temporary tag to point to it. + """ + Creates a manifest under the repository and sets a temporary tag to point to it. + Returns the manifest object created or None on error. - """ + """ # Get or create the manifest itself. get_or_create_manifest will take care of the # temporary tag work. created_manifest = oci.manifest.get_or_create_manifest( @@ -697,10 +745,11 @@ class OCIModel(SharedModel, RegistryDataInterface): def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False): """ - Returns the blob in the repository with the given digest, if any or None if none. Note that - there may be multiple records in the same repository for the same blob digest, so the return - value of this function may change. - """ + Returns the blob in the repository with the given digest, if any or None if none. + + Note that there may be multiple records in the same repository for the same blob digest, so + the return value of this function may change. + """ image_storage = self._get_shared_storage(blob_digest) if image_storage is None: image_storage = oci.blob.get_repository_blob_by_digest( @@ -724,10 +773,11 @@ class OCIModel(SharedModel, RegistryDataInterface): def list_parsed_manifest_layers( self, repository_ref, parsed_manifest, storage, include_placements=False ): - """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base - and working towards the leaf, including the associated Blob and its placements - (if specified). - """ + """ + Returns an *ordered list* of the layers found in the parsed manifest, starting at the base + and working towards the leaf, including the associated Blob and its placements (if + specified). 
+ """ return self._list_manifest_layers( repository_ref._db_id, parsed_manifest, @@ -737,7 +787,9 @@ class OCIModel(SharedModel, RegistryDataInterface): ) def get_manifest_local_blobs(self, manifest, include_placements=False): - """ Returns the set of local blobs for the given manifest or None if none. """ + """ + Returns the set of local blobs for the given manifest or None if none. + """ try: manifest_row = database.Manifest.get(id=manifest._db_id) except database.Manifest.DoesNotExist: @@ -748,10 +800,12 @@ class OCIModel(SharedModel, RegistryDataInterface): ) def yield_tags_for_vulnerability_notification(self, layer_id_pairs): - """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories - which have been registered for vulnerability_found notifications. Returns an iterator - of LikelyVulnerableTag instances. - """ + """ + Yields tags that contain one (or more) of the given layer ID pairs, in repositories which + have been registered for vulnerability_found notifications. + + Returns an iterator of LikelyVulnerableTag instances. + """ for docker_image_id, storage_uuid in layer_id_pairs: tags = oci.tag.lookup_notifiable_tags_for_legacy_image( docker_image_id, storage_uuid, "vulnerability_found" diff --git a/data/registry_model/registry_pre_oci_model.py b/data/registry_model/registry_pre_oci_model.py index 4a07a7d8a..7c6ee7b8c 100644 --- a/data/registry_model/registry_pre_oci_model.py +++ b/data/registry_model/registry_pre_oci_model.py @@ -31,19 +31,22 @@ logger = logging.getLogger(__name__) class PreOCIModel(SharedModel, RegistryDataInterface): """ - PreOCIModel implements the data model for the registry API using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for the registry API using a database schema before it was + changed to support the OCI specification. + """ def supports_schema2(self, namespace_name): - """ Returns whether the implementation of the data interface supports schema 2 format - manifests. """ + """ + Returns whether the implementation of the data interface supports schema 2 format manifests. + """ return False def get_tag_legacy_image_id(self, repository_ref, tag_name, storage): - """ Returns the legacy image ID for the tag with a legacy images in - the repository. Returns None if None. - """ + """ + Returns the legacy image ID for the tag with a legacy images in the repository. + + Returns None if None. + """ tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True) if tag is None: return None @@ -51,30 +54,36 @@ class PreOCIModel(SharedModel, RegistryDataInterface): return tag.legacy_image.docker_image_id def get_legacy_tags_map(self, repository_ref, storage): - """ Returns a map from tag name to its legacy image, for all tags with legacy images in - the repository. - """ + """ + Returns a map from tag name to its legacy image, for all tags with legacy images in the + repository. + """ tags = self.list_all_active_repository_tags(repository_ref, include_legacy_images=True) return {tag.name: tag.legacy_image.docker_image_id for tag in tags} def find_matching_tag(self, repository_ref, tag_names): - """ Finds an alive tag in the repository matching one of the given tag names and returns it - or None if none. - """ + """ + Finds an alive tag in the repository matching one of the given tag names and returns it or + None if none. 
+ """ found_tag = model.tag.find_matching_tag(repository_ref._db_id, tag_names) assert found_tag is None or not found_tag.hidden return Tag.for_repository_tag(found_tag) def get_most_recent_tag(self, repository_ref): - """ Returns the most recently pushed alive tag in the repository, if any. If none, returns - None. - """ + """ + Returns the most recently pushed alive tag in the repository, if any. + + If none, returns None. + """ found_tag = model.tag.get_most_recent_tag(repository_ref._db_id) assert found_tag is None or not found_tag.hidden return Tag.for_repository_tag(found_tag) def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False): - """ Returns the manifest associated with the given tag. """ + """ + Returns the manifest associated with the given tag. + """ try: tag_manifest = database.TagManifest.get(tag_id=tag._db_id) except database.TagManifest.DoesNotExist: @@ -93,8 +102,10 @@ class PreOCIModel(SharedModel, RegistryDataInterface): include_legacy_image=False, require_available=False, ): - """ Looks up the manifest with the given digest under the given repository and returns it - or None if none. """ + """ + Looks up the manifest with the given digest under the given repository and returns it or + None if none. + """ repo = model.repository.lookup_repository(repository_ref._db_id) if repo is None: return None @@ -117,7 +128,8 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def create_manifest_and_retarget_tag( self, repository_ref, manifest_interface_instance, tag_name, storage, raise_on_error=False ): - """ Creates a manifest in a repository, adding all of the necessary data in the model. + """ + Creates a manifest in a repository, adding all of the necessary data in the model. The `manifest_interface_instance` parameter must be an instance of the manifest interface as returned by the image/docker package. @@ -126,7 +138,7 @@ class PreOCIModel(SharedModel, RegistryDataInterface): method will fail and return None. Returns a reference to the (created manifest, tag) or (None, None) on error. - """ + """ # NOTE: Only Schema1 is supported by the pre_oci_model. assert isinstance(manifest_interface_instance, DockerSchema1Manifest) if not manifest_interface_instance.layers: @@ -219,7 +231,9 @@ class PreOCIModel(SharedModel, RegistryDataInterface): return manifest, Tag.for_repository_tag(repo_tag) def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None): - """ Creates a label on the manifest with the given key and value. """ + """ + Creates a label on the manifest with the given key and value. + """ try: tag_manifest = database.TagManifest.get(id=manifest._db_id) except database.TagManifest.DoesNotExist: @@ -242,11 +256,12 @@ class PreOCIModel(SharedModel, RegistryDataInterface): @contextmanager def batch_create_manifest_labels(self, manifest): - """ Returns a context manager for batch creation of labels on a manifest. + """ + Returns a context manager for batch creation of labels on a manifest. - Can raise InvalidLabelKeyException or InvalidMediaTypeException depending - on the validation errors. - """ + Can raise InvalidLabelKeyException or InvalidMediaTypeException depending on the validation + errors. 
+ """ try: tag_manifest = database.TagManifest.get(id=manifest._db_id) except database.TagManifest.DoesNotExist: @@ -278,27 +293,36 @@ class PreOCIModel(SharedModel, RegistryDataInterface): apply_label_to_manifest(label, manifest, self) def list_manifest_labels(self, manifest, key_prefix=None): - """ Returns all labels found on the manifest. If specified, the key_prefix will filter the - labels returned to those keys that start with the given prefix. - """ + """ + Returns all labels found on the manifest. + + If specified, the key_prefix will filter the labels returned to those keys that start with + the given prefix. + """ labels = model.label.list_manifest_labels(manifest._db_id, prefix_filter=key_prefix) return [Label.for_label(l) for l in labels] def get_manifest_label(self, manifest, label_uuid): - """ Returns the label with the specified UUID on the manifest or None if none. """ + """ + Returns the label with the specified UUID on the manifest or None if none. + """ return Label.for_label(model.label.get_manifest_label(label_uuid, manifest._db_id)) def delete_manifest_label(self, manifest, label_uuid): - """ Delete the label with the specified UUID on the manifest. Returns the label deleted - or None if none. - """ + """ + Delete the label with the specified UUID on the manifest. + + Returns the label deleted or None if none. + """ return Label.for_label(model.label.delete_manifest_label(label_uuid, manifest._db_id)) def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit): """ - Returns a page of actvie tags in a repository. Note that the tags returned by this method - are ShallowTag objects, which only contain the tag name. - """ + Returns a page of actvie tags in a repository. + + Note that the tags returned by this method are ShallowTag objects, which only contain the + tag name. + """ tags = model.tag.list_active_repo_tags( repository_ref._db_id, include_images=False, start_id=start_pagination_id, limit=limit ) @@ -306,10 +330,11 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False): """ - Returns a list of all the active tags in the repository. Note that this is a *HEAVY* - operation on repositories with a lot of tags, and should only be used for testing or - where other more specific operations are not possible. - """ + Returns a list of all the active tags in the repository. + + Note that this is a *HEAVY* operation on repositories with a lot of tags, and should only be + used for testing or where other more specific operations are not possible. + """ if not include_legacy_images: tags = model.tag.list_active_repo_tags(repository_ref._db_id, include_images=False) return [Tag.for_repository_tag(tag) for tag in tags] @@ -334,9 +359,11 @@ class PreOCIModel(SharedModel, RegistryDataInterface): since_time_ms=None, ): """ - Returns the history of all tags in the repository (unless filtered). This includes tags that - have been made in-active due to newer versions of those tags coming into service. - """ + Returns the history of all tags in the repository (unless filtered). + + This includes tags that have been made in-active due to newer versions of those tags coming + into service. 
+ """ # Only available on OCI model if since_time_ms is not None: @@ -357,8 +384,9 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def has_expired_tag(self, repository_ref, tag_name): """ - Returns true if and only if the repository contains a tag with the given name that is expired. - """ + Returns true if and only if the repository contains a tag with the given name that is + expired. + """ try: model.tag.get_expired_tag_in_repo(repository_ref._db_id, tag_name) return True @@ -366,10 +394,10 @@ class PreOCIModel(SharedModel, RegistryDataInterface): return False def get_most_recent_tag_lifetime_start(self, repository_refs): - """ - Returns a map from repository ID to the last modified time (in s) for each repository in the - given repository reference list. - """ + """ + Returns a map from repository ID to the last modified time (in s) for each repository in the + given repository reference list. + """ if not repository_refs: return {} @@ -386,9 +414,9 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False): """ - Returns the latest, *active* tag found in the repository, with the matching name - or None if none. - """ + Returns the latest, *active* tag found in the repository, with the matching name or None if + none. + """ assert isinstance(tag_name, basestring) tag = model.tag.get_active_tag_for_repo(repository_ref._db_id, tag_name) if tag is None: @@ -411,10 +439,12 @@ class PreOCIModel(SharedModel, RegistryDataInterface): is_reversion=False, ): """ - Creates, updates or moves a tag to a new entry in history, pointing to the manifest or - legacy image specified. If is_reversion is set to True, this operation is considered a - reversion over a previous tag move operation. Returns the updated Tag or None on error. - """ + Creates, updates or moves a tag to a new entry in history, pointing to the manifest or + legacy image specified. + + If is_reversion is set to True, this operation is considered a reversion over a previous tag + move operation. Returns the updated Tag or None on error. + """ # TODO: unify this. assert legacy_manifest_key is not None if not is_reversion: @@ -444,8 +474,8 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def delete_tag(self, repository_ref, tag_name): """ - Deletes the latest, *active* tag with the given name in the repository. - """ + Deletes the latest, *active* tag with the given name in the repository. + """ repo = model.repository.lookup_repository(repository_ref._db_id) if repo is None: return None @@ -455,9 +485,11 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def delete_tags_for_manifest(self, manifest): """ - Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling. - Returns the tags deleted, if any. Returns None on error. - """ + Deletes all tags pointing to the given manifest, making the manifest inaccessible for + pulling. + + Returns the tags deleted, if any. Returns None on error. + """ try: tagmanifest = database.TagManifest.get(id=manifest._db_id) except database.TagManifest.DoesNotExist: @@ -469,10 +501,12 @@ class PreOCIModel(SharedModel, RegistryDataInterface): return [Tag.for_repository_tag(tag) for tag in tags] def change_repository_tag_expiration(self, tag, expiration_date): - """ Sets the expiration date of the tag under the matching repository to that given. If the - expiration date is None, then the tag will not expire. 
Returns a tuple of the previous
-        expiration timestamp in seconds (if any), and whether the operation succeeded.
-        """
+        """
+        Sets the expiration date of the tag under the matching repository to that given.
+
+        If the expiration date is None, then the tag will not expire. Returns a tuple of the
+        previous expiration timestamp in seconds (if any), and whether the operation succeeded.
+        """
        try:
            tag_obj = database.RepositoryTag.get(id=tag._db_id)
        except database.RepositoryTag.DoesNotExist:
@@ -481,7 +515,9 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
        return model.tag.change_tag_expiration(tag_obj, expiration_date)

    def get_legacy_images_owned_by_tag(self, tag):
-        """ Returns all legacy images *solely owned and used* by the given tag. """
+        """
+        Returns all legacy images *solely owned and used* by the given tag.
+        """
        try:
            tag_obj = database.RepositoryTag.get(id=tag._db_id)
        except database.RepositoryTag.DoesNotExist:
@@ -521,7 +557,9 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
        return [LegacyImage.for_image(image, images_map=images_map) for image in images]

    def get_security_status(self, manifest_or_legacy_image):
-        """ Returns the security status for the given manifest or legacy image or None if none. """
+        """
+        Returns the security status for the given manifest or legacy image or None if none.
+        """
        image = None

        if isinstance(manifest_or_legacy_image, Manifest):
@@ -544,9 +582,10 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
        return SecurityScanStatus.QUEUED

    def reset_security_status(self, manifest_or_legacy_image):
-        """ Resets the security status for the given manifest or legacy image, ensuring that it will
-        get re-indexed.
-        """
+        """
+        Resets the security status for the given manifest or legacy image, ensuring that it will get
+        re-indexed.
+        """
        image = None

        if isinstance(manifest_or_legacy_image, Manifest):
@@ -567,12 +606,13 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
        image.save()

    def backfill_manifest_for_tag(self, tag):
-        """ Backfills a manifest for the V1 tag specified.
-        If a manifest already exists for the tag, returns that manifest.
+        """
+        Backfills a manifest for the V1 tag specified. If a manifest already exists for the tag,
+        returns that manifest.

        NOTE: This method will only be necessary until we've completed the backfill, at which point
        it should be removed.
-        """
+        """
        # Ensure that there isn't already a manifest for the tag.
        tag_manifest = model.tag.get_tag_manifest(tag._db_id)
        if tag_manifest is not None:
@@ -625,9 +665,9 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
        self, manifest, verb, storage, varying_metadata=None, include_placements=False
    ):
        """
-        Looks up the derived image for the given manifest, verb and optional varying metadata and
-        returns it or None if none.
-        """
+        Looks up the derived image for the given manifest, verb and optional varying metadata and
+        returns it or None if none.
+        """
        try:
            tag_manifest = database.TagManifest.get(id=manifest._db_id)
        except database.TagManifest.DoesNotExist:
@@ -648,9 +688,11 @@ class PreOCIModel(SharedModel, RegistryDataInterface):
        include_placements=False,
    ):
        """
-        Looks up the derived image for the given maniest, verb and optional varying metadata
-        and returns it. If none exists, a new derived image is created.
-        """
+        Looks up the derived image for the given manifest, verb and optional varying metadata and
+        returns it.
+
+        If none exists, a new derived image is created.
+ """ try: tag_manifest = database.TagManifest.get(id=manifest._db_id) except database.TagManifest.DoesNotExist: @@ -665,8 +707,8 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def set_tags_expiration_for_manifest(self, manifest, expiration_sec): """ - Sets the expiration on all tags that point to the given manifest to that specified. - """ + Sets the expiration on all tags that point to the given manifest to that specified. + """ try: tag_manifest = database.TagManifest.get(id=manifest._db_id) except database.TagManifest.DoesNotExist: @@ -675,7 +717,9 @@ class PreOCIModel(SharedModel, RegistryDataInterface): model.tag.set_tag_expiration_for_manifest(tag_manifest, expiration_sec) def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage): - """ Returns the schema 1 version of this manifest, or None if none. """ + """ + Returns the schema 1 version of this manifest, or None if none. + """ try: return manifest.get_parsed_manifest() except ManifestException: @@ -699,17 +743,20 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def create_manifest_with_temp_tag( self, repository_ref, manifest_interface_instance, expiration_sec, storage ): - """ Creates a manifest under the repository and sets a temporary tag to point to it. + """ + Creates a manifest under the repository and sets a temporary tag to point to it. + Returns the manifest object created or None on error. - """ + """ raise NotImplementedError("Unsupported in pre OCI model") def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False): """ - Returns the blob in the repository with the given digest, if any or None if none. Note that - there may be multiple records in the same repository for the same blob digest, so the return - value of this function may change. - """ + Returns the blob in the repository with the given digest, if any or None if none. + + Note that there may be multiple records in the same repository for the same blob digest, so + the return value of this function may change. + """ image_storage = self._get_shared_storage(blob_digest) if image_storage is None: try: @@ -734,16 +781,19 @@ class PreOCIModel(SharedModel, RegistryDataInterface): def list_parsed_manifest_layers( self, repository_ref, parsed_manifest, storage, include_placements=False ): - """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base - and working towards the leaf, including the associated Blob and its placements - (if specified). - """ + """ + Returns an *ordered list* of the layers found in the parsed manifest, starting at the base + and working towards the leaf, including the associated Blob and its placements (if + specified). + """ return self._list_manifest_layers( repository_ref._db_id, parsed_manifest, storage, include_placements=include_placements ) def get_manifest_local_blobs(self, manifest, include_placements=False): - """ Returns the set of local blobs for the given manifest or None if none. """ + """ + Returns the set of local blobs for the given manifest or None if none. + """ try: tag_manifest = database.TagManifest.get(id=manifest._db_id) except database.TagManifest.DoesNotExist: @@ -754,10 +804,12 @@ class PreOCIModel(SharedModel, RegistryDataInterface): ) def yield_tags_for_vulnerability_notification(self, layer_id_pairs): - """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories - which have been registered for vulnerability_found notifications. 
Returns an iterator - of LikelyVulnerableTag instances. - """ + """ + Yields tags that contain one (or more) of the given layer ID pairs, in repositories which + have been registered for vulnerability_found notifications. + + Returns an iterator of LikelyVulnerableTag instances. + """ event = database.ExternalNotificationEvent.get(name="vulnerability_found") def filter_notifying_repos(query): diff --git a/data/registry_model/shared.py b/data/registry_model/shared.py index 089ec507c..01fc9d337 100644 --- a/data/registry_model/shared.py +++ b/data/registry_model/shared.py @@ -31,13 +31,15 @@ MAXIMUM_GENERATED_MANIFEST_SIZE = 3 * 1024 * 1024 # 3 MB class SharedModel: """ - SharedModel implements those data model operations for the registry API that are unchanged - between the old and new data models. - """ + SharedModel implements those data model operations for the registry API that are unchanged + between the old and new data models. + """ def lookup_repository(self, namespace_name, repo_name, kind_filter=None): - """ Looks up and returns a reference to the repository with the given namespace and name, - or None if none. """ + """ + Looks up and returns a reference to the repository with the given namespace and name, or + None if none. + """ repo = model.repository.get_repository(namespace_name, repo_name, kind_filter=kind_filter) state = repo.state if repo is not None else None return RepositoryReference.for_repo_obj( @@ -49,19 +51,24 @@ class SharedModel: ) def is_existing_disabled_namespace(self, namespace_name): - """ Returns whether the given namespace exists and is disabled. """ + """ + Returns whether the given namespace exists and is disabled. + """ namespace = model.user.get_namespace_user(namespace_name) return namespace is not None and not namespace.enabled def is_namespace_enabled(self, namespace_name): - """ Returns whether the given namespace exists and is enabled. """ + """ + Returns whether the given namespace exists and is enabled. + """ namespace = model.user.get_namespace_user(namespace_name) return namespace is not None and namespace.enabled def get_derived_image_signature(self, derived_image, signer_name): """ - Returns the signature associated with the derived image and a specific signer or None if none. - """ + Returns the signature associated with the derived image and a specific signer or None if + none. + """ try: derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id) except database.DerivedStorageForImage.DoesNotExist: @@ -76,8 +83,8 @@ class SharedModel: def set_derived_image_signature(self, derived_image, signer_name, signature): """ - Sets the calculated signature for the given derived image and signer to that specified. - """ + Sets the calculated signature for the given derived image and signer to that specified. + """ try: derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id) except database.DerivedStorageForImage.DoesNotExist: @@ -91,8 +98,8 @@ class SharedModel: def delete_derived_image(self, derived_image): """ - Deletes a derived image and all of its storage. - """ + Deletes a derived image and all of its storage. + """ try: derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id) except database.DerivedStorageForImage.DoesNotExist: @@ -102,8 +109,8 @@ class SharedModel: def set_derived_image_size(self, derived_image, compressed_size): """ - Sets the compressed size on the given derived image. - """ + Sets the compressed size on the given derived image. 
+        """
         try:
             derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
         except database.DerivedStorageForImage.DoesNotExist:
@@ -116,8 +123,8 @@ class SharedModel:
 
     def get_torrent_info(self, blob):
         """
-            Returns the torrent information associated with the given blob or None if none.
-        """
+        Returns the torrent information associated with the given blob or None if none.
+        """
         try:
             image_storage = database.ImageStorage.get(id=blob._db_id)
         except database.ImageStorage.DoesNotExist:
@@ -132,8 +139,8 @@ class SharedModel:
 
     def set_torrent_info(self, blob, piece_length, pieces):
         """
-            Sets the torrent infomation associated with the given blob to that specified.
-        """
+        Sets the torrent information associated with the given blob to that specified.
+        """
         try:
             image_storage = database.ImageStorage.get(id=blob._db_id)
         except database.ImageStorage.DoesNotExist:
@@ -150,10 +157,12 @@ class SharedModel:
         self, model_cache, repository_ref, start_pagination_id, limit
     ):
         """
-        Returns a page of active tags in a repository. Note that the tags returned by this method
-        are ShallowTag objects, which only contain the tag name. This method will automatically cache
-        the result and check the cache before making a call.
-        """
+        Returns a page of active tags in a repository.
+
+        Note that the tags returned by this method are ShallowTag objects, which only contain the
+        tag name. This method will automatically cache the result and check the cache before making
+        a call.
+        """
 
         def load_tags():
             tags = self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit)
@@ -170,9 +179,10 @@ class SharedModel:
         return self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit)
 
     def get_cached_namespace_region_blacklist(self, model_cache, namespace_name):
-        """ Returns a cached set of ISO country codes blacklisted for pulls for the namespace
-            or None if the list could not be loaded.
-        """
+        """
+        Returns a cached set of ISO country codes blacklisted for pulls for the namespace or None if
+        the list could not be loaded.
+        """
 
         def load_blacklist():
             restrictions = model.user.list_namespace_geo_restrictions(namespace_name)
@@ -190,9 +200,10 @@ class SharedModel:
 
     def get_cached_repo_blob(self, model_cache, namespace_name, repo_name, blob_digest):
         """
-        Returns the blob in the repository with the given digest if any or None if none.
-        Caches the result in the caching system.
-        """
+        Returns the blob in the repository with the given digest if any or None if none.
+
+        Caches the result in the caching system.
+        """
 
         def load_blob():
             repository_ref = self.lookup_repository(namespace_name, repo_name)
@@ -227,8 +238,11 @@ class SharedModel:
         pass
 
     def create_blob_upload(self, repository_ref, new_upload_id, location_name, storage_metadata):
-        """ Creates a new blob upload and returns a reference. If the blob upload could not be
-            created, returns None. """
+        """
+        Creates a new blob upload and returns a reference.
+
+        If the blob upload could not be created, returns None.
+        """
         repo = model.repository.lookup_repository(repository_ref._db_id)
         if repo is None:
             return None
@@ -242,9 +256,10 @@ class SharedModel:
         return None
 
     def lookup_blob_upload(self, repository_ref, blob_upload_id):
-        """ Looks up the blob upload withn the given ID under the specified repository and returns it
-            or None if none.
-        """
+        """
+        Looks up the blob upload with the given ID under the specified repository and returns it or
+        None if none.
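+
+        Illustrative usage (the repository reference and upload ID here are
+        hypothetical, not part of this module):
+
+            upload = model.lookup_blob_upload(repo_ref, upload_id)
+            if upload is None:
+                pass  # no upload with that ID exists under the repository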
+        """
         upload_record = model.blob.get_blob_upload_by_uuid(blob_upload_id)
         if upload_record is None:
             return None
@@ -262,9 +277,11 @@ class SharedModel:
         chunk_count,
         sha_state,
     ):
-        """ Updates the fields of the blob upload to match those given. Returns the updated blob upload
-            or None if the record does not exists.
-        """
+        """
+        Updates the fields of the blob upload to match those given.
+
+        Returns the updated blob upload or None if the record does not exist.
+        """
         upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
         if upload_record is None:
             return None
@@ -280,14 +297,17 @@ class SharedModel:
         return BlobUpload.for_upload(upload_record)
 
     def delete_blob_upload(self, blob_upload):
-        """ Deletes a blob upload record. """
+        """
+        Deletes a blob upload record.
+        """
         upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
         if upload_record is not None:
             upload_record.delete_instance()
 
     def commit_blob_upload(self, blob_upload, blob_digest_str, blob_expiration_seconds):
-        """ Commits the blob upload into a blob and sets an expiration before that blob will be GCed.
-        """
+        """
+        Commits the blob upload into a blob and sets an expiration before that blob will be GCed.
+        """
         upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
         if upload_record is None:
             return None
@@ -313,11 +333,12 @@ class SharedModel:
 
     def mount_blob_into_repository(self, blob, target_repository_ref, expiration_sec):
         """
-        Mounts the blob from another repository into the specified target repository, and adds an
-        expiration before that blob is automatically GCed. This function is useful during push
-        operations if an existing blob from another repository is being pushed. Returns False if
-        the mounting fails.
-        """
+        Mounts the blob from another repository into the specified target repository, and adds an
+        expiration before that blob is automatically GCed.
+
+        This function is useful during push operations if an existing blob from another repository
+        is being pushed. Returns False if the mounting fails.
+        """
         storage = model.blob.temp_link_blob(
             target_repository_ref._db_id, blob.digest, expiration_sec
         )
@@ -325,8 +346,8 @@ class SharedModel:
 
     def get_legacy_images(self, repository_ref):
         """
-        Returns an iterator of all the LegacyImage's defined in the matching repository.
-        """
+        Returns an iterator of all the LegacyImage objects defined in the matching repository.
+        """
         repo = model.repository.lookup_repository(repository_ref._db_id)
         if repo is None:
             return None
@@ -348,9 +369,10 @@ class SharedModel:
         self, repository_ref, docker_image_id, include_parents=False, include_blob=False
     ):
         """
-        Returns the matching LegacyImages under the matching repository, if any. If none,
-        returns None.
-        """
+        Returns the matching LegacyImages under the matching repository, if any.
+
+        If none, returns None.
+        """
         repo = model.repository.lookup_repository(repository_ref._db_id)
         if repo is None:
             return None
@@ -409,10 +431,12 @@ class SharedModel:
     def _list_manifest_layers(
         self, repo_id, parsed, storage, include_placements=False, by_manifest=False
    ):
-        """ Returns an *ordered list* of the layers found in the manifest, starting at the base and
+        """
+        Returns an *ordered list* of the layers found in the manifest, starting at the base and
         working towards the leaf, including the associated Blob and its placements (if specified).
+
         Returns None if the manifest could not be parsed and validated.
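+
+        Illustrative use of the ordering guarantee (a sketch, not part of the
+        original docstring):
+
+            layers = self._list_manifest_layers(repo_id, parsed, storage)
+            # layers[0] is the base layer; layers[-1] is the leaf (topmost) layer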
-        """
+        """
         assert not parsed.is_manifest_list
 
         retriever = RepositoryContentRetriever(repo_id, storage)
@@ -543,7 +567,9 @@ class SharedModel:
         return None
 
     def _get_shared_storage(self, blob_digest):
-        """ Returns an ImageStorage row for the blob digest if it is a globally shared storage. """
+        """
+        Returns an ImageStorage row for the blob digest if it is a globally shared storage.
+        """
         # If the EMPTY_LAYER_BLOB_DIGEST is in the checksums, look it up directly. Since we have
         # so many duplicate copies in the database currently, looking it up bound to a repository
         # can be incredibly slow, and, since it is defined as a globally shared layer, this is extra
diff --git a/data/secscan_model/datatypes.py b/data/secscan_model/datatypes.py
index 58e6558a6..e2c009e1c 100644
--- a/data/secscan_model/datatypes.py
+++ b/data/secscan_model/datatypes.py
@@ -25,7 +25,9 @@ class ScanLookupStatus(IntEnum):
 
 
 class SecurityInformationLookupResult(object):
-    """ Represents the result of calling to lookup security information for a manifest/image. """
+    """
+    Represents the result of a call to look up security information for a manifest/image.
+    """
 
     def __init__(self, status, security_information=None, indexing_error=None, request_error=None):
         self._status = status
@@ -49,24 +51,33 @@
 
     @property
     def security_information(self):
-        """ The loaded security information for the manifest/image, in dictionary form. """
+        """
+        The loaded security information for the manifest/image, in dictionary form.
+        """
         return self._security_information
 
     @property
     def status(self):
-        """ The ScanLookupStatus of this requested lookup. """
+        """
+        The ScanLookupStatus of this requested lookup.
+        """
         return self._status
 
     @property
     def indexing_error(self):
-        """ Returns the string of the error message describing why the manifest/image failed to index.
-            May be empty or None if there was no error.
+        """
+        Returns the string of the error message describing why the manifest/image failed to index.
+
+        May be empty or None if there was no error.
         """
         return self._indexing_error
 
     @property
     def scanner_request_error(self):
-        """ Returns the string of the error message when trying to load the security information
-            from the downstream scanner. None otherwise.
+        """
+        Returns the string of the error message when trying to load the security information from
+        the downstream scanner.
+
+        Returns None otherwise.
         """
         return self._request_error
diff --git a/data/secscan_model/interface.py b/data/secscan_model/interface.py
index 6a180370b..b65b7f9fe 100644
--- a/data/secscan_model/interface.py
+++ b/data/secscan_model/interface.py
@@ -6,34 +6,46 @@ from deprecated import deprecated
 
 @add_metaclass(ABCMeta)
 class SecurityScannerInterface(object):
-    """ Interface for code to work with the security scan data model. This model encapsulates
-        all access when speaking to an external security scanner, as well as any data tracking
-        in the database.
+    """
+    Interface for code to work with the security scan data model.
+
+    This model encapsulates all access when speaking to an external security scanner, as well as any
+    data tracking in the database.
     """
 
     @abstractmethod
     def load_security_information(self, manifest_or_legacy_image, include_vulnerabilities=False):
-        """ Loads the security information for the given manifest or legacy image, returning
-            a SecurityInformationLookupResult structure. The manifest_or_legacy_image must be a Manifest
-            or LegacyImage datatype from the registry_model.
+        """
+        Loads the security information for the given manifest or legacy image, returning a
+        SecurityInformationLookupResult structure.
+
+        The manifest_or_legacy_image must be a Manifest or LegacyImage datatype from the
+        registry_model.
         """
 
     @abstractmethod
     def perform_indexing(self, start_token=None):
-        """ Performs indexing of the next set of unindexed manifests/images. If start_token is given,
-            the indexing should resume from that point. Returns a new start index for the next
-            iteration of indexing. The tokens returned and given are assumed to be opaque outside
-            of this implementation and should not be relied upon by the caller to conform to any
-            particular format.
+        """
+        Performs indexing of the next set of unindexed manifests/images.
+
+        If start_token is given, the indexing should resume from that point. Returns a new start
+        index for the next iteration of indexing. The tokens returned and given are assumed to be
+        opaque outside of this implementation and should not be relied upon by the caller to conform
+        to any particular format.
         """
 
     @abstractmethod
     def register_model_cleanup_callbacks(self, data_model_config):
-        """ Registers any cleanup callbacks with the data model. Typically, a callback is registered
-            to remove the manifest/image from the security indexer if it has been GCed in the data model.
+        """
+        Registers any cleanup callbacks with the data model.
+
+        Typically, a callback is registered to remove the manifest/image from the security indexer
+        if it has been GCed in the data model.
         """
 
     @abstractproperty
     @deprecated(reason="Only exposed for the legacy notification worker")
     def legacy_api_handler(self):
-        """ Exposes the legacy security scan API for legacy workers that need it or None if none. """
+        """
+        Exposes the legacy security scan API for legacy workers that need it or None if none.
+        """
diff --git a/data/secscan_model/secscan_v2_model.py b/data/secscan_model/secscan_v2_model.py
index 09e3aa210..e70d91ba3 100644
--- a/data/secscan_model/secscan_v2_model.py
+++ b/data/secscan_model/secscan_v2_model.py
@@ -38,19 +38,22 @@ unscanned_images = Gauge(
 
 class ScanToken(namedtuple("NextScanToken", ["min_id"])):
     """
-    ScanToken represents an opaque token that can be passed between runs of the security worker
-    to continue scanning whereever the previous run left off. Note that the data of the token is
-    *opaque* to the security worker, and the security worker should *not* pull any data out or modify
-    the token in any way.
-    """
+    ScanToken represents an opaque token that can be passed between runs of the security worker to
+    continue scanning wherever the previous run left off.
+
+    Note that the data of the token is *opaque* to the security worker, and the security worker
+    should *not* pull any data out or modify the token in any way.
+    """
 
 
 @deprecated(reason="Will be replaced by a V4 API security scanner soon")
 class V2SecurityScanner(SecurityScannerInterface):
-    """ Implementation of the security scanner interface for Clair V2 API-compatible implementations.
+    """
+    Implementation of the security scanner interface for Clair V2 API-compatible implementations.
 
     NOTE: This is a legacy implementation and is intended to be removed once everyone is moved to
-    the more modern V4 API.
(Yes, we skipped V3) + """ def __init__(self, app, instance_keys, storage): self.app = app @@ -88,7 +91,9 @@ class V2SecurityScanner(SecurityScannerInterface): @property def legacy_api_handler(self): - """ Exposes the legacy security scan API for legacy workers that need it. """ + """ + Exposes the legacy security scan API for legacy workers that need it. + """ return self._legacy_secscan_api def register_model_cleanup_callbacks(self, data_model_config): @@ -176,11 +181,13 @@ class V2SecurityScanner(SecurityScannerInterface): return (iterator, ScanToken(max_id + 1)) def perform_indexing(self, start_token=None): - """ Performs indexing of the next set of unindexed manifests/images. If start_token is given, - the indexing should resume from that point. Returns a new start index for the next - iteration of indexing. The tokens returned and given are assumed to be opaque outside - of this implementation and should not be relied upon by the caller to conform to any - particular format. + """ + Performs indexing of the next set of unindexed manifests/images. + + If start_token is given, the indexing should resume from that point. Returns a new start + index for the next iteration of indexing. The tokens returned and given are assumed to be + opaque outside of this implementation and should not be relied upon by the caller to conform + to any particular format. """ # NOTE: This import is in here because otherwise this class would depend upon app. # Its not great, but as this is intended to be legacy until its removed, its okay. diff --git a/data/text.py b/data/text.py index 0ae10136d..4578c2742 100644 --- a/data/text.py +++ b/data/text.py @@ -2,9 +2,10 @@ from peewee import NodeList, SQL, fn, TextField, Field def _escape_wildcard(search_query): - """ Escapes the wildcards found in the given search query so that they are treated as *characters* - rather than wildcards when passed to a LIKE or ILIKE clause with an ESCAPE '!'. - """ + """ + Escapes the wildcards found in the given search query so that they are treated as *characters* + rather than wildcards when passed to a LIKE or ILIKE clause with an ESCAPE '!'. + """ search_query = ( search_query.replace("!", "!!").replace("%", "!%").replace("_", "!_").replace("[", "![") ) @@ -18,15 +19,18 @@ def _escape_wildcard(search_query): def prefix_search(field, prefix_query): - """ Returns the wildcard match for searching for the given prefix query. """ + """ + Returns the wildcard match for searching for the given prefix query. + """ # Escape the known wildcard characters. prefix_query = _escape_wildcard(prefix_query) return Field.__pow__(field, NodeList((prefix_query + "%", SQL("ESCAPE '!'")))) def match_mysql(field, search_query): - """ Generates a full-text match query using a Match operation, which is needed for MySQL. - """ + """ + Generates a full-text match query using a Match operation, which is needed for MySQL. + """ if field.name.find("`") >= 0: # Just to be safe. raise Exception("How did field name '%s' end up containing a backtick?" % field.name) @@ -45,9 +49,10 @@ def match_mysql(field, search_query): def match_like(field, search_query): - """ Generates a full-text match query using an ILIKE operation, which is needed for SQLite and - Postgres. - """ + """ + Generates a full-text match query using an ILIKE operation, which is needed for SQLite and + Postgres. 
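+
+    For example (an illustrative query, not from the original docstring), the
+    search query `50%_off` is escaped by `_escape_wildcard` to `50!%!_off`, so
+    the generated clause matches `%50!%!_off%` with `ESCAPE '!'` and the
+    literal `%` and `_` characters are not treated as wildcards.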
+ """ escaped_query = _escape_wildcard(search_query) clause = NodeList(("%" + escaped_query + "%", SQL("ESCAPE '!'"))) return Field.__pow__(field, clause) diff --git a/data/userevent.py b/data/userevent.py index 8819afe7b..4ad2a5916 100644 --- a/data/userevent.py +++ b/data/userevent.py @@ -8,14 +8,15 @@ logger = logging.getLogger(__name__) class CannotReadUserEventsException(Exception): - """ Exception raised if user events cannot be read. """ + """ + Exception raised if user events cannot be read. + """ class UserEventBuilder(object): """ - Defines a helper class for constructing UserEvent and UserEventListener - instances. - """ + Defines a helper class for constructing UserEvent and UserEventListener instances. + """ def __init__(self, redis_config): self._redis_config = redis_config @@ -56,9 +57,8 @@ class UserEventsBuilderModule(object): class UserEvent(object): """ - Defines a helper class for publishing to realtime user events - as backed by Redis. - """ + Defines a helper class for publishing to realtime user events as backed by Redis. + """ def __init__(self, redis_config, username): self._redis = redis.StrictRedis(socket_connect_timeout=2, socket_timeout=2, **redis_config) @@ -75,9 +75,10 @@ class UserEvent(object): def publish_event_data(self, event_id, data_obj): """ - Publishes the serialized form of the data object for the given event. Note that this occurs - in a thread to prevent blocking. - """ + Publishes the serialized form of the data object for the given event. + + Note that this occurs in a thread to prevent blocking. + """ def conduct(): try: @@ -92,9 +93,8 @@ class UserEvent(object): class UserEventListener(object): """ - Defines a helper class for subscribing to realtime user events as - backed by Redis. - """ + Defines a helper class for subscribing to realtime user events as backed by Redis. + """ def __init__(self, redis_config, username, events=None): events = events or set([]) @@ -117,10 +117,10 @@ class UserEventListener(object): def event_stream(self): """ - Starts listening for events on the channel(s), yielding for each event - found. Will yield a "pulse" event (a custom event we've decided) as a heartbeat - every few seconds. - """ + Starts listening for events on the channel(s), yielding for each event found. + + Will yield a "pulse" event (a custom event we've decided) as a heartbeat every few seconds. + """ while True: pubsub = self._pubsub if pubsub is None: @@ -148,9 +148,10 @@ class UserEventListener(object): def stop(self): """ - Unsubscribes from the channel(s). Should be called once the connection - has terminated. - """ + Unsubscribes from the channel(s). + + Should be called once the connection has terminated. + """ if self._pubsub is not None: self._pubsub.unsubscribe() self._pubsub.close() diff --git a/data/userfiles.py b/data/userfiles.py index bb7b9f587..9e77977e0 100644 --- a/data/userfiles.py +++ b/data/userfiles.py @@ -85,7 +85,9 @@ class DelegateUserfiles(object): return os.path.join(self._prefix or "", os.path.basename(file_id)) def prepare_for_drop(self, mime_type, requires_cors=True): - """ Returns a signed URL to upload a file to our bucket. """ + """ + Returns a signed URL to upload a file to our bucket. 
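+
+        Illustrative usage (the mime type value is an example):
+
+            (url, file_id) = user_files.prepare_for_drop("text/plain")
+            # The client uploads the file to `url`; `file_id` is retained to
+            # reference the uploaded file later.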
+        """
         logger.debug("Requested upload url with content type: %s" % mime_type)
         file_id = str(uuid4())
         path = self.get_file_id_path(file_id)
diff --git a/data/users/__init__.py b/data/users/__init__.py
index f191af83b..e6becb8f9 100644
--- a/data/users/__init__.py
+++ b/data/users/__init__.py
@@ -41,7 +41,9 @@ LDAP_CERT_FILENAME = "ldap.crt"
 
 
 def get_users_handler(config, _, override_config_dir):
-    """ Returns a users handler for the authentication configured in the given config object. """
+    """
+    Returns a users handler for the authentication configured in the given config object.
+    """
     authentication_type = config.get("AUTHENTICATION_TYPE", "Database")
 
     if authentication_type == "Database":
@@ -145,7 +147,9 @@ class UserAuthentication(object):
         return users
 
     def encrypt_user_password(self, password):
-        """ Returns an encrypted version of the user's password. """
+        """
+        Returns an encrypted version of the user's password.
+        """
         data = {"password": password}
 
         message = json.dumps(data)
@@ -153,7 +157,9 @@ class UserAuthentication(object):
         return cipher.encrypt(message)
 
     def _decrypt_user_password(self, encrypted):
-        """ Attempts to decrypt the given password and returns it. """
+        """
+        Attempts to decrypt the given password and returns it.
+        """
         cipher = AESCipher(self.secret_key)
 
         try:
@@ -171,102 +177,129 @@ class UserAuthentication(object):
         return data.get("password", encrypted)
 
     def ping(self):
-        """ Returns whether the authentication engine is reachable and working. """
+        """
+        Returns whether the authentication engine is reachable and working.
+        """
         return self.state.ping()
 
     @property
     def federated_service(self):
-        """ Returns the name of the federated service for the auth system. If none, should return None.
-        """
+        """
+        Returns the name of the federated service for the auth system.
+
+        If none, should return None.
+        """
         return self.state.federated_service
 
     @property
     def requires_distinct_cli_password(self):
-        """ Returns whether this auth system requires a distinct CLI password to be created,
-            in-system, before the CLI can be used. """
+        """
+        Returns whether this auth system requires a distinct CLI password to be created, in-system,
+        before the CLI can be used.
+        """
         return self.state.requires_distinct_cli_password
 
     @property
     def supports_encrypted_credentials(self):
-        """ Returns whether this auth system supports using encrypted credentials. """
+        """
+        Returns whether this auth system supports using encrypted credentials.
+        """
         return self.state.supports_encrypted_credentials
 
     def has_password_set(self, username):
-        """ Returns whether the user has a password set in the auth system. """
+        """
+        Returns whether the user has a password set in the auth system.
+        """
         return self.state.has_password_set(username)
 
     @property
     def supports_fresh_login(self):
-        """ Returns whether this auth system supports the fresh login check. """
+        """
+        Returns whether this auth system supports the fresh login check.
+        """
         return self.state.supports_fresh_login
 
     def query_users(self, query, limit=20):
-        """ Performs a lookup against the user system for the specified query. The returned tuple
-            will be of the form (results, federated_login_id, err_msg). If the method is unsupported,
-            the results portion of the tuple will be None instead of empty list.
+        """
+        Performs a lookup against the user system for the specified query. The returned tuple will
+        be of the form (results, federated_login_id, err_msg). If the method is unsupported, the
+        results portion of the tuple will be None instead of an empty list.
         Note that this method can and will return results for users not yet found within the
         database; it is the responsibility of the caller to call link_user if they need the
         database row for the user system record.
 
         Results will be in the form of objects with username and email fields.
-        """
+        """
         return self.state.query_users(query, limit)
 
     def link_user(self, username_or_email):
-        """ Returns a tuple containing the database user record linked to the given username/email
-            and any error that occurred when trying to link the user.
-        """
+        """
+        Returns a tuple containing the database user record linked to the given username/email and
+        any error that occurred when trying to link the user.
+        """
         return self.state.link_user(username_or_email)
 
     def get_and_link_federated_user_info(self, user_info, internal_create=False):
-        """ Returns a tuple containing the database user record linked to the given UserInformation
-            pair and any error that occurred when trying to link the user.
+        """
+        Returns a tuple containing the database user record linked to the given UserInformation pair
+        and any error that occurred when trying to link the user.
 
-            If `internal_create` is True, the caller is an internal user creation process (such
-            as team syncing), and the "can a user be created" check will be bypassed.
-        """
+        If `internal_create` is True, the caller is an internal user creation process (such as team
+        syncing), and the "can a user be created" check will be bypassed.
+        """
         return self.state.get_and_link_federated_user_info(
             user_info, internal_create=internal_create
         )
 
     def confirm_existing_user(self, username, password):
-        """ Verifies that the given password matches to the given DB username. Unlike
-            verify_credentials, this call first translates the DB user via the FederatedLogin table
-            (where applicable).
-        """
+        """
+        Verifies that the given password matches the given DB username.
+
+        Unlike verify_credentials, this call first translates the DB user via the FederatedLogin
+        table (where applicable).
+        """
         return self.state.confirm_existing_user(username, password)
 
     def verify_credentials(self, username_or_email, password):
-        """ Verifies that the given username and password credentials are valid. """
+        """
+        Verifies that the given username and password credentials are valid.
+        """
         return self.state.verify_credentials(username_or_email, password)
 
     def check_group_lookup_args(self, group_lookup_args):
-        """ Verifies that the given group lookup args point to a valid group. Returns a tuple consisting
-            of a boolean status and an error message (if any).
-        """
+        """
+        Verifies that the given group lookup args point to a valid group.
+
+        Returns a tuple consisting of a boolean status and an error message (if any).
+        """
         return self.state.check_group_lookup_args(group_lookup_args)
 
     def service_metadata(self):
-        """ Returns a dictionary of extra metadata to present to *superusers* about this auth engine.
+        """
+        Returns a dictionary of extra metadata to present to *superusers* about this auth engine.
+
         For example, LDAP returns the base DN so we can display to the user during sync setup.
-        """
+        """
         return self.state.service_metadata()
 
     def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
-        """ Returns a tuple of an iterator over all the members of the group matching the given lookup
+        """
+        Returns a tuple of an iterator over all the members of the group matching the given lookup
         args dictionary, or the error that occurred if the initial call failed or is unsupported.
- The format of the lookup args dictionary is specific to the implementation. - Each result in the iterator is a tuple of (UserInformation, error_message), and only - one will be not-None. - """ + + The format of the lookup args dictionary is specific to the implementation. Each result in + the iterator is a tuple of (UserInformation, error_message), and only one will be not-None. + """ return self.state.iterate_group_members( group_lookup_args, page_size=page_size, disable_pagination=disable_pagination ) def verify_and_link_user(self, username_or_email, password, basic_auth=False): - """ Verifies that the given username and password credentials are valid and, if so, - creates or links the database user to the federated identity. """ + """ + Verifies that the given username and password credentials are valid and, if so, creates or + links the database user to the federated identity. + """ # First try to decode the password as a signed token. if basic_auth: decrypted = self._decrypt_user_password(password) diff --git a/data/users/apptoken.py b/data/users/apptoken.py index 69b00fb15..57e966f11 100644 --- a/data/users/apptoken.py +++ b/data/users/apptoken.py @@ -10,9 +10,9 @@ logger = logging.getLogger(__name__) class AppTokenInternalAuth(object): - """ Forces all internal credential login to go through an app token, by disabling all other - access. - """ + """ + Forces all internal credential login to go through an app token, by disabling all other access. + """ @property def supports_fresh_login(self): @@ -65,5 +65,9 @@ class AppTokenInternalAuth(object): return {} def ping(self): - """ Always assumed to be working. If the DB is broken, other checks will handle it. """ + """ + Always assumed to be working. + + If the DB is broken, other checks will handle it. + """ return (True, None) diff --git a/data/users/database.py b/data/users/database.py index 80d0bda8a..851932cb4 100644 --- a/data/users/database.py +++ b/data/users/database.py @@ -11,7 +11,11 @@ class DatabaseUsers(object): return True def ping(self): - """ Always assumed to be working. If the DB is broken, other checks will handle it. """ + """ + Always assumed to be working. + + If the DB is broken, other checks will handle it. + """ return (True, None) @property @@ -28,7 +32,9 @@ class DatabaseUsers(object): return True def verify_credentials(self, username_or_email, password): - """ Simply delegate to the model implementation. """ + """ + Simply delegate to the model implementation. + """ result = model.user.verify_user(username_or_email, password) if not result: return (None, "Invalid Username or Password") @@ -36,32 +42,46 @@ class DatabaseUsers(object): return (result, None) def verify_and_link_user(self, username_or_email, password): - """ Simply delegate to the model implementation. """ + """ + Simply delegate to the model implementation. + """ return self.verify_credentials(username_or_email, password) def confirm_existing_user(self, username, password): return self.verify_credentials(username, password) def link_user(self, username_or_email): - """ Never used since all users being added are already, by definition, in the database. """ + """ + Never used since all users being added are already, by definition, in the database. + """ return (None, "Unsupported for this authentication system") def get_and_link_federated_user_info(self, user_info, internal_create=False): - """ Never used since all users being added are already, by definition, in the database. 
""" + """ + Never used since all users being added are already, by definition, in the database. + """ return (None, "Unsupported for this authentication system") def query_users(self, query, limit): - """ No need to implement, as we already query for users directly in the database. """ + """ + No need to implement, as we already query for users directly in the database. + """ return (None, "", "") def check_group_lookup_args(self, group_lookup_args): - """ Never used since all groups, by definition, are in the database. """ + """ + Never used since all groups, by definition, are in the database. + """ return (False, "Not supported") def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False): - """ Never used since all groups, by definition, are in the database. """ + """ + Never used since all groups, by definition, are in the database. + """ return (None, "Not supported") def service_metadata(self): - """ Never used since database has no metadata """ + """ + Never used since database has no metadata. + """ return {} diff --git a/data/users/externaljwt.py b/data/users/externaljwt.py index d8049ffb5..1cb93956b 100644 --- a/data/users/externaljwt.py +++ b/data/users/externaljwt.py @@ -10,7 +10,9 @@ logger = logging.getLogger(__name__) class ExternalJWTAuthN(FederatedUsers): - """ Delegates authentication to a REST endpoint that returns JWTs. """ + """ + Delegates authentication to a REST endpoint that returns JWTs. + """ PUBLIC_KEY_FILENAME = "jwt-authn.cert" @@ -119,7 +121,9 @@ class ExternalJWTAuthN(FederatedUsers): return (user_info, None) def _execute_call(self, url, aud, auth=None, params=None): - """ Executes a call to the external JWT auth provider. """ + """ + Executes a call to the external JWT auth provider. + """ result = self.client.get(url, timeout=2, auth=auth, params=params) if result.status_code != 200: return (None, result.text or "Could not make JWT auth call") diff --git a/data/users/externalldap.py b/data/users/externalldap.py index 5a8126899..a1b190483 100644 --- a/data/users/externalldap.py +++ b/data/users/externalldap.py @@ -274,7 +274,9 @@ class LDAPUsers(FederatedUsers): return (False, None) def get_user(self, username_or_email): - """ Looks up a username or email in LDAP. """ + """ + Looks up a username or email in LDAP. + """ logger.debug("Looking up LDAP username or email %s", username_or_email) (found_user, err_msg) = self._ldap_single_user_search(username_or_email) if err_msg is not None: @@ -285,7 +287,9 @@ class LDAPUsers(FederatedUsers): return self._build_user_information(found_response) def query_users(self, query, limit=20): - """ Queries LDAP for matching users. """ + """ + Queries LDAP for matching users. + """ if not query: return (None, self.federated_service, "Empty query") @@ -306,7 +310,9 @@ class LDAPUsers(FederatedUsers): return (final_results, self.federated_service, None) def verify_credentials(self, username_or_email, password): - """ Verify the credentials with LDAP. """ + """ + Verify the credentials with LDAP. + """ # Make sure that even if the server supports anonymous binds, we don't allow it if not password: return (None, "Anonymous binding not allowed") diff --git a/data/users/federated.py b/data/users/federated.py index f694b0bfb..571c6f9e8 100644 --- a/data/users/federated.py +++ b/data/users/federated.py @@ -15,7 +15,9 @@ DISABLED_MESSAGE = "User creation is disabled. Please contact your administrator class FederatedUsers(object): - """ Base class for all federated users systems. 
""" + """ + Base class for all federated users systems. + """ def __init__(self, federated_service, requires_email): self._federated_service = federated_service @@ -42,19 +44,23 @@ class FederatedUsers(object): return False def get_user(self, username_or_email): - """ Retrieves the user with the given username or email, returning a tuple containing - a UserInformation (if success) and the error message (on failure). - """ + """ + Retrieves the user with the given username or email, returning a tuple containing a + UserInformation (if success) and the error message (on failure). + """ raise NotImplementedError def verify_credentials(self, username_or_email, password): - """ Verifies the given credentials against the backing federated service, returning - a tuple containing a UserInformation (on success) and the error message (on failure). - """ + """ + Verifies the given credentials against the backing federated service, returning a tuple + containing a UserInformation (on success) and the error message (on failure). + """ raise NotImplementedError def query_users(self, query, limit=20): - """ If implemented, get_user must be implemented as well. """ + """ + If implemented, get_user must be implemented as well. + """ return (None, "Not supported") def link_user(self, username_or_email): @@ -70,9 +76,10 @@ class FederatedUsers(object): ) def verify_and_link_user(self, username_or_email, password): - """ Verifies the given credentials and, if valid, creates/links a database user to the + """ + Verifies the given credentials and, if valid, creates/links a database user to the associated federated service. - """ + """ (credentials, err_msg) = self.verify_credentials(username_or_email, password) if credentials is None: return (None, err_msg) @@ -80,9 +87,12 @@ class FederatedUsers(object): return self._get_and_link_federated_user_info(credentials.username, credentials.email) def confirm_existing_user(self, username, password): - """ Confirms that the given *database* username and service password are valid for the linked - service. This method is used when the federated service's username is not known. - """ + """ + Confirms that the given *database* username and service password are valid for the linked + service. + + This method is used when the federated service's username is not known. + """ db_user = model.user.get_user(username) if not db_user: return (None, "Invalid user") @@ -98,21 +108,28 @@ class FederatedUsers(object): return (db_user, None) def service_metadata(self): - """ Returns a dictionary of extra metadata to present to *superusers* about this auth engine. + """ + Returns a dictionary of extra metadata to present to *superusers* about this auth engine. + For example, LDAP returns the base DN so we can display to the user during sync setup. - """ + """ return {} def check_group_lookup_args(self, group_lookup_args): - """ Verifies that the given group lookup args point to a valid group. Returns a tuple consisting - of a boolean status and an error message (if any). - """ + """ + Verifies that the given group lookup args point to a valid group. + + Returns a tuple consisting of a boolean status and an error message (if any). + """ return (False, "Not supported") def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False): - """ Returns an iterator over all the members of the group matching the given lookup args - dictionary. The format of the lookup args dictionary is specific to the implementation. 
- """ + """ + Returns an iterator over all the members of the group matching the given lookup args + dictionary. + + The format of the lookup args dictionary is specific to the implementation. + """ return (None, "Not supported") def _get_and_link_federated_user_info(self, username, email, internal_create=False): diff --git a/data/users/keystone.py b/data/users/keystone.py index f8c03c99d..12c61382d 100644 --- a/data/users/keystone.py +++ b/data/users/keystone.py @@ -38,7 +38,9 @@ def get_keystone_users( class KeystoneV2Users(FederatedUsers): - """ Delegates authentication to OpenStack Keystone V2. """ + """ + Delegates authentication to OpenStack Keystone V2. + """ def __init__( self, @@ -145,7 +147,9 @@ class KeystoneV2Users(FederatedUsers): class KeystoneV3Users(FederatedUsers): - """ Delegates authentication to OpenStack Keystone V3. """ + """ + Delegates authentication to OpenStack Keystone V3. + """ def __init__( self, diff --git a/data/users/shared.py b/data/users/shared.py index fd507837e..5ddf28562 100644 --- a/data/users/shared.py +++ b/data/users/shared.py @@ -7,7 +7,9 @@ from data import model def can_create_user(email_address, blacklisted_domains=None): - """ Returns true if a user with the specified e-mail address can be created. """ + """ + Returns true if a user with the specified e-mail address can be created. + """ if features.BLACKLISTED_EMAILS and email_address and "@" in email_address: blacklisted_domains = blacklisted_domains or [] diff --git a/data/users/teamsync.py b/data/users/teamsync.py index 2fea6b607..75f696879 100644 --- a/data/users/teamsync.py +++ b/data/users/teamsync.py @@ -10,9 +10,10 @@ MAX_TEAMS_PER_ITERATION = 500 def sync_teams_to_groups(authentication, stale_cutoff): - """ Performs team syncing by looking up any stale team(s) found, and performing the sync - operation on them. - """ + """ + Performs team syncing by looking up any stale team(s) found, and performing the sync operation + on them. + """ logger.debug("Looking up teams to sync to groups") sync_team_tried = set() @@ -36,9 +37,11 @@ def sync_teams_to_groups(authentication, stale_cutoff): def sync_team(authentication, stale_team_sync): - """ Performs synchronization of a team (as referenced by the TeamSync stale_team_sync). - Returns True on success and False otherwise. - """ + """ + Performs synchronization of a team (as referenced by the TeamSync stale_team_sync). + + Returns True on success and False otherwise. + """ sync_config = json.loads(stale_team_sync.config) logger.info( "Syncing team `%s` under organization %s via %s (#%s)", diff --git a/data/users/test/test_users.py b/data/users/test/test_users.py index 250308759..4f1463645 100644 --- a/data/users/test/test_users.py +++ b/data/users/test/test_users.py @@ -60,7 +60,9 @@ def test_auth_createuser(auth_system_builder, user1, user2, config, app): def test_createuser_with_blacklist( auth_system_builder, email, blacklisting_enabled, can_create, config, app ): - """Verify email blacklisting with User Creation""" + """ + Verify email blacklisting with User Creation. + """ MOCK_CONFIG = {"BLACKLISTED_EMAIL_DOMAINS": ["blacklisted.com", "blacklisted.net"]} MOCK_PASSWORD = "somepass" diff --git a/digest/digest_tools.py b/digest/digest_tools.py index 7273b20fe..baa510f2c 100644 --- a/digest/digest_tools.py +++ b/digest/digest_tools.py @@ -27,7 +27,9 @@ class Digest(object): @staticmethod def parse_digest(digest): - """ Returns the digest parsed out to its components. """ + """ + Returns the digest parsed out to its components. 
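+
+        For example (an illustrative, truncated digest), `sha256:e3b0c4...`
+        parses into the hash algorithm `sha256` and the hexadecimal hash
+        portion `e3b0c4...`.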
+ """ match = Digest.DIGEST_REGEX.match(digest) if match is None or match.end() != len(digest): raise InvalidDigestException("Not a valid digest: %s", digest) @@ -44,7 +46,9 @@ class Digest(object): def content_path(digest): - """ Returns a relative path to the parsed digest. """ + """ + Returns a relative path to the parsed digest. + """ parsed = Digest.parse_digest(digest) components = [] @@ -58,7 +62,9 @@ def content_path(digest): def sha256_digest(content): - """ Returns a sha256 hash of the content bytes in digest form. """ + """ + Returns a sha256 hash of the content bytes in digest form. + """ def single_chunk_generator(): yield content @@ -67,8 +73,9 @@ def sha256_digest(content): def sha256_digest_from_generator(content_generator): - """ Reads all of the data from the iterator and creates a sha256 digest from the content - """ + """ + Reads all of the data from the iterator and creates a sha256 digest from the content. + """ digest = hashlib.sha256() for chunk in content_generator: digest.update(chunk) @@ -80,6 +87,7 @@ def sha256_digest_from_hashlib(sha256_hash_obj): def digests_equal(lhs_digest_string, rhs_digest_string): - """ Parse and compare the two digests, returns True if the digests are equal, False otherwise. - """ + """ + Parse and compare the two digests, returns True if the digests are equal, False otherwise. + """ return Digest.parse_digest(lhs_digest_string) == Digest.parse_digest(rhs_digest_string) diff --git a/endpoints/api/__init__.py b/endpoints/api/__init__.py index 298319a0b..e5eb9bb54 100644 --- a/endpoints/api/__init__.py +++ b/endpoints/api/__init__.py @@ -121,7 +121,9 @@ def truthy_bool(param): def format_date(date): - """ Output an RFC822 date format. """ + """ + Output an RFC822 date format. + """ if date is None: return None return formatdate(timegm(date.utctimetuple())) @@ -189,10 +191,12 @@ def query_param(name, help_str, type=reqparse.text_type, default=None, choices=( def page_support(page_token_kwarg="page_token", parsed_args_kwarg="parsed_args"): def inner(func): - """ Adds pagination support to an API endpoint. The decorated API will have an - added query parameter named 'next_page'. Works in tandem with the - modelutil paginate method. - """ + """ + Adds pagination support to an API endpoint. + + The decorated API will have an added query parameter named 'next_page'. Works in tandem with + the modelutil paginate method. + """ @wraps(func) @query_param("next_page", "The page token for the next page", type=str) diff --git a/endpoints/api/__init__models_interface.py b/endpoints/api/__init__models_interface.py index a22d733b9..1b12a116f 100644 --- a/endpoints/api/__init__models_interface.py +++ b/endpoints/api/__init__models_interface.py @@ -6,8 +6,8 @@ from six import add_metaclass @add_metaclass(ABCMeta) class InitDataInterface(object): """ - Interface that represents all data store interactions required by __init__. - """ + Interface that represents all data store interactions required by __init__. + """ @abstractmethod def is_app_repository(self, namespace_name, repository_name): diff --git a/endpoints/api/appspecifictokens.py b/endpoints/api/appspecifictokens.py index 93a8ed4f4..0705bcaa8 100644 --- a/endpoints/api/appspecifictokens.py +++ b/endpoints/api/appspecifictokens.py @@ -1,4 +1,6 @@ -""" Manages app specific tokens for the current user. """ +""" +Manages app specific tokens for the current user. 
+"""
 
 import logging
 import math
 
@@ -56,7 +58,9 @@ _DEFAULT_TOKEN_EXPIRATION_WINDOW = "4w"
 
 @resource("/v1/user/apptoken")
 @show_if(features.APP_SPECIFIC_TOKENS)
 class AppTokens(ApiResource):
-    """ Lists all app specific tokens for a user """
+    """
+    Lists all app specific tokens for a user.
+    """
 
     schemas = {
         "NewToken": {
@@ -73,7 +77,9 @@ class AppTokens(ApiResource):
     @parse_args()
     @query_param("expiring", "If true, only returns those tokens expiring soon", type=truthy_bool)
     def get(self, parsed_args):
-        """ Lists the app specific tokens for the user. """
+        """
+        Lists the app specific tokens for the user.
+        """
         expiring = parsed_args["expiring"]
         if expiring:
             expiration = app.config.get("APP_SPECIFIC_TOKEN_EXPIRATION")
@@ -94,7 +100,9 @@ class AppTokens(ApiResource):
     @nickname("createAppToken")
     @validate_json_request("NewToken")
     def post(self):
-        """ Create a new app specific token for user. """
+        """
+        Create a new app specific token for user.
+        """
         title = request.get_json()["title"]
         token = model.appspecifictoken.create_token(get_authenticated_user(), title)
@@ -113,13 +121,17 @@ class AppTokens(ApiResource):
 @show_if(features.APP_SPECIFIC_TOKENS)
 @path_param("token_uuid", "The uuid of the app specific token")
 class AppToken(ApiResource):
-    """ Provides operations on an app specific token """
+    """
+    Provides operations on an app specific token.
+    """
 
     @require_user_admin
     @require_fresh_login
     @nickname("getAppToken")
     def get(self, token_uuid):
-        """ Returns a specific app token for the user. """
+        """
+        Returns a specific app token for the user.
+        """
         token = model.appspecifictoken.get_token_by_uuid(token_uuid, owner=get_authenticated_user())
         if token is None:
             raise NotFound()
@@ -132,7 +144,9 @@ class AppToken(ApiResource):
     @require_fresh_login
     @nickname("revokeAppToken")
     def delete(self, token_uuid):
-        """ Revokes a specific app token for the user. """
+        """
+        Revokes a specific app token for the user.
+        """
         token = model.appspecifictoken.revoke_token_by_uuid(
             token_uuid, owner=get_authenticated_user()
         )
diff --git a/endpoints/api/billing.py b/endpoints/api/billing.py
index 2d3efe10a..16ab59f47 100644
--- a/endpoints/api/billing.py
+++ b/endpoints/api/billing.py
@@ -1,4 +1,6 @@
-""" Billing information, subscriptions, and plan information. """
+"""
+Billing information, subscriptions, and plan information.
+"""
 
 import stripe
 
@@ -32,7 +34,9 @@ import json
 
 
 def get_namespace_plan(namespace):
-    """ Returns the plan of the given namespace. """
+    """
+    Returns the plan of the given namespace.
+    """
     namespace_user = model.user.get_namespace_user(namespace)
     if namespace_user is None:
         return None
@@ -54,7 +58,9 @@ def get_namespace_plan(namespace):
 
 
 def lookup_allowed_private_repos(namespace):
-    """ Returns false if the given namespace has used its allotment of private repositories. """
+    """
+    Returns false if the given namespace has used its allotment of private repositories.
+    """
     current_plan = get_namespace_plan(namespace)
     if current_plan is None:
         return False
@@ -185,11 +191,15 @@ def delete_billing_invoice_field(user, field_uuid):
 
 @resource("/v1/plans/")
 @show_if(features.BILLING)
 class ListPlans(ApiResource):
-    """ Resource for listing the available plans. """
+    """
+    Resource for listing the available plans.
+    """
 
     @nickname("listPlans")
     def get(self):
-        """ List the avaialble plans. """
+        """
+        List the available plans.
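+
+        Example response shape (illustrative; `PLANS` is the plan list returned
+        by the handler below):
+
+            {"plans": [...]}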
+        """
         return {
             "plans": PLANS,
         }
 
 
 @resource("/v1/user/card")
 @internal_only
 @show_if(features.BILLING)
 class UserCard(ApiResource):
-    """ Resource for managing a user's credit card. """
+    """
+    Resource for managing a user's credit card.
+    """
 
     schemas = {
         "UserCard": {
@@ -219,7 +231,9 @@ class UserCard(ApiResource):
     @require_user_admin
     @nickname("getUserCard")
     def get(self):
-        """ Get the user's credit card. """
+        """
+        Get the user's credit card.
+        """
         user = get_authenticated_user()
         return get_card(user)
 
@@ -227,7 +241,9 @@ class UserCard(ApiResource):
     @nickname("setUserCard")
     @validate_json_request("UserCard")
     def post(self):
-        """ Update the user's credit card. """
+        """
+        Update the user's credit card.
+        """
         user = get_authenticated_user()
         token = request.get_json()["token"]
         response = set_card(user, token)
@@ -241,7 +257,9 @@
 
 @related_user_resource(UserCard)
 @show_if(features.BILLING)
 class OrganizationCard(ApiResource):
-    """ Resource for managing an organization's credit card. """
+    """
+    Resource for managing an organization's credit card.
+    """
 
     schemas = {
         "OrgCard": {
@@ -261,7 +279,9 @@ class OrganizationCard(ApiResource):
     @require_scope(scopes.ORG_ADMIN)
     @nickname("getOrgCard")
     def get(self, orgname):
-        """ Get the organization's credit card. """
+        """
+        Get the organization's credit card.
+        """
         permission = AdministerOrganizationPermission(orgname)
         if permission.can():
             organization = model.organization.get_organization(orgname)
@@ -272,7 +292,9 @@ class OrganizationCard(ApiResource):
     @nickname("setOrgCard")
     @validate_json_request("OrgCard")
     def post(self, orgname):
-        """ Update the orgnaization's credit card. """
+        """
+        Update the organization's credit card.
+        """
         permission = AdministerOrganizationPermission(orgname)
         if permission.can():
             organization = model.organization.get_organization(orgname)
@@ -288,7 +310,9 @@
 
 @internal_only
 @show_if(features.BILLING)
 class UserPlan(ApiResource):
-    """ Resource for managing a user's subscription. """
+    """
+    Resource for managing a user's subscription.
+    """
 
     schemas = {
         "UserSubscription": {
@@ -313,7 +337,9 @@ class UserPlan(ApiResource):
     @nickname("updateUserSubscription")
     @validate_json_request("UserSubscription")
     def put(self):
-        """ Create or update the user's subscription. """
+        """
+        Create or update the user's subscription.
+        """
         request_data = request.get_json()
         plan = request_data["plan"]
         token = request_data["token"] if "token" in request_data else None
@@ -323,7 +349,9 @@ class UserPlan(ApiResource):
     @require_user_admin
     @nickname("getUserSubscription")
     def get(self):
-        """ Fetch any existing subscription for the user. """
+        """
+        Fetch any existing subscription for the user.
+        """
         cus = None
         user = get_authenticated_user()
         private_repos = model.user.get_private_repo_count(user.username)
@@ -351,7 +379,9 @@
 
 @related_user_resource(UserPlan)
 @show_if(features.BILLING)
 class OrganizationPlan(ApiResource):
-    """ Resource for managing a org's subscription. """
+    """
+    Resource for managing an org's subscription.
+    """
 
     schemas = {
         "OrgSubscription": {
@@ -376,7 +406,9 @@ class OrganizationPlan(ApiResource):
     @nickname("updateOrgSubscription")
     @validate_json_request("OrgSubscription")
     def put(self, orgname):
-        """ Create or update the org's subscription. """
+        """
+        Create or update the org's subscription.
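+
+        Example request body (illustrative; the field names mirror the
+        user-subscription endpoint above, and the values are placeholders):
+
+            {"plan": "<plan-id>", "token": "<card-token>"}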
+        """
         permission = AdministerOrganizationPermission(orgname)
         if permission.can():
             request_data = request.get_json()
@@ -390,7 +422,9 @@ class OrganizationPlan(ApiResource):
     @require_scope(scopes.ORG_ADMIN)
     @nickname("getOrgSubscription")
     def get(self, orgname):
-        """ Fetch any existing subscription for the org. """
+        """
+        Fetch any existing subscription for the org.
+        """
         cus = None
         permission = AdministerOrganizationPermission(orgname)
         if permission.can():
@@ -419,12 +453,16 @@ class OrganizationPlan(ApiResource):
 
 @internal_only
 @show_if(features.BILLING)
 class UserInvoiceList(ApiResource):
-    """ Resource for listing a user's invoices. """
+    """
+    Resource for listing a user's invoices.
+    """
 
     @require_user_admin
     @nickname("listUserInvoices")
     def get(self):
-        """ List the invoices for the current user. """
+        """
+        List the invoices for the current user.
+        """
         user = get_authenticated_user()
         if not user.stripe_id:
             raise NotFound()
@@ -437,12 +475,16 @@ class UserInvoiceList(ApiResource):
 
 @related_user_resource(UserInvoiceList)
 @show_if(features.BILLING)
 class OrganizationInvoiceList(ApiResource):
-    """ Resource for listing an orgnaization's invoices. """
+    """
+    Resource for listing an organization's invoices.
+    """
 
     @require_scope(scopes.ORG_ADMIN)
     @nickname("listOrgInvoices")
     def get(self, orgname):
-        """ List the invoices for the specified orgnaization. """
+        """
+        List the invoices for the specified organization.
+        """
         permission = AdministerOrganizationPermission(orgname)
         if permission.can():
             organization = model.organization.get_organization(orgname)
@@ -458,7 +500,9 @@ class OrganizationInvoiceList(ApiResource):
 
 @internal_only
 @show_if(features.BILLING)
 class UserInvoiceFieldList(ApiResource):
-    """ Resource for listing and creating a user's custom invoice fields. """
+    """
+    Resource for listing and creating a user's custom invoice fields.
+    """
 
     schemas = {
         "InvoiceField": {
@@ -476,7 +520,9 @@ class UserInvoiceFieldList(ApiResource):
     @require_user_admin
     @nickname("listUserInvoiceFields")
     def get(self):
-        """ List the invoice fields for the current user. """
+        """
+        List the invoice fields for the current user.
+        """
         user = get_authenticated_user()
         if not user.stripe_id:
             raise NotFound()
@@ -487,7 +533,9 @@ class UserInvoiceFieldList(ApiResource):
     @nickname("createUserInvoiceField")
     @validate_json_request("InvoiceField")
     def post(self):
-        """ Creates a new invoice field. """
+        """
+        Creates a new invoice field.
+        """
         user = get_authenticated_user()
         if not user.stripe_id:
             raise NotFound()
@@ -501,12 +549,16 @@ class UserInvoiceFieldList(ApiResource):
 
 @internal_only
 @show_if(features.BILLING)
 class UserInvoiceField(ApiResource):
-    """ Resource for deleting a user's custom invoice fields. """
+    """
+    Resource for deleting a user's custom invoice fields.
+    """
 
     @require_user_admin
     @nickname("deleteUserInvoiceField")
     def delete(self, field_uuid):
-        """ Deletes the invoice field for the current user. """
+        """
+        Deletes the invoice field for the current user.
+        """
         user = get_authenticated_user()
         if not user.stripe_id:
             raise NotFound()
@@ -524,7 +576,9 @@ class UserInvoiceField(ApiResource):
 
 @internal_only
 @show_if(features.BILLING)
 class OrganizationInvoiceFieldList(ApiResource):
-    """ Resource for listing and creating an organization's custom invoice fields. """
+    """
+    Resource for listing and creating an organization's custom invoice fields.
+ """ schemas = { "InvoiceField": { @@ -542,7 +596,9 @@ class OrganizationInvoiceFieldList(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("listOrgInvoiceFields") def get(self, orgname): - """ List the invoice fields for the organization. """ + """ + List the invoice fields for the organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): organization = model.organization.get_organization(orgname) @@ -557,7 +613,9 @@ class OrganizationInvoiceFieldList(ApiResource): @nickname("createOrgInvoiceField") @validate_json_request("InvoiceField") def post(self, orgname): - """ Creates a new invoice field. """ + """ + Creates a new invoice field. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): organization = model.organization.get_organization(orgname) @@ -577,12 +635,16 @@ class OrganizationInvoiceFieldList(ApiResource): @internal_only @show_if(features.BILLING) class OrganizationInvoiceField(ApiResource): - """ Resource for deleting an organization's custom invoice fields. """ + """ + Resource for deleting an organization's custom invoice fields. + """ @require_scope(scopes.ORG_ADMIN) @nickname("deleteOrgInvoiceField") def delete(self, orgname, field_uuid): - """ Deletes the invoice field for the current user. """ + """ + Deletes the invoice field for the current user. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): organization = model.organization.get_organization(orgname) diff --git a/endpoints/api/build.py b/endpoints/api/build.py index fe737fcea..bb0408eb5 100644 --- a/endpoints/api/build.py +++ b/endpoints/api/build.py @@ -1,4 +1,6 @@ -""" Create, list, cancel and get status/logs of repository builds. """ +""" +Create, list, cancel and get status/logs of repository builds. +""" import datetime import hashlib import json @@ -105,7 +107,9 @@ def trigger_view(trigger, can_read=False, can_admin=False, for_build=False): def _get_build_status(build_obj): - """ Returns the updated build phase, status and (if any) error for the build object. """ + """ + Returns the updated build phase, status and (if any) error for the build object. + """ phase = build_obj.phase status = {} error = None @@ -186,7 +190,9 @@ def build_status_view(build_obj): @resource("/v1/repository//build/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryBuildList(RepositoryParamResource): - """ Resource related to creating and listing repository builds. """ + """ + Resource related to creating and listing repository builds. + """ schemas = { "RepositoryBuildRequest": { @@ -238,7 +244,9 @@ class RepositoryBuildList(RepositoryParamResource): @nickname("getRepoBuilds") @disallow_for_app_repositories def get(self, namespace, repository, parsed_args): - """ Get the list of repository builds. """ + """ + Get the list of repository builds. + """ limit = parsed_args.get("limit", 5) since = parsed_args.get("since", None) @@ -254,7 +262,9 @@ class RepositoryBuildList(RepositoryParamResource): @disallow_for_non_normal_repositories @validate_json_request("RepositoryBuildRequest") def post(self, namespace, repository): - """ Request that a repository be built and pushed from the specified input. """ + """ + Request that a repository be built and pushed from the specified input. 
+ """ logger.debug("User requested repository initialization.") request_json = request.get_json() @@ -377,13 +387,17 @@ class RepositoryBuildList(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("build_uuid", "The UUID of the build") class RepositoryBuildResource(RepositoryParamResource): - """ Resource for dealing with repository builds. """ + """ + Resource for dealing with repository builds. + """ @require_repo_read @nickname("getRepoBuild") @disallow_for_app_repositories def get(self, namespace, repository, build_uuid): - """ Returns information about a build. """ + """ + Returns information about a build. + """ try: build = model.build.get_repository_build(build_uuid) except model.build.InvalidRepositoryBuildException: @@ -402,7 +416,9 @@ class RepositoryBuildResource(RepositoryParamResource): @disallow_for_app_repositories @disallow_for_non_normal_repositories def delete(self, namespace, repository, build_uuid): - """ Cancels a repository build. """ + """ + Cancels a repository build. + """ try: build = model.build.get_repository_build(build_uuid) except model.build.InvalidRepositoryBuildException: @@ -424,13 +440,17 @@ class RepositoryBuildResource(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("build_uuid", "The UUID of the build") class RepositoryBuildStatus(RepositoryParamResource): - """ Resource for dealing with repository build status. """ + """ + Resource for dealing with repository build status. + """ @require_repo_read @nickname("getRepoBuildStatus") @disallow_for_app_repositories def get(self, namespace, repository, build_uuid): - """ Return the status for the builds specified by the build uuids. """ + """ + Return the status for the builds specified by the build uuids. + """ build = model.build.get_repository_build(build_uuid) if ( not build @@ -467,13 +487,17 @@ def get_logs_or_log_url(build): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("build_uuid", "The UUID of the build") class RepositoryBuildLogs(RepositoryParamResource): - """ Resource for loading repository build logs. """ + """ + Resource for loading repository build logs. + """ @require_repo_read @nickname("getRepoBuildLogs") @disallow_for_app_repositories def get(self, namespace, repository, build_uuid): - """ Return the build logs for the build specified by the build uuid. """ + """ + Return the build logs for the build specified by the build uuid. + """ can_write = ModifyRepositoryPermission(namespace, repository).can() if not features.READER_BUILD_LOGS and not can_write: raise Unauthorized() @@ -492,7 +516,9 @@ class RepositoryBuildLogs(RepositoryParamResource): @resource("/v1/filedrop/") @internal_only class FileDropResource(ApiResource): - """ Custom verb for setting up a client side file transfer. """ + """ + Custom verb for setting up a client side file transfer. + """ schemas = { "FileDropRequest": { @@ -511,7 +537,9 @@ class FileDropResource(ApiResource): @nickname("getFiledropUrl") @validate_json_request("FileDropRequest") def post(self): - """ Request a URL to which a file may be uploaded. """ + """ + Request a URL to which a file may be uploaded. 
+ """ mime_type = request.get_json()["mimeType"] (url, file_id) = user_files.prepare_for_drop(mime_type, requires_cors=True) return { diff --git a/endpoints/api/discovery.py b/endpoints/api/discovery.py index bac5d4071..cd177e5e4 100644 --- a/endpoints/api/discovery.py +++ b/endpoints/api/discovery.py @@ -1,5 +1,7 @@ # TODO to extract the discovery stuff into a util at the top level and then use it both here and config_app discovery.py -""" API discovery information. """ +""" +API discovery information. +""" import re import logging @@ -316,12 +318,16 @@ def swagger_route_data(include_internal=False, compact=False): @resource("/v1/discovery") class DiscoveryResource(ApiResource): - """Ability to inspect the API for usage information and documentation.""" + """ + Ability to inspect the API for usage information and documentation. + """ @parse_args() @query_param("internal", "Whether to include internal APIs.", type=truthy_bool, default=False) @nickname("discovery") @anon_allowed def get(self, parsed_args): - """ List all of the API endpoints available in the swagger API format.""" + """ + List all of the API endpoints available in the swagger API format. + """ return swagger_route_data(parsed_args["internal"]) diff --git a/endpoints/api/error.py b/endpoints/api/error.py index 43187cc7c..4eb5f67c3 100644 --- a/endpoints/api/error.py +++ b/endpoints/api/error.py @@ -1,4 +1,6 @@ -""" Error details API """ +""" +Error details API. +""" from flask import url_for from endpoints.api import resource, nickname, ApiResource, path_param, define_json_response @@ -16,7 +18,9 @@ def error_view(error_type): @resource("/v1/error/") @path_param("error_type", "The error code identifying the type of error.") class Error(ApiResource): - """ Resource for Error Descriptions""" + """ + Resource for Error Descriptions. + """ schemas = { "ApiErrorDescription": { @@ -47,7 +51,9 @@ class Error(ApiResource): @define_json_response("ApiErrorDescription") @nickname("getErrorDescription") def get(self, error_type): - """ Get a detailed description of the error """ + """ + Get a detailed description of the error. + """ if error_type in ERROR_DESCRIPTION.keys(): return error_view(error_type) diff --git a/endpoints/api/globalmessages.py b/endpoints/api/globalmessages.py index 733b88d5c..bf050e9a9 100644 --- a/endpoints/api/globalmessages.py +++ b/endpoints/api/globalmessages.py @@ -1,4 +1,6 @@ -""" Messages API. """ +""" +Messages API. +""" from flask import abort from flask import make_response from flask import request @@ -21,7 +23,9 @@ from globalmessages_models_pre_oci import pre_oci_model as model @resource("/v1/messages") class GlobalUserMessages(ApiResource): - """ Resource for getting a list of super user messages """ + """ + Resource for getting a list of super user messages. + """ schemas = { "GetMessage": { @@ -81,7 +85,9 @@ class GlobalUserMessages(ApiResource): @nickname("getGlobalMessages") def get(self): - """ Return a super users messages """ + """ + Return a super users messages. + """ return { "messages": [m.to_dict() for m in model.get_all_messages()], } @@ -92,7 +98,9 @@ class GlobalUserMessages(ApiResource): @validate_json_request("CreateMessage") @require_scope(scopes.SUPERUSER) def post(self): - """ Create a message """ + """ + Create a message. 
+ """ if not features.SUPER_USERS: abort(404) @@ -111,14 +119,18 @@ class GlobalUserMessages(ApiResource): @resource("/v1/message/") @show_if(features.SUPER_USERS) class GlobalUserMessage(ApiResource): - """ Resource for managing individual messages """ + """ + Resource for managing individual messages. + """ @require_fresh_login @verify_not_prod @nickname("deleteGlobalMessage") @require_scope(scopes.SUPERUSER) def delete(self, uuid): - """ Delete a message """ + """ + Delete a message. + """ if SuperUserPermission().can(): model.delete_message(uuid) return make_response("", 204) diff --git a/endpoints/api/globalmessages_models_interface.py b/endpoints/api/globalmessages_models_interface.py index b0eb9169c..c87e18e42 100644 --- a/endpoints/api/globalmessages_models_interface.py +++ b/endpoints/api/globalmessages_models_interface.py @@ -19,8 +19,8 @@ class GlobalMessage( @add_metaclass(ABCMeta) class GlobalMessageDataInterface(object): """ - Data interface for globalmessages API - """ + Data interface for globalmessages API. + """ @abstractmethod def get_all_messages(self): diff --git a/endpoints/api/image.py b/endpoints/api/image.py index 1f3a4b3c1..6fac79b56 100644 --- a/endpoints/api/image.py +++ b/endpoints/api/image.py @@ -1,4 +1,6 @@ -""" List and lookup repository images. """ +""" +List and lookup repository images. +""" import json from data.registry_model import registry_model @@ -47,13 +49,17 @@ def image_dict(image, with_history=False, with_tags=False): @resource("/v1/repository//image/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryImageList(RepositoryParamResource): - """ Resource for listing repository images. """ + """ + Resource for listing repository images. + """ @require_repo_read @nickname("listRepositoryImages") @disallow_for_app_repositories def get(self, namespace, repository): - """ List the images for the specified repository. """ + """ + List the images for the specified repository. + """ repo_ref = registry_model.lookup_repository(namespace, repository) if repo_ref is None: raise NotFound() @@ -66,13 +72,17 @@ class RepositoryImageList(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("image_id", "The Docker image ID") class RepositoryImage(RepositoryParamResource): - """ Resource for handling repository images. """ + """ + Resource for handling repository images. + """ @require_repo_read @nickname("getImage") @disallow_for_app_repositories def get(self, namespace, repository, image_id): - """ Get the information available for the specified image. """ + """ + Get the information available for the specified image. + """ repo_ref = registry_model.lookup_repository(namespace, repository) if repo_ref is None: raise NotFound() diff --git a/endpoints/api/logs.py b/endpoints/api/logs.py index e15bb447b..ee47722be 100644 --- a/endpoints/api/logs.py +++ b/endpoints/api/logs.py @@ -1,4 +1,6 @@ -""" Access usage logs for organizations or repositories. """ +""" +Access usage logs for organizations or repositories. +""" from datetime import datetime, timedelta from flask import request @@ -112,7 +114,9 @@ def _get_aggregate_logs( @resource("/v1/repository//logs") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryLogs(RepositoryParamResource): - """ Resource for fetching logs for the specific repository. """ + """ + Resource for fetching logs for the specific repository. 
+ """ @require_repo_admin @nickname("listRepoLogs") @@ -121,7 +125,9 @@ class RepositoryLogs(RepositoryParamResource): @query_param("endtime", 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str) @page_support() def get(self, namespace, repository, page_token, parsed_args): - """ List the logs for the specified repository. """ + """ + List the logs for the specified repository. + """ if registry_model.lookup_repository(namespace, repository) is None: raise NotFound() @@ -138,7 +144,9 @@ class RepositoryLogs(RepositoryParamResource): @resource("/v1/user/logs") class UserLogs(ApiResource): - """ Resource for fetching logs for the current user. """ + """ + Resource for fetching logs for the current user. + """ @require_user_admin @nickname("listUserLogs") @@ -148,7 +156,9 @@ class UserLogs(ApiResource): @query_param("performer", "Username for which to filter logs.", type=str) @page_support() def get(self, parsed_args, page_token): - """ List the logs for the current user. """ + """ + List the logs for the current user. + """ performer_name = parsed_args["performer"] start_time = parsed_args["starttime"] end_time = parsed_args["endtime"] @@ -168,7 +178,9 @@ class UserLogs(ApiResource): @path_param("orgname", "The name of the organization") @related_user_resource(UserLogs) class OrgLogs(ApiResource): - """ Resource for fetching logs for the entire organization. """ + """ + Resource for fetching logs for the entire organization. + """ @nickname("listOrgLogs") @parse_args() @@ -178,7 +190,9 @@ class OrgLogs(ApiResource): @page_support() @require_scope(scopes.ORG_ADMIN) def get(self, orgname, page_token, parsed_args): - """ List the logs for the specified organization. """ + """ + List the logs for the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): performer_name = parsed_args["performer"] @@ -200,7 +214,9 @@ class OrgLogs(ApiResource): @show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL) @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryAggregateLogs(RepositoryParamResource): - """ Resource for fetching aggregated logs for the specific repository. """ + """ + Resource for fetching aggregated logs for the specific repository. + """ @require_repo_admin @nickname("getAggregateRepoLogs") @@ -208,7 +224,9 @@ class RepositoryAggregateLogs(RepositoryParamResource): @query_param("starttime", 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str) @query_param("endtime", 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str) def get(self, namespace, repository, parsed_args): - """ Returns the aggregated logs for the specified repository. """ + """ + Returns the aggregated logs for the specified repository. + """ if registry_model.lookup_repository(namespace, repository) is None: raise NotFound() @@ -220,7 +238,9 @@ class RepositoryAggregateLogs(RepositoryParamResource): @resource("/v1/user/aggregatelogs") @show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL) class UserAggregateLogs(ApiResource): - """ Resource for fetching aggregated logs for the current user. """ + """ + Resource for fetching aggregated logs for the current user. + """ @require_user_admin @nickname("getAggregateUserLogs") @@ -229,7 +249,9 @@ class UserAggregateLogs(ApiResource): @query_param("endtime", 'Latest time for logs. 
Format: "%m/%d/%Y" in UTC.', type=str) @query_param("performer", "Username for which to filter logs.", type=str) def get(self, parsed_args): - """ Returns the aggregated logs for the current user. """ + """ + Returns the aggregated logs for the current user. + """ performer_name = parsed_args["performer"] start_time = parsed_args["starttime"] end_time = parsed_args["endtime"] @@ -249,7 +271,9 @@ class UserAggregateLogs(ApiResource): @path_param("orgname", "The name of the organization") @related_user_resource(UserLogs) class OrgAggregateLogs(ApiResource): - """ Resource for fetching aggregate logs for the entire organization. """ + """ + Resource for fetching aggregate logs for the entire organization. + """ @nickname("getAggregateOrgLogs") @parse_args() @@ -258,7 +282,9 @@ class OrgAggregateLogs(ApiResource): @query_param("performer", "Username for which to filter logs.", type=str) @require_scope(scopes.ORG_ADMIN) def get(self, orgname, parsed_args): - """ Gets the aggregated logs for the specified organization. """ + """ + Gets the aggregated logs for the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): performer_name = parsed_args["performer"] @@ -319,7 +345,9 @@ def _queue_logs_export(start_time, end_time, options, namespace_name, repository @show_if(features.LOG_EXPORT) @path_param("repository", "The full path of the repository. e.g. namespace/name") class ExportRepositoryLogs(RepositoryParamResource): - """ Resource for exporting the logs for the specific repository. """ + """ + Resource for exporting the logs for the specific repository. + """ schemas = {"ExportLogs": EXPORT_LOGS_SCHEMA} @@ -330,7 +358,9 @@ class ExportRepositoryLogs(RepositoryParamResource): @query_param("endtime", 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str) @validate_json_request("ExportLogs") def post(self, namespace, repository, parsed_args): - """ Queues an export of the logs for the specified repository. """ + """ + Queues an export of the logs for the specified repository. + """ if registry_model.lookup_repository(namespace, repository) is None: raise NotFound() @@ -347,7 +377,9 @@ class ExportRepositoryLogs(RepositoryParamResource): @resource("/v1/user/exportlogs") @show_if(features.LOG_EXPORT) class ExportUserLogs(ApiResource): - """ Resource for exporting the logs for the current user repository. """ + """ + Resource for exporting the logs for the current user repository. + """ schemas = {"ExportLogs": EXPORT_LOGS_SCHEMA} @@ -358,7 +390,9 @@ class ExportUserLogs(ApiResource): @query_param("endtime", 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str) @validate_json_request("ExportLogs") def post(self, parsed_args): - """ Returns the aggregated logs for the current user. """ + """ + Returns the aggregated logs for the current user. + """ start_time = parsed_args["starttime"] end_time = parsed_args["endtime"] @@ -374,7 +408,9 @@ class ExportUserLogs(ApiResource): @path_param("orgname", "The name of the organization") @related_user_resource(ExportUserLogs) class ExportOrgLogs(ApiResource): - """ Resource for exporting the logs for an entire organization. """ + """ + Resource for exporting the logs for an entire organization. + """ schemas = {"ExportLogs": EXPORT_LOGS_SCHEMA} @@ -385,7 +421,9 @@ class ExportOrgLogs(ApiResource): @require_scope(scopes.ORG_ADMIN) @validate_json_request("ExportLogs") def post(self, orgname, parsed_args): - """ Exports the logs for the specified organization. 
""" + """ + Exports the logs for the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): start_time = parsed_args["starttime"] diff --git a/endpoints/api/manifest.py b/endpoints/api/manifest.py index a655010e8..9baf8a7a7 100644 --- a/endpoints/api/manifest.py +++ b/endpoints/api/manifest.py @@ -1,4 +1,6 @@ -""" Manage the manifests of a repository. """ +""" +Manage the manifests of a repository. +""" import json import logging @@ -98,7 +100,9 @@ def _manifest_dict(manifest): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("manifestref", "The digest of the manifest") class RepositoryManifest(RepositoryParamResource): - """ Resource for retrieving a specific repository manifest. """ + """ + Resource for retrieving a specific repository manifest. + """ @require_repo_read @nickname("getRepoManifest") @@ -121,7 +125,9 @@ class RepositoryManifest(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("manifestref", "The digest of the manifest") class RepositoryManifestLabels(RepositoryParamResource): - """ Resource for listing the labels on a specific repository manifest. """ + """ + Resource for listing the labels on a specific repository manifest. + """ schemas = { "AddLabel": { @@ -171,7 +177,9 @@ class RepositoryManifestLabels(RepositoryParamResource): @disallow_for_non_normal_repositories @validate_json_request("AddLabel") def post(self, namespace_name, repository_name, manifestref): - """ Adds a new label into the tag manifest. """ + """ + Adds a new label into the tag manifest. + """ label_data = request.get_json() # Check for any reserved prefixes. @@ -236,13 +244,17 @@ class RepositoryManifestLabels(RepositoryParamResource): @path_param("manifestref", "The digest of the manifest") @path_param("labelid", "The ID of the label") class ManageRepositoryManifestLabel(RepositoryParamResource): - """ Resource for managing the labels on a specific repository manifest. """ + """ + Resource for managing the labels on a specific repository manifest. + """ @require_repo_read @nickname("getManifestLabel") @disallow_for_app_repositories def get(self, namespace_name, repository_name, manifestref, labelid): - """ Retrieves the label with the specific ID under the manifest. """ + """ + Retrieves the label with the specific ID under the manifest. + """ repo_ref = registry_model.lookup_repository(namespace_name, repository_name) if repo_ref is None: raise NotFound() @@ -262,7 +274,9 @@ class ManageRepositoryManifestLabel(RepositoryParamResource): @disallow_for_app_repositories @disallow_for_non_normal_repositories def delete(self, namespace_name, repository_name, manifestref, labelid): - """ Deletes an existing label from a manifest. """ + """ + Deletes an existing label from a manifest. + """ repo_ref = registry_model.lookup_repository(namespace_name, repository_name) if repo_ref is None: raise NotFound() diff --git a/endpoints/api/mirror.py b/endpoints/api/mirror.py index f0ff0e180..6e3ed9c4e 100644 --- a/endpoints/api/mirror.py +++ b/endpoints/api/mirror.py @@ -110,12 +110,16 @@ common_properties = { @path_param("repository", "The full path of the repository. e.g. namespace/name") @show_if(features.REPO_MIRROR) class RepoMirrorSyncNowResource(RepositoryParamResource): - """ A resource for managing RepoMirrorConfig.sync_status """ + """ + A resource for managing RepoMirrorConfig.sync_status. 
+ """ @require_repo_admin @nickname("syncNow") def post(self, namespace_name, repository_name): - """ Update the sync_status for a given Repository's mirroring configuration. """ + """ + Update the sync_status for a given Repository's mirroring configuration. + """ repo = model.repository.get_repository(namespace_name, repository_name) if not repo: raise NotFound() @@ -140,12 +144,16 @@ class RepoMirrorSyncNowResource(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @show_if(features.REPO_MIRROR) class RepoMirrorSyncCancelResource(RepositoryParamResource): - """ A resource for managing RepoMirrorConfig.sync_status """ + """ + A resource for managing RepoMirrorConfig.sync_status. + """ @require_repo_admin @nickname("syncCancel") def post(self, namespace_name, repository_name): - """ Update the sync_status for a given Repository's mirroring configuration. """ + """ + Update the sync_status for a given Repository's mirroring configuration. + """ repo = model.repository.get_repository(namespace_name, repository_name) if not repo: raise NotFound() @@ -171,8 +179,8 @@ class RepoMirrorSyncCancelResource(RepositoryParamResource): @show_if(features.REPO_MIRROR) class RepoMirrorResource(RepositoryParamResource): """ - Resource for managing repository mirroring. - """ + Resource for managing repository mirroring. + """ schemas = { "CreateMirrorConfig": { @@ -211,7 +219,9 @@ class RepoMirrorResource(RepositoryParamResource): @define_json_response("ViewMirrorConfig") @nickname("getRepoMirrorConfig") def get(self, namespace_name, repository_name): - """ Return the Mirror configuration for a given Repository. """ + """ + Return the Mirror configuration for a given Repository. + """ repo = model.repository.get_repository(namespace_name, repository_name) if not repo: raise NotFound() @@ -246,7 +256,9 @@ class RepoMirrorResource(RepositoryParamResource): @nickname("createRepoMirrorConfig") @validate_json_request("CreateMirrorConfig") def post(self, namespace_name, repository_name): - """ Create a RepoMirrorConfig for a given Repository. """ + """ + Create a RepoMirrorConfig for a given Repository. + """ # TODO: Tidy up this function # TODO: Specify only the data we want to pass on when creating the RepoMirrorConfig. Avoid # the possibility of data injection. @@ -296,7 +308,9 @@ class RepoMirrorResource(RepositoryParamResource): @validate_json_request("UpdateMirrorConfig") @nickname("changeRepoMirrorConfig") def put(self, namespace_name, repository_name): - """ Allow users to modifying the repository's mirroring configuration. """ + """ + Allow users to modifying the repository's mirroring configuration. + """ values = request.get_json() repo = model.repository.get_repository(namespace_name, repository_name) @@ -477,7 +491,9 @@ class RepoMirrorResource(RepositoryParamResource): return "", 201 def _setup_robot_for_mirroring(self, namespace_name, repo_name, robot_username): - """ Validate robot exists and give write permissions. """ + """ + Validate robot exists and give write permissions. + """ robot = model.user.lookup_robot(robot_username) assert robot.robot @@ -497,7 +513,9 @@ class RepoMirrorResource(RepositoryParamResource): return robot def _string_to_dt(self, string): - """ Convert String to correct DateTime format. """ + """ + Convert String to correct DateTime format. 
+ """ if string is None: return None @@ -516,7 +534,9 @@ class RepoMirrorResource(RepositoryParamResource): return dt def _dt_to_string(self, dt): - """ Convert DateTime to correctly formatted String.""" + """ + Convert DateTime to correctly formatted String. + """ if dt is None: return None diff --git a/endpoints/api/organization.py b/endpoints/api/organization.py index 179ee7f42..c575414ab 100644 --- a/endpoints/api/organization.py +++ b/endpoints/api/organization.py @@ -1,4 +1,6 @@ -""" Manage organizations, members and OAuth applications. """ +""" +Manage organizations, members and OAuth applications. +""" import logging import recaptcha2 @@ -91,7 +93,9 @@ def org_view(o, teams): @resource("/v1/organization/") class OrganizationList(ApiResource): - """ Resource for creating organizations. """ + """ + Resource for creating organizations. + """ schemas = { "NewOrg": { @@ -113,7 +117,9 @@ class OrganizationList(ApiResource): @nickname("createOrganization") @validate_json_request("NewOrg") def post(self): - """ Create a new organization. """ + """ + Create a new organization. + """ user = get_authenticated_user() org_data = request.get_json() existing = None @@ -161,7 +167,9 @@ class OrganizationList(ApiResource): @path_param("orgname", "The name of the organization") @related_user_resource(User) class Organization(ApiResource): - """ Resource for managing organizations. """ + """ + Resource for managing organizations. + """ schemas = { "UpdateOrg": { @@ -188,7 +196,9 @@ class Organization(ApiResource): @nickname("getOrganization") def get(self, orgname): - """ Get the details for the specified organization """ + """ + Get the details for the specified organization. + """ try: org = model.organization.get_organization(orgname) except model.InvalidOrganizationException: @@ -205,7 +215,9 @@ class Organization(ApiResource): @nickname("changeOrganizationDetails") @validate_json_request("UpdateOrg") def put(self, orgname): - """ Change the details for the specified organization. """ + """ + Change the details for the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -248,7 +260,9 @@ class Organization(ApiResource): @require_fresh_login @nickname("deleteAdminedOrganization") def delete(self, orgname): - """ Deletes the specified organization. """ + """ + Deletes the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -268,12 +282,16 @@ class Organization(ApiResource): @related_user_resource(PrivateRepositories) @show_if(features.BILLING) class OrgPrivateRepositories(ApiResource): - """ Custom verb to compute whether additional private repositories are available. """ + """ + Custom verb to compute whether additional private repositories are available. + """ @require_scope(scopes.ORG_ADMIN) @nickname("getOrganizationPrivateAllowed") def get(self, orgname): - """ Return whether or not this org is allowed to create new private repositories. """ + """ + Return whether or not this org is allowed to create new private repositories. + """ permission = CreateRepositoryPermission(orgname) if permission.can(): organization = model.organization.get_organization(orgname) @@ -301,17 +319,19 @@ class OrgPrivateRepositories(ApiResource): @resource("/v1/organization//collaborators") @path_param("orgname", "The name of the organization") class OrganizationCollaboratorList(ApiResource): - """ Resource for listing outside collaborators of an organization. 
+ """ + Resource for listing outside collaborators of an organization. - Collaborators are users that do not belong to any team in the - organiztion, but who have direct permissions on one or more - repositories belonging to the organization. - """ + Collaborators are users that do not belong to any team in the organiztion, but who have direct + permissions on one or more repositories belonging to the organization. + """ @require_scope(scopes.ORG_ADMIN) @nickname("getOrganizationCollaborators") def get(self, orgname): - """ List outside collaborators of the specified organization. """ + """ + List outside collaborators of the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if not permission.can(): raise Unauthorized() @@ -350,12 +370,16 @@ class OrganizationCollaboratorList(ApiResource): @resource("/v1/organization//members") @path_param("orgname", "The name of the organization") class OrganizationMemberList(ApiResource): - """ Resource for listing the members of an organization. """ + """ + Resource for listing the members of an organization. + """ @require_scope(scopes.ORG_ADMIN) @nickname("getOrganizationMembers") def get(self, orgname): - """ List the human members of the specified organization. """ + """ + List the human members of the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -404,13 +428,16 @@ class OrganizationMemberList(ApiResource): @path_param("orgname", "The name of the organization") @path_param("membername", "The username of the organization member") class OrganizationMember(ApiResource): - """ Resource for managing individual organization members. """ + """ + Resource for managing individual organization members. + """ @require_scope(scopes.ORG_ADMIN) @nickname("getOrganizationMember") def get(self, orgname, membername): - """ Retrieves the details of a member of the organization. - """ + """ + Retrieves the details of a member of the organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): # Lookup the user. @@ -457,9 +484,10 @@ class OrganizationMember(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("removeOrganizationMember") def delete(self, orgname, membername): - """ Removes a member from an organization, revoking all its repository - priviledges and removing it from all teams in the organization. - """ + """ + Removes a member from an organization, revoking all its repository priviledges and removing + it from all teams in the organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): # Lookup the user. @@ -482,11 +510,15 @@ class OrganizationMember(ApiResource): @resource("/v1/app/") @path_param("client_id", "The OAuth client ID") class ApplicationInformation(ApiResource): - """ Resource that returns public information about a registered application. """ + """ + Resource that returns public information about a registered application. + """ @nickname("getApplicationInformation") def get(self, client_id): - """ Get information on the specified application. """ + """ + Get information on the specified application. + """ application = model.oauth.get_application_for_client_id(client_id) if not application: raise NotFound() @@ -525,7 +557,9 @@ def app_view(application): @resource("/v1/organization//applications") @path_param("orgname", "The name of the organization") class OrganizationApplications(ApiResource): - """ Resource for managing applications defined by an organization. 
""" + """ + Resource for managing applications defined by an organization. + """ schemas = { "NewApp": { @@ -557,7 +591,9 @@ class OrganizationApplications(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("getOrganizationApplications") def get(self, orgname): - """ List the applications for the specified organization """ + """ + List the applications for the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -574,7 +610,9 @@ class OrganizationApplications(ApiResource): @nickname("createOrganizationApplication") @validate_json_request("NewApp") def post(self, orgname): - """ Creates a new application under this organization. """ + """ + Creates a new application under this organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -606,7 +644,9 @@ class OrganizationApplications(ApiResource): @path_param("orgname", "The name of the organization") @path_param("client_id", "The OAuth client ID") class OrganizationApplicationResource(ApiResource): - """ Resource for managing an application defined by an organizations. """ + """ + Resource for managing an application defined by an organizations. + """ schemas = { "UpdateApp": { @@ -638,7 +678,9 @@ class OrganizationApplicationResource(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("getOrganizationApplication") def get(self, orgname, client_id): - """ Retrieves the application with the specified client_id under the specified organization """ + """ + Retrieves the application with the specified client_id under the specified organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -658,7 +700,9 @@ class OrganizationApplicationResource(ApiResource): @nickname("updateOrganizationApplication") @validate_json_request("UpdateApp") def put(self, orgname, client_id): - """ Updates an application under this organization. """ + """ + Updates an application under this organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -690,7 +734,9 @@ class OrganizationApplicationResource(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("deleteOrganizationApplication") def delete(self, orgname, client_id): - """ Deletes the application under this organization. """ + """ + Deletes the application under this organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -717,11 +763,15 @@ class OrganizationApplicationResource(ApiResource): @path_param("client_id", "The OAuth client ID") @internal_only class OrganizationApplicationResetClientSecret(ApiResource): - """ Custom verb for resetting the client secret of an application. """ + """ + Custom verb for resetting the client secret of an application. + """ @nickname("resetOrganizationApplicationClientSecret") def post(self, orgname, client_id): - """ Resets the client secret of the application. """ + """ + Resets the client secret of the application. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: diff --git a/endpoints/api/permission.py b/endpoints/api/permission.py index b9ab2acdb..ed5421adc 100644 --- a/endpoints/api/permission.py +++ b/endpoints/api/permission.py @@ -1,4 +1,6 @@ -""" Manage repository permissions. """ +""" +Manage repository permissions. 
+""" import logging @@ -24,12 +26,16 @@ logger = logging.getLogger(__name__) @resource("/v1/repository//permissions/team/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryTeamPermissionList(RepositoryParamResource): - """ Resource for repository team permissions. """ + """ + Resource for repository team permissions. + """ @require_repo_admin @nickname("listRepoTeamPermissions") def get(self, namespace_name, repository_name): - """ List all team permission. """ + """ + List all team permission. + """ repo_perms = model.get_repo_permissions_by_team(namespace_name, repository_name) return { @@ -40,12 +46,16 @@ class RepositoryTeamPermissionList(RepositoryParamResource): @resource("/v1/repository//permissions/user/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryUserPermissionList(RepositoryParamResource): - """ Resource for repository user permissions. """ + """ + Resource for repository user permissions. + """ @require_repo_admin @nickname("listRepoUserPermissions") def get(self, namespace_name, repository_name): - """ List all user permissions. """ + """ + List all user permissions. + """ perms = model.get_repo_permissions_by_user(namespace_name, repository_name) return {"permissions": {p.username: p.to_dict() for p in perms}} @@ -54,13 +64,17 @@ class RepositoryUserPermissionList(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("username", "The username of the user to which the permissions apply") class RepositoryUserTransitivePermission(RepositoryParamResource): - """ Resource for retrieving whether a user has access to a repository, either directly - or via a team. """ + """ + Resource for retrieving whether a user has access to a repository, either directly or via a + team. + """ @require_repo_admin @nickname("getUserTransitivePermission") def get(self, namespace_name, repository_name, username): - """ Get the fetch the permission for the specified user. """ + """ + Get the fetch the permission for the specified user. + """ roles = model.get_repo_roles(username, namespace_name, repository_name) @@ -74,7 +88,9 @@ class RepositoryUserTransitivePermission(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("username", "The username of the user to which the permission applies") class RepositoryUserPermission(RepositoryParamResource): - """ Resource for managing individual user permissions. """ + """ + Resource for managing individual user permissions. + """ schemas = { "UserPermission": { @@ -94,7 +110,9 @@ class RepositoryUserPermission(RepositoryParamResource): @require_repo_admin @nickname("getUserPermissions") def get(self, namespace_name, repository_name, username): - """ Get the permission for the specified user. """ + """ + Get the permission for the specified user. + """ logger.debug( "Get repo: %s/%s permissions for user %s", namespace_name, repository_name, username ) @@ -105,7 +123,9 @@ class RepositoryUserPermission(RepositoryParamResource): @nickname("changeUserPermissions") @validate_json_request("UserPermission") def put(self, namespace_name, repository_name, username): # Also needs to respond to post - """ Update the perimssions for an existing repository. """ + """ + Update the perimssions for an existing repository. 
+ """ new_permission = request.get_json() logger.debug("Setting permission to: %s for user %s", new_permission["role"], username) @@ -135,7 +155,9 @@ class RepositoryUserPermission(RepositoryParamResource): @require_repo_admin @nickname("deleteUserPermissions") def delete(self, namespace_name, repository_name, username): - """ Delete the permission for the user. """ + """ + Delete the permission for the user. + """ try: model.delete_repo_permission_for_user(username, namespace_name, repository_name) except DeleteException as ex: @@ -155,7 +177,9 @@ class RepositoryUserPermission(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("teamname", "The name of the team to which the permission applies") class RepositoryTeamPermission(RepositoryParamResource): - """ Resource for managing individual team permissions. """ + """ + Resource for managing individual team permissions. + """ schemas = { "TeamPermission": { @@ -175,7 +199,9 @@ class RepositoryTeamPermission(RepositoryParamResource): @require_repo_admin @nickname("getTeamPermissions") def get(self, namespace_name, repository_name, teamname): - """ Fetch the permission for the specified team. """ + """ + Fetch the permission for the specified team. + """ logger.debug( "Get repo: %s/%s permissions for team %s", namespace_name, repository_name, teamname ) @@ -186,7 +212,9 @@ class RepositoryTeamPermission(RepositoryParamResource): @nickname("changeTeamPermissions") @validate_json_request("TeamPermission") def put(self, namespace_name, repository_name, teamname): - """ Update the existing team permission. """ + """ + Update the existing team permission. + """ new_permission = request.get_json() logger.debug("Setting permission to: %s for team %s", new_permission["role"], teamname) @@ -210,7 +238,9 @@ class RepositoryTeamPermission(RepositoryParamResource): @require_repo_admin @nickname("deleteTeamPermissions") def delete(self, namespace_name, repository_name, teamname): - """ Delete the permission for the specified team. """ + """ + Delete the permission for the specified team. + """ try: model.delete_repo_permission_for_team(teamname, namespace_name, repository_name) except DeleteException as ex: diff --git a/endpoints/api/permission_models_interface.py b/endpoints/api/permission_models_interface.py index 59c1d4113..7d8ef2b8b 100644 --- a/endpoints/api/permission_models_interface.py +++ b/endpoints/api/permission_models_interface.py @@ -66,8 +66,8 @@ class TeamPermission(namedtuple("TeamPermission", ["role_name", "team_name", "av @add_metaclass(ABCMeta) class PermissionDataInterface(object): """ - Data interface used by permissions API - """ + Data interface used by permissions API. + """ @abstractmethod def get_repo_permissions_by_user(self, namespace_name, repository_name): diff --git a/endpoints/api/permission_models_pre_oci.py b/endpoints/api/permission_models_pre_oci.py index 2af4b7e9b..921cb818f 100644 --- a/endpoints/api/permission_models_pre_oci.py +++ b/endpoints/api/permission_models_pre_oci.py @@ -12,9 +12,9 @@ from permission_models_interface import ( class PreOCIModel(PermissionDataInterface): """ - PreOCIModel implements the data model for Permission using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for Permission using a database schema before it was + changed to support the OCI specification. 
+ """ def get_repo_permissions_by_user(self, namespace_name, repository_name): org = None diff --git a/endpoints/api/prototype.py b/endpoints/api/prototype.py index 51bc75be1..bfd35325c 100644 --- a/endpoints/api/prototype.py +++ b/endpoints/api/prototype.py @@ -1,4 +1,6 @@ -""" Manage default permissions added to repositories. """ +""" +Manage default permissions added to repositories. +""" from flask import request @@ -74,7 +76,9 @@ def log_prototype_action(action_kind, orgname, prototype, **kwargs): @resource("/v1/organization//prototypes") @path_param("orgname", "The name of the organization") class PermissionPrototypeList(ApiResource): - """ Resource for listing and creating permission prototypes. """ + """ + Resource for listing and creating permission prototypes. + """ schemas = { "NewPrototype": { @@ -121,7 +125,9 @@ class PermissionPrototypeList(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("getOrganizationPrototypePermissions") def get(self, orgname): - """ List the existing prototypes for this organization. """ + """ + List the existing prototypes for this organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -145,7 +151,9 @@ class PermissionPrototypeList(ApiResource): @nickname("createOrganizationPrototypePermission") @validate_json_request("NewPrototype") def post(self, orgname): - """ Create a new permission prototype. """ + """ + Create a new permission prototype. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -206,7 +214,9 @@ class PermissionPrototypeList(ApiResource): @path_param("orgname", "The name of the organization") @path_param("prototypeid", "The ID of the prototype") class PermissionPrototype(ApiResource): - """ Resource for managingin individual permission prototypes. """ + """ + Resource for managingin individual permission prototypes. + """ schemas = { "PrototypeUpdate": { @@ -226,7 +236,9 @@ class PermissionPrototype(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("deleteOrganizationPrototypePermission") def delete(self, orgname, prototypeid): - """ Delete an existing permission prototype. """ + """ + Delete an existing permission prototype. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -248,7 +260,9 @@ class PermissionPrototype(ApiResource): @nickname("updateOrganizationPrototypePermission") @validate_json_request("PrototypeUpdate") def put(self, orgname, prototypeid): - """ Update the role of an existing permission prototype. """ + """ + Update the role of an existing permission prototype. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: diff --git a/endpoints/api/repoemail.py b/endpoints/api/repoemail.py index 2d9e221c4..29ca810c2 100644 --- a/endpoints/api/repoemail.py +++ b/endpoints/api/repoemail.py @@ -1,4 +1,6 @@ -""" Authorize repository to send e-mail notifications. """ +""" +Authorize repository to send e-mail notifications. +""" import logging @@ -32,12 +34,16 @@ logger = logging.getLogger(__name__) @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("email", "The e-mail address") class RepositoryAuthorizedEmail(RepositoryParamResource): - """ Resource for checking and authorizing e-mail addresses to receive repo notifications. """ + """ + Resource for checking and authorizing e-mail addresses to receive repo notifications. 
+ """ @require_repo_admin @nickname("checkRepoEmailAuthorized") def get(self, namespace, repository, email): - """ Checks to see if the given e-mail address is authorized on this repository. """ + """ + Checks to see if the given e-mail address is authorized on this repository. + """ record = model.get_email_authorized_for_repo(namespace, repository, email) if not record: abort(404) @@ -47,7 +53,9 @@ class RepositoryAuthorizedEmail(RepositoryParamResource): @require_repo_admin @nickname("sendAuthorizeRepoEmail") def post(self, namespace, repository, email): - """ Starts the authorization process for an e-mail address on a repository. """ + """ + Starts the authorization process for an e-mail address on a repository. + """ with tf(db): record = model.get_email_authorized_for_repo(namespace, repository, email) diff --git a/endpoints/api/repoemail_models_interface.py b/endpoints/api/repoemail_models_interface.py index b748819b7..882eaa1dd 100644 --- a/endpoints/api/repoemail_models_interface.py +++ b/endpoints/api/repoemail_models_interface.py @@ -11,13 +11,14 @@ class RepositoryAuthorizedEmail( ) ): """ - Tag represents a name to an image. - :type email: string - :type repository_name: string - :type namespace_name: string - :type confirmed: boolean - :type code: string - """ + Tag represents a name to an image. + + :type email: string + :type repository_name: string + :type namespace_name: string + :type confirmed: boolean + :type code: string + """ def to_dict(self): return { @@ -32,17 +33,17 @@ class RepositoryAuthorizedEmail( @add_metaclass(ABCMeta) class RepoEmailDataInterface(object): """ - Interface that represents all data store interactions required by a Repo Email. - """ + Interface that represents all data store interactions required by a Repo Email. + """ @abstractmethod def get_email_authorized_for_repo(self, namespace_name, repository_name, email): """ - Returns a RepositoryAuthorizedEmail if available else None - """ + Returns a RepositoryAuthorizedEmail if available else None. + """ @abstractmethod def create_email_authorization_for_repo(self, namespace_name, repository_name, email): """ - Returns the newly created repository authorized email. - """ + Returns the newly created repository authorized email. + """ diff --git a/endpoints/api/repoemail_models_pre_oci.py b/endpoints/api/repoemail_models_pre_oci.py index 769f35257..39c4ecb17 100644 --- a/endpoints/api/repoemail_models_pre_oci.py +++ b/endpoints/api/repoemail_models_pre_oci.py @@ -16,9 +16,9 @@ def _return_none_or_data(func, namespace_name, repository_name, email): class PreOCIModel(RepoEmailDataInterface): """ - PreOCIModel implements the data model for the Repo Email using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for the Repo Email using a database schema before it was + changed to support the OCI specification. + """ def get_email_authorized_for_repo(self, namespace_name, repository_name, email): return _return_none_or_data( diff --git a/endpoints/api/repository.py b/endpoints/api/repository.py index 8d3bb217b..ab35a02f1 100644 --- a/endpoints/api/repository.py +++ b/endpoints/api/repository.py @@ -1,4 +1,6 @@ -""" List, create and manage repositories. """ +""" +List, create and manage repositories. +""" import logging import datetime @@ -59,9 +61,11 @@ MAX_DAYS_IN_3_MONTHS = 92 def check_allowed_private_repos(namespace): - """ Checks to see if the given namespace has reached its private repository limit. 
If so, - raises a ExceedsLicenseException. - """ + """ + Checks to see if the given namespace has reached its private repository limit. + + If so, raises an ExceedsLicenseException. + """ # Not enabled if billing is disabled. if not features.BILLING: return @@ -72,7 +76,9 @@ def check_allowed_private_repos(namespace): @resource("/v1/repository") class RepositoryList(ApiResource): - """Operations for creating and listing repositories.""" + """ + Operations for creating and listing repositories. + """ schemas = { "NewRepo": { @@ -110,7 +116,9 @@ class RepositoryList(ApiResource): @nickname("createRepo") @validate_json_request("NewRepo") def post(self): - """Create a new repository.""" + """ + Create a new repository. + """ owner = get_authenticated_user() req = request.get_json() @@ -186,8 +194,9 @@ class RepositoryList(ApiResource): @query_param("repo_kind", "The kind of repositories to return", type=str, default="image") @page_support() def get(self, page_token, parsed_args): - """ Fetch the list of repositories visible to the current user under a variety of situations. - """ + """ + Fetch the list of repositories visible to the current user under a variety of situations. + """ # Ensure that the user requests either filtered by a namespace, only starred repositories, # or public repositories. This ensures that the user is not requesting *all* visible repos, # which can cause a surge in DB CPU usage. @@ -225,7 +234,9 @@ class RepositoryList(ApiResource): @resource("/v1/repository/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class Repository(RepositoryParamResource): - """Operations for managing a specific repository.""" + """ + Operations for managing a specific repository. + """ schemas = { "RepoUpdate": { @@ -251,7 +262,9 @@ class Repository(RepositoryParamResource): @require_repo_read @nickname("getRepo") def get(self, namespace, repository, parsed_args): - """Fetch the specified repository.""" + """ + Fetch the specified repository. + """ logger.debug("Get repo: %s/%s" % (namespace, repository)) include_tags = parsed_args["includeTags"] max_tags = 500 @@ -292,7 +305,9 @@ class Repository(RepositoryParamResource): @nickname("updateRepo") @validate_json_request("RepoUpdate") def put(self, namespace, repository): - """ Update the description in the specified repository. """ + """ + Update the description in the specified repository. + """ if not model.repo_exists(namespace, repository): raise NotFound() @@ -310,7 +325,9 @@ class Repository(RepositoryParamResource): @require_repo_admin @nickname("deleteRepository") def delete(self, namespace, repository): - """ Delete a repository. """ + """ + Delete a repository. + """ username = model.mark_repository_for_deletion(namespace, repository, repository_gc_queue) if features.BILLING: @@ -327,7 +344,9 @@ class Repository(RepositoryParamResource): @resource("/v1/repository//changevisibility") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryVisibility(RepositoryParamResource): - """ Custom verb for changing the visibility of the repository. """ + """ + Custom verb for changing the visibility of the repository. + """ schemas = { "ChangeVisibility": { @@ -348,7 +367,9 @@ class RepositoryVisibility(RepositoryParamResource): @nickname("changeRepoVisibility") @validate_json_request("ChangeVisibility") def post(self, namespace, repository): - """ Change the visibility of a repository. """ + """ + Change the visibility of a repository. 
+ """ if model.repo_exists(namespace, repository): values = request.get_json() visibility = values["visibility"] @@ -368,7 +389,9 @@ class RepositoryVisibility(RepositoryParamResource): @resource("/v1/repository//changetrust") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryTrust(RepositoryParamResource): - """ Custom verb for changing the trust settings of the repository. """ + """ + Custom verb for changing the trust settings of the repository. + """ schemas = { "ChangeRepoTrust": { @@ -389,7 +412,9 @@ class RepositoryTrust(RepositoryParamResource): @nickname("changeRepoTrust") @validate_json_request("ChangeRepoTrust") def post(self, namespace, repository): - """ Change the visibility of a repository. """ + """ + Change the visibility of a repository. + """ if not model.repo_exists(namespace, repository): raise NotFound() @@ -414,7 +439,9 @@ class RepositoryTrust(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @show_if(features.REPO_MIRROR) class RepositoryStateResource(RepositoryParamResource): - """ Custom verb for changing the state of the repository. """ + """ + Custom verb for changing the state of the repository. + """ schemas = { "ChangeRepoState": { @@ -435,7 +462,9 @@ class RepositoryStateResource(RepositoryParamResource): @nickname("changeRepoState") @validate_json_request("ChangeRepoState") def put(self, namespace, repository): - """ Change the state of a repository. """ + """ + Change the state of a repository. + """ if not model.repo_exists(namespace, repository): raise NotFound() diff --git a/endpoints/api/repository_models_interface.py b/endpoints/api/repository_models_interface.py index 8df0d8f3e..784e46bcf 100644 --- a/endpoints/api/repository_models_interface.py +++ b/endpoints/api/repository_models_interface.py @@ -32,18 +32,19 @@ class RepositoryBaseElement( ) ): """ - Repository a single quay repository - :type namespace_name: string - :type repository_name: string - :type is_starred: boolean - :type is_public: boolean - :type kind_name: string - :type description: string - :type namespace_user_organization: boolean - :type should_last_modified: boolean - :type should_popularity: boolean - :type should_is_starred: boolean - """ + Repository a single quay repository. + + :type namespace_name: string + :type repository_name: string + :type is_starred: boolean + :type is_public: boolean + :type kind_name: string + :type description: string + :type namespace_user_organization: boolean + :type should_last_modified: boolean + :type should_popularity: boolean + :type should_is_starred: boolean + """ def to_dict(self): repo = { @@ -73,11 +74,12 @@ class ApplicationRepository( ) ): """ - Repository a single quay repository - :type repository_base_elements: RepositoryBaseElement - :type channels: [Channel] - :type releases: [Release] - """ + Repository a single quay repository. + + :type repository_base_elements: RepositoryBaseElement + :type channels: [Channel] + :type releases: [Release] + """ def to_dict(self): repo_data = { @@ -104,13 +106,14 @@ class ImageRepositoryRepository( ) ): """ - Repository a single quay repository - :type repository_base_elements: RepositoryBaseElement - :type tags: [Tag] - :type counts: [count] - :type badge_token: string - :type trust_enabled: boolean - """ + Repository a single quay repository. 
+ + :type repository_base_elements: RepositoryBaseElement + :type tags: [Tag] + :type counts: [count] + :type badge_token: string + :type trust_enabled: boolean + """ def to_dict(self): img_repo = { @@ -139,19 +142,21 @@ class ImageRepositoryRepository( class Repository(namedtuple("Repository", ["namespace_name", "repository_name",])): """ - Repository a single quay repository - :type namespace_name: string - :type repository_name: string - """ + Represents a single Quay repository. + + :type namespace_name: string + :type repository_name: string + """ class Channel(namedtuple("Channel", ["name", "linked_tag_name", "linked_tag_lifetime_start"])): """ - Repository a single quay repository - :type name: string - :type linked_tag_name: string - :type linked_tag_lifetime_start: string - """ + Represents a single channel in a repository. + + :type name: string + :type linked_tag_name: string + :type linked_tag_lifetime_start: string + """ def to_dict(self): return { @@ -165,11 +170,12 @@ class Channel(namedtuple("Channel", ["name", "linked_tag_name", "linked_tag_life class Release(namedtuple("Channel", ["name", "lifetime_start", "releases_channels_map"])): """ - Repository a single quay repository - :type name: string - :type last_modified: string - :type releases_channels_map: {string -> string} - """ + Represents a single release in a repository. + + :type name: string + :type last_modified: string + :type releases_channels_map: {string -> string} + """ def to_dict(self): return { @@ -239,28 +245,28 @@ class Count(namedtuple("Count", ["date", "count"])): @add_metaclass(ABCMeta) class RepositoryDataInterface(object): """ - Interface that represents all data store interactions required by a Repository. - """ + Interface that represents all data store interactions required by a Repository. + """ @abstractmethod def get_repo(self, namespace_name, repository_name, user, include_tags=True, max_tags=500): """ - Returns a repository - """ + Returns a repository. + """ @abstractmethod def repo_exists(self, namespace_name, repository_name): """ - Returns true if a repo exists and false if not - """ + Returns true if a repo exists and false if not. + """ @abstractmethod def create_repo( self, namespace, name, creating_user, description, visibility="private", repo_kind="image" ): """ - Returns creates a new repo - """ + Creates a new repo. + """ @abstractmethod def get_repo_list( self, current_user, username, page_token, last_modified, popularity, public, starred=False, repo_kind="image", ): """ - Returns a RepositoryBaseElement - """ + Returns a RepositoryBaseElement. + """ @abstractmethod def set_repository_visibility(self, namespace_name, repository_name, visibility): """ - Sets a repository's visibility if it is found - """ + Sets a repository's visibility if it is found. + """ @abstractmethod def set_trust(self, namespace_name, repository_name, trust): """ - Sets a repository's trust_enabled field if it is found - """ + Sets a repository's trust_enabled field if it is found. + """ @abstractmethod def set_description(self, namespace_name, repository_name, description): """ - Sets a repository's description if it is found. - """ + Sets a repository's description if it is found. + """ @abstractmethod def mark_repository_for_deletion(self, namespace_name, repository_name, repository_gc_queue): """ - Marks a repository for deletion. - """ + Marks a repository for deletion. 
+ """ @abstractmethod def check_repository_usage(self, user_name, plan_found): """ - Creates a notification for a user if they are over or under on their repository usage - """ + Creates a notification for a user if they are over or under on their repository usage. + """ @abstractmethod def set_repository_state(self, namespace_name, repository_name, state): """ - Set the State of the Repository. - """ + Set the State of the Repository. + """ diff --git a/endpoints/api/repository_models_pre_oci.py b/endpoints/api/repository_models_pre_oci.py index 994666b2b..7ca1aa9b4 100644 --- a/endpoints/api/repository_models_pre_oci.py +++ b/endpoints/api/repository_models_pre_oci.py @@ -32,9 +32,9 @@ def _create_channel(channel, releases_channels_map): class PreOCIModel(RepositoryDataInterface): """ - PreOCIModel implements the data model for the Repo Email using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for the Repo Email using a database schema before it was + changed to support the OCI specification. + """ def check_repository_usage(self, username, plan_found): private_repos = model.user.get_private_repo_count(username) diff --git a/endpoints/api/repositorynotification.py b/endpoints/api/repositorynotification.py index a5d6508f2..2759bd048 100644 --- a/endpoints/api/repositorynotification.py +++ b/endpoints/api/repositorynotification.py @@ -1,4 +1,6 @@ -""" List, create and manage repository events/notifications. """ +""" +List, create and manage repository events/notifications. +""" import logging from flask import request @@ -30,7 +32,9 @@ logger = logging.getLogger(__name__) @resource("/v1/repository//notification/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryNotificationList(RepositoryParamResource): - """ Resource for dealing with listing and creating notifications on a repository. """ + """ + Resource for dealing with listing and creating notifications on a repository. + """ schemas = { "NotificationCreateRequest": { @@ -103,7 +107,9 @@ class RepositoryNotificationList(RepositoryParamResource): @nickname("listRepoNotifications") @disallow_for_app_repositories def get(self, namespace_name, repository_name): - """ List the notifications for the specified repository. """ + """ + List the notifications for the specified repository. + """ notifications = model.list_repo_notifications(namespace_name, repository_name) return {"notifications": [n.to_dict() for n in notifications]} @@ -112,13 +118,17 @@ class RepositoryNotificationList(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("uuid", "The UUID of the notification") class RepositoryNotification(RepositoryParamResource): - """ Resource for dealing with specific notifications. """ + """ + Resource for dealing with specific notifications. + """ @require_repo_admin @nickname("getRepoNotification") @disallow_for_app_repositories def get(self, namespace_name, repository_name, uuid): - """ Get information for the specified notification. """ + """ + Get information for the specified notification. + """ found = model.get_repo_notification(uuid) if not found: raise NotFound() @@ -128,7 +138,9 @@ class RepositoryNotification(RepositoryParamResource): @nickname("deleteRepoNotification") @disallow_for_app_repositories def delete(self, namespace_name, repository_name, uuid): - """ Deletes the specified notification. 
""" + """ + Deletes the specified notification. + """ deleted = model.delete_repo_notification(namespace_name, repository_name, uuid) if not deleted: raise InvalidRequest( @@ -155,7 +167,9 @@ class RepositoryNotification(RepositoryParamResource): @nickname("resetRepositoryNotificationFailures") @disallow_for_app_repositories def post(self, namespace_name, repository_name, uuid): - """ Resets repository notification to 0 failures. """ + """ + Resets repository notification to 0 failures. + """ reset = model.reset_notification_number_of_failures(namespace_name, repository_name, uuid) if not reset: raise InvalidRequest( @@ -183,13 +197,17 @@ class RepositoryNotification(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("uuid", "The UUID of the notification") class TestRepositoryNotification(RepositoryParamResource): - """ Resource for queuing a test of a notification. """ + """ + Resource for queuing a test of a notification. + """ @require_repo_admin @nickname("testRepoNotification") @disallow_for_app_repositories def post(self, namespace_name, repository_name, uuid): - """ Queues a test notification for this repository. """ + """ + Queues a test notification for this repository. + """ test_note = model.queue_test_notification(uuid) if not test_note: raise InvalidRequest( diff --git a/endpoints/api/repositorynotification_models_interface.py b/endpoints/api/repositorynotification_models_interface.py index 368c8f7ad..3f0e91d80 100644 --- a/endpoints/api/repositorynotification_models_interface.py +++ b/endpoints/api/repositorynotification_models_interface.py @@ -21,15 +21,16 @@ class RepositoryNotification( ) ): """ - RepositoryNotification represents a notification for a repository. - :type uuid: string - :type event: string - :type method: string - :type config: string - :type title: string - :type event_config: string - :type number_of_failures: int - """ + RepositoryNotification represents a notification for a repository. + + :type uuid: string + :type event: string + :type method: string + :type config: string + :type title: string + :type event_config: string + :type number_of_failures: int + """ def to_dict(self): try: @@ -56,8 +57,9 @@ class RepositoryNotification( @add_metaclass(ABCMeta) class RepoNotificationInterface(object): """ - Interface that represents all data store interactions required by the RepositoryNotification API - """ + Interface that represents all data store interactions required by the RepositoryNotification + API. + """ @abstractmethod def create_repo_notification( diff --git a/endpoints/api/repotoken.py b/endpoints/api/repotoken.py index 9f45f1d61..7e2c15700 100644 --- a/endpoints/api/repotoken.py +++ b/endpoints/api/repotoken.py @@ -1,4 +1,6 @@ -""" Manage repository access tokens (DEPRECATED). """ +""" +Manage repository access tokens (DEPRECATED). +""" import logging @@ -17,7 +19,9 @@ logger = logging.getLogger(__name__) @resource("/v1/repository//tokens/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositoryTokenList(RepositoryParamResource): - """ Resource for creating and listing repository tokens. """ + """ + Resource for creating and listing repository tokens. + """ schemas = { "NewToken": { @@ -36,14 +40,18 @@ class RepositoryTokenList(RepositoryParamResource): @require_repo_admin @nickname("listRepoTokens") def get(self, namespace_name, repo_name): - """ List the tokens for the specified repository. 
""" + """ + List the tokens for the specified repository. + """ return {"message": "Handling of access tokens is no longer supported",}, 410 @require_repo_admin @nickname("createToken") @validate_json_request("NewToken") def post(self, namespace_name, repo_name): - """ Create a new repository token. """ + """ + Create a new repository token. + """ return {"message": "Creation of access tokens is no longer supported",}, 410 @@ -51,7 +59,9 @@ class RepositoryTokenList(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("code", "The token code") class RepositoryToken(RepositoryParamResource): - """ Resource for managing individual tokens. """ + """ + Resource for managing individual tokens. + """ schemas = { "TokenPermission": { @@ -71,18 +81,24 @@ class RepositoryToken(RepositoryParamResource): @require_repo_admin @nickname("getTokens") def get(self, namespace_name, repo_name, code): - """ Fetch the specified repository token information. """ + """ + Fetch the specified repository token information. + """ return {"message": "Handling of access tokens is no longer supported",}, 410 @require_repo_admin @nickname("changeToken") @validate_json_request("TokenPermission") def put(self, namespace_name, repo_name, code): - """ Update the permissions for the specified repository token. """ + """ + Update the permissions for the specified repository token. + """ return {"message": "Handling of access tokens is no longer supported",}, 410 @require_repo_admin @nickname("deleteToken") def delete(self, namespace_name, repo_name, code): - """ Delete the repository token. """ + """ + Delete the repository token. + """ return {"message": "Handling of access tokens is no longer supported",}, 410 diff --git a/endpoints/api/robot.py b/endpoints/api/robot.py index be7bfaa5f..095f3be39 100644 --- a/endpoints/api/robot.py +++ b/endpoints/api/robot.py @@ -1,4 +1,6 @@ -""" Manage user and organization robot accounts. """ +""" +Manage user and organization robot accounts. +""" from endpoints.api import ( resource, @@ -52,7 +54,9 @@ def robots_list(prefix, include_permissions=False, include_token=False, limit=No @resource("/v1/user/robots") class UserRobotList(ApiResource): - """ Resource for listing user robots. """ + """ + Resource for listing user robots. + """ @require_user_admin @nickname("getUserRobots") @@ -68,7 +72,9 @@ class UserRobotList(ApiResource): ) @query_param("limit", "If specified, the number of robots to return.", type=int, default=None) def get(self, parsed_args): - """ List the available robots for the user. """ + """ + List the available robots for the user. + """ user = get_authenticated_user() return robots_list( user.username, @@ -83,7 +89,9 @@ class UserRobotList(ApiResource): "robot_shortname", "The short name for the robot, without any user or organization prefix" ) class UserRobot(ApiResource): - """ Resource for managing a user's robots. """ + """ + Resource for managing a user's robots. + """ schemas = { "CreateRobot": CREATE_ROBOT_SCHEMA, @@ -92,7 +100,9 @@ class UserRobot(ApiResource): @require_user_admin @nickname("getUserRobot") def get(self, robot_shortname): - """ Returns the user's robot with the specified name. """ + """ + Returns the user's robot with the specified name. 
+ """ parent = get_authenticated_user() robot = model.get_user_robot(robot_shortname, parent) return robot.to_dict(include_metadata=True, include_token=True) @@ -102,7 +112,9 @@ class UserRobot(ApiResource): @max_json_size(ROBOT_MAX_SIZE) @validate_json_request("CreateRobot", optional=True) def put(self, robot_shortname): - """ Create a new user robot with the specified name. """ + """ + Create a new user robot with the specified name. + """ parent = get_authenticated_user() create_data = request.get_json() or {} robot = model.create_user_robot( @@ -125,7 +137,9 @@ class UserRobot(ApiResource): @require_user_admin @nickname("deleteUserRobot") def delete(self, robot_shortname): - """ Delete an existing robot. """ + """ + Delete an existing robot. + """ parent = get_authenticated_user() model.delete_robot(format_robot_username(parent.username, robot_shortname)) log_action("delete_robot", parent.username, {"robot": robot_shortname}) @@ -136,7 +150,9 @@ class UserRobot(ApiResource): @path_param("orgname", "The name of the organization") @related_user_resource(UserRobotList) class OrgRobotList(ApiResource): - """ Resource for listing an organization's robots. """ + """ + Resource for listing an organization's robots. + """ @require_scope(scopes.ORG_ADMIN) @nickname("getOrgRobots") @@ -152,7 +168,9 @@ class OrgRobotList(ApiResource): ) @query_param("limit", "If specified, the number of robots to return.", type=int, default=None) def get(self, orgname, parsed_args): - """ List the organization's robots. """ + """ + List the organization's robots. + """ permission = OrganizationMemberPermission(orgname) if permission.can(): include_token = AdministerOrganizationPermission(orgname).can() and parsed_args.get( @@ -178,7 +196,9 @@ class OrgRobotList(ApiResource): ) @related_user_resource(UserRobot) class OrgRobot(ApiResource): - """ Resource for managing an organization's robots. """ + """ + Resource for managing an organization's robots. + """ schemas = { "CreateRobot": CREATE_ROBOT_SCHEMA, @@ -187,7 +207,9 @@ class OrgRobot(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("getOrgRobot") def get(self, orgname, robot_shortname): - """ Returns the organization's robot with the specified name. """ + """ + Returns the organization's robot with the specified name. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): robot = model.get_org_robot(robot_shortname, orgname) @@ -200,7 +222,9 @@ class OrgRobot(ApiResource): @max_json_size(ROBOT_MAX_SIZE) @validate_json_request("CreateRobot", optional=True) def put(self, orgname, robot_shortname): - """ Create a new robot in the organization. """ + """ + Create a new robot in the organization. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): create_data = request.get_json() or {} @@ -226,7 +250,9 @@ class OrgRobot(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("deleteOrgRobot") def delete(self, orgname, robot_shortname): - """ Delete an existing organization robot. """ + """ + Delete an existing organization robot. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): model.delete_robot(format_robot_username(orgname, robot_shortname)) @@ -241,12 +267,16 @@ class OrgRobot(ApiResource): "robot_shortname", "The short name for the robot, without any user or organization prefix" ) class UserRobotPermissions(ApiResource): - """ Resource for listing the permissions a user's robot has in the system. 
""" + """ + Resource for listing the permissions a user's robot has in the system. + """ @require_user_admin @nickname("getUserRobotPermissions") def get(self, robot_shortname): - """ Returns the list of repository permissions for the user's robot. """ + """ + Returns the list of repository permissions for the user's robot. + """ parent = get_authenticated_user() robot = model.get_user_robot(robot_shortname, parent) permissions = model.list_robot_permissions(robot.name) @@ -261,12 +291,16 @@ class UserRobotPermissions(ApiResource): ) @related_user_resource(UserRobotPermissions) class OrgRobotPermissions(ApiResource): - """ Resource for listing the permissions an org's robot has in the system. """ + """ + Resource for listing the permissions an org's robot has in the system. + """ @require_user_admin @nickname("getOrgRobotPermissions") def get(self, orgname, robot_shortname): - """ Returns the list of repository permissions for the org's robot. """ + """ + Returns the list of repository permissions for the org's robot. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): robot = model.get_org_robot(robot_shortname, orgname) @@ -282,12 +316,16 @@ class OrgRobotPermissions(ApiResource): "robot_shortname", "The short name for the robot, without any user or organization prefix" ) class RegenerateUserRobot(ApiResource): - """ Resource for regenerate an organization's robot's token. """ + """ + Resource for regenerate an organization's robot's token. + """ @require_user_admin @nickname("regenerateUserRobotToken") def post(self, robot_shortname): - """ Regenerates the token for a user's robot. """ + """ + Regenerates the token for a user's robot. + """ parent = get_authenticated_user() robot = model.regenerate_user_robot_token(robot_shortname, parent) log_action("regenerate_robot_token", parent.username, {"robot": robot_shortname}) @@ -301,12 +339,16 @@ class RegenerateUserRobot(ApiResource): ) @related_user_resource(RegenerateUserRobot) class RegenerateOrgRobot(ApiResource): - """ Resource for regenerate an organization's robot's token. """ + """ + Resource for regenerate an organization's robot's token. + """ @require_scope(scopes.ORG_ADMIN) @nickname("regenerateOrgRobotToken") def post(self, orgname, robot_shortname): - """ Regenerates the token for an organization robot. """ + """ + Regenerates the token for an organization robot. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): robot = model.regenerate_org_robot_token(robot_shortname, orgname) diff --git a/endpoints/api/robot_models_interface.py b/endpoints/api/robot_models_interface.py index eaff8d501..23a50332b 100644 --- a/endpoints/api/robot_models_interface.py +++ b/endpoints/api/robot_models_interface.py @@ -10,8 +10,9 @@ class Permission( namedtuple("Permission", ["repository_name", "repository_visibility_name", "role_name"]) ): """ - Permission the relationship between a robot and a repository and whether that robot can see the repo. - """ + Permission the relationship between a robot and a repository and whether that robot can see the + repo. + """ def to_dict(self): return { @@ -25,10 +26,11 @@ class Permission( class Team(namedtuple("Team", ["name", "avatar"])): """ - Team represents a team entry for a robot list entry. - :type name: string - :type avatar: {string -> string} - """ + Team represents a team entry for a robot list entry. 
@@ -25,10 +26,11 @@ class Permission( class Team(namedtuple("Team", ["name", "avatar"])): """ - Team represents a team entry for a robot list entry. - :type name: string - :type avatar: {string -> string} - """ + Team represents a team entry for a robot list entry. + + :type name: string + :type avatar: {string -> string} + """ def to_dict(self): return { @@ -52,15 +54,16 @@ class RobotWithPermissions( ) ): """ - RobotWithPermissions is a list of robot entries. - :type name: string - :type password: string - :type created: datetime|None - :type last_accessed: datetime|None - :type teams: [Team] - :type repository_names: [string] - :type description: string - """ + RobotWithPermissions represents a robot entry along with its permissions. + + :type name: string + :type password: string + :type created: datetime|None + :type last_accessed: datetime|None + :type teams: [Team] + :type repository_names: [string] + :type description: string + """ def to_dict(self, include_token=False): data = { @@ -87,14 +90,15 @@ class Robot( ) ): """ - Robot represents a robot entity. - :type name: string - :type password: string - :type created: datetime|None - :type last_accessed: datetime|None - :type description: string - :type unstructured_metadata: dict - """ + Robot represents a robot entity. + + :type name: string + :type password: string + :type created: datetime|None + :type last_accessed: datetime|None + :type description: string + :type unstructured_metadata: dict + """ def to_dict(self, include_metadata=False, include_token=False): data = { @@ -118,8 +122,8 @@ class Robot( @add_metaclass(ABCMeta) class RobotInterface(object): """ - Interface that represents all data store interactions required by the Robot API - """ + Interface that represents all data store interactions required by the Robot API. + """ @abstractmethod def get_org_robot(self, robot_shortname, orgname): diff --git a/endpoints/api/search.py b/endpoints/api/search.py index 77c05dbbc..9aef8ebfc 100644 --- a/endpoints/api/search.py +++ b/endpoints/api/search.py @@ -1,4 +1,6 @@ -""" Conduct searches against all registry context. """ +""" +Conduct searches against all registry context. +""" import features @@ -46,7 +48,9 @@ REPOSITORY_SEARCH_SCORE = 4 @resource("/v1/entities/link/") @internal_only class LinkExternalEntity(ApiResource): - """ Resource for linking external entities to internal users. """ + """ + Resource for linking external entities to internal users. + """ @nickname("linkExternalUser") def post(self, username): @@ -74,7 +78,9 @@ class LinkExternalEntity(ApiResource): @resource("/v1/entities/") class EntitySearch(ApiResource): - """ Resource for searching entities. """ + """ + Resource for searching entities. + """ @path_param("prefix", "The prefix of the entities being looked up") @parse_args() @query_param("limit", "Limit on the number of results returned", type=int, default=10) @query_param("includeTeams", "Whether to include team names.", type=truthy_bool, default=False) @query_param("includeOrgs", "Whether to include orgs names.", type=truthy_bool, default=False) @nickname("getMatchingEntities") def get(self, prefix, parsed_args): - """ Get a list of entities that match the specified prefix. """ + """ + Get a list of entities that match the specified prefix. + """ # Ensure we don't have any unicode characters in the search, as it breaks the search. Nothing # being searched can have unicode in it anyway, so this is a safe operation. @@ -233,7 +241,9 @@ def search_entity_view(username, entity, get_short_name=None): def conduct_team_search(username, query, encountered_teams, results): - """ Finds the matching teams where the user is a member.
+ """ matching_teams = model.team.get_matching_user_teams(query, get_authenticated_user(), limit=5) for team in matching_teams: if team.id in encountered_teams: @@ -254,7 +264,9 @@ def conduct_team_search(username, query, encountered_teams, results): def conduct_admined_team_search(username, query, encountered_teams, results): - """ Finds matching teams in orgs admined by the user. """ + """ + Finds matching teams in orgs admined by the user. + """ matching_teams = model.team.get_matching_admined_teams(query, get_authenticated_user(), limit=5) for team in matching_teams: if team.id in encountered_teams: @@ -275,7 +287,9 @@ def conduct_admined_team_search(username, query, encountered_teams, results): def conduct_repo_search(username, query, results, offset=0, limit=5): - """ Finds matching repositories. """ + """ + Finds matching repositories. + """ matching_repos = model.repository.get_filtered_matching_repositories( query, username, limit=limit, repo_kind=None, offset=offset ) @@ -286,14 +300,18 @@ def conduct_repo_search(username, query, results, offset=0, limit=5): def conduct_namespace_search(username, query, results): - """ Finds matching users and organizations. """ + """ + Finds matching users and organizations. + """ matching_entities = model.user.get_matching_user_namespaces(query, username, limit=5) for entity in matching_entities: results.append(search_entity_view(username, entity)) def conduct_robot_search(username, query, results): - """ Finds matching robot accounts. """ + """ + Finds matching robot accounts. + """ def get_short_name(name): return parse_robot_username(name)[1] @@ -332,14 +350,18 @@ def repo_result_view(repo, username, last_modified=None, stars=None, popularity= @resource("/v1/find/all") class ConductSearch(ApiResource): - """ Resource for finding users, repositories, teams, etc. """ + """ + Resource for finding users, repositories, teams, etc. + """ @parse_args() @query_param("query", "The search query.", type=str, default="") @require_scope(scopes.READ_REPO) @nickname("conductSearch") def get(self, parsed_args): - """ Get a list of entities and resources that match the specified query. """ + """ + Get a list of entities and resources that match the specified query. + """ query = parsed_args["query"] if not query: return {"results": []} @@ -379,14 +401,18 @@ MAX_RESULT_PAGE_COUNT = app.config.get("SEARCH_MAX_RESULT_PAGE_COUNT", 10) @resource("/v1/find/repositories") class ConductRepositorySearch(ApiResource): - """ Resource for finding repositories. """ + """ + Resource for finding repositories. + """ @parse_args() @query_param("query", "The search query.", type=str, default="") @query_param("page", "The page.", type=int, default=1) @nickname("conductRepoSearch") def get(self, parsed_args): - """ Get a list of apps and repositories that match the specified query. """ + """ + Get a list of apps and repositories that match the specified query. + """ query = parsed_args["query"] page = min(max(1, parsed_args["page"]), MAX_RESULT_PAGE_COUNT) offset = (page - 1) * MAX_PER_PAGE diff --git a/endpoints/api/secscan.py b/endpoints/api/secscan.py index b8fe7d8e2..77c3fc8cb 100644 --- a/endpoints/api/secscan.py +++ b/endpoints/api/secscan.py @@ -1,4 +1,6 @@ -""" List and manage repository vulnerabilities and other security information. """ +""" +List and manage repository vulnerabilities and other security information. 
+""" import logging import features @@ -27,7 +29,9 @@ from endpoints.api.manifest import MANIFEST_DIGEST_ROUTE @unique class SecurityScanStatus(Enum): - """ Security scan status enum """ + """ + Security scan status enum. + """ SCANNED = "scanned" FAILED = "failed" @@ -46,9 +50,10 @@ logger = logging.getLogger(__name__) def _security_info(manifest_or_legacy_image, include_vulnerabilities=True): - """ Returns a dict representing the result of a call to the security status API for the given - manifest or image. - """ + """ + Returns a dict representing the result of a call to the security status API for the given + manifest or image. + """ result = secscan_model.load_security_information( manifest_or_legacy_image, include_vulnerabilities=include_vulnerabilities ) @@ -70,8 +75,11 @@ def _security_info(manifest_or_legacy_image, include_vulnerabilities=True): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("imageid", "The image ID") class RepositoryImageSecurity(RepositoryParamResource): - """ Operations for managing the vulnerabilities in a repository image. - DEPRECATED: Please retrieve security by manifest . """ + """ + Operations for managing the vulnerabilities in a repository image. + + DEPRECATED: Please retrieve security by manifest . + """ @process_basic_auth_no_pass @require_repo_read @@ -82,7 +90,9 @@ class RepositoryImageSecurity(RepositoryParamResource): "vulnerabilities", "Include vulnerabilities information", type=truthy_bool, default=False ) def get(self, namespace, repository, imageid, parsed_args): - """ Fetches the features and vulnerabilities (if any) for a repository image. """ + """ + Fetches the features and vulnerabilities (if any) for a repository image. + """ repo_ref = registry_model.lookup_repository(namespace, repository) if repo_ref is None: raise NotFound() @@ -99,7 +109,9 @@ class RepositoryImageSecurity(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("manifestref", "The digest of the manifest") class RepositoryManifestSecurity(RepositoryParamResource): - """ Operations for managing the vulnerabilities in a repository manifest. """ + """ + Operations for managing the vulnerabilities in a repository manifest. + """ @process_basic_auth_no_pass @require_repo_read diff --git a/endpoints/api/signing.py b/endpoints/api/signing.py index 5798e7c27..0c3e3af10 100644 --- a/endpoints/api/signing.py +++ b/endpoints/api/signing.py @@ -1,4 +1,6 @@ -""" List and manage repository signing information """ +""" +List and manage repository signing information. +""" import logging import features @@ -23,13 +25,17 @@ logger = logging.getLogger(__name__) @show_if(features.SIGNING) @path_param("repository", "The full path of the repository. e.g. namespace/name") class RepositorySignatures(RepositoryParamResource): - """ Operations for managing the signatures in a repository image. """ + """ + Operations for managing the signatures in a repository image. + """ @require_repo_read @nickname("getRepoSignatures") @disallow_for_app_repositories def get(self, namespace, repository): - """ Fetches the list of signed tags for the repository. """ + """ + Fetches the list of signed tags for the repository. 
+ """ if not model.is_trust_enabled(namespace, repository): raise NotFound() diff --git a/endpoints/api/signing_models_interface.py b/endpoints/api/signing_models_interface.py index ff8b9e369..43f96dc48 100644 --- a/endpoints/api/signing_models_interface.py +++ b/endpoints/api/signing_models_interface.py @@ -5,12 +5,12 @@ from six import add_metaclass @add_metaclass(ABCMeta) class SigningInterface(object): """ - Interface that represents all data store interactions required by the signing API endpoint. - """ + Interface that represents all data store interactions required by the signing API endpoint. + """ @abstractmethod def is_trust_enabled(self, namespace_name, repo_name): """ - Returns whether the repository with the given namespace name and repository name exists and - has trust enabled. - """ + Returns whether the repository with the given namespace name and repository name exists and + has trust enabled. + """ diff --git a/endpoints/api/signing_models_pre_oci.py b/endpoints/api/signing_models_pre_oci.py index 4093d76de..356bff9e7 100644 --- a/endpoints/api/signing_models_pre_oci.py +++ b/endpoints/api/signing_models_pre_oci.py @@ -4,9 +4,9 @@ from endpoints.api.signing_models_interface import SigningInterface class PreOCIModel(SigningInterface): """ - PreOCIModel implements the data model for signing using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for signing using a database schema before it was changed + to support the OCI specification. + """ def is_trust_enabled(self, namespace_name, repo_name): repo = model.repository.get_repository(namespace_name, repo_name) diff --git a/endpoints/api/subscribe.py b/endpoints/api/subscribe.py index 0bbce84ee..cb70ab097 100644 --- a/endpoints/api/subscribe.py +++ b/endpoints/api/subscribe.py @@ -1,4 +1,6 @@ -""" Subscribe to plans. """ +""" +Subscribe to plans. +""" import logging import stripe import features diff --git a/endpoints/api/subscribe_models_interface.py b/endpoints/api/subscribe_models_interface.py index e1668602f..1cd51f868 100644 --- a/endpoints/api/subscribe_models_interface.py +++ b/endpoints/api/subscribe_models_interface.py @@ -5,23 +5,23 @@ from six import add_metaclass @add_metaclass(ABCMeta) class SubscribeInterface(object): """ - Interface that represents all data store interactions required by the subscribe API endpoint. - """ + Interface that represents all data store interactions required by the subscribe API endpoint. + """ @abstractmethod def get_private_repo_count(self, username): """ - Returns the number of private repositories for a given username or namespace. - """ + Returns the number of private repositories for a given username or namespace. + """ @abstractmethod def create_unique_notification(self, kind_name, target_username, metadata={}): """ - Creates a notification using the given parameters. - """ + Creates a notification using the given parameters. + """ @abstractmethod def delete_notifications_by_kind(self, target_username, kind_name): """ - Remove notifications for a target based on given kind. - """ + Remove notifications for a target based on given kind. 
+ """ diff --git a/endpoints/api/subscribe_models_pre_oci.py b/endpoints/api/subscribe_models_pre_oci.py index 9ff5337ab..f6fd95178 100644 --- a/endpoints/api/subscribe_models_pre_oci.py +++ b/endpoints/api/subscribe_models_pre_oci.py @@ -5,9 +5,9 @@ from endpoints.api.subscribe_models_interface import SubscribeInterface class PreOCIModel(SubscribeInterface): """ - PreOCIModel implements the data model for build triggers using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for build triggers using a database schema before it was + changed to support the OCI specification. + """ def get_private_repo_count(self, username): return get_private_repo_count(username) diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py index 69a7aec54..46e05b372 100644 --- a/endpoints/api/suconfig.py +++ b/endpoints/api/suconfig.py @@ -1,4 +1,6 @@ -""" Superuser Config API. """ +""" +Superuser Config API. +""" import logging import os @@ -19,7 +21,9 @@ logger = logging.getLogger(__name__) def database_is_valid(): - """ Returns whether the database, as configured, is valid. """ + """ + Returns whether the database, as configured, is valid. + """ if app.config["TESTING"]: return False @@ -27,7 +31,9 @@ def database_is_valid(): def database_has_users(): - """ Returns whether the database has any users defined. """ + """ + Returns whether the database has any users defined. + """ return model.has_users() @@ -35,14 +41,17 @@ def database_has_users(): @internal_only @show_if(features.SUPER_USERS) class SuperUserRegistryStatus(ApiResource): - """ Resource for determining the status of the registry, such as if config exists, - if a database is configured, and if it has any defined users. - """ + """ + Resource for determining the status of the registry, such as if config exists, if a database is + configured, and if it has any defined users. + """ @nickname("scRegistryStatus") @verify_not_prod def get(self): - """ Returns the status of the registry. """ + """ + Returns the status of the registry. + """ # If we have SETUP_COMPLETE, then we're ready to go! if app.config.get("SETUP_COMPLETE", False): return {"provider_id": config_provider.provider_id, "status": "ready"} @@ -61,7 +70,8 @@ class _AlembicLogHandler(logging.Handler): # From: https://stackoverflow.com/a/44712205 def get_process_id(name): - """Return process ids found by (partial) name or regex. + """ + Return process ids found by (partial) name or regex. >>> get_process_id('kthreadd') [2] @@ -79,12 +89,16 @@ def get_process_id(name): @internal_only @show_if(features.SUPER_USERS) class SuperUserShutdown(ApiResource): - """ Resource for sending a shutdown signal to the container. """ + """ + Resource for sending a shutdown signal to the container. + """ @verify_not_prod @nickname("scShutdownContainer") def post(self): - """ Sends a signal to the phusion init system to shut down the container. """ + """ + Sends a signal to the phusion init system to shut down the container. + """ # Note: This method is called to set the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. 
if app.config["TESTING"] or not database_has_users() or SuperUserPermission().can(): diff --git a/endpoints/api/suconfig_models_interface.py b/endpoints/api/suconfig_models_interface.py index d41a97d11..a1eae5ec2 100644 --- a/endpoints/api/suconfig_models_interface.py +++ b/endpoints/api/suconfig_models_interface.py @@ -5,35 +5,37 @@ from six import add_metaclass @add_metaclass(ABCMeta) class SuperuserConfigDataInterface(object): """ - Interface that represents all data store interactions required by the superuser config API. - """ + Interface that represents all data store interactions required by the superuser config API. + """ @abstractmethod def is_valid(self): """ - Returns true if the configured database is valid. - """ + Returns true if the configured database is valid. + """ @abstractmethod def has_users(self): """ - Returns true if there are any users defined. - """ + Returns true if there are any users defined. + """ @abstractmethod def create_superuser(self, username, password, email): """ - Creates a new superuser with the given username, password and email. Returns the user's UUID. - """ + Creates a new superuser with the given username, password and email. + + Returns the user's UUID. + """ @abstractmethod def has_federated_login(self, username, service_name): """ - Returns true if the matching user has a federated login under the matching service. - """ + Returns true if the matching user has a federated login under the matching service. + """ @abstractmethod def attach_federated_login(self, username, service_name, federated_username): """ - Attaches a federatated login to the matching user, under the given service. - """ + Attaches a federatated login to the matching user, under the given service. + """ diff --git a/endpoints/api/superuser.py b/endpoints/api/superuser.py index a3d4e3854..0861b2e6d 100644 --- a/endpoints/api/superuser.py +++ b/endpoints/api/superuser.py @@ -1,4 +1,6 @@ -""" Superuser API. """ +""" +Superuser API. +""" import logging import os import string @@ -68,7 +70,9 @@ def get_services(): @resource("/v1/superuser/aggregatelogs") @internal_only class SuperUserAggregateLogs(ApiResource): - """ Resource for fetching aggregated logs for the current user. """ + """ + Resource for fetching aggregated logs for the current user. + """ @require_fresh_login @verify_not_prod @@ -77,7 +81,9 @@ class SuperUserAggregateLogs(ApiResource): @query_param("starttime", "Earliest time from which to get logs. (%m/%d/%Y %Z)", type=str) @query_param("endtime", "Latest time to which to get logs. (%m/%d/%Y %Z)", type=str) def get(self, parsed_args): - """ Returns the aggregated logs for the current system. """ + """ + Returns the aggregated logs for the current system. + """ if SuperUserPermission().can(): (start_time, end_time) = _validate_logs_arguments( parsed_args["starttime"], parsed_args["endtime"] @@ -95,7 +101,9 @@ LOGS_PER_PAGE = 20 @internal_only @show_if(features.SUPER_USERS) class SuperUserLogs(ApiResource): - """ Resource for fetching all logs in the system. """ + """ + Resource for fetching all logs in the system. + """ @require_fresh_login @verify_not_prod @@ -107,7 +115,9 @@ class SuperUserLogs(ApiResource): @page_support() @require_scope(scopes.SUPERUSER) def get(self, parsed_args, page_token): - """ List the usage logs for the current system. """ + """ + List the usage logs for the current system. 
+ """ if SuperUserPermission().can(): start_time = parsed_args["starttime"] end_time = parsed_args["endtime"] @@ -158,14 +168,18 @@ def user_view(user, password=None): @internal_only @show_if(features.SUPER_USERS) class ChangeLog(ApiResource): - """ Resource for returning the change log for enterprise customers. """ + """ + Resource for returning the change log for enterprise customers. + """ @require_fresh_login @verify_not_prod @nickname("getChangeLog") @require_scope(scopes.SUPERUSER) def get(self): - """ Returns the change log for this installation. """ + """ + Returns the change log for this installation. + """ if SuperUserPermission().can(): with open(os.path.join(ROOT_DIR, "CHANGELOG.md"), "r") as f: return {"log": f.read()} @@ -177,14 +191,18 @@ class ChangeLog(ApiResource): @internal_only @show_if(features.SUPER_USERS) class SuperUserOrganizationList(ApiResource): - """ Resource for listing organizations in the system. """ + """ + Resource for listing organizations in the system. + """ @require_fresh_login @verify_not_prod @nickname("listAllOrganizations") @require_scope(scopes.SUPERUSER) def get(self): - """ Returns a list of all organizations in the system. """ + """ + Returns a list of all organizations in the system. + """ if SuperUserPermission().can(): return {"organizations": [org.to_dict() for org in pre_oci_model.get_organizations()]} @@ -194,7 +212,9 @@ class SuperUserOrganizationList(ApiResource): @resource("/v1/superuser/users/") @show_if(features.SUPER_USERS) class SuperUserList(ApiResource): - """ Resource for listing users in the system. """ + """ + Resource for listing users in the system. + """ schemas = { "CreateInstallUser": { @@ -223,7 +243,9 @@ class SuperUserList(ApiResource): ) @require_scope(scopes.SUPERUSER) def get(self, parsed_args): - """ Returns a list of all users in the system. """ + """ + Returns a list of all users in the system. + """ if SuperUserPermission().can(): users = pre_oci_model.get_active_users(disabled=parsed_args["disabled"]) return {"users": [user.to_dict() for user in users]} @@ -236,7 +258,9 @@ class SuperUserList(ApiResource): @validate_json_request("CreateInstallUser") @require_scope(scopes.SUPERUSER) def post(self): - """ Creates a new user. """ + """ + Creates a new user. + """ # Ensure that we are using database auth. if app.config["AUTHENTICATION_TYPE"] != "Database": raise InvalidRequest("Cannot create a user in a non-database auth system") @@ -275,7 +299,9 @@ class SuperUserList(ApiResource): @show_if(features.SUPER_USERS) @show_if(features.MAILING) class SuperUserSendRecoveryEmail(ApiResource): - """ Resource for sending a recovery user on behalf of a user. """ + """ + Resource for sending a recovery user on behalf of a user. + """ @require_fresh_login @verify_not_prod @@ -306,7 +332,9 @@ class SuperUserSendRecoveryEmail(ApiResource): @internal_only @show_if(features.SUPER_USERS) class SuperUserManagement(ApiResource): - """ Resource for managing users in the system. """ + """ + Resource for managing users in the system. + """ schemas = { "UpdateUser": { @@ -326,7 +354,9 @@ class SuperUserManagement(ApiResource): @nickname("getInstallUser") @require_scope(scopes.SUPERUSER) def get(self, username): - """ Returns information about the specified user. """ + """ + Returns information about the specified user. 
+ """ if SuperUserPermission().can(): user = pre_oci_model.get_nonrobot_user(username) if user is None: @@ -341,7 +371,9 @@ class SuperUserManagement(ApiResource): @nickname("deleteInstallUser") @require_scope(scopes.SUPERUSER) def delete(self, username): - """ Deletes the specified user. """ + """ + Deletes the specified user. + """ if SuperUserPermission().can(): user = pre_oci_model.get_nonrobot_user(username) if user is None: @@ -361,7 +393,9 @@ class SuperUserManagement(ApiResource): @validate_json_request("UpdateUser") @require_scope(scopes.SUPERUSER) def put(self, username): - """ Updates information about the specified user. """ + """ + Updates information about the specified user. + """ if SuperUserPermission().can(): user = pre_oci_model.get_nonrobot_user(username) if user is None: @@ -418,14 +452,18 @@ class SuperUserManagement(ApiResource): @internal_only @show_if(features.SUPER_USERS) class SuperUserTakeOwnership(ApiResource): - """ Resource for a superuser to take ownership of a namespace. """ + """ + Resource for a superuser to take ownership of a namespace. + """ @require_fresh_login @verify_not_prod @nickname("takeOwnership") @require_scope(scopes.SUPERUSER) def post(self, namespace): - """ Takes ownership of the specified organization or user. """ + """ + Takes ownership of the specified organization or user. + """ if SuperUserPermission().can(): # Disallow for superusers. if superusers.is_superuser(namespace): @@ -455,7 +493,9 @@ class SuperUserTakeOwnership(ApiResource): @path_param("name", "The name of the organizaton being managed") @show_if(features.SUPER_USERS) class SuperUserOrganizationManagement(ApiResource): - """ Resource for managing organizations in the system. """ + """ + Resource for managing organizations in the system. + """ schemas = { "UpdateOrg": { @@ -473,7 +513,9 @@ class SuperUserOrganizationManagement(ApiResource): @nickname("deleteOrganization") @require_scope(scopes.SUPERUSER) def delete(self, name): - """ Deletes the specified organization. """ + """ + Deletes the specified organization. + """ if SuperUserPermission().can(): pre_oci_model.mark_organization_for_deletion(name) return "", 204 @@ -486,7 +528,9 @@ class SuperUserOrganizationManagement(ApiResource): @validate_json_request("UpdateOrg") @require_scope(scopes.SUPERUSER) def put(self, name): - """ Updates information about the specified user. """ + """ + Updates information about the specified user. + """ if SuperUserPermission().can(): org_data = request.get_json() old_name = org_data["name"] if "name" in org_data else None @@ -522,7 +566,9 @@ def approval_view(approval): @resource("/v1/superuser/keys") @show_if(features.SUPER_USERS) class SuperUserServiceKeyManagement(ApiResource): - """ Resource for managing service keys.""" + """ + Resource for managing service keys. + """ schemas = { "CreateServiceKey": { @@ -636,7 +682,9 @@ class SuperUserServiceKeyManagement(ApiResource): @path_param("kid", "The unique identifier for a service key") @show_if(features.SUPER_USERS) class SuperUserServiceKey(ApiResource): - """ Resource for managing service keys. """ + """ + Resource for managing service keys. + """ schemas = { "PutServiceKey": { @@ -753,7 +801,9 @@ class SuperUserServiceKey(ApiResource): @path_param("kid", "The unique identifier for a service key") @show_if(features.SUPER_USERS) class SuperUserServiceKeyApproval(ApiResource): - """ Resource for approving service keys. """ + """ + Resource for approving service keys. 
+ """ schemas = { "ApproveServiceKey": { @@ -801,14 +851,18 @@ class SuperUserServiceKeyApproval(ApiResource): @path_param("build_uuid", "The UUID of the build") @show_if(features.SUPER_USERS) class SuperUserRepositoryBuildLogs(ApiResource): - """ Resource for loading repository build logs for the superuser. """ + """ + Resource for loading repository build logs for the superuser. + """ @require_fresh_login @verify_not_prod @nickname("getRepoBuildLogsSuperUser") @require_scope(scopes.SUPERUSER) def get(self, build_uuid): - """ Return the build logs for the build specified by the build uuid. """ + """ + Return the build logs for the build specified by the build uuid. + """ if SuperUserPermission().can(): try: repo_build = pre_oci_model.get_repository_build(build_uuid) @@ -824,14 +878,18 @@ class SuperUserRepositoryBuildLogs(ApiResource): @path_param("build_uuid", "The UUID of the build") @show_if(features.SUPER_USERS) class SuperUserRepositoryBuildStatus(ApiResource): - """ Resource for dealing with repository build status. """ + """ + Resource for dealing with repository build status. + """ @require_fresh_login @verify_not_prod @nickname("getRepoBuildStatusSuperUser") @require_scope(scopes.SUPERUSER) def get(self, build_uuid): - """ Return the status for the builds specified by the build uuids. """ + """ + Return the status for the builds specified by the build uuids. + """ if SuperUserPermission().can(): try: build = pre_oci_model.get_repository_build(build_uuid) @@ -847,14 +905,18 @@ class SuperUserRepositoryBuildStatus(ApiResource): @path_param("build_uuid", "The UUID of the build") @show_if(features.SUPER_USERS) class SuperUserRepositoryBuildResource(ApiResource): - """ Resource for dealing with repository builds as a super user. """ + """ + Resource for dealing with repository builds as a super user. + """ @require_fresh_login @verify_not_prod @nickname("getRepoBuildSuperUser") @require_scope(scopes.SUPERUSER) def get(self, build_uuid): - """ Returns information about a build. """ + """ + Returns information about a build. + """ if SuperUserPermission().can(): try: build = pre_oci_model.get_repository_build(build_uuid) diff --git a/endpoints/api/superuser_models_interface.py b/endpoints/api/superuser_models_interface.py index 48d64087b..5348005d9 100644 --- a/endpoints/api/superuser_models_interface.py +++ b/endpoints/api/superuser_models_interface.py @@ -28,14 +28,15 @@ class BuildTrigger( ) ): """ - BuildTrigger represent a trigger that is associated with a build - :type uuid: string - :type service_name: string - :type pull_robot: User - :type can_read: boolean - :type can_admin: boolean - :type for_build: boolean - """ + BuildTrigger represent a trigger that is associated with a build. 
+ + :type uuid: string + :type service_name: string + :type pull_robot: User + :type can_read: boolean + :type can_admin: boolean + :type for_build: boolean + """ def to_dict(self): if not self.uuid: @@ -87,24 +88,25 @@ class RepositoryBuild( ) ): """ - RepositoryBuild represents a build associated with a repostiory - :type uuid: string - :type logs_archived: boolean - :type repository_namespace_user_username: string - :type repository_name: string - :type can_write: boolean - :type can_write: boolean - :type pull_robot: User - :type resource_key: string - :type trigger: Trigger - :type display_name: string - :type started: boolean - :type job_config: {Any -> Any} - :type phase: string - :type status: string - :type error: string - :type archive_url: string - """ + RepositoryBuild represents a build associated with a repository. + + :type uuid: string + :type logs_archived: boolean + :type repository_namespace_user_username: string + :type repository_name: string + :type can_write: boolean + :type can_read: boolean + :type pull_robot: User + :type resource_key: string + :type trigger: Trigger + :type display_name: string + :type started: boolean + :type job_config: {Any -> Any} + :type phase: string + :type status: string + :type error: string + :type archive_url: string + """ def to_dict(self): @@ -144,12 +146,13 @@ class RepositoryBuild( class Approval(namedtuple("Approval", ["approver", "approval_type", "approved_date", "notes"])): """ - Approval represents whether a key has been approved or not - :type approver: User - :type approval_type: string - :type approved_date: Date - :type notes: string - """ + Approval represents whether a key has been approved or not. + + :type approver: User + :type approval_type: string + :type approved_date: Date + :type notes: string + """ def to_dict(self): return { @@ -177,18 +180,18 @@ class ServiceKey( ) ): """ - ServiceKey is an apostille signing key - :type name: string - :type kid: int - :type service: string - :type jwk: string - :type metadata: string - :type created_date: Date - :type expiration_date: Date - :type rotation_duration: Date - :type approval: Approval + ServiceKey is an apostille signing key. - """ + :type name: string + :type kid: int + :type service: string + :type jwk: string + :type metadata: string + :type created_date: Date + :type expiration_date: Date + :type rotation_duration: Date + :type approval: Approval + """ def to_dict(self): return { @@ -206,13 +209,14 @@ class ServiceKey( class User(namedtuple("User", ["username", "email", "verified", "enabled", "robot"])): """ - User represents a single user. - :type username: string - :type email: string - :type verified: boolean - :type enabled: boolean - :type robot: User - """ + User represents a single user. + + :type username: string + :type email: string + :type verified: boolean + :type enabled: boolean + :type robot: User + """ def to_dict(self): user_data = {
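The namedtuple models above feed the SuperuserDataInterface below, which uses the same six/ABCMeta idiom as every *Interface class in this patch; the idiom keeps the interfaces usable from both Python 2 and 3. A minimal sketch of the idiom, using a hypothetical interface and model (not one of the real classes):

    from abc import ABCMeta, abstractmethod
    from six import add_metaclass

    @add_metaclass(ABCMeta)
    class ExampleDataInterface(object):
        """
        Interface that represents the data store interactions required by a hypothetical API.
        """

        @abstractmethod
        def get_item(self, item_id):
            """
            Returns an item by its id.
            """

    class ExampleModel(ExampleDataInterface):
        def get_item(self, item_id):
            return {"id": item_id}

    # Instantiating a subclass that fails to implement an @abstractmethod raises TypeError.
    assert ExampleModel().get_item(7) == {"id": 7}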
@@ -231,10 +235,11 @@ class User(namedtuple("User", ["username", "email", "verified", "enabled", "robo class Organization(namedtuple("Organization", ["username", "email"])): """ - Organization represents a single org. - :type username: string - :type email: string - """ + Organization represents a single org. + + :type username: string + :type email: string + """ def to_dict(self): return { @@ -247,127 +252,127 @@ @add_metaclass(ABCMeta) class SuperuserDataInterface(object): """ - Interface that represents all data store interactions required by a superuser api. - """ + Interface that represents all data store interactions required by a superuser API. + """ @abstractmethod def get_organizations(self): """ - Returns a list of Organization - """ + Returns a list of Organization. + """ @abstractmethod def get_active_users(self): """ - Returns a list of User - """ + Returns a list of User. + """ @abstractmethod def create_install_user(self, username, password, email): """ - Returns the created user and confirmation code for email confirmation - """ + Returns the created user and confirmation code for email confirmation. + """ @abstractmethod def get_nonrobot_user(self, username): """ - Returns a User - """ + Returns a User. + """ @abstractmethod def create_reset_password_email_code(self, email): """ - Returns a recover password code - """ + Returns a password recovery code. + """ @abstractmethod def mark_user_for_deletion(self, username): """ - Returns None - """ + Returns None. + """ @abstractmethod def change_password(self, username, password): """ - Returns None - """ + Returns None. + """ @abstractmethod def update_email(self, username, email, auto_verify): """ - Returns None - """ + Returns None. + """ @abstractmethod def update_enabled(self, username, enabled): """ - Returns None - """ + Returns None. + """ @abstractmethod def take_ownership(self, namespace, authed_user): """ - Returns id of entity and whether the entity was a user - """ + Returns id of entity and whether the entity was a user. + """ @abstractmethod def mark_organization_for_deletion(self, name): """ - Returns None - """ + Returns None. + """ @abstractmethod def change_organization_name(self, old_org_name, new_org_name): """ - Returns updated Organization - """ + Returns updated Organization. + """ @abstractmethod def list_all_service_keys(self): """ - Returns a list of service keys - """ + Returns a list of service keys. + """ @abstractmethod def generate_service_key( self, service, expiration_date, kid=None, name="", metadata=None, rotation_duration=None ): """ - Returns a tuple of private key and public key id - """ + Returns a tuple of private key and public key id. + """ @abstractmethod def approve_service_key(self, kid, approver, approval_type, notes=""): """ - Returns the approved Key - """ + Returns the approved Key. + """ @abstractmethod def get_service_key(self, kid, service=None, alive_only=True, approved_only=True): """ - Returns ServiceKey - """ + Returns ServiceKey. + """ @abstractmethod def set_key_expiration(self, kid, expiration_date): """ - Returns None - """ + Returns None. + """ @abstractmethod def update_service_key(self, kid, name=None, metadata=None): """ - Returns None - """ + Returns None. + """ @abstractmethod def delete_service_key(self, kid): """ - Returns deleted ServiceKey - """ + Returns deleted ServiceKey. + """ @abstractmethod def get_repository_build(self, uuid): """ - Returns RepositoryBuild - """ + Returns RepositoryBuild.
+ """ diff --git a/endpoints/api/superuser_models_pre_oci.py b/endpoints/api/superuser_models_pre_oci.py index b9cba01c1..67f533a40 100644 --- a/endpoints/api/superuser_models_pre_oci.py +++ b/endpoints/api/superuser_models_pre_oci.py @@ -65,9 +65,9 @@ class InvalidRepositoryBuildException(Exception): class PreOCIModel(SuperuserDataInterface): """ - PreOCIModel implements the data model for the SuperUser using a database schema - before it was changed to support the OCI specification. - """ + PreOCIModel implements the data model for the SuperUser using a database schema before it was + changed to support the OCI specification. + """ def get_repository_build(self, uuid): try: diff --git a/endpoints/api/tag.py b/endpoints/api/tag.py index 3e2e7df77..c42969800 100644 --- a/endpoints/api/tag.py +++ b/endpoints/api/tag.py @@ -1,4 +1,6 @@ -""" Manage the tags of a repository. """ +""" +Manage the tags of a repository. +""" from datetime import datetime from flask import request, abort @@ -65,7 +67,9 @@ def _tag_dict(tag): @resource("/v1/repository//tag/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class ListRepositoryTags(RepositoryParamResource): - """ Resource for listing full repository tag history, alive *and dead*. """ + """ + Resource for listing full repository tag history, alive *and dead*. + """ @require_repo_read @disallow_for_app_repositories @@ -105,7 +109,9 @@ class ListRepositoryTags(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("tag", "The name of the tag") class RepositoryTag(RepositoryParamResource): - """ Resource for managing repository tags. """ + """ + Resource for managing repository tags. + """ schemas = { "ChangeTag": { @@ -134,7 +140,9 @@ class RepositoryTag(RepositoryParamResource): @nickname("changeTag") @validate_json_request("ChangeTag") def put(self, namespace, repository, tag): - """ Change which image a tag points to or create a new tag.""" + """ + Change which image a tag points to or create a new tag. + """ if not TAG_REGEX.match(tag): abort(400, TAG_ERROR) @@ -238,7 +246,9 @@ class RepositoryTag(RepositoryParamResource): @disallow_for_non_normal_repositories @nickname("deleteFullTag") def delete(self, namespace, repository, tag): - """ Delete the specified repository tag. """ + """ + Delete the specified repository tag. + """ repo_ref = registry_model.lookup_repository(namespace, repository) if repo_ref is None: raise NotFound() @@ -260,7 +270,9 @@ class RepositoryTag(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("tag", "The name of the tag") class RepositoryTagImages(RepositoryParamResource): - """ Resource for listing the images in a specific repository tag. """ + """ + Resource for listing the images in a specific repository tag. + """ @require_repo_read @nickname("listTagImages") @@ -273,7 +285,9 @@ class RepositoryTagImages(RepositoryParamResource): default=False, ) def get(self, namespace, repository, tag, parsed_args): - """ List the images for the specified repository tag. """ + """ + List the images for the specified repository tag. + """ repo_ref = registry_model.lookup_repository(namespace, repository) if repo_ref is None: raise NotFound() @@ -309,7 +323,9 @@ class RepositoryTagImages(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. 
namespace/name") @path_param("tag", "The name of the tag") class RestoreTag(RepositoryParamResource): - """ Resource for restoring a repository tag back to a previous image. """ + """ + Resource for restoring a repository tag back to a previous image. + """ schemas = { "RestoreTag": { @@ -334,7 +350,9 @@ class RestoreTag(RepositoryParamResource): @nickname("restoreTag") @validate_json_request("RestoreTag") def post(self, namespace, repository, tag): - """ Restores a repository tag back to a previous image in the repository. """ + """ + Restores a repository tag back to a previous image in the repository. + """ repo_ref = registry_model.lookup_repository(namespace, repository) if repo_ref is None: raise NotFound() diff --git a/endpoints/api/team.py b/endpoints/api/team.py index a540aa664..27de3aba3 100644 --- a/endpoints/api/team.py +++ b/endpoints/api/team.py @@ -1,4 +1,6 @@ -""" Create, list and manage an organization's teams. """ +""" +Create, list and manage an organization's teams. +""" import json @@ -138,10 +140,13 @@ def invite_view(invite): def disallow_for_synced_team(except_robots=False): - """ Disallows the decorated operation for a team that is marked as being synced from an internal - auth provider such as LDAP. If except_robots is True, then the operation is allowed if the - member specified on the operation is a robot account. - """ + """ + Disallows the decorated operation for a team that is marked as being synced from an internal + auth provider such as LDAP. + + If except_robots is True, then the operation is allowed if the member specified on the operation + is a robot account. + """ def inner(func): @wraps(func) @@ -169,7 +174,9 @@ disallow_all_for_synced_team = disallow_for_synced_team(except_robots=False) @path_param("orgname", "The name of the organization") @path_param("teamname", "The name of the team") class OrganizationTeam(ApiResource): - """ Resource for manging an organization's teams. """ + """ + Resource for manging an organization's teams. + """ schemas = { "TeamDescription": { @@ -194,7 +201,9 @@ class OrganizationTeam(ApiResource): @nickname("updateOrganizationTeam") @validate_json_request("TeamDescription") def put(self, orgname, teamname): - """ Update the org-wide permission for the specified team. """ + """ + Update the org-wide permission for the specified team. + """ edit_permission = AdministerOrganizationPermission(orgname) if edit_permission.can(): team = None @@ -242,7 +251,9 @@ class OrganizationTeam(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("deleteOrganizationTeam") def delete(self, orgname, teamname): - """ Delete the specified team. """ + """ + Delete the specified team. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): model.team.remove_team(orgname, teamname, get_authenticated_user().username) @@ -253,7 +264,9 @@ class OrganizationTeam(ApiResource): def _syncing_setup_allowed(orgname): - """ Returns whether syncing setup is allowed for the current user over the matching org. """ + """ + Returns whether syncing setup is allowed for the current user over the matching org. + """ if not features.NONSUPERUSER_TEAM_SYNCING_SETUP and not SuperUserPermission().can(): return False @@ -265,7 +278,9 @@ def _syncing_setup_allowed(orgname): @path_param("teamname", "The name of the team") @show_if(features.TEAM_SYNCING) class OrganizationTeamSyncing(ApiResource): - """ Resource for managing syncing of a team by a backing group. """ + """ + Resource for managing syncing of a team by a backing group. 
+ """ @require_scope(scopes.ORG_ADMIN) @require_scope(scopes.SUPERUSER) @@ -315,7 +330,9 @@ class OrganizationTeamSyncing(ApiResource): @path_param("orgname", "The name of the organization") @path_param("teamname", "The name of the team") class TeamMemberList(ApiResource): - """ Resource for managing the list of members for a team. """ + """ + Resource for managing the list of members for a team. + """ @require_scope(scopes.ORG_ADMIN) @parse_args() @@ -324,7 +341,9 @@ class TeamMemberList(ApiResource): ) @nickname("getOrganizationTeamMembers") def get(self, orgname, teamname, parsed_args): - """ Retrieve the list of members for the specified team. """ + """ + Retrieve the list of members for the specified team. + """ view_permission = ViewTeamPermission(orgname, teamname) edit_permission = AdministerOrganizationPermission(orgname) @@ -379,13 +398,17 @@ class TeamMemberList(ApiResource): @path_param("teamname", "The name of the team") @path_param("membername", "The username of the team member") class TeamMember(ApiResource): - """ Resource for managing individual members of a team. """ + """ + Resource for managing individual members of a team. + """ @require_scope(scopes.ORG_ADMIN) @nickname("updateOrganizationTeamMember") @disallow_nonrobots_for_synced_team def put(self, orgname, teamname, membername): - """ Adds or invites a member to an existing team. """ + """ + Adds or invites a member to an existing team. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): team = None @@ -423,9 +446,11 @@ class TeamMember(ApiResource): @nickname("deleteOrganizationTeamMember") @disallow_nonrobots_for_synced_team def delete(self, orgname, teamname, membername): - """ Delete a member of a team. If the user is merely invited to join - the team, then the invite is removed instead. - """ + """ + Delete a member of a team. + + If the user is merely invited to join the team, then the invite is removed instead. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): # Remote the user from the team. @@ -462,13 +487,17 @@ class TeamMember(ApiResource): @resource("/v1/organization//team//invite/") @show_if(features.MAILING) class InviteTeamMember(ApiResource): - """ Resource for inviting a team member via email address. """ + """ + Resource for inviting a team member via email address. + """ @require_scope(scopes.ORG_ADMIN) @nickname("inviteTeamMemberEmail") @disallow_all_for_synced_team def put(self, orgname, teamname, email): - """ Invites an email address to an existing team. """ + """ + Invites an email address to an existing team. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): team = None @@ -494,7 +523,9 @@ class InviteTeamMember(ApiResource): @require_scope(scopes.ORG_ADMIN) @nickname("deleteTeamMemberEmailInvite") def delete(self, orgname, teamname, email): - """ Delete an invite of an email address to join a team. """ + """ + Delete an invite of an email address to join a team. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): team = None @@ -523,11 +554,15 @@ class InviteTeamMember(ApiResource): @path_param("orgname", "The name of the organization") @path_param("teamname", "The name of the team") class TeamPermissions(ApiResource): - """ Resource for listing the permissions an org's team has in the system. """ + """ + Resource for listing the permissions an org's team has in the system. 
+ """ @nickname("getOrganizationTeamPermissions") def get(self, orgname, teamname): - """ Returns the list of repository permissions for the org's team. """ + """ + Returns the list of repository permissions for the org's team. + """ permission = AdministerOrganizationPermission(orgname) if permission.can(): try: @@ -546,12 +581,16 @@ class TeamPermissions(ApiResource): @internal_only @show_if(features.MAILING) class TeamMemberInvite(ApiResource): - """ Resource for managing invites to join a team. """ + """ + Resource for managing invites to join a team. + """ @require_user_admin @nickname("acceptOrganizationTeamInvite") def put(self, code): - """ Accepts an invite to join a team in an organization. """ + """ + Accepts an invite to join a team in an organization. + """ # Accept the invite for the current user. team = try_accept_invite(code, get_authenticated_user()) if not team: @@ -563,7 +602,9 @@ class TeamMemberInvite(ApiResource): @nickname("declineOrganizationTeamInvite") @require_user_admin def delete(self, code): - """ Delete an existing invitation to join a team. """ + """ + Delete an existing invitation to join a team. + """ (team, inviter) = model.team.delete_team_invite(code, user_obj=get_authenticated_user()) model.notification.delete_matching_notifications( diff --git a/endpoints/api/test/shared.py b/endpoints/api/test/shared.py index 8d7736e71..952ae7bdf 100644 --- a/endpoints/api/test/shared.py +++ b/endpoints/api/test/shared.py @@ -3,11 +3,12 @@ from endpoints.api import api def conduct_api_call(client, resource, method, params, body=None, expected_code=200, headers=None): - """ Conducts an API call to the given resource via the given client, and ensures its returned - status matches the code given. + """ + Conducts an API call to the given resource via the given client, and ensures its returned status + matches the code given. - Returns the response. - """ + Returns the response. + """ return conduct_call( client, resource, api.url_for, method, params, body, expected_code, headers=headers ) diff --git a/endpoints/api/test/test_mirror.py b/endpoints/api/test/test_mirror.py index de9ef62b7..e3755ff64 100644 --- a/endpoints/api/test/test_mirror.py +++ b/endpoints/api/test/test_mirror.py @@ -86,7 +86,9 @@ def test_get_repo_does_not_exist(client): def test_get_mirror(client): - """ Verify that performing a `GET` request returns expected and accurate data. """ + """ + Verify that performing a `GET` request returns expected and accurate data. + """ mirror = _setup_mirror() with client_with_identity("devtable", client) as cl: @@ -160,7 +162,9 @@ def test_get_mirror(client): ], ) def test_change_config(key, value, expected_status, client): - """ Verify that changing each attribute works as expected. """ + """ + Verify that changing each attribute works as expected. + """ mirror = _setup_mirror() with client_with_identity("devtable", client) as cl: @@ -223,7 +227,9 @@ def test_change_config(key, value, expected_status, client): ], ) def test_change_credentials(request_body, expected_status, client): - """ Verify credentials can only be modified as a pair. """ + """ + Verify credentials can only be modified as a pair. + """ mirror = _setup_mirror() with client_with_identity("devtable", client) as cl: diff --git a/endpoints/api/trigger.py b/endpoints/api/trigger.py index 0793db672..927847dd2 100644 --- a/endpoints/api/trigger.py +++ b/endpoints/api/trigger.py @@ -1,4 +1,6 @@ -""" Create, list and manage build triggers. """ +""" +Create, list and manage build triggers. 
+""" import logging from urlparse import urlunparse @@ -62,13 +64,17 @@ def get_trigger(trigger_uuid): @resource("/v1/repository//trigger/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class BuildTriggerList(RepositoryParamResource): - """ Resource for listing repository build triggers. """ + """ + Resource for listing repository build triggers. + """ @require_repo_admin @disallow_for_app_repositories @nickname("listBuildTriggers") def get(self, namespace_name, repo_name): - """ List the triggers for the specified repository. """ + """ + List the triggers for the specified repository. + """ triggers = model.build.list_build_triggers(namespace_name, repo_name) return {"triggers": [trigger_view(trigger, can_admin=True) for trigger in triggers]} @@ -77,7 +83,9 @@ class BuildTriggerList(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("trigger_uuid", "The UUID of the build trigger") class BuildTrigger(RepositoryParamResource): - """ Resource for managing specific build triggers. """ + """ + Resource for managing specific build triggers. + """ schemas = { "UpdateTrigger": { @@ -97,7 +105,9 @@ class BuildTrigger(RepositoryParamResource): @disallow_for_app_repositories @nickname("getBuildTrigger") def get(self, namespace_name, repo_name, trigger_uuid): - """ Get information for the specified build trigger. """ + """ + Get information for the specified build trigger. + """ return trigger_view(get_trigger(trigger_uuid), can_admin=True) @require_repo_admin @@ -106,7 +116,9 @@ class BuildTrigger(RepositoryParamResource): @nickname("updateBuildTrigger") @validate_json_request("UpdateTrigger") def put(self, namespace_name, repo_name, trigger_uuid): - """ Updates the specified build trigger. """ + """ + Updates the specified build trigger. + """ trigger = get_trigger(trigger_uuid) handler = BuildTriggerHandler.get_handler(trigger) @@ -134,7 +146,9 @@ class BuildTrigger(RepositoryParamResource): @disallow_for_non_normal_repositories @nickname("deleteBuildTrigger") def delete(self, namespace_name, repo_name, trigger_uuid): - """ Delete the specified build trigger. """ + """ + Delete the specified build trigger. + """ trigger = get_trigger(trigger_uuid) handler = BuildTriggerHandler.get_handler(trigger) @@ -165,7 +179,9 @@ class BuildTrigger(RepositoryParamResource): @path_param("trigger_uuid", "The UUID of the build trigger") @internal_only class BuildTriggerSubdirs(RepositoryParamResource): - """ Custom verb for fetching the subdirs which are buildable for a trigger. """ + """ + Custom verb for fetching the subdirs which are buildable for a trigger. + """ schemas = { "BuildTriggerSubdirRequest": {"type": "object", "description": "Arbitrary json.",}, @@ -177,7 +193,9 @@ class BuildTriggerSubdirs(RepositoryParamResource): @nickname("listBuildTriggerSubdirs") @validate_json_request("BuildTriggerSubdirRequest") def post(self, namespace_name, repo_name, trigger_uuid): - """ List the subdirectories available for the specified build trigger and source. """ + """ + List the subdirectories available for the specified build trigger and source. + """ trigger = get_trigger(trigger_uuid) user_permission = UserAdminPermission(trigger.connected_user.username) @@ -215,8 +233,9 @@ class BuildTriggerSubdirs(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. 
namespace/name") @path_param("trigger_uuid", "The UUID of the build trigger") class BuildTriggerActivate(RepositoryParamResource): - """ Custom verb for activating a build trigger once all required information has been collected. - """ + """ + Custom verb for activating a build trigger once all required information has been collected. + """ schemas = { "BuildTriggerActivateRequest": { @@ -238,7 +257,9 @@ class BuildTriggerActivate(RepositoryParamResource): @nickname("activateBuildTrigger") @validate_json_request("BuildTriggerActivateRequest") def post(self, namespace_name, repo_name, trigger_uuid): - """ Activate the specified build trigger. """ + """ + Activate the specified build trigger. + """ trigger = get_trigger(trigger_uuid) handler = BuildTriggerHandler.get_handler(trigger) if handler.is_active(): @@ -323,9 +344,10 @@ class BuildTriggerActivate(RepositoryParamResource): @path_param("trigger_uuid", "The UUID of the build trigger") @internal_only class BuildTriggerAnalyze(RepositoryParamResource): - """ Custom verb for analyzing the config for a build trigger and suggesting various changes - (such as a robot account to use for pulling) - """ + """ + Custom verb for analyzing the config for a build trigger and suggesting various changes (such as + a robot account to use for pulling) + """ schemas = { "BuildTriggerAnalyzeRequest": { @@ -341,7 +363,9 @@ class BuildTriggerAnalyze(RepositoryParamResource): @nickname("analyzeBuildTrigger") @validate_json_request("BuildTriggerAnalyzeRequest") def post(self, namespace_name, repo_name, trigger_uuid): - """ Analyze the specified build trigger configuration. """ + """ + Analyze the specified build trigger configuration. + """ trigger = get_trigger(trigger_uuid) if trigger.repository.namespace_user.username != namespace_name: @@ -377,7 +401,9 @@ class BuildTriggerAnalyze(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("trigger_uuid", "The UUID of the build trigger") class ActivateBuildTrigger(RepositoryParamResource): - """ Custom verb to manually activate a build trigger. """ + """ + Custom verb to manually activate a build trigger. + """ schemas = { "RunParameters": { @@ -407,7 +433,9 @@ class ActivateBuildTrigger(RepositoryParamResource): @nickname("manuallyStartBuildTrigger") @validate_json_request("RunParameters") def post(self, namespace_name, repo_name, trigger_uuid): - """ Manually start a build from the specified trigger. """ + """ + Manually start a build from the specified trigger. + """ trigger = get_trigger(trigger_uuid) if not trigger.enabled: raise InvalidRequest("Trigger is not enabled.") @@ -444,7 +472,9 @@ class ActivateBuildTrigger(RepositoryParamResource): @path_param("repository", "The full path of the repository. e.g. namespace/name") @path_param("trigger_uuid", "The UUID of the build trigger") class TriggerBuildList(RepositoryParamResource): - """ Resource to represent builds that were activated from the specified trigger. """ + """ + Resource to represent builds that were activated from the specified trigger. + """ @require_repo_admin @disallow_for_app_repositories @@ -452,7 +482,9 @@ class TriggerBuildList(RepositoryParamResource): @query_param("limit", "The maximum number of builds to return", type=int, default=5) @nickname("listTriggerRecentBuilds") def get(self, namespace_name, repo_name, trigger_uuid, parsed_args): - """ List the builds started by the specified trigger. """ + """ + List the builds started by the specified trigger. 
+ """ limit = parsed_args["limit"] builds = model.build.list_trigger_builds(namespace_name, repo_name, trigger_uuid, limit) return {"builds": [build_status_view(bld) for bld in builds]} @@ -464,14 +496,18 @@ FIELD_VALUE_LIMIT = 30 @resource("/v1/repository//trigger//fields/") @internal_only class BuildTriggerFieldValues(RepositoryParamResource): - """ Custom verb to fetch a values list for a particular field name. """ + """ + Custom verb to fetch a values list for a particular field name. + """ @require_repo_admin @disallow_for_app_repositories @disallow_for_non_normal_repositories @nickname("listTriggerFieldValues") def post(self, namespace_name, repo_name, trigger_uuid, field_name): - """ List the field values for a custom run field. """ + """ + List the field values for a custom run field. + """ trigger = get_trigger(trigger_uuid) config = request.get_json() or None @@ -492,7 +528,9 @@ class BuildTriggerFieldValues(RepositoryParamResource): @path_param("trigger_uuid", "The UUID of the build trigger") @internal_only class BuildTriggerSources(RepositoryParamResource): - """ Custom verb to fetch the list of build sources for the trigger config. """ + """ + Custom verb to fetch the list of build sources for the trigger config. + """ schemas = { "BuildTriggerSourcesRequest": { @@ -513,7 +551,9 @@ class BuildTriggerSources(RepositoryParamResource): @nickname("listTriggerBuildSources") @validate_json_request("BuildTriggerSourcesRequest") def post(self, namespace_name, repo_name, trigger_uuid): - """ List the build sources for the trigger configuration thus far. """ + """ + List the build sources for the trigger configuration thus far. + """ namespace = request.get_json().get("namespace") if namespace is None: raise InvalidRequest() @@ -537,13 +577,17 @@ class BuildTriggerSources(RepositoryParamResource): @path_param("trigger_uuid", "The UUID of the build trigger") @internal_only class BuildTriggerSourceNamespaces(RepositoryParamResource): - """ Custom verb to fetch the list of namespaces (orgs, projects, etc) for the trigger config. """ + """ + Custom verb to fetch the list of namespaces (orgs, projects, etc) for the trigger config. + """ @require_repo_admin @disallow_for_app_repositories @nickname("listTriggerBuildSourceNamespaces") def get(self, namespace_name, repo_name, trigger_uuid): - """ List the build sources for the trigger configuration thus far. """ + """ + List the build sources for the trigger configuration thus far. + """ trigger = get_trigger(trigger_uuid) user_permission = UserAdminPermission(trigger.connected_user.username) diff --git a/endpoints/api/trigger_analyzer.py b/endpoints/api/trigger_analyzer.py index 180994fa8..8e18a8441 100644 --- a/endpoints/api/trigger_analyzer.py +++ b/endpoints/api/trigger_analyzer.py @@ -6,7 +6,9 @@ from util import dockerfileparse def is_parent(context, dockerfile_path): - """ This checks whether the context is a parent of the dockerfile_path""" + """ + This checks whether the context is a parent of the dockerfile_path. + """ if context == "" or dockerfile_path == "": return False @@ -28,7 +30,9 @@ def is_parent(context, dockerfile_path): class TriggerAnalyzer: - """ This analyzes triggers and returns the appropriate trigger and robot view to the frontend. """ + """ + This analyzes triggers and returns the appropriate trigger and robot view to the frontend. 
+ """ def __init__( self, handler, namespace_name, server_hostname, new_config_dict, admin_org_permission diff --git a/endpoints/api/user.py b/endpoints/api/user.py index 7f7c0bc65..ebe6d9a0f 100644 --- a/endpoints/api/user.py +++ b/endpoints/api/user.py @@ -1,4 +1,6 @@ -""" Manage the current user. """ +""" +Manage the current user. +""" import logging import json @@ -82,9 +84,11 @@ logger = logging.getLogger(__name__) def handle_invite_code(invite_code, user): - """ Checks that the given invite code matches the specified user's e-mail address. If so, the - user is marked as having a verified e-mail address and this method returns True. - """ + """ + Checks that the given invite code matches the specified user's e-mail address. + + If so, the user is marked as having a verified e-mail address and this method returns True. + """ parsed_invite = parse_single_urn(invite_code) if parsed_invite is None: return False @@ -222,7 +226,9 @@ def notification_view(note): @resource("/v1/user/") class User(ApiResource): - """ Operations related to users. """ + """ + Operations related to users. + """ schemas = { "NewUser": { @@ -323,7 +329,9 @@ class User(ApiResource): @define_json_response("UserView") @anon_allowed def get(self): - """ Get user information for the authenticated user. """ + """ + Get user information for the authenticated user. + """ user = get_authenticated_user() if user is None or user.organization or not UserReadPermission(user.username).can(): raise InvalidToken("Requires authentication", payload={"session_required": False}) @@ -336,7 +344,9 @@ class User(ApiResource): @internal_only @validate_json_request("UpdateUser") def put(self): - """ Update a users details such as password or email. """ + """ + Update a users details such as password or email. + """ user = get_authenticated_user() user_data = request.get_json() previous_username = None @@ -443,7 +453,9 @@ class User(ApiResource): @internal_only @validate_json_request("NewUser") def post(self): - """ Create a new user. """ + """ + Create a new user. + """ if app.config["AUTHENTICATION_TYPE"] != "Database": abort(404) @@ -513,7 +525,9 @@ class User(ApiResource): @nickname("deleteCurrentUser") @internal_only def delete(self): - """ Deletes the current user. """ + """ + Deletes the current user. + """ if app.config["AUTHENTICATION_TYPE"] != "Database": abort(404) @@ -527,13 +541,16 @@ class User(ApiResource): @internal_only @show_if(features.BILLING) class PrivateRepositories(ApiResource): - """ Operations dealing with the available count of private repositories. """ + """ + Operations dealing with the available count of private repositories. + """ @require_user_admin @nickname("getUserPrivateAllowed") def get(self): - """ Get the number of private repos this user has, and whether they are allowed to create more. - """ + """ + Get the number of private repos this user has, and whether they are allowed to create more. + """ user = get_authenticated_user() private_repos = model.user.get_private_repo_count(user.username) repos_allowed = 0 @@ -551,8 +568,10 @@ class PrivateRepositories(ApiResource): @resource("/v1/user/clientkey") @internal_only class ClientKey(ApiResource): - """ Operations for returning an encrypted key which can be used in place of a password - for the Docker client. """ + """ + Operations for returning an encrypted key which can be used in place of a password for the + Docker client. 
+ """ schemas = { "GenerateClientKey": { @@ -566,7 +585,9 @@ class ClientKey(ApiResource): @nickname("generateUserClientKey") @validate_json_request("GenerateClientKey") def post(self): - """ Return's the user's private client key. """ + """ + Return's the user's private client key. + """ if not authentication.supports_encrypted_credentials: raise NotFound() @@ -614,7 +635,9 @@ def conduct_signin(username_or_email, password, invite_code=None): @internal_only @show_if(app.config["AUTHENTICATION_TYPE"] == "Database") class ConvertToOrganization(ApiResource): - """ Operations for converting a user to an organization. """ + """ + Operations for converting a user to an organization. + """ schemas = { "ConvertUser": { @@ -642,7 +665,9 @@ class ConvertToOrganization(ApiResource): @nickname("convertUserToOrganization") @validate_json_request("ConvertUser") def post(self): - """ Convert the user to an organization. """ + """ + Convert the user to an organization. + """ user = get_authenticated_user() convert_data = request.get_json() @@ -676,7 +701,9 @@ class ConvertToOrganization(ApiResource): @show_if(features.DIRECT_LOGIN) @internal_only class Signin(ApiResource): - """ Operations for signing in the user. """ + """ + Operations for signing in the user. + """ schemas = { "SigninUser": { @@ -696,7 +723,9 @@ class Signin(ApiResource): @anon_allowed @readonly_call_allowed def post(self): - """ Sign in the user with the specified credentials. """ + """ + Sign in the user with the specified credentials. + """ signin_data = request.get_json() if not signin_data: raise NotFound() @@ -710,7 +739,9 @@ class Signin(ApiResource): @resource("/v1/signin/verify") @internal_only class VerifyUser(ApiResource): - """ Operations for verifying the existing user. """ + """ + Operations for verifying the existing user. + """ schemas = { "VerifyUser": { @@ -727,7 +758,9 @@ class VerifyUser(ApiResource): @validate_json_request("VerifyUser") @readonly_call_allowed def post(self): - """ Verifies the signed in the user with the specified credentials. """ + """ + Verifies the signed in the user with the specified credentials. + """ signin_data = request.get_json() password = signin_data["password"] @@ -746,11 +779,15 @@ class VerifyUser(ApiResource): @resource("/v1/signout") @internal_only class Signout(ApiResource): - """ Resource for signing out users. """ + """ + Resource for signing out users. + """ @nickname("logout") def post(self): - """ Request that the current user be signed out. """ + """ + Request that the current user be signed out. + """ # Invalidate all sessions for the user. model.user.invalidate_all_sessions(get_authenticated_user()) @@ -766,9 +803,9 @@ class Signout(ApiResource): @resource("/v1/externallogin/") @internal_only class ExternalLoginInformation(ApiResource): - """ Resource for both setting a token for external login and returning its authorization - url. - """ + """ + Resource for both setting a token for external login and returning its authorization url. + """ schemas = { "GetLogin": { @@ -790,7 +827,9 @@ class ExternalLoginInformation(ApiResource): @readonly_call_allowed @validate_json_request("GetLogin") def post(self, service_id): - """ Generates the auth URL and CSRF token explicitly for OIDC/OAuth-associated login. """ + """ + Generates the auth URL and CSRF token explicitly for OIDC/OAuth-associated login. 
+ """ login_service = oauth_login.get_service(service_id) if login_service is None: raise InvalidRequest() @@ -814,12 +853,16 @@ class ExternalLoginInformation(ApiResource): @show_if(features.DIRECT_LOGIN) @internal_only class DetachExternal(ApiResource): - """ Resource for detaching an external login. """ + """ + Resource for detaching an external login. + """ @require_user_admin @nickname("detachExternalLogin") def post(self, service_id): - """ Request that the current user be detached from the external login service. """ + """ + Request that the current user be detached from the external login service. + """ model.user.detach_external_login(get_authenticated_user(), service_id) return {"success": True} @@ -828,7 +871,9 @@ class DetachExternal(ApiResource): @show_if(features.MAILING) @internal_only class Recovery(ApiResource): - """ Resource for requesting a password recovery email. """ + """ + Resource for requesting a password recovery email. + """ schemas = { "RequestRecovery": { @@ -849,7 +894,9 @@ class Recovery(ApiResource): @anon_allowed @validate_json_request("RequestRecovery") def post(self): - """ Request a password recovery email.""" + """ + Request a password recovery email. + """ def redact(value): threshold = max((len(value) / 3) - 1, 1) @@ -1025,7 +1072,9 @@ class UserAuthorization(ApiResource): @resource("/v1/user/starred") class StarredRepositoryList(ApiResource): - """ Operations for creating and listing starred repositories. """ + """ + Operations for creating and listing starred repositories. + """ schemas = { "NewStarredRepository": { @@ -1046,7 +1095,9 @@ class StarredRepositoryList(ApiResource): @require_user_admin @page_support() def get(self, page_token, parsed_args): - """ List all starred repositories. """ + """ + List all starred repositories. + """ repo_query = model.repository.get_user_starred_repositories(get_authenticated_user()) repos, next_page_token = model.modelutil.paginate( @@ -1068,7 +1119,9 @@ class StarredRepositoryList(ApiResource): @validate_json_request("NewStarredRepository") @require_user_admin def post(self): - """ Star a repository. """ + """ + Star a repository. + """ user = get_authenticated_user() req = request.get_json() namespace = req["namespace"] @@ -1087,12 +1140,16 @@ class StarredRepositoryList(ApiResource): @resource("/v1/user/starred/") @path_param("repository", "The full path of the repository. e.g. namespace/name") class StarredRepository(RepositoryParamResource): - """ Operations for managing a specific starred repository. """ + """ + Operations for managing a specific starred repository. + """ @nickname("deleteStar") @require_user_admin def delete(self, namespace, repository): - """ Removes a star from a repository. """ + """ + Removes a star from a repository. + """ user = get_authenticated_user() repo = model.repository.get_repository(namespace, repository) @@ -1103,11 +1160,15 @@ class StarredRepository(RepositoryParamResource): @resource("/v1/users/") class Users(ApiResource): - """ Operations related to retrieving information about other users. """ + """ + Operations related to retrieving information about other users. + """ @nickname("getUserInformation") def get(self, username): - """ Get user information for the specified user. """ + """ + Get user information for the specified user. 
+ """ user = model.user.get_nonrobot_user(username) if user is None: abort(404) diff --git a/endpoints/appr/cnr_backend.py b/endpoints/appr/cnr_backend.py index cd1aef4e5..ca414797c 100644 --- a/endpoints/appr/cnr_backend.py +++ b/endpoints/appr/cnr_backend.py @@ -42,14 +42,18 @@ class Blob(BlobBase): class Channel(ChannelBase): - """ CNR Channel model implemented against the Quay data model. """ + """ + CNR Channel model implemented against the Quay data model. + """ def __init__(self, name, package, current=None): super(Channel, self).__init__(name, package, current=current) self._channel_data = None def _exists(self): - """ Check if the channel is saved already """ + """ + Check if the channel is saved already. + """ return model.channel_exists(self.package, self.name) @classmethod @@ -74,7 +78,9 @@ class Channel(ChannelBase): return self._channel_data def releases(self): - """ Returns the list of versions """ + """ + Returns the list of versions. + """ return self._channel.releases def _add_release(self, release): @@ -85,16 +91,22 @@ class Channel(ChannelBase): class User(object): - """ User in CNR models """ + """ + User in CNR models. + """ @classmethod def get_user(cls, username, password): - """ Returns True if user creds is valid """ + """ + Returns True if user creds is valid. + """ return model.get_user(username, password) class Package(PackageBase): - """ CNR Package model implemented against the Quay data model. """ + """ + CNR Package model implemented against the Quay data model. + """ @classmethod def _apptuple_to_dict(cls, apptuple): @@ -166,7 +178,9 @@ class Package(PackageBase): class QuayDB(CnrDB): - """ Wrapper Class to embed all CNR Models """ + """ + Wrapper Class to embed all CNR Models. + """ Channel = Channel Package = Package diff --git a/endpoints/appr/models_cnr.py b/endpoints/appr/models_cnr.py index 5b7da686f..c0cd3bf17 100644 --- a/endpoints/appr/models_cnr.py +++ b/endpoints/appr/models_cnr.py @@ -37,12 +37,16 @@ def _strip_sha256_header(digest): def _split_package_name(package): - """ Returns the namespace and package-name """ + """ + Returns the namespace and package-name. + """ return package.split("/") def _join_package_name(ns, name): - """ Returns a app-name in the 'namespace/name' format """ + """ + Returns a app-name in the 'namespace/name' format. + """ return "%s/%s" % (ns, name) @@ -100,9 +104,10 @@ class CNRAppModel(AppRegistryDataInterface): def list_applications( self, namespace=None, media_type=None, search=None, username=None, with_channels=False ): - """ Lists all repositories that contain applications, with optional filtering to a specific + """ + Lists all repositories that contain applications, with optional filtering to a specific namespace and view a specific user. - """ + """ limit = app.config.get("APP_REGISTRY_RESULTS_LIMIT", 50) views = [] for repo in appr_model.package.list_packages_query( @@ -147,7 +152,9 @@ class CNRAppModel(AppRegistryDataInterface): return data.model.repository.repository_is_public(namespace, name) def create_application(self, package_name, visibility, owner): - """ Create a new app repository, owner is the user who creates it """ + """ + Create a new app repository, owner is the user who creates it. 
+ """ if self.is_readonly: raise ReadOnlyException("Currently in read-only mode") @@ -155,7 +162,9 @@ class CNRAppModel(AppRegistryDataInterface): data.model.repository.create_repository(ns, name, owner, visibility, "application") def application_exists(self, package_name): - """ Create a new app repository, owner is the user who creates it """ + """ + Create a new app repository, owner is the user who creates it. + """ ns, name = _split_package_name(package_name) return data.model.repository.get_repository(ns, name, kind_filter="application") is not None @@ -189,11 +198,12 @@ class CNRAppModel(AppRegistryDataInterface): ) def list_manifests(self, package_name, release=None): - """ Returns the list of all manifests of an Application. + """ + Returns the list of all manifests of an Application. Todo: * Paginate - """ + """ try: repo = _application(package_name) return list(appr_model.manifest.get_manifest_types(repo, self.models_ref, release)) @@ -202,8 +212,8 @@ class CNRAppModel(AppRegistryDataInterface): def fetch_release(self, package_name, release, media_type): """ - Retrieves an AppRelease from it's repository-name and release-name - """ + Retrieves an AppRelease from it's repository-name and release-name. + """ repo = _application(package_name) try: tag, manifest, blob = appr_model.release.get_app_release( @@ -254,9 +264,9 @@ class CNRAppModel(AppRegistryDataInterface): ) def create_release(self, package, user, visibility, force=False): - """ Add an app-release to a repository - package is an instance of data.cnr.package.Package - """ + """ + Add an app-release to a repository package is an instance of data.cnr.package.Package. + """ if self.is_readonly: raise ReadOnlyException("Currently in read-only mode") @@ -276,9 +286,11 @@ class CNRAppModel(AppRegistryDataInterface): ) def delete_release(self, package_name, release, media_type): - """ Remove/Delete an app-release from an app-repository. + """ + Remove/Delete an app-release from an app-repository. + It does not delete the entire app-repository, only a single release - """ + """ if self.is_readonly: raise ReadOnlyException("Currently in read-only mode") @@ -293,12 +305,15 @@ class CNRAppModel(AppRegistryDataInterface): raise_package_not_found(package_name, release, media_type) def release_exists(self, package, release): - """ Return true if a release with that name already exist or - have existed (include deleted ones) """ + """ + Return true if a release with that name already exist or have existed (include deleted ones) + """ # TODO: Figure out why this isn't implemented. def channel_exists(self, package_name, channel_name): - """ Returns true if channel exists """ + """ + Returns true if channel exists. + """ repo = _application(package_name) return appr_model.tag.tag_exists(repo, channel_name, self.models_ref, "channel") @@ -317,13 +332,17 @@ class CNRAppModel(AppRegistryDataInterface): raise_channel_not_found(package_name, channel_name) def list_channels(self, package_name): - """ Returns all AppChannel for a package """ + """ + Returns all AppChannel for a package. + """ repo = _application(package_name) channels = appr_model.channel.get_repo_channels(repo, self.models_ref) return [ChannelView(name=chan.name, current=chan.linked_tag.name) for chan in channels] def fetch_channel(self, package_name, channel_name, with_releases=True): - """ Returns an AppChannel """ + """ + Returns an AppChannel. 
+ """ repo = _application(package_name) try: diff --git a/endpoints/appr/models_interface.py b/endpoints/appr/models_interface.py index d326188f6..808ff9f57 100644 --- a/endpoints/appr/models_interface.py +++ b/endpoints/appr/models_interface.py @@ -5,20 +5,25 @@ from six import add_metaclass class BlobDescriptor(namedtuple("Blob", ["mediaType", "size", "digest", "urls"])): - """ BlobDescriptor describes a blob with its mediatype, size and digest. - A BlobDescriptor is used to retrieves the actual blob. - """ + """ + BlobDescriptor describes a blob with its mediatype, size and digest. + + A BlobDescriptor is used to retrieves the actual blob. + """ class ChannelReleasesView(namedtuple("ChannelReleasesView", ["name", "current", "releases"])): - """ A channel is a pointer to a Release (current). - Releases are the previous tags pointed by channel (history). - """ + """ + A channel is a pointer to a Release (current). + + Releases are the previous tags pointed by channel (history). + """ class ChannelView(namedtuple("ChannelView", ["name", "current"])): - """ A channel is a pointer to a Release (current). - """ + """ + A channel is a pointer to a Release (current). + """ class ApplicationSummaryView( @@ -37,54 +42,64 @@ class ApplicationSummaryView( ], ) ): - """ ApplicationSummaryView is an aggregated view of an application repository. - """ + """ + ApplicationSummaryView is an aggregated view of an application repository. + """ class ApplicationManifest(namedtuple("ApplicationManifest", ["mediaType", "digest", "content"])): - """ ApplicationManifest embed the BlobDescriptor and some metadata around it. - An ApplicationManifest is content-addressable. - """ + """ + ApplicationManifest embed the BlobDescriptor and some metadata around it. + + An ApplicationManifest is content-addressable. + """ class ApplicationRelease( namedtuple("ApplicationRelease", ["release", "name", "created_at", "manifest"]) ): - """ The ApplicationRelease associates an ApplicationManifest to a repository and release. - """ + """ + The ApplicationRelease associates an ApplicationManifest to a repository and release. + """ @add_metaclass(ABCMeta) class AppRegistryDataInterface(object): - """ Interface that represents all data store interactions required by a App Registry. - """ + """ + Interface that represents all data store interactions required by a App Registry. + """ @abstractmethod def list_applications( self, namespace=None, media_type=None, search=None, username=None, with_channels=False ): - """ Lists all repositories that contain applications, with optional filtering to a specific + """ + Lists all repositories that contain applications, with optional filtering to a specific namespace and/or to those visible to a specific user. - Returns: list of ApplicationSummaryView - """ + Returns: list of ApplicationSummaryView + """ pass @abstractmethod def application_is_public(self, package_name): """ - Returns true if the application is public - """ + Returns true if the application is public. + """ pass @abstractmethod def create_application(self, package_name, visibility, owner): - """ Create a new app repository, owner is the user who creates it """ + """ + Create a new app repository, owner is the user who creates it. + """ pass @abstractmethod def application_exists(self, package_name): - """ Returns true if the application exists """ + """ + Returns true if the application exists. 
+ """ pass @abstractmethod @@ -108,81 +123,95 @@ class AppRegistryDataInterface(object): # @TODO: Paginate @abstractmethod def list_manifests(self, package_name, release=None): - """ Returns the list of all available manifests type of an Application across all releases or - for a specific one. + """ + Returns the list of all available manifests type of an Application across all releases or + for a specific one. Example: >>> get_app_releases('ant31/rocketchat') ['1.7.1', '1.7.0', '1.7.2'] - """ + """ pass @abstractmethod def fetch_release(self, package_name, release, media_type): """ - Returns an ApplicationRelease - """ + Returns an ApplicationRelease. + """ pass @abstractmethod def store_blob(self, cnrblob, content_media_type): """ - Upload the blob content to a storage location and creates a Blob entry in the DB. + Upload the blob content to a storage location and creates a Blob entry in the DB. - Returns a BlobDescriptor - """ + Returns a BlobDescriptor + """ pass @abstractmethod def create_release(self, package, user, visibility, force=False): - """ Creates and returns an ApplicationRelease - - package is a data.model.Package object - - user is the owner of the package - - visibility is a string: 'public' or 'private' - """ + """ + Creates and returns an ApplicationRelease. + + - package is a data.model.Package object + - user is the owner of the package + - visibility is a string: 'public' or 'private' + """ pass @abstractmethod def release_exists(self, package, release): - """ Return true if a release with that name already exist or - has existed (including deleted ones) - """ + """ + Return true if a release with that name already exist or has existed (including deleted + ones) + """ pass @abstractmethod def delete_release(self, package_name, release, media_type): - """ Remove/Delete an app-release from an app-repository. + """ + Remove/Delete an app-release from an app-repository. + It does not delete the entire app-repository, only a single release - """ + """ pass @abstractmethod def list_release_channels(self, package_name, release, active=True): - """ Returns a list of Channel that are/was pointing to a release. + """ + Returns a list of Channel that are/was pointing to a release. + If active is True, returns only active Channel (lifetime_end not null) - """ + """ pass @abstractmethod def channel_exists(self, package_name, channel_name): - """ Returns true if the channel with the given name exists under the matching package """ + """ + Returns true if the channel with the given name exists under the matching package. + """ pass @abstractmethod def update_channel(self, package_name, channel_name, release): - """ Append a new release to the Channel - Returns a new Channel with the release as current - """ + """ + Append a new release to the Channel Returns a new Channel with the release as current. + """ pass @abstractmethod def delete_channel(self, package_name, channel_name): - """ Delete a Channel, it doesn't delete/touch the ApplicationRelease pointed by the channel """ + """ + Delete a Channel, it doesn't delete/touch the ApplicationRelease pointed by the channel. + """ # @TODO: Paginate @abstractmethod def list_channels(self, package_name): - """ Returns all AppChannel for a package """ + """ + Returns all AppChannel for a package. + """ pass @abstractmethod @@ -202,10 +231,14 @@ class AppRegistryDataInterface(object): analytics_sample=1, **kwargs ): - """ Logs an action to the audit log. """ + """ + Logs an action to the audit log. 
+ """ pass @abstractmethod def get_blob_locations(self, digest): - """ Returns a list of strings for the locations in which a Blob is present. """ + """ + Returns a list of strings for the locations in which a Blob is present. + """ pass diff --git a/endpoints/appr/test/test_api.py b/endpoints/appr/test/test_api.py index b55519885..8802e4b07 100644 --- a/endpoints/appr/test/test_api.py +++ b/endpoints/appr/test/test_api.py @@ -107,7 +107,9 @@ class TestServerQuayDB(BaseTestServer): @pytest.mark.xfail def test_push_package_already_exists_force(self, db_with_data1, package_b64blob, client): - """ No force push implemented """ + """ + No force push implemented. + """ BaseTestServer.test_push_package_already_exists_force( self, db_with_data1, package_b64blob, client ) @@ -126,7 +128,9 @@ class TestQuayModels(CnrTestModels): @pytest.mark.xfail def test_channel_delete_releases(self, db_with_data1): - """ Can't remove a release from the channel, only delete the channel entirely """ + """ + Can't remove a release from the channel, only delete the channel entirely. + """ CnrTestModels.test_channel_delete_releases(self, db_with_data1) @pytest.mark.xfail diff --git a/endpoints/building.py b/endpoints/building.py index 6cd79b649..c9975065b 100644 --- a/endpoints/building.py +++ b/endpoints/building.py @@ -20,14 +20,18 @@ logger = logging.getLogger(__name__) class MaximumBuildsQueuedException(Exception): - """ This exception is raised when a build is requested, but the incoming build - would exceed the configured maximum build rate. """ + """ + This exception is raised when a build is requested, but the incoming build would exceed the + configured maximum build rate. + """ pass class BuildTriggerDisabledException(Exception): - """ This exception is raised when a build is required, but the build trigger has been disabled. """ + """ + This exception is raised when a build is required, but the build trigger has been disabled. + """ pass @@ -147,9 +151,11 @@ def start_build(repository, prepared_build, pull_robot_name=None): class PreparedBuild(object): - """ Class which holds all the information about a prepared build. The build queuing service - will use this result to actually invoke the build. - """ + """ + Class which holds all the information about a prepared build. + + The build queuing service will use this result to actually invoke the build. + """ def __init__(self, trigger=None): self._dockerfile_id = None diff --git a/endpoints/common.py b/endpoints/common.py index 0002d8ac9..492061e75 100644 --- a/endpoints/common.py +++ b/endpoints/common.py @@ -30,9 +30,11 @@ JS_BUNDLE_NAME = "bundle" def common_login(user_uuid, permanent_session=True): - """ Performs login of the given user, with optional non-permanence on the session. - Returns a tuple with (success, headers to set on success). - """ + """ + Performs login of the given user, with optional non-permanence on the session. + + Returns a tuple with (success, headers to set on success). + """ user = model.get_user(user_uuid) if user is None: return (False, None) @@ -70,7 +72,9 @@ def common_login(user_uuid, permanent_session=True): def _list_files(path, extension, contains=""): - """ Returns a list of all the files with the given extension found under the given path. """ + """ + Returns a list of all the files with the given extension found under the given path. + """ def matches(f): return os.path.splitext(f)[1] == "." 
+ extension and contains in os.path.splitext(f)[0] @@ -87,7 +91,9 @@ FONT_AWESOME_5 = "use.fontawesome.com/releases/v5.0.4/css/all.css" def render_page_template(name, route_data=None, **kwargs): - """ Renders the page template with the given name as the response and returns its contents. """ + """ + Renders the page template with the given name as the response and returns its contents. + """ main_scripts = _list_files("build", "js", JS_BUNDLE_NAME) use_cdn = app.config.get("USE_CDN", True) diff --git a/endpoints/common_models_interface.py b/endpoints/common_models_interface.py index 44a78cf32..d745c8887 100644 --- a/endpoints/common_models_interface.py +++ b/endpoints/common_models_interface.py @@ -9,24 +9,24 @@ USER_FIELDS = ["uuid", "username", "email", "given_name", "family_name", "compan class User(namedtuple("User", USER_FIELDS)): """ - User represents a user. - """ + User represents a user. + """ @add_metaclass(ABCMeta) class EndpointsCommonDataInterface(object): """ - Interface that represents all data store interactions required by the common endpoints lib. - """ + Interface that represents all data store interactions required by the common endpoints lib. + """ @abstractmethod def get_user(self, user_uuid): """ - Returns the User matching the given uuid, if any or None if none. - """ + Returns the User matching the given uuid, if any or None if none. + """ @abstractmethod def get_namespace_uuid(self, namespace_name): """ - Returns the uuid of the Namespace with the given name, if any. - """ + Returns the uuid of the Namespace with the given name, if any. + """ diff --git a/endpoints/csrf.py b/endpoints/csrf.py index 9c861adb4..b96d312ac 100644 --- a/endpoints/csrf.py +++ b/endpoints/csrf.py @@ -23,9 +23,12 @@ QUAY_CSRF_UPDATED_HEADER_NAME = "X-Next-CSRF-Token" def generate_csrf_token(session_token_name=_QUAY_CSRF_TOKEN_NAME, force=False): - """ If not present in the session, generates a new CSRF token with the given name - and places it into the session. Returns the generated token. - """ + """ + If not present in the session, generates a new CSRF token with the given name and places it into + the session. + + Returns the generated token. + """ if session_token_name not in session or force: session[session_token_name] = base64.b64encode(os.urandom(48)) @@ -37,9 +40,10 @@ def verify_csrf( request_token_name=_QUAY_CSRF_TOKEN_NAME, check_header=True, ): - """ Verifies that the CSRF token with the given name is found in the session and - that the matching token is found in the request args or values. - """ + """ + Verifies that the CSRF token with the given name is found in the session and that the matching + token is found in the request args or values. + """ token = str(session.get(session_token_name, "")) found_token = str(request.values.get(request_token_name, "")) if check_header and not found_token: diff --git a/endpoints/decorators.py b/endpoints/decorators.py index 79a988e96..21dd84941 100644 --- a/endpoints/decorators.py +++ b/endpoints/decorators.py @@ -1,4 +1,6 @@ -""" Various decorators for endpoint and API handlers. """ +""" +Various decorators for endpoint and API handlers. +""" import os import logging @@ -29,9 +31,10 @@ def parse_repository_name( tag_kwarg_name="tag_name", incoming_repo_kwarg="repository", ): - """ Decorator which parses the repository name found in the incoming_repo_kwarg argument, - and applies its pieces to the decorated function. 
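A minimal sketch of the CSRF round-trip these helpers implement, assuming a Flask app with a configured secret key; the query-parameter name is illustrative, since only the _QUAY_CSRF_TOKEN_NAME constant (not its value) is visible here:

from flask import Flask
from endpoints.csrf import generate_csrf_token

app = Flask(__name__)
app.secret_key = "test-only-secret"  # assumption: any configured secret key

with app.test_request_context("/"):
    token = generate_csrf_token()  # stores a fresh token in the session
    # A legitimate client echoes the token back on its next request (e.g.
    # as a _csrf_token value); verify_csrf() then compares that value
    # against the session copy and rejects the request on mismatch.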
- """ + """ + Decorator which parses the repository name found in the incoming_repo_kwarg argument, and + applies its pieces to the decorated function. + """ def inner(func): @wraps(func) @@ -59,10 +62,12 @@ def parse_repository_name( def param_required(param_name, allow_body=False): - """ Marks a route as requiring a parameter with the given name to exist in the request's arguments - or (if allow_body=True) in its body values. If the parameter is not present, the request will - fail with a 400. - """ + """ + Marks a route as requiring a parameter with the given name to exist in the request's arguments + or (if allow_body=True) in its body values. + + If the parameter is not present, the request will fail with a 400. + """ def wrapper(wrapped): @wraps(wrapped) @@ -78,27 +83,35 @@ def param_required(param_name, allow_body=False): def readonly_call_allowed(func): - """ Marks a method as allowing for invocation when the registry is in a read only state. - Only necessary on non-GET methods. - """ + """ + Marks a method as allowing for invocation when the registry is in a read only state. + + Only necessary on non-GET methods. + """ func.__readonly_call_allowed = True return func def anon_allowed(func): - """ Marks a method to allow anonymous access where it would otherwise be disallowed. """ + """ + Marks a method to allow anonymous access where it would otherwise be disallowed. + """ func.__anon_allowed = True return func def anon_protect(func): - """ Marks a method as requiring some form of valid user auth before it can be executed. """ + """ + Marks a method as requiring some form of valid user auth before it can be executed. + """ func.__anon_protected = True return check_anon_protection(func) def check_anon_protection(func): - """ Validates a method as requiring some form of valid user auth before it can be executed. """ + """ + Validates a method as requiring some form of valid user auth before it can be executed. + """ @wraps(func) def wrapper(*args, **kwargs): @@ -116,9 +129,10 @@ def check_anon_protection(func): def check_readonly(func): - """ Validates that a non-GET method is not invoked when the registry is in read-only mode, - unless explicitly marked as being allowed. - """ + """ + Validates that a non-GET method is not invoked when the registry is in read-only mode, unless + explicitly marked as being allowed. + """ @wraps(func) def wrapper(*args, **kwargs): @@ -140,7 +154,9 @@ def check_readonly(func): def route_show_if(value): - """ Adds/shows the decorated route if the given value is True. """ + """ + Adds/shows the decorated route if the given value is True. + """ def decorator(f): @wraps(f) @@ -156,9 +172,10 @@ def route_show_if(value): def require_xhr_from_browser(func): - """ Requires that API GET calls made from browsers are made via XHR, in order to prevent - reflected text attacks. - """ + """ + Requires that API GET calls made from browsers are made via XHR, in order to prevent reflected + text attacks. + """ @wraps(func) def wrapper(*args, **kwargs): @@ -183,10 +200,12 @@ def require_xhr_from_browser(func): def check_region_blacklisted(error_class=None, namespace_name_kwarg=None): - """ Decorator which checks if the incoming request is from a region geo IP blocked - for the current namespace. The first argument to the wrapped function must be - the namespace name. - """ + """ + Decorator which checks if the incoming request is from a region geo IP blocked for the current + namespace. + + The first argument to the wrapped function must be the namespace name. 
+ """ def wrapper(wrapped): @wraps(wrapped) @@ -226,12 +245,12 @@ def check_repository_state(f): @wraps(f) def wrapper(namespace_name, repo_name, *args, **kwargs): """ - Conditionally allow changes depending on the Repository's state. - NORMAL -> Pass - READ_ONLY -> Block all POST/PUT/DELETE - MIRROR -> Same as READ_ONLY, except treat the Mirroring Robot User as Normal - MARKED_FOR_DELETION -> Block everything as a 404 - """ + Conditionally allow changes depending on the Repository's state. + + NORMAL -> Pass READ_ONLY -> Block all POST/PUT/DELETE MIRROR -> Same as READ_ONLY, + except treat the Mirroring Robot User as Normal MARKED_FOR_DELETION -> Block everything as a + 404 + """ user = get_authenticated_user() if user is None: # NOTE: Remaining auth checks will be handled by subsequent decorators. diff --git a/endpoints/exception.py b/endpoints/exception.py index 20eb39e8c..158c4f759 100644 --- a/endpoints/exception.py +++ b/endpoints/exception.py @@ -35,26 +35,26 @@ ERROR_DESCRIPTION = { class ApiException(HTTPException): """ - Represents an error in the application/problem+json format. + Represents an error in the application/problem+json format. - See: https://tools.ietf.org/html/rfc7807 + See: https://tools.ietf.org/html/rfc7807 - - "type" (string) - A URI reference that identifies the - problem type. + - "type" (string) - A URI reference that identifies the + problem type. - - "title" (string) - A short, human-readable summary of the problem - type. It SHOULD NOT change from occurrence to occurrence of the - problem, except for purposes of localization + - "title" (string) - A short, human-readable summary of the problem + type. It SHOULD NOT change from occurrence to occurrence of the + problem, except for purposes of localization - - "status" (number) - The HTTP status code + - "status" (number) - The HTTP status code - - "detail" (string) - A human-readable explanation specific to this - occurrence of the problem. + - "detail" (string) - A human-readable explanation specific to this + occurrence of the problem. - - "instance" (string) - A URI reference that identifies the specific - occurrence of the problem. It may or may not yield further - information if dereferenced. - """ + - "instance" (string) - A URI reference that identifies the specific + occurrence of the problem. It may or may not yield further + information if dereferenced. + """ def __init__(self, error_type, status_code, error_description, payload=None): Exception.__init__(self) diff --git a/endpoints/keyserver/models_interface.py b/endpoints/keyserver/models_interface.py index d4cb1cd2e..d50117e25 100644 --- a/endpoints/keyserver/models_interface.py +++ b/endpoints/keyserver/models_interface.py @@ -21,9 +21,9 @@ class ServiceKey( ) ): """ - Service Key represents a public key (JWK) being used by an instance of a particular service to - authenticate with other services. - """ + Service Key represents a public key (JWK) being used by an instance of a particular service to + authenticate with other services. + """ pass @@ -39,21 +39,21 @@ class ServiceKeyDoesNotExist(ServiceKeyException): @add_metaclass(ABCMeta) class KeyServerDataInterface(object): """ - Interface that represents all data store interactions required by a JWT key service. - """ + Interface that represents all data store interactions required by a JWT key service. + """ @abstractmethod def list_service_keys(self, service): """ - Returns a list of service keys or an empty list if the service does not exist. 
- """ + Returns a list of service keys or an empty list if the service does not exist. + """ pass @abstractmethod def get_service_key(self, signer_kid, service=None, alive_only=None, approved_only=None): """ - Returns a service kid with the given kid or raises ServiceKeyNotFound. - """ + Returns a service kid with the given kid or raises ServiceKeyNotFound. + """ pass @abstractmethod @@ -61,20 +61,20 @@ class KeyServerDataInterface(object): self, name, kid, service, jwk, metadata, expiration_date, rotation_duration=None ): """ - Stores a service key. - """ + Stores a service key. + """ pass @abstractmethod def replace_service_key(self, old_kid, kid, jwk, metadata, expiration_date): """ - Replaces a service with a new key or raises ServiceKeyNotFound. - """ + Replaces a service with a new key or raises ServiceKeyNotFound. + """ pass @abstractmethod def delete_service_key(self, kid): """ - Deletes and returns a service key with the given kid or raises ServiceKeyNotFound. - """ + Deletes and returns a service key with the given kid or raises ServiceKeyNotFound. + """ pass diff --git a/endpoints/keyserver/models_pre_oci.py b/endpoints/keyserver/models_pre_oci.py index 3c1d33952..fed72b794 100644 --- a/endpoints/keyserver/models_pre_oci.py +++ b/endpoints/keyserver/models_pre_oci.py @@ -9,9 +9,9 @@ from endpoints.keyserver.models_interface import ( class PreOCIModel(KeyServerDataInterface): """ - PreOCIModel implements the data model for JWT key service using a database schema before it was - changed to support the OCI specification. - """ + PreOCIModel implements the data model for JWT key service using a database schema before it was + changed to support the OCI specification. + """ def list_service_keys(self, service): return data.model.service_keys.list_service_keys(service) @@ -54,8 +54,8 @@ pre_oci_model = PreOCIModel() def _db_key_to_servicekey(key): """ - Converts the Pre-OCI database model of a service key into a ServiceKey. - """ + Converts the Pre-OCI database model of a service key into a ServiceKey. + """ return ServiceKey( name=key.name, kid=key.kid, diff --git a/endpoints/oauth/login.py b/endpoints/oauth/login.py index 674dddf66..2f61cf54f 100644 --- a/endpoints/oauth/login.py +++ b/endpoints/oauth/login.py @@ -59,8 +59,10 @@ def _get_response(result): def _conduct_oauth_login( auth_system, login_service, lid, lusername, lemail, metadata=None, captcha_verified=False ): - """ Conducts login from the result of an OAuth service's login flow and returns - the status of the login, as well as the followup step. """ + """ + Conducts login from the result of an OAuth service's login flow and returns the status of the + login, as well as the followup step. + """ service_id = login_service.service_id() service_name = login_service.service_name() @@ -159,7 +161,9 @@ def _conduct_oauth_login( def _render_ologin_error(service_name, error_message=None, register_redirect=False): - """ Returns a Flask response indicating an OAuth error. """ + """ + Returns a Flask response indicating an OAuth error. + """ user_creation = bool( features.USER_CREATION and features.DIRECT_LOGIN and not features.INVITE_ONLY_USER_CREATION @@ -179,8 +183,9 @@ def _render_ologin_error(service_name, error_message=None, register_redirect=Fal def _perform_login(user_obj, service_name): - """ Attempts to login the given user, returning the Flask result of whether the login succeeded. - """ + """ + Attempts to login the given user, returning the Flask result of whether the login succeeded. 
+ """ success, _ = common_login(user_obj.uuid) if success: if model.user.has_user_prompts(user_obj): @@ -192,9 +197,10 @@ def _perform_login(user_obj, service_name): def _attach_service(login_service, user_obj, lid, lusername): - """ Attaches the given user account to the given service, with the given service user ID and - service username. - """ + """ + Attaches the given user account to the given service, with the given service user ID and service + username. + """ metadata = { "service_username": lusername, } @@ -214,7 +220,9 @@ def _attach_service(login_service, user_obj, lid, lusername): def _register_service(login_service): - """ Registers the given login service, adding its callback and attach routes to the blueprint. """ + """ + Registers the given login service, adding its callback and attach routes to the blueprint. + """ @oauthlogin_csrf_protect def callback_func(): diff --git a/endpoints/test/shared.py b/endpoints/test/shared.py index c1c438f73..8431bc49e 100644 --- a/endpoints/test/shared.py +++ b/endpoints/test/shared.py @@ -31,7 +31,9 @@ def client_with_identity(auth_username, client): @contextmanager def toggle_feature(name, enabled): - """ Context manager which temporarily toggles a feature. """ + """ + Context manager which temporarily toggles a feature. + """ import features previous_value = getattr(features, name) @@ -41,7 +43,9 @@ def toggle_feature(name, enabled): def add_csrf_param(client, params): - """ Returns a params dict with the CSRF parameter added. """ + """ + Returns a params dict with the CSRF parameter added. + """ params = params or {} with client.session_transaction() as sess: @@ -52,7 +56,9 @@ def add_csrf_param(client, params): def gen_basic_auth(username, password): - """ Generates a basic auth header. """ + """ + Generates a basic auth header. + """ return "Basic " + base64.b64encode("%s:%s" % (username, password)) @@ -67,7 +73,9 @@ def conduct_call( headers=None, raw_body=None, ): - """ Conducts a call to a Flask endpoint. """ + """ + Conducts a call to a Flask endpoint. + """ params = add_csrf_param(client, params) final_url = url_for(resource, **params) diff --git a/endpoints/v1/__init__.py b/endpoints/v1/__init__.py index 499982aba..9c83739f9 100644 --- a/endpoints/v1/__init__.py +++ b/endpoints/v1/__init__.py @@ -49,10 +49,12 @@ def handle_readonly(ex): def check_v1_push_enabled(namespace_name_kwarg="namespace_name"): - """ Decorator which checks if V1 push is enabled for the current namespace. The first argument - to the wrapped function must be the namespace name or there must be a kwarg with the - name `namespace_name`. - """ + """ + Decorator which checks if V1 push is enabled for the current namespace. + + The first argument to the wrapped function must be the namespace name or there must be a kwarg + with the name `namespace_name`. + """ def wrapper(wrapped): @wraps(wrapped) diff --git a/endpoints/v1/index.py b/endpoints/v1/index.py index 48edf04c3..c3656a813 100644 --- a/endpoints/v1/index.py +++ b/endpoints/v1/index.py @@ -390,7 +390,9 @@ def get_search(): def _conduct_repo_search(username, query, limit=25, page=1): - """ Finds matching repositories. """ + """ + Finds matching repositories. + """ # Note that we put a maximum limit of five pages here, because this API should only really ever # be used by the Docker CLI, and it doesn't even paginate. 
     page = min(page, 5)
diff --git a/endpoints/v1/registry.py b/endpoints/v1/registry.py
index 890fdcbcf..b807fec96 100644
--- a/endpoints/v1/registry.py
+++ b/endpoints/v1/registry.py
@@ -34,7 +34,9 @@ logger = logging.getLogger(__name__)

 def require_completion(f):
-    """ This make sure that the image push correctly finished. """
+    """
+    This makes sure that the image push finished correctly.
+    """

     @wraps(f)
     def wrapper(namespace, repository, *args, **kwargs):
@@ -56,7 +58,9 @@ def require_completion(f):

 def set_cache_headers(f):
-    """Returns HTTP headers suitable for caching."""
+    """
+    Returns HTTP headers suitable for caching.
+    """

     @wraps(f)
     def wrapper(*args, **kwargs):
diff --git a/endpoints/v2/__init__.py b/endpoints/v2/__init__.py
index 579408c84..9f8438d13 100644
--- a/endpoints/v2/__init__.py
+++ b/endpoints/v2/__init__.py
@@ -67,8 +67,8 @@ def paginate(
     callback_kwarg_name="pagination_callback",
 ):
     """
-    Decorates a handler adding a parsed pagination token and a callback to encode a response token.
-    """
+    Decorates a handler adding a parsed pagination token and a callback to encode a response token.
+    """

     def wrapper(func):
         @wraps(func)
diff --git a/endpoints/v2/blob.py b/endpoints/v2/blob.py
index 099ae5604..455212cc0 100644
--- a/endpoints/v2/blob.py
+++ b/endpoints/v2/blob.py
@@ -126,7 +126,9 @@ def download_blob(namespace_name, repo_name, digest):

 def _try_to_mount_blob(repository_ref, mount_blob_digest):
-    """ Attempts to mount a blob requested by the user from another repository. """
+    """
+    Attempts to mount a blob requested by the user from another repository.
+    """
     logger.debug("Got mount request for blob `%s` into `%s`", mount_blob_digest, repository_ref)
     from_repo = request.args.get("from", None)
     if from_repo is None:
@@ -406,8 +408,8 @@ def delete_digest(namespace_name, repo_name, digest):

 def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
     """
-    Returns a string formatted to be used in the Range header.
-    """
+    Returns a string formatted to be used in the Range header.
+    """
     return "{0}0-{1}".format("bytes=" if with_bytes_prefix else "", num_uploaded_bytes - 1)


@@ -417,11 +419,11 @@ def _current_request_url():

 def _abort_range_not_satisfiable(valid_end, upload_uuid):
     """
-    Writes a failure response for scenarios where the registry cannot function
-    with the provided range.
+    Writes a failure response for scenarios where the registry cannot function with the provided
+    range.

-    TODO: Unify this with the V2RegistryException class.
-    """
+    TODO: Unify this with the V2RegistryException class.
+    """
     flask_abort(
         Response(
             status=416,
@@ -436,11 +438,11 @@ def _abort_range_not_satisfiable(valid_end, upload_uuid):

 def _parse_range_header(range_header_text):
     """
-    Parses the range header.
+    Parses the range header.

-    Returns a tuple of the start offset and the length.
-    If the parse fails, raises _InvalidRangeHeader.
-    """
+    Returns a tuple of the start offset and the length. If the parse fails, raises
+    _InvalidRangeHeader.
+    """
     found = RANGE_HEADER_REGEX.match(range_header_text)
     if found is None:
         raise _InvalidRangeHeader()
@@ -456,10 +458,10 @@ def _parse_range_header(range_header_text):

 def _start_offset_and_length(range_header):
     """
-    Returns a tuple of the start offset and the length.
-    If the range header doesn't exist, defaults to (0, -1).
-    If parsing fails, returns (None, None).
-    """
+    Returns a tuple of the start offset and the length.
+
+    If the range header doesn't exist, defaults to (0, -1). If parsing fails, returns (None, None).
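To make the offset/length contract above concrete, here is a small standalone sketch of the same idea. The regex and the exclusive end bound are assumptions for illustration; the real RANGE_HEADER_REGEX and _parse_range_header in blob.py are authoritative:

    import re

    # Assumed header shape, e.g. "bytes=0-10485759"; the production regex may differ.
    RANGE_HEADER_REGEX = re.compile(r"^bytes=([0-9]+)-([0-9]+)$")

    def start_offset_and_length(range_header):
        # Mirrors the documented contract: (0, -1) when absent, (None, None) on bad input.
        if range_header is None:
            return 0, -1
        found = RANGE_HEADER_REGEX.match(range_header)
        if found is None:
            return None, None
        start_offset = int(found.group(1))
        length = int(found.group(2)) - start_offset  # treats the end bound as exclusive
        return start_offset, length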
+ """ start_offset, length = 0, -1 if range_header is not None: try: @@ -471,7 +473,9 @@ def _start_offset_and_length(range_header): def _upload_settings(): - """ Returns the settings for instantiating a blob upload manager. """ + """ + Returns the settings for instantiating a blob upload manager. + """ expiration_sec = app.config["PUSH_TEMP_TAG_EXPIRATION_SEC"] settings = BlobUploadSettings( maximum_blob_size=app.config["MAXIMUM_LAYER_SIZE"], @@ -482,10 +486,13 @@ def _upload_settings(): def _upload_chunk(blob_uploader, commit_digest=None): - """ Performs uploading of a chunk of data in the current request's stream, via the blob uploader - given. If commit_digest is specified, the upload is committed to a blob once the stream's - data has been read and stored. - """ + """ + Performs uploading of a chunk of data in the current request's stream, via the blob uploader + given. + + If commit_digest is specified, the upload is committed to a blob once the stream's data has been + read and stored. + """ start_offset, length = _start_offset_and_length(request.headers.get("range")) if None in {start_offset, length}: raise InvalidRequest(message="Invalid range header") diff --git a/endpoints/v2/manifest.py b/endpoints/v2/manifest.py index ab7e15e98..4b8438cc2 100644 --- a/endpoints/v2/manifest.py +++ b/endpoints/v2/manifest.py @@ -272,11 +272,11 @@ def _parse_manifest(): @check_readonly def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref): """ - Delete the manifest specified by the digest. + Delete the manifest specified by the digest. - Note: there is no equivalent method for deleting by tag name because it is - forbidden by the spec. - """ + Note: there is no equivalent method for deleting by tag name because it is + forbidden by the spec. + """ repository_ref = registry_model.lookup_repository(namespace_name, repo_name) if repository_ref is None: raise NameUnknown() diff --git a/endpoints/v2/test/test_manifest_cornercases.py b/endpoints/v2/test/test_manifest_cornercases.py index 6d30dab3e..86beb6b4f 100644 --- a/endpoints/v2/test/test_manifest_cornercases.py +++ b/endpoints/v2/test/test_manifest_cornercases.py @@ -32,27 +32,28 @@ def _perform_cleanup(): def test_missing_link(initialized_db): - """ Tests for a corner case that could result in missing a link to a blob referenced by a - manifest. The test exercises the case as follows: + """ + Tests for a corner case that could result in missing a link to a blob referenced by a manifest. + The test exercises the case as follows: - 1) Push a manifest of a single layer with a Docker ID `FIRST_ID`, pointing - to blob `FIRST_BLOB`. The database should contain the tag referencing the layer, with - no changed ID and the blob not being GCed. + 1) Push a manifest of a single layer with a Docker ID `FIRST_ID`, pointing + to blob `FIRST_BLOB`. The database should contain the tag referencing the layer, with + no changed ID and the blob not being GCed. - 2) Push a manifest of two layers: + 2) Push a manifest of two layers: - Layer 1: `FIRST_ID` with blob `SECOND_BLOB`: Will result in a new synthesized ID - Layer 2: `SECOND_ID` with blob `THIRD_BLOB`: Will result in `SECOND_ID` pointing to the - `THIRD_BLOB`, with a parent pointing to the new synthesized ID's layer. + Layer 1: `FIRST_ID` with blob `SECOND_BLOB`: Will result in a new synthesized ID + Layer 2: `SECOND_ID` with blob `THIRD_BLOB`: Will result in `SECOND_ID` pointing to the + `THIRD_BLOB`, with a parent pointing to the new synthesized ID's layer. 
- 3) Push a manifest of two layers: + 3) Push a manifest of two layers: - Layer 1: `THIRD_ID` with blob `FOURTH_BLOB`: Will result in a new `THIRD_ID` layer - Layer 2: `FIRST_ID` with blob `THIRD_BLOB`: Since `FIRST_ID` already points to `SECOND_BLOB`, - this will synthesize a new ID. With the current bug, the synthesized ID will match - that of `SECOND_ID`, leaving `THIRD_ID` unlinked and therefore, after a GC, missing - `FOURTH_BLOB`. - """ + Layer 1: `THIRD_ID` with blob `FOURTH_BLOB`: Will result in a new `THIRD_ID` layer + Layer 2: `FIRST_ID` with blob `THIRD_BLOB`: Since `FIRST_ID` already points to `SECOND_BLOB`, + this will synthesize a new ID. With the current bug, the synthesized ID will match + that of `SECOND_ID`, leaving `THIRD_ID` unlinked and therefore, after a GC, missing + `FOURTH_BLOB`. + """ with set_tag_expiration_policy("devtable", 0): location_name = storage.preferred_locations[0] location = database.ImageStorageLocation.get(name=location_name) diff --git a/endpoints/v2/v2auth.py b/endpoints/v2/v2auth.py index f36a83905..7df9ac621 100644 --- a/endpoints/v2/v2auth.py +++ b/endpoints/v2/v2auth.py @@ -56,9 +56,10 @@ scopeResult = namedtuple( @anon_protect def generate_registry_jwt(auth_result): """ - This endpoint will generate a JWT conforming to the Docker Registry v2 Auth Spec: - https://docs.docker.com/registry/spec/auth/token/ - """ + This endpoint will generate a JWT conforming to the Docker Registry v2 Auth Spec: + + https://docs.docker.com/registry/spec/auth/token/ + """ audience_param = request.args.get("service") logger.debug("Request audience: %s", audience_param) diff --git a/endpoints/verbs/__init__.py b/endpoints/verbs/__init__.py index 43888c5c5..b267bf94a 100644 --- a/endpoints/verbs/__init__.py +++ b/endpoints/verbs/__init__.py @@ -70,9 +70,10 @@ class VerbReporter(TarLayerFormatterReporter): def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, reporter): """ - This method generates a stream of data which will be replicated and read from the queue files. - This method runs in a separate process. - """ + This method generates a stream of data which will be replicated and read from the queue files. + + This method runs in a separate process. + """ # For performance reasons, we load the full image list here, cache it, then disconnect from # the database. with database.UseThenDisconnect(app.config): @@ -113,8 +114,11 @@ def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, r def _sign_derived_image(verb, derived_image, queue_file): - """ Read from the queue file and sign the contents which are generated. This method runs in a - separate process. """ + """ + Read from the queue file and sign the contents which are generated. + + This method runs in a separate process. + """ signature = None try: signature = signer.detached_sign(queue_file) @@ -132,9 +136,11 @@ def _sign_derived_image(verb, derived_image, queue_file): def _write_derived_image_to_storage( verb, derived_image, queue_file, namespace, repository, tag_name ): - """ Read from the generated stream and write it back to the storage engine. This method runs in a - separate process. - """ + """ + Read from the generated stream and write it back to the storage engine. + + This method runs in a separate process. + """ def handle_exception(ex): logger.debug( @@ -177,9 +183,11 @@ def _write_derived_image_to_storage( def _torrent_for_blob(blob, is_public): - """ Returns a response containing the torrent file contents for the given blob. 
May abort - with an error if the state is not valid (e.g. non-public, non-user request). - """ + """ + Returns a response containing the torrent file contents for the given blob. + + May abort with an error if the state is not valid (e.g. non-public, non-user request). + """ # Make sure the storage has a size. if not blob.compressed_size: abort(404) @@ -229,7 +237,9 @@ def _torrent_for_blob(blob, is_public): def _torrent_repo_verb(repository, tag, manifest, verb, **kwargs): - """ Handles returning a torrent for the given verb on the given image and tag. """ + """ + Handles returning a torrent for the given verb on the given image and tag. + """ if not features.BITTORRENT: # Torrent feature is not enabled. abort(406) @@ -539,7 +549,9 @@ def os_arch_checker(os, arch): def observe_route(protocol): - """ Decorates verb endpoints to record the image_pulls metric into Prometheus. """ + """ + Decorates verb endpoints to record the image_pulls metric into Prometheus. + """ def decorator(func): @wraps(func) diff --git a/health/healthcheck.py b/health/healthcheck.py index 67c1ca5c5..32ca3488c 100644 --- a/health/healthcheck.py +++ b/health/healthcheck.py @@ -11,7 +11,9 @@ logger = logging.getLogger(__name__) def get_healthchecker(app, config_provider, instance_keys): - """ Returns a HealthCheck instance for the given app. """ + """ + Returns a HealthCheck instance for the given app. + """ return HealthCheck.get_checker(app, config_provider, instance_keys) @@ -24,38 +26,41 @@ class HealthCheck(object): def check_warning(self): """ - Conducts a check on the warnings, returning a dict representing the HealthCheck - output and a number indicating the health check response code. - """ + Conducts a check on the warnings, returning a dict representing the HealthCheck output and a + number indicating the health check response code. + """ service_statuses = check_warning_services(self.app, []) return self.get_instance_health(service_statuses) def check_instance(self): """ - Conducts a check on this specific instance, returning a dict representing the HealthCheck - output and a number indicating the health check response code. - """ + Conducts a check on this specific instance, returning a dict representing the HealthCheck + output and a number indicating the health check response code. + """ service_statuses = check_all_services(self.app, self.instance_skips, for_instance=True) return self.get_instance_health(service_statuses) def check_endtoend(self): """ - Conducts a check on all services, returning a dict representing the HealthCheck - output and a number indicating the health check response code. - """ + Conducts a check on all services, returning a dict representing the HealthCheck output and a + number indicating the health check response code. + """ service_statuses = check_all_services(self.app, [], for_instance=False) return self.calculate_overall_health(service_statuses) def get_instance_health(self, service_statuses): """ - For the given service statuses, returns a dict representing the HealthCheck - output and a number indicating the health check response code. By default, - this simply ensures that all services are reporting as healthy. - """ + For the given service statuses, returns a dict representing the HealthCheck output and a + number indicating the health check response code. + + By default, this simply ensures that all services are reporting as healthy. 
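The aggregation described in these docstrings reduces to a conjunction over per-service results. A minimal sketch, assuming each check yields a (healthy, message) tuple keyed by service name and that 200/503 are the chosen response codes:

    def overall_health(service_statuses):
        # service_statuses: {"database": (True, "ok"), "redis": (False, "timeout"), ...}
        is_healthy = all(healthy for healthy, _ in service_statuses.values())
        data = {
            "services": {name: healthy for name, (healthy, _) in service_statuses.items()},
            "notes": [msg for healthy, msg in service_statuses.values() if msg and not healthy],
        }
        return data, 200 if is_healthy else 503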
+ """ return self.calculate_overall_health(service_statuses) def calculate_overall_health(self, service_statuses, skip=None, notes=None): - """ Returns true if and only if all the given service statuses report as healthy. """ + """ + Returns true if and only if all the given service statuses report as healthy. + """ is_healthy = True notes = notes or [] @@ -173,7 +178,9 @@ class RDSAwareHealthCheck(HealthCheck): return self.calculate_overall_health(service_statuses, skip=skip, notes=notes) def _get_rds_status(self): - """ Returns the status of the RDS instance as reported by AWS. """ + """ + Returns the status of the RDS instance as reported by AWS. + """ try: region = boto.rds2.connect_to_region( self.region, diff --git a/health/models_interface.py b/health/models_interface.py index a7fc87b2f..025b4cd92 100644 --- a/health/models_interface.py +++ b/health/models_interface.py @@ -5,10 +5,12 @@ from six import add_metaclass @add_metaclass(ABCMeta) class HealthCheckDataInterface(object): """ - Interface that represents all data store interactions required by health checks. - """ + Interface that represents all data store interactions required by health checks. + """ @abstractmethod def check_health(self, app_config): - """ Returns True if the connection to the database is healthy and False otherwise. """ + """ + Returns True if the connection to the database is healthy and False otherwise. + """ pass diff --git a/health/services.py b/health/services.py index 196e23400..7d831a3c4 100644 --- a/health/services.py +++ b/health/services.py @@ -34,7 +34,9 @@ def _compute_internal_endpoint(app, endpoint): def _check_gunicorn(endpoint): def fn(app): - """ Returns the status of the gunicorn workers. """ + """ + Returns the status of the gunicorn workers. + """ client = app.config["HTTPCLIENT"] registry_url = _compute_internal_endpoint(app, endpoint) try: @@ -50,7 +52,9 @@ def _check_gunicorn(endpoint): def _check_jwt_proxy(app): - """ Returns the status of JWT proxy in the container. """ + """ + Returns the status of JWT proxy in the container. + """ client = app.config["HTTPCLIENT"] registry_url = _compute_internal_endpoint(app, "secscan") try: @@ -66,17 +70,23 @@ def _check_jwt_proxy(app): def _check_database(app): - """ Returns the status of the database, as accessed from this instance. """ + """ + Returns the status of the database, as accessed from this instance. + """ return model.check_health(app.config) def _check_redis(app): - """ Returns the status of Redis, as accessed from this instance. """ + """ + Returns the status of Redis, as accessed from this instance. + """ return build_logs.check_health() def _check_storage(app): - """ Returns the status of storage, as accessed from this instance. """ + """ + Returns the status of storage, as accessed from this instance. + """ if app.config.get("REGISTRY_STATE", "normal") == "readonly": return (True, "Storage check disabled for readonly mode") @@ -89,14 +99,18 @@ def _check_storage(app): def _check_auth(app): - """ Returns the status of the auth engine, as accessed from this instance. """ + """ + Returns the status of the auth engine, as accessed from this instance. + """ return authentication.ping() def _check_service_key(app): - """ Returns the status of the service key for this instance. If the key has disappeared or - has expired, then will return False. - """ + """ + Returns the status of the service key for this instance. + + If the key has disappeared or has expired, then will return False. 
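The _check_* helpers above share an informal (healthy, message) return convention. A sketch of an additional check in the same style; the endpoint, port, and timeout are invented for illustration, and HTTPCLIENT is assumed to behave like a requests session:

    def _check_example_service(app):
        # Hypothetical check following the same (healthy, message) convention.
        client = app.config["HTTPCLIENT"]
        try:
            response = client.get("http://localhost:8080/status", timeout=2)  # assumed endpoint
            return (response.status_code == 200, "status code %s" % response.status_code)
        except Exception as ex:
            return (False, "exception when checking example service: %s" % ex)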
+    """
     if not app.config.get("SETUP_COMPLETE", False):
         return (True, "Stack not fully setup; skipping check")

@@ -125,9 +139,11 @@ def _disk_within_threshold(path, threshold):

 def _check_disk_space(for_warning):
     def _check_disk_space(app):
-        """ Returns the status of the disk space for this instance. If the available disk space is below
-        a certain threshold, then will return False.
-        """
+        """
+        Returns the status of the disk space for this instance.
+
+        If the available disk space is below a certain threshold, then will return False.
+        """
         if not app.config.get("SETUP_COMPLETE", False):
             return (True, "Stack not fully setup; skipping check")

@@ -181,7 +197,9 @@ _WARNING_SERVICES = {

 def check_all_services(app, skip, for_instance=False):
-    """ Returns a dictionary containing the status of all the services defined. """
+    """
+    Returns a dictionary containing the status of all the services defined.
+    """
     if for_instance:
         services = dict(_INSTANCE_SERVICES)
         services.update(_GLOBAL_SERVICES)
@@ -192,7 +210,9 @@ def check_all_services(app, skip, for_instance=False):

 def check_warning_services(app, skip):
-    """ Returns a dictionary containing the status of all the warning services defined. """
+    """
+    Returns a dictionary containing the status of all the warning services defined.
+    """
     return _check_services(app, skip, _WARNING_SERVICES)
diff --git a/image/appc/__init__.py b/image/appc/__init__.py
index 3a55d0aed..5db24538d 100644
--- a/image/appc/__init__.py
+++ b/image/appc/__init__.py
@@ -15,8 +15,8 @@ ACNAME_REGEX = re.compile(r"[^a-z-]+")

 class AppCImageFormatter(TarImageFormatter):
     """
-    Image formatter which produces an tarball according to the AppC specification.
-    """
+    Image formatter which produces a tarball according to the AppC specification.
+    """

     def stream_generator(
         self,
@@ -56,8 +56,8 @@ class DockerV1ToACIManifestTranslator(object):
     @staticmethod
     def _build_isolators(docker_config):
         """
-        Builds ACI isolator config from the docker config.
-        """
+        Builds ACI isolator config from the docker config.
+        """

         def _isolate_memory(memory):
             return {"name": "memory/limit", "value": {"request": str(memory) + "B",}}
@@ -91,13 +91,13 @@ class DockerV1ToACIManifestTranslator(object):
     @staticmethod
     def _build_ports(docker_config):
         """
-        Builds the ports definitions for the ACI.
+        Builds the ports definitions for the ACI.

-        Formats:
-        port/tcp
-        port/udp
-        port
-        """
+        Formats:
+        port/tcp
+        port/udp
+        port
+        """
         ports = []

         exposed_ports = docker_config["ExposedPorts"]
@@ -134,7 +134,9 @@ class DockerV1ToACIManifestTranslator(object):
     @staticmethod
     def _build_volumes(docker_config):
-        """ Builds the volumes definitions for the ACI. """
+        """
+        Builds the volumes definitions for the ACI.
+        """
         volumes = []

         def get_name(docker_volume_path):
@@ -157,7 +159,9 @@ class DockerV1ToACIManifestTranslator(object):
     @staticmethod
     def build_manifest(tag, manifest, synthetic_image_id):
-        """ Builds an ACI manifest of an existing repository image. """
+        """
+        Builds an ACI manifest of an existing repository image.
+        """
         docker_layer_data = JSONPathDict(json.loads(manifest.leaf_layer.raw_v1_metadata))
         config = docker_layer_data["config"] or JSONPathDict({})
diff --git a/image/common.py b/image/common.py
index 4363c320c..207a00cbd 100644
--- a/image/common.py
+++ b/image/common.py
@@ -4,8 +4,8 @@ from util.registry.gzipwrap import GzipWrap

 class TarImageFormatter(object):
     """
-    Base class for classes which produce a tar containing image and layer data.
- """ + Base class for classes which produce a tar containing image and layer data. + """ def build_stream( self, @@ -17,9 +17,9 @@ class TarImageFormatter(object): reporter=None, ): """ - Builds and streams a synthetic .tar.gz that represents the formatted tar created by this class's - implementation. - """ + Builds and streams a synthetic .tar.gz that represents the formatted tar created by this + class's implementation. + """ return GzipWrap( self.stream_generator( tag, @@ -44,8 +44,8 @@ class TarImageFormatter(object): def tar_file(self, name, contents, mtime=None): """ - Returns the tar binary representation for a file with the given name and file contents. - """ + Returns the tar binary representation for a file with the given name and file contents. + """ length = len(contents) tar_data = self.tar_file_header(name, length, mtime=mtime) tar_data += contents @@ -54,8 +54,8 @@ class TarImageFormatter(object): def tar_file_padding(self, length): """ - Returns tar file padding for file data of the given length. - """ + Returns tar file padding for file data of the given length. + """ if length % 512 != 0: return "\0" * (512 - (length % 512)) @@ -63,8 +63,8 @@ class TarImageFormatter(object): def tar_file_header(self, name, file_size, mtime=None): """ - Returns tar file header data for a file with the given name and size. - """ + Returns tar file header data for a file with the given name and size. + """ info = tarfile.TarInfo(name=name) info.type = tarfile.REGTYPE info.size = file_size @@ -75,8 +75,8 @@ class TarImageFormatter(object): def tar_folder(self, name, mtime=None): """ - Returns tar file header data for a folder with the given name. - """ + Returns tar file header data for a folder with the given name. + """ info = tarfile.TarInfo(name=name) info.type = tarfile.DIRTYPE diff --git a/image/docker/interfaces.py b/image/docker/interfaces.py index 9ab669965..661f840ae 100644 --- a/image/docker/interfaces.py +++ b/image/docker/interfaces.py @@ -4,149 +4,200 @@ from six import add_metaclass @add_metaclass(ABCMeta) class ManifestInterface(object): - """ Defines the interface for the various manifests types supported. """ + """ + Defines the interface for the various manifests types supported. + """ @abstractproperty def is_manifest_list(self): - """ Returns whether this manifest is a list. """ + """ + Returns whether this manifest is a list. + """ @abstractproperty def schema_version(self): - """ The version of the schema. """ + """ + The version of the schema. + """ @abstractproperty def digest(self): - """ The digest of the manifest, including type prefix. """ + """ + The digest of the manifest, including type prefix. + """ pass @abstractproperty def media_type(self): - """ The media type of the schema. """ + """ + The media type of the schema. + """ pass @abstractproperty def manifest_dict(self): - """ Returns the manifest as a dictionary ready to be serialized to JSON. """ + """ + Returns the manifest as a dictionary ready to be serialized to JSON. + """ pass @abstractproperty def bytes(self): - """ Returns the bytes of the manifest. """ + """ + Returns the bytes of the manifest. + """ pass @abstractproperty def layers_compressed_size(self): - """ Returns the total compressed size of all the layers in this manifest. Returns None if this - cannot be computed locally. - """ + """ + Returns the total compressed size of all the layers in this manifest. + + Returns None if this cannot be computed locally. 
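The 512-byte block alignment implemented by tar_file_padding in image/common.py above is the core invariant of the tar format; a quick self-contained check of the arithmetic (bytes are used here for clarity):

    def tar_file_padding(length):
        # Tar member data must be padded out to a 512-byte block boundary.
        if length % 512 != 0:
            return b"\0" * (512 - (length % 512))
        return b""

    assert len(tar_file_padding(100)) == 412   # 100 + 412 == 512
    assert len(tar_file_padding(512)) == 0     # already block-aligned
    assert len(tar_file_padding(1025)) == 511  # 1025 + 511 == 3 * 512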
+ """ @abstractmethod def validate(self, content_retriever): - """ Performs validation of required assertions about the manifest. Raises a ManifestException - on failure. - """ + """ + Performs validation of required assertions about the manifest. + + Raises a ManifestException on failure. + """ pass @abstractmethod def get_layers(self, content_retriever): - """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest - does not support layers. The layer must be of type ManifestImageLayer. """ + """ + Returns the layers of this manifest, from base to leaf or None if this kind of manifest does + not support layers. + + The layer must be of type ManifestImageLayer. + """ pass @abstractmethod def get_leaf_layer_v1_image_id(self, content_retriever): - """ Returns the Docker V1 image ID for the leaf (top) layer, if any, or None if - not applicable. """ + """ + Returns the Docker V1 image ID for the leaf (top) layer, if any, or None if not applicable. + """ pass @abstractmethod def get_legacy_image_ids(self, content_retriever): - """ Returns the Docker V1 image IDs for the layers of this manifest or None if not applicable. - """ + """ + Returns the Docker V1 image IDs for the layers of this manifest or None if not applicable. + """ pass @abstractproperty def blob_digests(self): - """ Returns an iterator over all the blob digests referenced by this manifest, - from base to leaf. The blob digests are strings with prefixes. For manifests that reference - config as a blob, the blob will be included here as the last entry. - """ + """ + Returns an iterator over all the blob digests referenced by this manifest, from base to + leaf. + + The blob digests are strings with prefixes. For manifests that reference config as a blob, + the blob will be included here as the last entry. + """ @abstractmethod def get_blob_digests_for_translation(self): - """ Returns the blob digests for translation of this manifest into another manifest. This - method will ignore missing IDs in layers, unlike `blob_digests`. - """ + """ + Returns the blob digests for translation of this manifest into another manifest. + + This method will ignore missing IDs in layers, unlike `blob_digests`. + """ @abstractproperty def local_blob_digests(self): - """ Returns an iterator over all the *non-remote* blob digests referenced by this manifest, - from base to leaf. The blob digests are strings with prefixes. For manifests that reference - config as a blob, the blob will be included here as the last entry. - """ + """ + Returns an iterator over all the *non-remote* blob digests referenced by this manifest, from + base to leaf. + + The blob digests are strings with prefixes. For manifests that reference config as a blob, + the blob will be included here as the last entry. + """ @abstractmethod def child_manifests(self, content_retriever): - """ Returns an iterator of all manifests that live under this manifest, if any or None if not + """ + Returns an iterator of all manifests that live under this manifest, if any or None if not applicable. - """ + """ @abstractmethod def get_manifest_labels(self, content_retriever): - """ Returns a dictionary of all the labels defined inside this manifest or None if this kind - of manifest does not support labels. """ + """ + Returns a dictionary of all the labels defined inside this manifest or None if this kind of + manifest does not support labels. 
+        """
         pass

     @abstractmethod
     def get_requires_empty_layer_blob(self, content_retriever):
-        """ Whether this schema requires the special empty layer blob. """
+        """
+        Whether this schema requires the special empty layer blob.
+        """
         pass

     @abstractmethod
     def unsigned(self):
-        """ Returns an unsigned version of this manifest. """
+        """
+        Returns an unsigned version of this manifest.
+        """

     @abstractproperty
     def has_legacy_image(self):
-        """ Returns True if this manifest has a legacy V1 image, or False if not. """
+        """
+        Returns True if this manifest has a legacy V1 image, or False if not.
+        """

     @abstractmethod
     def generate_legacy_layers(self, images_map, content_retriever):
         """
-        Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata, starting
-        at the base layer and working towards the leaf.
+        Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata, starting at the
+        base layer and working towards the leaf.

-        If Docker gives us a layer with a v1 image ID that already points to existing
-        content, but the checksums don't match, then we need to rewrite the image ID
-        to something new in order to ensure consistency.
+        If Docker gives us a layer with a v1 image ID that already points to existing
+        content, but the checksums don't match, then we need to rewrite the image ID
+        to something new in order to ensure consistency.

-        Returns None if there are no legacy images associated with the manifest.
-        """
+        Returns None if there are no legacy images associated with the manifest.
+        """

     @abstractmethod
     def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
-        """ Returns a schema1 version of the manifest. If this is a mainfest list, should return the
-        manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
-        If none, returns None.
-        """
+        """
+        Returns a schema1 version of the manifest.
+
+        If this is a manifest list, this should return the manifest that is compatible with V1, by
+        virtue of being `amd64` and `linux`. If none, returns None.
+        """

     @abstractmethod
     def convert_manifest(
         self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever
     ):
-        """ Returns a version of this schema that has a media type found in the given media type set.
+        """
+        Returns a version of this schema that has a media type found in the given media type set.
+
         If not possible, or an error occurs, returns None.
-        """
+        """


 @add_metaclass(ABCMeta)
 class ContentRetriever(object):
-    """ Defines the interface for retrieval of various content referenced by a manifest. """
+    """
+    Defines the interface for retrieval of various content referenced by a manifest.
+    """

     @abstractmethod
     def get_manifest_bytes_with_digest(self, digest):
-        """ Returns the bytes of the manifest with the given digest or None if none found. """
+        """
+        Returns the bytes of the manifest with the given digest or None if none found.
+        """

     @abstractmethod
     def get_blob_bytes_with_digest(self, digest):
-        """ Returns the bytes of the blob with the given digest or None if none found. """
+        """
+        Returns the bytes of the blob with the given digest or None if none found.
+        """
diff --git a/image/docker/schema1.py b/image/docker/schema1.py
index 607086409..0f0bcb892 100644
--- a/image/docker/schema1.py
+++ b/image/docker/schema1.py
@@ -66,17 +66,17 @@ _JWS_SIGNING_ALGORITHM = "RS256"

 class MalformedSchema1Manifest(ManifestException):
     """
-    Raised when a manifest fails an assertion that should be true according to the Docker Manifest
-    v2.1 Specification.
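Since ContentRetriever is the seam between manifest parsing and storage, a toy in-memory implementation (hypothetical, e.g. for tests) makes the contract above concrete:

    class InMemoryContentRetriever(object):
        # Hypothetical helper mapping digest -> bytes for manifests and blobs.
        def __init__(self, manifests=None, blobs=None):
            self._manifests = manifests or {}
            self._blobs = blobs or {}

        def get_manifest_bytes_with_digest(self, digest):
            return self._manifests.get(digest)

        def get_blob_bytes_with_digest(self, digest):
            return self._blobs.get(digest)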
- """ + Raised when a manifest fails an assertion that should be true according to the Docker Manifest + v2.1 Specification. + """ pass class InvalidSchema1Signature(ManifestException): """ - Raised when there is a failure verifying the signature of a signed Docker 2.1 Manifest. - """ + Raised when there is a failure verifying the signature of a signed Docker 2.1 Manifest. + """ pass @@ -88,9 +88,10 @@ class Schema1Layer( ) ): """ - Represents all of the data about an individual layer in a given Manifest. - This is the union of the fsLayers (digest) and the history entries (v1_compatibility). - """ + Represents all of the data about an individual layer in a given Manifest. + + This is the union of the fsLayers (digest) and the history entries (v1_compatibility). + """ class Schema1V1Metadata( @@ -100,9 +101,9 @@ class Schema1V1Metadata( ) ): """ - Represents the necessary data extracted from the v1 compatibility string in a given layer of a - Manifest. - """ + Represents the necessary data extracted from the v1 compatibility string in a given layer of a + Manifest. + """ class DockerSchema1Manifest(ManifestInterface): @@ -213,14 +214,18 @@ class DockerSchema1Manifest(ManifestInterface): raise InvalidSchema1Signature() def validate(self, content_retriever): - """ Performs validation of required assertions about the manifest. Raises a ManifestException - on failure. - """ + """ + Performs validation of required assertions about the manifest. + + Raises a ManifestException on failure. + """ # Already validated. @property def is_signed(self): - """ Returns whether the schema is signed. """ + """ + Returns whether the schema is signed. + """ return bool(self._signatures) @property @@ -322,8 +327,10 @@ class DockerSchema1Manifest(ManifestInterface): return self._layers def get_layers(self, content_retriever): - """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest - does not support layers. """ + """ + Returns the layers of this manifest, from base to leaf or None if this kind of manifest does + not support layers. + """ for layer in self.layers: created_datetime = None try: @@ -355,9 +362,11 @@ class DockerSchema1Manifest(ManifestInterface): return self.blob_digests def get_blob_digests_for_translation(self): - """ Returns the blob digests for translation of this manifest into another manifest. This - method will ignore missing IDs in layers, unlike `blob_digests`. - """ + """ + Returns the blob digests for translation of this manifest into another manifest. + + This method will ignore missing IDs in layers, unlike `blob_digests`. + """ layers = self._generate_layers(allow_missing_ids=True) return [str(layer.digest) for layer in layers] @@ -387,7 +396,9 @@ class DockerSchema1Manifest(ManifestInterface): return self._unsigned_builder().build() def with_tag_name(self, tag_name, json_web_key=None): - """ Returns a copy of this manifest, with the tag changed to the given tag name. """ + """ + Returns a copy of this manifest, with the tag changed to the given tag name. + """ builder = DockerSchema1ManifestBuilder( self._namespace, self._repo_name, tag_name, self._architecture ) @@ -398,9 +409,9 @@ class DockerSchema1Manifest(ManifestInterface): def _generate_layers(self, allow_missing_ids=False): """ - Returns a generator of objects that have the blobSum and v1Compatibility keys in them, - starting from the base image and working toward the leaf node. 
- """ + Returns a generator of objects that have the blobSum and v1Compatibility keys in them, + starting from the base image and working toward the leaf node. + """ for blob_sum_obj, history_obj in reversed( zip( self._parsed[DOCKER_SCHEMA1_FS_LAYERS_KEY], self._parsed[DOCKER_SCHEMA1_HISTORY_KEY] @@ -485,9 +496,11 @@ class DockerSchema1Manifest(ManifestInterface): return self.layers[-1].v1_metadata.image_id def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever): - """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`. + """ + Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`. + If none, returns None. - """ + """ # Note: schema1 *technically* supports non-amd64 architectures, but in practice these were never # used, so to ensure full backwards compatibility, we just always return the schema. return self @@ -506,12 +519,12 @@ class DockerSchema1Manifest(ManifestInterface): def rewrite_invalid_image_ids(self, images_map): """ - Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata. + Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata. - If Docker gives us a layer with a v1 image ID that already points to existing - content, but the checksums don't match, then we need to rewrite the image ID - to something new in order to ensure consistency. - """ + If Docker gives us a layer with a v1 image ID that already points to existing content, but + the checksums don't match, then we need to rewrite the image ID to something new in order to + ensure consistency. + """ # Used to synthesize a new "content addressable" image id digest_history = hashlib.sha256() @@ -572,8 +585,8 @@ class DockerSchema1Manifest(ManifestInterface): class DockerSchema1ManifestBuilder(object): """ - A convenient abstraction around creating new DockerSchema1Manifests. - """ + A convenient abstraction around creating new DockerSchema1Manifests. + """ def __init__(self, namespace_name, repo_name, tag, architecture="amd64"): repo_name_key = "{0}/{1}".format(namespace_name, repo_name) @@ -604,9 +617,10 @@ class DockerSchema1ManifestBuilder(object): return self def with_metadata_removed(self): - """ Returns a copy of the builder where every layer but the leaf layer has - its metadata stripped down to the bare essentials. - """ + """ + Returns a copy of the builder where every layer but the leaf layer has its metadata stripped + down to the bare essentials. + """ builder = DockerSchema1ManifestBuilder( self._namespace_name, self._repo_name, self._tag, self._architecture ) @@ -650,8 +664,8 @@ class DockerSchema1ManifestBuilder(object): def build(self, json_web_key=None, ensure_ascii=True): """ - Builds a DockerSchema1Manifest object, with optional signature. - """ + Builds a DockerSchema1Manifest object, with optional signature. + """ payload = OrderedDict(self._base_payload) payload.update( { @@ -701,8 +715,8 @@ class DockerSchema1ManifestBuilder(object): def _updated_v1_metadata(v1_metadata_json, updated_id_map): """ - Updates v1_metadata with new image IDs. - """ + Updates v1_metadata with new image IDs. 
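The ID rewriting described in rewrite_invalid_image_ids above hinges on deriving a deterministic, content-addressable v1 image ID. A minimal sketch of the idea; exactly which fields get folded into the digest is an assumption here:

    import hashlib

    def synthesize_v1_image_id(blob_digests):
        # Fold the layer blob digests, in order, into one running hash so that
        # identical layer stacks always produce the same synthetic image ID.
        digest_history = hashlib.sha256()
        for blob_digest in blob_digests:
            digest_history.update(blob_digest.encode("utf-8"))
        return digest_history.hexdigest()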
+ """ parsed = json.loads(v1_metadata_json) parsed["id"] = updated_id_map[parsed["id"]] diff --git a/image/docker/schema2/config.py b/image/docker/schema2/config.py index 6d8f2222f..2f68c0001 100644 --- a/image/docker/schema2/config.py +++ b/image/docker/schema2/config.py @@ -28,7 +28,7 @@ Example: "Entrypoint": null, "OnBuild": null, "Labels": { - + } }, "container": "b7a43694b435c8e9932615643f61f975a9213e453b15cd6c2a386f144a2d2de9", @@ -56,7 +56,7 @@ Example: "Entrypoint": null, "OnBuild": null, "Labels": { - + } }, "created": "2018-04-16T10:41:19.079522722Z", @@ -123,9 +123,9 @@ LayerHistory = namedtuple( class MalformedSchema2Config(ManifestException): """ - Raised when a config fails an assertion that should be true according to the Docker Manifest - v2.2 Config Specification. - """ + Raised when a config fails an assertion that should be true according to the Docker Manifest + v2.2 Config Specification. + """ pass @@ -202,27 +202,37 @@ class DockerSchema2Config(object): @property def digest(self): - """ Returns the digest of this config object. """ + """ + Returns the digest of this config object. + """ return digest_tools.sha256_digest(self._config_bytes.as_encoded_str()) @property def size(self): - """ Returns the size of this config object. """ + """ + Returns the size of this config object. + """ return len(self._config_bytes.as_encoded_str()) @property def bytes(self): - """ Returns the bytes of this config object. """ + """ + Returns the bytes of this config object. + """ return self._config_bytes @property def labels(self): - """ Returns a dictionary of all the labels defined in this configuration. """ + """ + Returns a dictionary of all the labels defined in this configuration. + """ return self._parsed.get("config", {}).get("Labels", {}) or {} @property def has_empty_layer(self): - """ Returns whether this config contains an empty layer. """ + """ + Returns whether this config contains an empty layer. + """ for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]: if history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False): return True @@ -231,7 +241,9 @@ class DockerSchema2Config(object): @property def history(self): - """ Returns the history of the image, started at the base layer. """ + """ + Returns the history of the image, started at the base layer. + """ for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]: created_datetime_str = history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_KEY) created_datetime = parse_date(created_datetime_str) if created_datetime_str else None @@ -246,8 +258,9 @@ class DockerSchema2Config(object): ) def build_v1_compatibility(self, history, v1_id, v1_parent_id, is_leaf, compressed_size=None): - """ Builds the V1 compatibility block for the given layer. - """ + """ + Builds the V1 compatibility block for the given layer. + """ # If the layer is the leaf, it gets the full config (minus 2 fields). Otherwise, it gets only # IDs. v1_compatibility = copy.deepcopy(self._parsed) if is_leaf else {} diff --git a/image/docker/schema2/list.py b/image/docker/schema2/list.py index 1232cb2e3..794179859 100644 --- a/image/docker/schema2/list.py +++ b/image/docker/schema2/list.py @@ -36,17 +36,18 @@ DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY = "variant" class MalformedSchema2ManifestList(ManifestException): """ - Raised when a manifest list fails an assertion that should be true according to the - Docker Manifest v2.2 Specification. 
- """ + Raised when a manifest list fails an assertion that should be true according to the Docker + Manifest v2.2 Specification. + """ pass class MismatchManifestException(MalformedSchema2ManifestList): - """ Raised when a manifest list contains a schema 1 manifest with a differing architecture - from that specified in the manifest list for the manifest. - """ + """ + Raised when a manifest list contains a schema 1 manifest with a differing architecture from that + specified in the manifest list for the manifest. + """ pass @@ -211,7 +212,9 @@ class DockerSchema2ManifestList(ManifestInterface): @property def is_manifest_list(self): - """ Returns whether this manifest is a list. """ + """ + Returns whether this manifest is a list. + """ return True @property @@ -220,17 +223,23 @@ class DockerSchema2ManifestList(ManifestInterface): @property def digest(self): - """ The digest of the manifest, including type prefix. """ + """ + The digest of the manifest, including type prefix. + """ return digest_tools.sha256_digest(self._manifest_bytes.as_encoded_str()) @property def media_type(self): - """ The media type of the schema. """ + """ + The media type of the schema. + """ return self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY] @property def manifest_dict(self): - """ Returns the manifest as a dictionary ready to be serialized to JSON. """ + """ + Returns the manifest as a dictionary ready to be serialized to JSON. + """ return self._parsed @property @@ -238,8 +247,10 @@ class DockerSchema2ManifestList(ManifestInterface): return self._manifest_bytes def get_layers(self, content_retriever): - """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest - does not support layers. """ + """ + Returns the layers of this manifest, from base to leaf or None if this kind of manifest does + not support layers. + """ return None @property @@ -260,15 +271,18 @@ class DockerSchema2ManifestList(ManifestInterface): @lru_cache(maxsize=1) def manifests(self, content_retriever): - """ Returns the manifests in the list. - """ + """ + Returns the manifests in the list. + """ manifests = self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY] return [LazyManifestLoader(m, content_retriever) for m in manifests] def validate(self, content_retriever): - """ Performs validation of required assertions about the manifest. Raises a ManifestException - on failure. - """ + """ + Performs validation of required assertions about the manifest. + + Raises a ManifestException on failure. + """ for index, m in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]): if m[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY] == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE: platform = m[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY] @@ -311,9 +325,11 @@ class DockerSchema2ManifestList(ManifestInterface): return False def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever): - """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`. + """ + Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`. + If none, returns None. 
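A condensed sketch of that V1-compatibility selection, assuming each manifest-list entry carries a platform dict with architecture and os keys as in the schema handled above:

    def find_legacy_manifest_entry(manifest_list_dict):
        # Scan for the entry that legacy Docker V1 clients can consume.
        for entry in manifest_list_dict["manifests"]:
            platform = entry.get("platform", {})
            if platform.get("architecture") == "amd64" and platform.get("os") == "linux":
                return entry
        return None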
- """ + """ legacy_manifest = self._get_legacy_manifest(content_retriever) if legacy_manifest is None: return None @@ -337,9 +353,10 @@ class DockerSchema2ManifestList(ManifestInterface): ) def _get_legacy_manifest(self, content_retriever): - """ Returns the manifest under this list with architecture amd64 and os linux, if any, or None + """ + Returns the manifest under this list with architecture amd64 and os linux, if any, or None if none or error. - """ + """ for manifest_ref in self.manifests(content_retriever): platform = manifest_ref._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY] architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY] @@ -364,14 +381,16 @@ class DockerSchema2ManifestList(ManifestInterface): class DockerSchema2ManifestListBuilder(object): """ - A convenient abstraction around creating new DockerSchema2ManifestList's. - """ + A convenient abstraction around creating new DockerSchema2ManifestList's. + """ def __init__(self): self.manifests = [] def add_manifest(self, manifest, architecture, os): - """ Adds a manifest to the list. """ + """ + Adds a manifest to the list. + """ manifest = manifest.unsigned() # Make sure we add the unsigned version to the list. self.add_manifest_digest( manifest.digest, @@ -382,7 +401,9 @@ class DockerSchema2ManifestListBuilder(object): ) def add_manifest_digest(self, manifest_digest, manifest_size, media_type, architecture, os): - """ Adds a manifest to the list. """ + """ + Adds a manifest to the list. + """ self.manifests.append( ( manifest_digest, @@ -396,7 +417,9 @@ class DockerSchema2ManifestListBuilder(object): ) def build(self): - """ Builds and returns the DockerSchema2ManifestList. """ + """ + Builds and returns the DockerSchema2ManifestList. + """ assert self.manifests manifest_list_dict = { diff --git a/image/docker/schema2/manifest.py b/image/docker/schema2/manifest.py index 3f5ac8dc4..4f16fb002 100644 --- a/image/docker/schema2/manifest.py +++ b/image/docker/schema2/manifest.py @@ -46,9 +46,9 @@ logger = logging.getLogger(__name__) class MalformedSchema2Manifest(ManifestException): """ - Raised when a manifest fails an assertion that should be true according to the Docker Manifest - v2.2 Specification. - """ + Raised when a manifest fails an assertion that should be true according to the Docker Manifest + v2.2 Specification. + """ pass @@ -167,9 +167,11 @@ class DockerSchema2Manifest(ManifestInterface): raise MalformedSchema2Manifest("missing `urls` for remote layer") def validate(self, content_retriever): - """ Performs validation of required assertions about the manifest. Raises a ManifestException - on failure. - """ + """ + Performs validation of required assertions about the manifest. + + Raises a ManifestException on failure. + """ # Nothing to validate. @property @@ -202,14 +204,18 @@ class DockerSchema2Manifest(ManifestInterface): @property def filesystem_layers(self): - """ Returns the file system layers of this manifest, from base to leaf. """ + """ + Returns the file system layers of this manifest, from base to leaf. + """ if self._filesystem_layers is None: self._filesystem_layers = list(self._generate_filesystem_layers()) return self._filesystem_layers @property def leaf_filesystem_layer(self): - """ Returns the leaf file system layer for this manifest. """ + """ + Returns the leaf file system layer for this manifest. 
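Usage of the DockerSchema2ManifestListBuilder above composes in the obvious way; the digest and size in this sketch are made up, and only the signature shown in this diff is relied upon:

    list_builder = DockerSchema2ManifestListBuilder()
    list_builder.add_manifest_digest(
        "sha256:" + "ab" * 32,  # hypothetical manifest digest
        7023,                   # hypothetical manifest size, in bytes
        "application/vnd.docker.distribution.manifest.v2+json",
        "amd64",
        "linux",
    )
    manifest_list = list_builder.build()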
+ """ return self.filesystem_layers[-1] @property @@ -241,8 +247,10 @@ class DockerSchema2Manifest(ManifestInterface): return self._get_built_config(content_retriever).labels def get_layers(self, content_retriever): - """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest - does not support layers. """ + """ + Returns the layers of this manifest, from base to leaf or None if this kind of manifest does + not support layers. + """ for image_layer in self._manifest_image_layers(content_retriever): is_remote = image_layer.blob_layer.is_remote if image_layer.blob_layer else False urls = image_layer.blob_layer.urls if image_layer.blob_layer else None @@ -371,9 +379,9 @@ class DockerSchema2Manifest(ManifestInterface): return schema2_config.has_empty_layer def _populate_schema1_builder(self, v1_builder, content_retriever): - """ Populates a DockerSchema1ManifestBuilder with the layers and config from - this schema. - """ + """ + Populates a DockerSchema1ManifestBuilder with the layers and config from this schema. + """ assert not self.has_remote_layer schema2_config = self._get_built_config(content_retriever) layers = list(self._manifest_image_layers(content_retriever)) @@ -428,23 +436,29 @@ class DockerSchema2Manifest(ManifestInterface): class DockerSchema2ManifestBuilder(object): """ - A convenient abstraction around creating new DockerSchema2Manifests. - """ + A convenient abstraction around creating new DockerSchema2Manifests. + """ def __init__(self): self.config = None self.filesystem_layers = [] def set_config(self, schema2_config): - """ Sets the configuration for the manifest being built. """ + """ + Sets the configuration for the manifest being built. + """ self.set_config_digest(schema2_config.digest, schema2_config.size) def set_config_digest(self, config_digest, config_size): - """ Sets the digest and size of the configuration layer. """ + """ + Sets the digest and size of the configuration layer. + """ self.config = DockerV2ManifestConfig(size=config_size, digest=config_digest) def add_layer(self, digest, size, urls=None): - """ Adds a filesystem layer to the manifest. """ + """ + Adds a filesystem layer to the manifest. + """ self.filesystem_layers.append( DockerV2ManifestLayer( index=len(self.filesystem_layers), @@ -456,7 +470,9 @@ class DockerSchema2ManifestBuilder(object): ) def build(self, ensure_ascii=True): - """ Builds and returns the DockerSchema2Manifest. """ + """ + Builds and returns the DockerSchema2Manifest. + """ assert self.filesystem_layers assert self.config diff --git a/image/docker/schemas.py b/image/docker/schemas.py index 858faf1ce..487e7ab7b 100644 --- a/image/docker/schemas.py +++ b/image/docker/schemas.py @@ -10,9 +10,11 @@ from util.bytes import Bytes def parse_manifest_from_bytes(manifest_bytes, media_type, validate=True): - """ Parses and returns a manifest from the given bytes, for the given media type. - Raises a ManifestException if the parse fails for some reason. - """ + """ + Parses and returns a manifest from the given bytes, for the given media type. + + Raises a ManifestException if the parse fails for some reason. 
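Putting the DockerSchema2ManifestBuilder methods above together; the digests and sizes are invented for illustration:

    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest("sha256:" + "cd" * 32, 1469)  # hypothetical config blob
    builder.add_layer("sha256:" + "ef" * 32, 977)           # hypothetical layer blob
    manifest = builder.build()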
+ """ assert isinstance(manifest_bytes, Bytes) if media_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE: diff --git a/image/docker/schemautil.py b/image/docker/schemautil.py index d2947f9f1..4f2ac94c4 100644 --- a/image/docker/schemautil.py +++ b/image/docker/schemautil.py @@ -36,11 +36,12 @@ class _CustomEncoder(json.JSONEncoder): def to_canonical_json(value, ensure_ascii=True, indent=None): - """ Returns the canonical JSON string form of the given value, - as per the guidelines in https://github.com/docker/distribution/blob/master/docs/spec/json.md. + """ + Returns the canonical JSON string form of the given value, as per the guidelines in + https://github.com/docker/distribution/blob/master/docs/spec/json.md. - `indent` is allowed only for the purposes of indenting for debugging. - """ + `indent` is allowed only for the purposes of indenting for debugging. + """ return json.dumps( value, ensure_ascii=ensure_ascii, diff --git a/image/docker/squashed.py b/image/docker/squashed.py index d7acbadb5..79e15ee5b 100644 --- a/image/docker/squashed.py +++ b/image/docker/squashed.py @@ -11,17 +11,19 @@ from util.registry.streamlayerformat import StreamLayerMerger class FileEstimationException(Exception): """ - Exception raised by build_docker_load_stream if the estimated size of the layer tar was lower - than the actual size. This means the sent tar header is wrong, and we have to fail. - """ + Exception raised by build_docker_load_stream if the estimated size of the layer tar was lower + than the actual size. + + This means the sent tar header is wrong, and we have to fail. + """ pass class SquashedDockerImageFormatter(TarImageFormatter): """ - Image formatter which produces a squashed image compatible with the `docker load` command. - """ + Image formatter which produces a squashed image compatible with the `docker load` command. + """ # Multiplier against the image size reported by Docker to account for the tar metadata. # Note: This multiplier was not formally calculated in anyway and should be adjusted overtime diff --git a/image/docker/v1.py b/image/docker/v1.py index d15564001..5993078ad 100644 --- a/image/docker/v1.py +++ b/image/docker/v1.py @@ -26,6 +26,7 @@ class DockerV1Metadata( ) ): """ - DockerV1Metadata represents all of the metadata for a given Docker v1 Image. - The original form of the metadata is stored in the compat_json field. - """ + DockerV1Metadata represents all of the metadata for a given Docker v1 Image. + + The original form of the metadata is stored in the compat_json field. + """ diff --git a/initdb.py b/initdb.py index 6e01503ca..416ba142e 100644 --- a/initdb.py +++ b/initdb.py @@ -282,9 +282,10 @@ testcases = {} def finished_database_for_testing(testcase): - """ Called when a testcase has finished using the database, indicating that - any changes should be discarded. - """ + """ + Called when a testcase has finished using the database, indicating that any changes should be + discarded. + """ testcases[testcase]["savepoint"].rollback() testcases[testcase]["savepoint"].__exit__(True, None, None) @@ -292,9 +293,10 @@ def finished_database_for_testing(testcase): def setup_database_for_testing(testcase, with_storage=False, force_rebuild=False): - """ Called when a testcase has started using the database, indicating that - the database should be setup (if not already) and a savepoint created. - """ + """ + Called when a testcase has started using the database, indicating that the database should be + setup (if not already) and a savepoint created. 
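The canonical-JSON guarantee used by to_canonical_json above can be approximated with the standard library alone; this sketch assumes sorted keys and minimal separators are the relevant properties:

    import json

    def canonical_json(value):
        # Stable key order plus no extra whitespace yields a reproducible byte form,
        # so the same dict always serializes (and therefore hashes) identically.
        return json.dumps(value, sort_keys=True, separators=(",", ":"))

    assert canonical_json({"b": 1, "a": 2}) == canonical_json({"a": 2, "b": 1})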
+ """ # Sanity check to make sure we're not killing our prod db if not IS_TESTING_REAL_DATABASE and not isinstance(db.obj, SqliteDatabase): diff --git a/loghandler.py b/loghandler.py index b9307299e..a3eac4a23 100755 --- a/loghandler.py +++ b/loghandler.py @@ -15,9 +15,10 @@ LOG_FORMAT_REGEXP = re.compile(r"\((.+?)\)", re.IGNORECASE) def _json_default(obj): """ - Coerce everything to strings. - All objects representing time get output as ISO8601. - """ + Coerce everything to strings. + + All objects representing time get output as ISO8601. + """ if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)): return obj.isoformat() @@ -60,8 +61,8 @@ RESERVED_ATTRS = set( class JsonFormatter(logging.Formatter): """ A custom formatter to format logging records as json strings. - extra values will be formatted as str() if nor supported by - json default encoder + + extra values will be formatted as str() if nor supported by json default encoder """ def __init__(self, *args, **kwargs): @@ -86,14 +87,16 @@ class JsonFormatter(logging.Formatter): self._skip_fields.update(RESERVED_ATTRS) def _parse_format_string(self): - """Parses format string looking for substitutions""" + """ + Parses format string looking for substitutions. + """ standard_formatters = LOG_FORMAT_REGEXP return standard_formatters.findall(self._fmt) def add_fields(self, log_record, record, message_dict): """ Override this method to implement custom logic for adding fields. - """ + """ target = log_record if self.prefix_key: @@ -110,7 +113,9 @@ class JsonFormatter(logging.Formatter): target.update(self.default_values) def format(self, record): - """Formats a log record and serializes to json""" + """ + Formats a log record and serializes to json. + """ message_dict = {} if isinstance(record.msg, dict): message_dict = record.msg diff --git a/notifications/__init__.py b/notifications/__init__.py index 77153a22d..094487825 100644 --- a/notifications/__init__.py +++ b/notifications/__init__.py @@ -10,10 +10,12 @@ DEFAULT_BATCH_SIZE = 1000 def build_repository_event_data(namespace_name, repo_name, extra_data=None, subpage=None): - """ Builds the basic repository data for an event, including the repository's name, Docker URL - and homepage. If extra_data is specified, it is appended to the dictionary before it is - returned. - """ + """ + Builds the basic repository data for an event, including the repository's name, Docker URL and + homepage. + + If extra_data is specified, it is appended to the dictionary before it is returned. + """ repo_string = "%s/%s" % (namespace_name, repo_name) homepage = "%s://%s/repository/%s" % ( app.config["PREFERRED_URL_SCHEME"], @@ -64,10 +66,12 @@ def build_notification_data(notification, event_data, performer_data=None): @contextmanager def notification_batch(batch_size=DEFAULT_BATCH_SIZE): """ - Context manager implementation which returns a target callable with the same signature - as spawn_notification. When the the context block exits the notifications generated by - the callable will be bulk inserted into the queue with the specified batch size. - """ + Context manager implementation which returns a target callable with the same signature as + spawn_notification. + + When the the context block exits the notifications generated by the callable will be bulk + inserted into the queue with the specified batch size. 
+ """ with notification_queue.batch_insert(batch_size) as queue_put: def spawn_notification_batch( diff --git a/notifications/models_interface.py b/notifications/models_interface.py index 10d52b5d9..b7988a2ac 100644 --- a/notifications/models_interface.py +++ b/notifications/models_interface.py @@ -3,8 +3,8 @@ from collections import namedtuple class Repository(namedtuple("Repository", ["namespace_name", "name"])): """ - Repository represents a repository. - """ + Repository represents a repository. + """ class Notification( @@ -21,5 +21,5 @@ class Notification( ) ): """ - Notification represents a registered notification of some kind. - """ + Notification represents a registered notification of some kind. + """ diff --git a/notifications/notificationevent.py b/notifications/notificationevent.py index 28111c780..7874c1da0 100644 --- a/notifications/notificationevent.py +++ b/notifications/notificationevent.py @@ -22,42 +22,46 @@ class NotificationEvent(object): def get_level(self, event_data, notification_data): """ - Returns a 'level' representing the severity of the event. - Valid values are: 'info', 'warning', 'error', 'primary', 'success' - """ + Returns a 'level' representing the severity of the event. + + Valid values are: 'info', 'warning', 'error', 'primary', 'success' + """ raise NotImplementedError def get_summary(self, event_data, notification_data): """ - Returns a human readable one-line summary for the given notification data. - """ + Returns a human readable one-line summary for the given notification data. + """ raise NotImplementedError def get_message(self, event_data, notification_data): """ - Returns a human readable HTML message for the given notification data. - """ + Returns a human readable HTML message for the given notification data. + """ return TEMPLATE_ENV.get_template(self.event_name() + ".html").render( {"event_data": event_data, "notification_data": notification_data} ) def get_sample_data(self, namespace_name, repo_name, event_config): """ - Returns sample data for testing the raising of this notification, with an example notification. - """ + Returns sample data for testing the raising of this notification, with an example + notification. + """ raise NotImplementedError def should_perform(self, event_data, notification_data): """ - Whether a notification for this event should be performed. By default returns True. - """ + Whether a notification for this event should be performed. + + By default returns True. + """ return True @classmethod def event_name(cls): """ - Particular event implemented by subclasses. - """ + Particular event implemented by subclasses. + """ raise NotImplementedError @classmethod @@ -157,7 +161,9 @@ class RepoMirrorSyncFailedEvent(NotificationEvent): def _build_summary(event_data): - """ Returns a summary string for the build data found in the event data block. """ + """ + Returns a summary string for the build data found in the event data block. + """ summary = "for repository %s [%s]" % (event_data["repository"], event_data["build_id"][0:7]) return summary diff --git a/notifications/notificationmethod.py b/notifications/notificationmethod.py index e55a67b5e..e9981f6e2 100644 --- a/notifications/notificationmethod.py +++ b/notifications/notificationmethod.py @@ -42,25 +42,26 @@ class NotificationMethod(object): @classmethod def method_name(cls): """ - Particular method implemented by subclasses. - """ + Particular method implemented by subclasses. 
+ """ raise NotImplementedError def validate(self, namespace_name, repo_name, config_data): """ - Validates that the notification can be created with the given data. Throws - a CannotValidateNotificationMethodException on failure. - """ + Validates that the notification can be created with the given data. + + Throws a CannotValidateNotificationMethodException on failure. + """ raise NotImplementedError def perform(self, notification_obj, event_handler, notification_data): """ - Performs the notification method. + Performs the notification method. - notification_obj: The notification namedtuple. - event_handler: The NotificationEvent handler. - notification_data: The dict of notification data placed in the queue. - """ + notification_obj: The notification namedtuple. + event_handler: The NotificationEvent handler. + notification_data: The dict of notification data placed in the queue. + """ raise NotImplementedError @classmethod @@ -234,9 +235,11 @@ class WebhookMethod(NotificationMethod): class FlowdockMethod(NotificationMethod): - """ Method for sending notifications to Flowdock via the Team Inbox API: - https://www.flowdock.com/api/team-inbox - """ + """ + Method for sending notifications to Flowdock via the Team Inbox API: + + https://www.flowdock.com/api/team-inbox + """ @classmethod def method_name(cls): @@ -293,9 +296,11 @@ class FlowdockMethod(NotificationMethod): class HipchatMethod(NotificationMethod): - """ Method for sending notifications to Hipchat via the API: - https://www.hipchat.com/docs/apiv2/method/send_room_notification - """ + """ + Method for sending notifications to Hipchat via the API: + + https://www.hipchat.com/docs/apiv2/method/send_room_notification + """ @classmethod def method_name(cls): @@ -411,9 +416,11 @@ def adjust_tags(html): class SlackMethod(NotificationMethod): - """ Method for sending notifications to Slack via the API: - https://api.slack.com/docs/attachments - """ + """ + Method for sending notifications to Slack via the API: + + https://api.slack.com/docs/attachments + """ @classmethod def method_name(cls): diff --git a/oauth/base.py b/oauth/base.py index a9b729bdb..49e2941d7 100644 --- a/oauth/base.py +++ b/oauth/base.py @@ -33,20 +33,26 @@ class OAuthEndpoint(object): class OAuthExchangeCodeException(Exception): - """ Exception raised if a code exchange fails. """ + """ + Exception raised if a code exchange fails. + """ pass class OAuthGetUserInfoException(Exception): - """ Exception raised if a call to get user information fails. """ + """ + Exception raised if a call to get user information fails. + """ pass @add_metaclass(ABCMeta) class OAuthService(object): - """ A base class for defining an external service, exposed via OAuth. """ + """ + A base class for defining an external service, exposed via OAuth. + """ def __init__(self, config, key_name): self.key_name = key_name @@ -54,37 +60,52 @@ class OAuthService(object): @abstractmethod def service_id(self): - """ The internal ID for this service. Must match the URL portion for the service, e.g. `github` - """ + """ + The internal ID for this service. + + Must match the URL portion for the service, e.g. `github` + """ pass @abstractmethod def service_name(self): - """ The user-readable name for the service, e.g. `GitHub`""" + """ + The user-readable name for the service, e.g. `GitHub` + """ pass @abstractmethod def token_endpoint(self): - """ Returns the endpoint at which the OAuth code can be exchanged for a token. 
""" + """ + Returns the endpoint at which the OAuth code can be exchanged for a token. + """ pass @abstractmethod def user_endpoint(self): - """ Returns the endpoint at which user information can be looked up. """ + """ + Returns the endpoint at which user information can be looked up. + """ pass @abstractmethod def authorize_endpoint(self): - """ Returns the for authorization of the OAuth service. """ + """ + Returns the for authorization of the OAuth service. + """ pass @abstractmethod def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname): - """ Performs validation of the client ID and secret, raising an exception on failure. """ + """ + Performs validation of the client ID and secret, raising an exception on failure. + """ pass def requires_form_encoding(self): - """ Returns True if form encoding is necessary for the exchange_code_for_token call. """ + """ + Returns True if form encoding is necessary for the exchange_code_for_token call. + """ return False def client_id(self): @@ -94,18 +115,22 @@ class OAuthService(object): return self.config.get("CLIENT_SECRET") def login_binding_field(self): - """ Returns the name of the field (`username` or `email`) used for auto binding an external - login service account to an *internal* login service account. For example, if the external - login service is GitHub and the internal login service is LDAP, a value of `email` here - will cause login-with-Github to conduct a search (via email) in LDAP for a user, an auto - bind the external and internal users together. May return None, in which case no binding - is performing, and login with this external account will simply create a new account in the - database. - """ + """ + Returns the name of the field (`username` or `email`) used for auto binding an external + login service account to an *internal* login service account. + + For example, if the external login service is GitHub and the internal login service is LDAP, + a value of `email` here will cause login-with-Github to conduct a search (via email) in LDAP + for a user, an auto bind the external and internal users together. May return None, in which + case no binding is performing, and login with this external account will simply create a new + account in the database. + """ return self.config.get("LOGIN_BINDING_FIELD", None) def get_auth_url(self, url_scheme_and_hostname, redirect_suffix, csrf_token, scopes): - """ Retrieves the authorization URL for this login service. """ + """ + Retrieves the authorization URL for this login service. + """ redirect_uri = "%s/oauth2/%s/callback%s" % ( url_scheme_and_hostname.get_url(), self.service_id(), @@ -160,7 +185,9 @@ class OAuthService(object): redirect_suffix="", client_auth=False, ): - """ Exchanges an OAuth access code for the associated OAuth token. """ + """ + Exchanges an OAuth access code for the associated OAuth token. + """ json_data = self.exchange_code( app_config, http_client, code, form_encode, redirect_suffix, client_auth ) @@ -183,7 +210,9 @@ class OAuthService(object): redirect_suffix="", client_auth=False, ): - """ Exchanges an OAuth access code for associated OAuth token and other data. """ + """ + Exchanges an OAuth access code for associated OAuth token and other data. 
+ """ url_scheme_and_hostname = URLSchemeAndHostname.from_app_config(app_config) payload = { "code": code, diff --git a/oauth/login.py b/oauth/login.py index eced7112f..17fd2a349 100644 --- a/oauth/login.py +++ b/oauth/login.py @@ -11,58 +11,79 @@ logger = logging.getLogger(__name__) class OAuthLoginException(Exception): - """ Exception raised if a login operation fails. """ + """ + Exception raised if a login operation fails. + """ pass @add_metaclass(ABCMeta) class OAuthLoginService(OAuthService): - """ A base class for defining an OAuth-compliant service that can be used for, amongst other - things, login and authentication. """ + """ + A base class for defining an OAuth-compliant service that can be used for, amongst other things, + login and authentication. + """ @abstractmethod def login_enabled(self): - """ Returns true if the login service is enabled. """ + """ + Returns true if the login service is enabled. + """ pass @abstractmethod def get_login_service_id(self, user_info): - """ Returns the internal ID for the given user under this login service. """ + """ + Returns the internal ID for the given user under this login service. + """ pass @abstractmethod def get_login_service_username(self, user_info): - """ Returns the username for the given user under this login service. """ + """ + Returns the username for the given user under this login service. + """ pass @abstractmethod def get_verified_user_email(self, app_config, http_client, token, user_info): - """ Returns the verified email address for the given user, if any or None if none. """ + """ + Returns the verified email address for the given user, if any or None if none. + """ pass @abstractmethod def get_icon(self): - """ Returns the icon to display for this login service. """ + """ + Returns the icon to display for this login service. + """ pass @abstractmethod def get_login_scopes(self): - """ Returns the list of scopes for login for this service. """ + """ + Returns the list of scopes for login for this service. + """ pass def service_verify_user_info_for_login(self, app_config, http_client, token, user_info): - """ Performs service-specific verification of user information for login. On failure, a service - should raise a OAuthLoginService. - """ + """ + Performs service-specific verification of user information for login. + + On failure, a service should raise a OAuthLoginService. + """ # By default, does nothing. pass def exchange_code_for_login(self, app_config, http_client, code, redirect_suffix): - """ Exchanges the given OAuth access code for user information on behalf of a user trying to - login or attach their account. Raises a OAuthLoginService exception on failure. Returns - a tuple consisting of (service_id, service_username, email) - """ + """ + Exchanges the given OAuth access code for user information on behalf of a user trying to + login or attach their account. + + Raises a OAuthLoginService exception on failure. Returns a tuple consisting of (service_id, + service_username, email) + """ # Retrieve the token for the OAuth code. try: diff --git a/oauth/loginmanager.py b/oauth/loginmanager.py index 4b3c58634..4a5888282 100644 --- a/oauth/loginmanager.py +++ b/oauth/loginmanager.py @@ -11,7 +11,9 @@ PREFIX_BLACKLIST = ["ldap", "jwt", "keystone"] class OAuthLoginManager(object): - """ Helper class which manages all registered OAuth login services. """ + """ + Helper class which manages all registered OAuth login services. 
+ """ def __init__(self, config, client=None): self.services = [] diff --git a/oauth/oidc.py b/oauth/oidc.py index c79794425..8c81effe4 100644 --- a/oauth/oidc.py +++ b/oauth/oidc.py @@ -30,19 +30,25 @@ JWT_CLOCK_SKEW_SECONDS = 30 class DiscoveryFailureException(Exception): - """ Exception raised when OIDC discovery fails. """ + """ + Exception raised when OIDC discovery fails. + """ pass class PublicKeyLoadException(Exception): - """ Exception raised if loading the OIDC public key fails. """ + """ + Exception raised if loading the OIDC public key fails. + """ pass class OIDCLoginService(OAuthService): - """ Defines a generic service for all OpenID-connect compatible login services. """ + """ + Defines a generic service for all OpenID-connect compatible login services. + """ def __init__(self, config, key_name, client=None): super(OIDCLoginService, self).__init__(config, key_name) @@ -84,11 +90,12 @@ class OIDCLoginService(OAuthService): return self._get_endpoint("userinfo_endpoint") def _get_endpoint(self, endpoint_key, **kwargs): - """ Returns the OIDC endpoint with the given key found in the OIDC discovery - document, with the given kwargs added as query parameters. Additionally, - any defined parameters found in the OIDC configuration block are also - added. - """ + """ + Returns the OIDC endpoint with the given key found in the OIDC discovery document, with the + given kwargs added as query parameters. + + Additionally, any defined parameters found in the OIDC configuration block are also added. + """ endpoint = self._oidc_config().get(endpoint_key, "") if not endpoint: return None @@ -232,9 +239,12 @@ class OIDCLoginService(OAuthService): return {} def _load_oidc_config_via_discovery(self, is_debugging): - """ Attempts to load the OIDC config via the OIDC discovery mechanism. If is_debugging is True, - non-secure connections are alllowed. Raises an DiscoveryFailureException on failure. - """ + """ + Attempts to load the OIDC config via the OIDC discovery mechanism. + + If is_debugging is True, non-secure connections are alllowed. Raises an + DiscoveryFailureException on failure. + """ oidc_server = self.config["OIDC_SERVER"] if not oidc_server.startswith("https://") and not is_debugging: raise DiscoveryFailureException("OIDC server must be accessed over SSL") @@ -254,10 +264,12 @@ class OIDCLoginService(OAuthService): raise DiscoveryFailureException("Could not parse OIDC discovery information") def decode_user_jwt(self, token): - """ Decodes the given JWT under the given provider and returns it. Raises an InvalidTokenError - exception on an invalid token or a PublicKeyLoadException if the public key could not be - loaded for decoding. - """ + """ + Decodes the given JWT under the given provider and returns it. + + Raises an InvalidTokenError exception on an invalid token or a PublicKeyLoadException if the + public key could not be loaded for decoding. + """ # Find the key to use. headers = jwt.get_unverified_header(token) kid = headers.get("kid", None) @@ -322,8 +334,11 @@ class OIDCLoginService(OAuthService): raise ite def _get_public_key(self, kid, force_refresh=False): - """ Retrieves the public key for this handler with the given kid. Raises a - PublicKeyLoadException on failure. """ + """ + Retrieves the public key for this handler with the given kid. + + Raises a PublicKeyLoadException on failure. + """ # If force_refresh is true, we expire all the items in the cache by setting the time to # the current time + the expiration TTL. 
@@ -342,9 +357,11 @@ class _PublicKeyCache(TTLCache): self._login_service = login_service def __missing__(self, kid): - """ Loads the public key for this handler from the OIDC service. Raises PublicKeyLoadException - on failure. - """ + """ + Loads the public key for this handler from the OIDC service. + + Raises PublicKeyLoadException on failure. + """ keys_url = self._login_service._oidc_config()["jwks_uri"] # Load the keys. diff --git a/path_converters.py b/path_converters.py index efecb0a74..f8f49d0fa 100644 --- a/path_converters.py +++ b/path_converters.py @@ -4,8 +4,11 @@ import features class APIRepositoryPathConverter(BaseConverter): - """ Converter for handling repository paths. Does not handle library paths. - """ + """ + Converter for handling repository paths. + + Does not handle library paths. + """ def __init__(self, url_map): super(APIRepositoryPathConverter, self).__init__(url_map) @@ -14,9 +17,11 @@ class APIRepositoryPathConverter(BaseConverter): class RepositoryPathConverter(BaseConverter): - """ Converter for handling repository paths. Handles both library and non-library paths (if - configured). - """ + """ + Converter for handling repository paths. + + Handles both library and non-library paths (if configured). + """ def __init__(self, url_map): super(RepositoryPathConverter, self).__init__(url_map) @@ -30,7 +35,9 @@ class RepositoryPathConverter(BaseConverter): class RegexConverter(BaseConverter): - """ Converter for handling custom regular expression patterns in paths. """ + """ + Converter for handling custom regular expression patterns in paths. + """ def __init__(self, url_map, regex_value): super(RegexConverter, self).__init__(url_map) diff --git a/storage/__init__.py b/storage/__init__.py index c88ee582e..e2a6bfe34 100644 --- a/storage/__init__.py +++ b/storage/__init__.py @@ -28,8 +28,10 @@ STORAGE_DRIVER_CLASSES = { def get_storage_driver(location, chunk_cleanup_queue, config_provider, ip_resolver, storage_params): - """ Returns a storage driver class for the given storage configuration - (a pair of string name and a dict of parameters). """ + """ + Returns a storage driver class for the given storage configuration (a pair of string name and a + dict of parameters). + """ driver = storage_params[0] parameters = storage_params[1] driver_class = STORAGE_DRIVER_CLASSES.get(driver, FakeStorage) diff --git a/storage/azurestorage.py b/storage/azurestorage.py index f657ca966..e81209005 100644 --- a/storage/azurestorage.py +++ b/storage/azurestorage.py @@ -1,6 +1,7 @@ -""" Azure storage driver. +""" +Azure storage driver. - Based on: https://docs.microsoft.com/en-us/azure/storage/blobs/storage-python-how-to-use-blob-storage +Based on: https://docs.microsoft.com/en-us/azure/storage/blobs/storage-python-how-to-use-blob-storage """ import logging @@ -251,9 +252,11 @@ class AzureStorage(BaseStorage): return total_bytes_written, new_metadata, None def complete_chunked_upload(self, uuid, final_path, storage_metadata): - """ Complete the chunked upload and store the final results in the path indicated. + """ + Complete the chunked upload and store the final results in the path indicated. + Returns nothing. - """ + """ # Commit the blob's blocks. 
upload_blob_path = self._upload_blob_path_from_uuid(uuid) block_list = [BlobBlock(block_id) for block_id in storage_metadata[_BLOCKS_KEY]] @@ -307,9 +310,11 @@ class AzureStorage(BaseStorage): raise IOError("Exception when trying to delete uploaded blob") def cancel_chunked_upload(self, uuid, storage_metadata): - """ Cancel the chunked upload and clean up any outstanding partially uploaded data. + """ + Cancel the chunked upload and clean up any outstanding partially uploaded data. + Returns nothing. - """ + """ upload_blob_path = self._upload_blob_path_from_uuid(uuid) logger.debug("Canceling chunked upload %s at path %s", uuid, upload_blob_path) self._blob_service.delete_blob(self._azure_container, upload_blob_path) diff --git a/storage/basestorage.py b/storage/basestorage.py index 56c842e39..a4ce54ea9 100644 --- a/storage/basestorage.py +++ b/storage/basestorage.py @@ -39,12 +39,17 @@ class BaseStorage(StoragePaths): self.buffer_size = 64 * 1024 def setup(self): - """ Called to perform any storage system setup. """ + """ + Called to perform any storage system setup. + """ pass def validate(self, client): - """ Called to perform storage system validation. The client is an HTTP - client to use for any external calls. """ + """ + Called to perform storage system validation. + + The client is an HTTP client to use for any external calls. + """ # Put a temporary file to make sure the normal storage paths work. self.put_content("_verify", "testing 123") if not self.exists("_verify"): @@ -86,9 +91,11 @@ class BaseStorage(StoragePaths): raise NotImplementedError def stream_write_to_fp(self, in_fp, out_fp, num_bytes=READ_UNTIL_END): - """ Copy the specified number of bytes from the input file stream to the output stream. If - num_bytes < 0 copy until the stream ends. Returns the number of bytes copied. - """ + """ + Copy the specified number of bytes from the input file stream to the output stream. + + If num_bytes < 0 copy until the stream ends. Returns the number of bytes copied. + """ bytes_copied = 0 while bytes_copied < num_bytes or num_bytes == READ_UNTIL_END: size_to_read = min(num_bytes - bytes_copied, self.buffer_size) @@ -109,26 +116,34 @@ class BaseStorage(StoragePaths): class BaseStorageV2(BaseStorage): def initiate_chunked_upload(self): - """ Start a new chunked upload, returning the uuid and any associated storage metadata - """ + """ + Start a new chunked upload, returning the uuid and any associated storage metadata. + """ raise NotImplementedError def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None): - """ Upload the specified amount of data from the given file pointer to the chunked destination - specified, starting at the given offset. Returns the number of bytes uploaded, a new - version of the storage_metadata and an error object (if one occurred or None if none). - Pass length as -1 to upload as much data from the in_fp as possible. - """ + """ + Upload the specified amount of data from the given file pointer to the chunked destination + specified, starting at the given offset. + + Returns the number of bytes uploaded, a new version of the storage_metadata and an error + object (if one occurred or None if none). Pass length as -1 to upload as much data from the + in_fp as possible. + """ raise NotImplementedError def complete_chunked_upload(self, uuid, final_path, storage_metadata): - """ Complete the chunked upload and store the final results in the path indicated. 
+ """ + Complete the chunked upload and store the final results in the path indicated. + Returns nothing. - """ + """ raise NotImplementedError def cancel_chunked_upload(self, uuid, storage_metadata): - """ Cancel the chunked upload and clean up any outstanding partially uploaded data. + """ + Cancel the chunked upload and clean up any outstanding partially uploaded data. + Returns nothing. - """ + """ raise NotImplementedError diff --git a/storage/cloud.py b/storage/cloud.py index 6a44a3b8a..54accf81a 100644 --- a/storage/cloud.py +++ b/storage/cloud.py @@ -107,7 +107,9 @@ class _CloudStorage(BaseStorageV2): self._initialized = True def _debug_key(self, key): - """Used for debugging only.""" + """ + Used for debugging only. + """ orig_meth = key.bucket.connection.make_request def new_meth(*args, **kwargs): @@ -212,8 +214,11 @@ class _CloudStorage(BaseStorageV2): ) def stream_write(self, path, fp, content_type=None, content_encoding=None): - """ Writes the data found in the file-like stream to the given path. Raises an IOError - if the write fails. """ + """ + Writes the data found in the file-like stream to the given path. + + Raises an IOError if the write fails. + """ _, write_error = self._stream_write_internal(path, fp, content_type, content_encoding) if write_error is not None: logger.error("Error when trying to stream_write path `%s`: %s", path, write_error) @@ -228,10 +233,13 @@ class _CloudStorage(BaseStorageV2): cancel_on_error=True, size=filelike.READ_UNTIL_END, ): - """ Writes the data found in the file-like stream to the given path, with optional limit - on size. Note that this method returns a *tuple* of (bytes_written, write_error) and should + """ + Writes the data found in the file-like stream to the given path, with optional limit on + size. Note that this method returns a *tuple* of (bytes_written, write_error) and should. + *not* raise an exception (such as IOError) if a problem uploading occurred. ALWAYS check - the returned tuple on calls to this method. """ + the returned tuple on calls to this method. + """ write_error = None try: @@ -318,7 +326,9 @@ class _CloudStorage(BaseStorageV2): return k.etag[1:-1][:7] def copy_to(self, destination, path): - """ Copies the given path from this storage to the destination storage. """ + """ + Copies the given path from this storage to the destination storage. + """ self._initialize_cloud_conn() # First try to copy directly via boto, but only if the storages are the @@ -458,7 +468,9 @@ class _CloudStorage(BaseStorageV2): @staticmethod def _rechunk(chunk, max_chunk_size): - """ Rechunks the chunk list to meet maximum chunk size restrictions for the storage engine. """ + """ + Rechunks the chunk list to meet maximum chunk size restrictions for the storage engine. + """ if max_chunk_size is None or chunk.length <= max_chunk_size: yield chunk else: @@ -637,11 +649,13 @@ class GoogleCloudStorage(_CloudStorage): cancel_on_error=True, size=filelike.READ_UNTIL_END, ): - """ Writes the data found in the file-like stream to the given path, with optional limit - on size. Note that this method returns a *tuple* of (bytes_written, write_error) and should + """ + Writes the data found in the file-like stream to the given path, with optional limit on + size. Note that this method returns a *tuple* of (bytes_written, write_error) and should. + *not* raise an exception (such as IOError) if a problem uploading occurred. ALWAYS check the returned tuple on calls to this method. 
- """ + """ # Minimum size of upload part size on S3 is 5MB self._initialize_cloud_conn() path = self._init_path(path) @@ -737,15 +751,20 @@ class RadosGWStorage(_CloudStorage): class RHOCSStorage(RadosGWStorage): - """ RHOCSStorage implements storage explicitly via RHOCS. For now, this uses the same protocol - as RadowsGW, but we create a distinct driver for future additional capabilities. - """ + """ + RHOCSStorage implements storage explicitly via RHOCS. + + For now, this uses the same protocol as RadowsGW, but we create a distinct driver for future + additional capabilities. + """ pass class CloudFrontedS3Storage(S3Storage): - """ An S3Storage engine that redirects to CloudFront for all requests outside of AWS. """ + """ + An S3Storage engine that redirects to CloudFront for all requests outside of AWS. + """ def __init__( self, @@ -814,9 +833,10 @@ class CloudFrontedS3Storage(S3Storage): @lru_cache(maxsize=1) def _load_private_key(self, cloudfront_privatekey_filename): - """ Returns the private key, loaded from the config provider, used to sign direct - download URLs to CloudFront. - """ + """ + Returns the private key, loaded from the config provider, used to sign direct download URLs + to CloudFront. + """ if self._context.config_provider is None: return None diff --git a/storage/distributedstorage.py b/storage/distributedstorage.py index 6aa941861..4a4c7c938 100644 --- a/storage/distributedstorage.py +++ b/storage/distributedstorage.py @@ -46,7 +46,9 @@ class DistributedStorage(StoragePaths): @property def locations(self): - """ Returns the names of the locations supported. """ + """ + Returns the names of the locations supported. + """ return list(self._storages.keys()) _get_direct_download_url = _location_aware(BaseStorage.get_direct_download_url) @@ -89,7 +91,9 @@ class DistributedStorage(StoragePaths): return self.proxy.proxy_download_url(download_url) def copy_between(self, path, source_location, destination_location): - """ Copies a file between the source location and the destination location. """ + """ + Copies a file between the source location and the destination location. + """ source_storage = self._storages[source_location] destination_storage = self._storages[destination_location] source_storage.copy_to(destination_storage, path) diff --git a/storage/downloadproxy.py b/storage/downloadproxy.py index cdce1c2f7..4238c7770 100644 --- a/storage/downloadproxy.py +++ b/storage/downloadproxy.py @@ -49,9 +49,10 @@ ACCESS_SCHEMA = { class DownloadProxy(object): - """ Helper class to enable proxying of direct download URLs for storage via the registry's - local NGINX. - """ + """ + Helper class to enable proxying of direct download URLs for storage via the registry's local + NGINX. + """ def __init__(self, app, instance_keys): self.app = app @@ -60,8 +61,9 @@ class DownloadProxy(object): app.add_url_rule("/_storage_proxy_auth", "_storage_proxy_auth", self._validate_proxy_url) def proxy_download_url(self, download_url): - """ Returns a URL to proxy the specified blob download URL. - """ + """ + Returns a URL to proxy the specified blob download URL. + """ # Parse the URL to be downloaded into its components (host, path, scheme). parsed = urlparse(download_url) diff --git a/storage/swift.py b/storage/swift.py index 32703b1ab..dfca14d87 100644 --- a/storage/swift.py +++ b/storage/swift.py @@ -1,6 +1,7 @@ -""" Swift storage driver. +""" +Swift storage driver. 
- Uses: http://docs.openstack.org/developer/swift/overview_large_objects.html +Uses: http://docs.openstack.org/developer/swift/overview_large_objects.html """ import os.path import copy @@ -96,9 +97,10 @@ class SwiftStorage(BaseStorage): ) def _normalize_path(self, object_path): - """ No matter what inputs we get, we are going to return a path without a leading or trailing - '/' - """ + """ + No matter what inputs we get, we are going to return a path without a leading or trailing + '/'. + """ if self._simple_path_concat: return (self._storage_path + object_path).rstrip("/") else: @@ -169,9 +171,11 @@ class SwiftStorage(BaseStorage): @lru_cache(maxsize=1) def _get_root_storage_url(self): - """ Returns the root storage URL for this Swift storage. Note that since this requires a call - to Swift, we cache the result of this function call. - """ + """ + Returns the root storage URL for this Swift storage. + + Note that since this requires a call to Swift, we cache the result of this function call. + """ storage_url, _ = self._get_connection().get_auth() return storage_url @@ -416,9 +420,11 @@ class SwiftStorage(BaseStorage): return bytes_written, updated_metadata def complete_chunked_upload(self, uuid, final_path, storage_metadata): - """ Complete the chunked upload and store the final results in the path indicated. + """ + Complete the chunked upload and store the final results in the path indicated. + Returns nothing. - """ + """ # Check all potentially empty segments against the segments that were uploaded; if the path # is still empty, then we queue the segment to be deleted. if self._context.chunk_cleanup_queue is not None: @@ -453,9 +459,11 @@ class SwiftStorage(BaseStorage): ) def cancel_chunked_upload(self, uuid, storage_metadata): - """ Cancel the chunked upload and clean up any outstanding partially uploaded data. + """ + Cancel the chunked upload and clean up any outstanding partially uploaded data. + Returns nothing. - """ + """ if not self._context.chunk_cleanup_queue: return diff --git a/test/clients/client.py b/test/clients/client.py index 013b73ab0..93f9ca695 100644 --- a/test/clients/client.py +++ b/test/clients/client.py @@ -10,40 +10,57 @@ FileCopy = namedtuple("FileCopy", ["source", "destination"]) @add_metaclass(ABCMeta) class Client(object): - """ Client defines the interface for all clients being tested. """ - - @abstractmethod - def setup_client(self, registry_host, verify_tls): - """ Returns the commands necessary to setup the client inside the VM. + """ + Client defines the interface for all clients being tested. """ + @abstractmethod + def setup_client(self, registry_host, verify_tls): + """ + Returns the commands necessary to setup the client inside the VM. + """ + @abstractmethod def populate_test_image(self, registry_host, namespace, name): - """ Returns the commands necessary to populate the test image. """ + """ + Returns the commands necessary to populate the test image. + """ @abstractmethod def print_version(self): - """ Returns the commands necessary to print the version of the client. """ + """ + Returns the commands necessary to print the version of the client. + """ @abstractmethod def login(self, registry_host, username, password): - """ Returns the commands necessary to login. """ + """ + Returns the commands necessary to login. + """ @abstractmethod def push(self, registry_host, namespace, name): - """ Returns the commands necessary to test pushing. """ + """ + Returns the commands necessary to test pushing. 
+ """ @abstractmethod def pre_pull_cleanup(self, registry_host, namespace, name): - """ Returns the commands necessary to cleanup before pulling. """ + """ + Returns the commands necessary to cleanup before pulling. + """ @abstractmethod def pull(self, registry_host, namespace, name): - """ Returns the commands necessary to test pulling. """ + """ + Returns the commands necessary to test pulling. + """ @abstractmethod def verify(self, registry_host, namespace, name): - """ Returns the commands necessary to verify the pulled image. """ + """ + Returns the commands necessary to verify the pulled image. + """ class DockerClient(Client): diff --git a/test/clients/clients_test.py b/test/clients/clients_test.py index ab56c4b1d..fa41329af 100644 --- a/test/clients/clients_test.py +++ b/test/clients/clients_test.py @@ -169,9 +169,10 @@ def _indent(text, amount): def scp_to_vagrant(source, destination): - """scp_to_vagrant copies the file from source to destination in the default - vagrant box without vagrant scp, which may fail on some coreos boxes. - """ + """ + scp_to_vagrant copies the file from source to destination in the default vagrant box without + vagrant scp, which may fail on some coreos boxes. + """ config = _run_and_wait(["vagrant", "ssh-config"]) config_lines = config.split("\n") params = ["scp"] diff --git a/test/conftest.py b/test/conftest.py index b3205fdcc..1680053e5 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -5,17 +5,17 @@ import pytest def pytest_collection_modifyitems(config, items): """ - This adds a pytest marker that consistently shards all collected tests. + This adds a pytest marker that consistently shards all collected tests. - Use it like the following: - $ py.test -m shard_1_of_3 - $ py.test -m shard_2_of_3 - $ py.test -m shard_3_of_3 + Use it like the following: + $ py.test -m shard_1_of_3 + $ py.test -m shard_2_of_3 + $ py.test -m shard_3_of_3 - This code was originally adopted from the MIT-licensed ansible/molecule@9e7b79b: - Copyright (c) 2015-2018 Cisco Systems, Inc. - Copyright (c) 2018 Red Hat, Inc. - """ + This code was originally adopted from the MIT-licensed ansible/molecule@9e7b79b: + Copyright (c) 2015-2018 Cisco Systems, Inc. + Copyright (c) 2018 Red Hat, Inc. + """ mark_opt = config.getoption("-m") if not mark_opt.startswith("shard_"): return diff --git a/test/fixtures.py b/test/fixtures.py index 5aa15244f..d6b5c12c1 100644 --- a/test/fixtures.py +++ b/test/fixtures.py @@ -41,10 +41,13 @@ INIT_DB_PATH = 0 @pytest.fixture(scope="session") def init_db_path(tmpdir_factory): - """ Creates a new database and appropriate configuration. Note that the initial database - is created *once* per session. In the non-full-db-test case, the database_uri fixture - makes a copy of the SQLite database file on disk and passes a new copy to each test. - """ + """ + Creates a new database and appropriate configuration. + + Note that the initial database is created *once* per session. In the non-full-db-test case, the + database_uri fixture makes a copy of the SQLite database file on disk and passes a new copy to + each test. + """ # NOTE: We use a global here because pytest runs this code multiple times, due to the fixture # being imported instead of being in a conftest. Moving to conftest has its own issues, and this # call is quite slow, so we simply cache it here. @@ -61,10 +64,12 @@ def _init_db_path(tmpdir_factory): def _init_db_path_real_db(db_uri): - """ Initializes a real database for testing by populating it from scratch. 
Note that this does
-        *not* add the tables (merely data). Callers must have migrated the database before calling
-        the test suite.
-    """
+    """
+    Initializes a real database for testing by populating it from scratch. Note that this does
+    *not* add the tables (merely data). Callers must have migrated the database before calling
+    the test suite.
+    """
    configure(
        {
            "DB_URI": db_uri,
@@ -80,9 +85,10 @@ def _init_db_path_real_db(db_uri):
 
 def _init_db_path_sqlite(tmpdir_factory):
-    """ Initializes a SQLite database for testing by populating it from scratch and placing it into
-        a temp directory file.
-    """
+    """
+    Initializes a SQLite database for testing by populating it from scratch and placing it into a
+    temp directory file.
+    """
    sqlitedbfile = str(tmpdir_factory.mktemp("data").join("test.db"))
    sqlitedb = "sqlite:///{0}".format(sqlitedbfile)
    conf = {
@@ -108,11 +114,13 @@ def _init_db_path_sqlite(tmpdir_factory):
 
 @pytest.yield_fixture()
 def database_uri(monkeypatch, init_db_path, sqlitedb_file):
-    """ Returns the database URI to use for testing. In the SQLite case, a new, distinct copy of
-        the SQLite database is created by copying the initialized database file (sqlitedb_file)
-        on a per-test basis. In the non-SQLite case, a reference to the existing database URI is
-        returned.
-    """
+    """
+    Returns the database URI to use for testing.
+
+    In the SQLite case, a new, distinct copy of the SQLite database is created by copying the
+    initialized database file (sqlitedb_file) on a per-test basis. In the non-SQLite case, a
+    reference to the existing database URI is returned.
+    """
    if os.environ.get("TEST_DATABASE_URI"):
        db_uri = os.environ["TEST_DATABASE_URI"]
        monkeypatch.setenv("DB_URI", db_uri)
@@ -134,7 +142,9 @@ def database_uri(monkeypatch, init_db_path, sqlitedb_file):
 
 @pytest.fixture()
 def sqlitedb_file(tmpdir):
-    """ Returns the path at which the initialized, golden SQLite database file will be placed. """
+    """
+    Returns the path at which the initialized, golden SQLite database file will be placed.
+    """
    test_db_file = tmpdir.mkdir("quaydb").join("test.db")
    return str(test_db_file)
 
@@ -145,7 +155,9 @@ def _create_transaction(db):
 
 @pytest.fixture()
 def appconfig(database_uri):
-    """ Returns application configuration for testing that references the proper database URI. """
+    """
+    Returns application configuration for testing that references the proper database URI.
+    """
    conf = {
        "TESTING": True,
        "DEBUG": True,
@@ -177,7 +189,9 @@ FRAME_NAME_INDEX = 3
 
 @pytest.fixture()
 def initialized_db(appconfig):
-    """ Configures the database for the database found in the appconfig. """
+    """
+    Configures the database using the configuration found in the appconfig.
+    """
    under_test_real_database = bool(os.environ.get("TEST_DATABASE_URI"))
 
    # Configure the database.
@@ -271,7 +285,9 @@ def initialized_db(appconfig):
 
 @pytest.fixture()
 def app(appconfig, initialized_db):
-    """ Used by pytest-flask plugin to inject a custom app instance for testing. """
+    """
+    Used by the pytest-flask plugin to inject a custom app instance for testing.
+    """
    app = Flask(__name__)
    login_manager = LoginManager(app)
diff --git a/test/helpers.py b/test/helpers.py
index cf1a85520..d11d31604 100644
--- a/test/helpers.py
+++ b/test/helpers.py
@@ -7,9 +7,10 @@ from data.database import LogEntryKind, LogEntry3
 
 class assert_action_logged(object):
-    """ Specialized assertion for ensuring that a log entry of a particular kind was added under the
-        context of this call.
- """ + """ + Specialized assertion for ensuring that a log entry of a particular kind was added under the + context of this call. + """ def __init__(self, log_kind): self.log_kind = log_kind @@ -37,15 +38,15 @@ _LIVESERVER_TIMEOUT = 5 @contextmanager def liveserver_app(flask_app, port): """ - Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py + Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py. - Runs the given Flask app as a live web server locally, on the given port, starting it - when called and terminating after the yield. + Runs the given Flask app as a live web server locally, on the given port, starting it + when called and terminating after the yield. - Usage: - with liveserver_app(flask_app, port): - # Code that makes use of the app. - """ + Usage: + with liveserver_app(flask_app, port): + # Code that makes use of the app. + """ shared = {} def _can_ping_server(): diff --git a/test/registry/fixtures.py b/test/registry/fixtures.py index c6b772bc4..73cdcd671 100644 --- a/test/registry/fixtures.py +++ b/test/registry/fixtures.py @@ -206,13 +206,14 @@ def app_reloader(request, liveserver, registry_server_executor): class FeatureFlagValue(object): - """ Helper object which temporarily sets the value of a feature flag. + """ + Helper object which temporarily sets the value of a feature flag. - Usage: + Usage: - with FeatureFlagValue('ANONYMOUS_ACCESS', False, registry_server_executor.on(liveserver)): - ... Features.ANONYMOUS_ACCESS is False in this context ... - """ + with FeatureFlagValue('ANONYMOUS_ACCESS', False, registry_server_executor.on(liveserver)): + ... Features.ANONYMOUS_ACCESS is False in this context ... + """ def __init__(self, feature_flag, test_value, executor): self.feature_flag = feature_flag @@ -230,13 +231,14 @@ class FeatureFlagValue(object): class ConfigChange(object): - """ Helper object which temporarily sets the value of a config key. + """ + Helper object which temporarily sets the value of a config key. - Usage: + Usage: - with ConfigChange('SOMEKEY', 'value', registry_server_executor.on(liveserver)): - ... app.config['SOMEKEY'] is 'value' in this context ... - """ + with ConfigChange('SOMEKEY', 'value', registry_server_executor.on(liveserver)): + ... app.config['SOMEKEY'] is 'value' in this context ... + """ def __init__(self, config_key, test_value, executor, liveserver): self.config_key = config_key diff --git a/test/registry/liveserverfixture.py b/test/registry/liveserverfixture.py index e9f6b40bc..f0fe57e28 100644 --- a/test/registry/liveserverfixture.py +++ b/test/registry/liveserverfixture.py @@ -16,10 +16,11 @@ from flask.blueprints import Blueprint class liveFlaskServer(object): - """ Helper class for spawning a live Flask server for testing. + """ + Helper class for spawning a live Flask server for testing. - Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py#L421 - """ + Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py#L421 + """ def __init__(self, app, port_value): self.app = app @@ -28,8 +29,8 @@ class liveFlaskServer(object): def get_server_url(self): """ - Return the url of the test server - """ + Return the url of the test server. + """ return "http://localhost:%s" % self._port_value.value def terminate_live_server(self): @@ -111,10 +112,10 @@ class liveFlaskServer(object): def _get_server_address(self): """ - Gets the server address used to test the connection with a socket. 
-        Respects both the LIVESERVER_PORT config value and overriding
-        get_server_url()
-        """
+        Gets the server address used to test the connection with a socket.
+
+        Respects both the LIVESERVER_PORT config value and overriding get_server_url().
+        """
        parts = urlparse(self.get_server_url())
        host = parts.hostname
 
@@ -132,7 +133,9 @@ class liveFlaskServer(object):
 
 class LiveFixtureServerSession(object):
-    """ Helper class for calling the live server via a single requests Session. """
+    """
+    Helper class for calling the live server via a single requests Session.
+    """
 
    def __init__(self, base_url):
        self.base_url = base_url
@@ -158,27 +161,34 @@ class LiveFixtureServerSession(object):
 
 class LiveFixtureServer(object):
-    """ Helper for interacting with a live server. """
+    """
+    Helper for interacting with a live server.
+    """
 
    def __init__(self, url):
        self.url = url
 
    @contextmanager
    def session(self):
-        """ Yields a session for speaking to the live server. """
+        """
+        Yields a session for speaking to the live server.
+        """
        yield LiveFixtureServerSession(self.url)
 
    def new_session(self):
-        """ Returns a new session for speaking to the live server. """
+        """
+        Returns a new session for speaking to the live server.
+        """
        return LiveFixtureServerSession(self.url)
 
 
 @pytest.fixture(scope="function")
 def liveserver(liveserver_app):
-    """ Runs a live Flask server for the app for the duration of the test.
+    """
+    Runs a live Flask server for the app for the duration of the test.
 
-    Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py#L421
-    """
+    Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py#L421
+    """
    context = liveserver_app.test_request_context()
    context.push()
 
@@ -195,51 +205,58 @@ def liveserver(liveserver_app):
 
 @pytest.fixture(scope="function")
 def liveserver_session(liveserver, liveserver_app):
-    """ Fixtures which instantiates a liveserver and returns a single session for
-        interacting with that server.
-    """
+    """
+    Fixture which instantiates a liveserver and returns a single session for interacting with that
+    server.
+    """
    return LiveFixtureServerSession(liveserver.url)
 
 
 class LiveServerExecutor(object):
-    """ Helper class which can be used to register functions to be executed in the
-        same process as the live server. This is necessary because the live server
-        runs in a different process and, therefore, in order to execute state changes
-        outside of the server's normal flows (i.e. via code), it must be executed
-        *in-process* via an HTTP call. The LiveServerExecutor class abstracts away
-        all the setup for this process.
+    """
+    Helper class which can be used to register functions to be executed in the same process as the
+    live server. This is necessary because the live server runs in a different process and,
+    therefore, in order to execute state changes outside of the server's normal flows (i.e. via
+    code), it must be executed *in-process* via an HTTP call.
 
-    Usage:
-        def _perform_operation(first_param, second_param):
-            ... do some operation in the app ...
-            return 'some value'
+    The LiveServerExecutor class abstracts away all the setup for this process.
 
-        @pytest.fixture(scope="session")
-        def my_server_executor():
-            executor = LiveServerExecutor()
-            executor.register('performoperation', _perform_operation)
-            return executor
+    Usage:
+        def _perform_operation(first_param, second_param):
+            ... do some operation in the app ...
+            return 'some value'
 
-        @pytest.fixture()
-        def liveserver_app(app, my_server_executor):
-            ...
other app setup here ... - my_server_executor.apply_blueprint_to_app(app) - return app + @pytest.fixture(scope="session") + def my_server_executor(): + executor = LiveServerExecutor() + executor.register('performoperation', _perform_operation) + return executor - def test_mytest(liveserver, my_server_executor): - # Invokes 'performoperation' in the liveserver's process. - my_server_executor.on(liveserver).performoperation('first', 'second') - """ + @pytest.fixture() + def liveserver_app(app, my_server_executor): + ... other app setup here ... + my_server_executor.apply_blueprint_to_app(app) + return app + + def test_mytest(liveserver, my_server_executor): + # Invokes 'performoperation' in the liveserver's process. + my_server_executor.on(liveserver).performoperation('first', 'second') + """ def __init__(self): self.funcs = {} def register(self, fn_name, fn): - """ Registers the given function under the given name. """ + """ + Registers the given function under the given name. + """ self.funcs[fn_name] = fn def apply_blueprint_to_app(self, app): - """ Applies a blueprint to the app, to support invocation from this executor. """ + """ + Applies a blueprint to the app, to support invocation from this executor. + """ testbp = Blueprint("testbp", __name__) def build_invoker(fn_name, fn): @@ -256,11 +273,15 @@ class LiveServerExecutor(object): app.register_blueprint(testbp, url_prefix="/__test") def on(self, server): - """ Returns an invoker for the given live server. """ + """ + Returns an invoker for the given live server. + """ return liveServerExecutorInvoker(self.funcs, server) def on_session(self, server_session): - """ Returns an invoker for the given live server session. """ + """ + Returns an invoker for the given live server session. + """ return liveServerExecutorInvoker(self.funcs, server_session) diff --git a/test/registry/protocol_fixtures.py b/test/registry/protocol_fixtures.py index 7252222fd..828885ada 100644 --- a/test/registry/protocol_fixtures.py +++ b/test/registry/protocol_fixtures.py @@ -16,7 +16,9 @@ from test.registry.protocol_v2 import V2Protocol @pytest.fixture(scope="session") def basic_images(): - """ Returns basic images for push and pull testing. """ + """ + Returns basic images for push and pull testing. + """ # Note: order is from base layer down to leaf. parent_bytes = layer_bytes_for_contents("parent contents") image_bytes = layer_bytes_for_contents("some contents") @@ -28,7 +30,9 @@ def basic_images(): @pytest.fixture(scope="session") def unicode_images(): - """ Returns basic images for push and pull testing that contain unicode in the image metadata. """ + """ + Returns basic images for push and pull testing that contain unicode in the image metadata. + """ # Note: order is from base layer down to leaf. parent_bytes = layer_bytes_for_contents("parent contents") image_bytes = layer_bytes_for_contents("some contents") @@ -45,7 +49,9 @@ def unicode_images(): @pytest.fixture(scope="session") def different_images(): - """ Returns different basic images for push and pull testing. """ + """ + Returns different basic images for push and pull testing. + """ # Note: order is from base layer down to leaf. parent_bytes = layer_bytes_for_contents("different parent contents") image_bytes = layer_bytes_for_contents("some different contents") @@ -57,7 +63,9 @@ def different_images(): @pytest.fixture(scope="session") def sized_images(): - """ Returns basic images (with sizes) for push and pull testing. """ + """ + Returns basic images (with sizes) for push and pull testing. 
+ """ # Note: order is from base layer down to leaf. parent_bytes = layer_bytes_for_contents("parent contents", mode="") image_bytes = layer_bytes_for_contents("some contents", mode="") @@ -82,7 +90,9 @@ def sized_images(): @pytest.fixture(scope="session") def multi_layer_images(): - """ Returns complex images (with sizes) for push and pull testing. """ + """ + Returns complex images (with sizes) for push and pull testing. + """ # Note: order is from base layer down to leaf. layer1_bytes = layer_bytes_for_contents( "layer 1 contents", mode="", other_files={"file1": "from-layer-1",} @@ -145,7 +155,9 @@ def multi_layer_images(): @pytest.fixture(scope="session") def remote_images(): - """ Returns images with at least one remote layer for push and pull testing. """ + """ + Returns images with at least one remote layer for push and pull testing. + """ # Note: order is from base layer down to leaf. remote_bytes = layer_bytes_for_contents("remote contents") parent_bytes = layer_bytes_for_contents("parent contents") @@ -159,7 +171,9 @@ def remote_images(): @pytest.fixture(scope="session") def images_with_empty_layer(): - """ Returns images for push and pull testing that contain an empty layer. """ + """ + Returns images for push and pull testing that contain an empty layer. + """ # Note: order is from base layer down to leaf. parent_bytes = layer_bytes_for_contents("parent contents") empty_bytes = layer_bytes_for_contents("", empty=True) @@ -177,7 +191,9 @@ def images_with_empty_layer(): @pytest.fixture(scope="session") def unicode_emoji_images(): - """ Returns basic images for push and pull testing that contain unicode in the image metadata. """ + """ + Returns basic images for push and pull testing that contain unicode in the image metadata. + """ # Note: order is from base layer down to leaf. parent_bytes = layer_bytes_for_contents("parent contents") image_bytes = layer_bytes_for_contents("some contents") diff --git a/test/registry/protocol_v1.py b/test/registry/protocol_v1.py index fb43498bc..767d9ac1e 100644 --- a/test/registry/protocol_v1.py +++ b/test/registry/protocol_v1.py @@ -15,7 +15,9 @@ from test.registry.protocols import ( @unique class V1ProtocolSteps(Enum): - """ Defines the various steps of the protocol, for matching failures. """ + """ + Defines the various steps of the protocol, for matching failures. + """ PUT_IMAGES = "put-images" GET_IMAGES = "get-images" diff --git a/test/registry/protocol_v2.py b/test/registry/protocol_v2.py index 47bfcf1e8..f24ae1609 100644 --- a/test/registry/protocol_v2.py +++ b/test/registry/protocol_v2.py @@ -25,7 +25,9 @@ from util.bytes import Bytes @unique class V2ProtocolSteps(Enum): - """ Defines the various steps of the protocol, for matching failures. """ + """ + Defines the various steps of the protocol, for matching failures. + """ AUTH = "auth" BLOB_HEAD_CHECK = "blob-head-check" @@ -127,10 +129,10 @@ class V2Protocol(RegistryProtocol): def auth(self, session, credentials, namespace, repo_name, scopes=None, expected_failure=None): """ - Performs the V2 Auth flow, returning the token (if any) and the response. + Performs the V2 Auth flow, returning the token (if any) and the response. 
- Spec: https://docs.docker.com/registry/spec/auth/token/ - """ + Spec: https://docs.docker.com/registry/spec/auth/token/ + """ scopes = scopes or [] auth = None diff --git a/test/registry/protocols.py b/test/registry/protocols.py index 85b64ccd7..e88fe6308 100644 --- a/test/registry/protocols.py +++ b/test/registry/protocols.py @@ -48,7 +48,9 @@ def layer_bytes_for_contents(contents, mode="|gz", other_files=None, empty=False @unique class Failures(Enum): - """ Defines the various forms of expected failure. """ + """ + Defines the various forms of expected failure. + """ UNAUTHENTICATED = "unauthenticated" UNAUTHORIZED = "unauthorized" @@ -95,13 +97,17 @@ class ProtocolOptions(object): @add_metaclass(ABCMeta) class RegistryProtocol(object): - """ Interface for protocols. """ + """ + Interface for protocols. + """ FAILURE_CODES = {} @abstractmethod def login(self, session, username, password, scopes, expect_success): - """ Performs the login flow with the given credentials, over the given scopes. """ + """ + Performs the login flow with the given credentials, over the given scopes. + """ @abstractmethod def pull( @@ -115,9 +121,10 @@ class RegistryProtocol(object): expected_failure=None, options=None, ): - """ Pulls the given tag via the given session, using the given credentials, and - ensures the given images match. - """ + """ + Pulls the given tag via the given session, using the given credentials, and ensures the + given images match. + """ @abstractmethod def push( @@ -131,9 +138,10 @@ class RegistryProtocol(object): expected_failure=None, options=None, ): - """ Pushes the specified images as the given tag via the given session, using - the given credentials. - """ + """ + Pushes the specified images as the given tag via the given session, using the given + credentials. + """ @abstractmethod def delete( @@ -146,7 +154,9 @@ class RegistryProtocol(object): expected_failure=None, options=None, ): - """ Deletes some tags. """ + """ + Deletes some tags. + """ def repo_name(self, namespace, repo_name): if namespace: diff --git a/test/registry/registry_tests.py b/test/registry/registry_tests.py index 576043931..cfc421e8f 100644 --- a/test/registry/registry_tests.py +++ b/test/registry/registry_tests.py @@ -3266,8 +3266,8 @@ def test_repository_states( app_reloader, ): """ - Verify the push behavior of the Repository dependent upon its state. - """ + Verify the push behavior of the Repository dependent upon its state. + """ namespace = "devtable" repo = "staterepo" tag = "latest" diff --git a/test/registry_tests.py b/test/registry_tests.py index 53799a1b1..cf00e80cb 100644 --- a/test/registry_tests.py +++ b/test/registry_tests.py @@ -153,8 +153,9 @@ app.register_blueprint(testbp, url_prefix="/__test") class TestFeature(object): - """ Helper object which temporarily sets the value of a feature flag. - """ + """ + Helper object which temporarily sets the value of a feature flag. + """ def __init__(self, test_case, feature_flag, test_value): self.test_case = test_case @@ -187,8 +188,11 @@ _JWK = RSAKey(key=RSA.generate(2048)) class FailureCodes: - """ Defines tuples representing the HTTP status codes for various errors. The tuple - is defined as ('errordescription', V1HTTPStatusCode, V2HTTPStatusCode). """ + """ + Defines tuples representing the HTTP status codes for various errors. + + The tuple is defined as ('errordescription', V1HTTPStatusCode, V2HTTPStatusCode). 
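
Each FailureCodes tuple carries its V1 and V2 status codes positionally, so resolving one is an index by protocol version. A small sketch (the helper name is illustrative; _get_expected_code later in this diff does the same with its own guard):

    def expected_code_for(failure, version, success_code=200):
        # failure looks like ('unauthenticated', 401, 401); version is 1 or 2
        if not failure:
            return success_code
        return failure[version]
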
+ """ UNAUTHENTICATED = ("unauthenticated", 401, 401) UNAUTHORIZED = ("unauthorized", 403, 401) @@ -199,8 +203,12 @@ class FailureCodes: def _get_expected_code(expected_failure, version, success_status_code): - """ Returns the HTTP status code for the expected failure under the specified protocol version - (1 or 2). If none, returns the success status code. """ + """ + Returns the HTTP status code for the expected failure under the specified protocol version (1 or + 2). + + If none, returns the success status code. + """ if not expected_failure: return success_status_code @@ -1404,7 +1412,9 @@ class V1RegistryTests( RegistryTestCaseMixin, LiveServerTestCase, ): - """ Tests for V1 registry. """ + """ + Tests for V1 registry. + """ def test_search(self): # Public @@ -1517,7 +1527,9 @@ class V2RegistryTests( RegistryTestCaseMixin, LiveServerTestCase, ): - """ Tests for V2 registry. """ + """ + Tests for V2 registry. + """ def test_proper_auth_response(self): response = self.conduct( @@ -2126,10 +2138,14 @@ class V1PushV2PullRegistryTests( RegistryTestCaseMixin, LiveServerTestCase, ): - """ Tests for V1 push, V2 pull registry. """ + """ + Tests for V1 push, V2 pull registry. + """ def test_multiple_tag_with_pull(self): - """ Tagging the same exact V1 tag multiple times and then pulling with V2. """ + """ + Tagging the same exact V1 tag multiple times and then pulling with V2. + """ images = self._get_default_images() self.do_push("devtable", "newrepo", "devtable", "password", images=images) @@ -2146,11 +2162,15 @@ class V1PullV2PushRegistryTests( RegistryTestCaseMixin, LiveServerTestCase, ): - """ Tests for V1 pull, V2 push registry. """ + """ + Tests for V1 pull, V2 push registry. + """ class TorrentTestMixin(V2RegistryPullMixin): - """ Mixin of tests for torrent support. """ + """ + Mixin of tests for torrent support. + """ def get_torrent(self, blobsum): # Enable direct download URLs in fake storage. @@ -2208,7 +2228,9 @@ class TorrentTestMixin(V2RegistryPullMixin): class TorrentV1PushTests( RegistryTestCaseMixin, TorrentTestMixin, V1RegistryPushMixin, LiveServerTestCase ): - """ Torrent tests via V1 push. """ + """ + Torrent tests via V1 push. + """ pass @@ -2216,13 +2238,17 @@ class TorrentV1PushTests( class TorrentV2PushTests( RegistryTestCaseMixin, TorrentTestMixin, V2RegistryPushMixin, LiveServerTestCase ): - """ Torrent tests via V2 push. """ + """ + Torrent tests via V2 push. + """ pass class SquashingTests(RegistryTestCaseMixin, V1RegistryPushMixin, LiveServerTestCase): - """ Tests for registry squashing. """ + """ + Tests for registry squashing. + """ def get_squashed_image(self, auth="sig"): response = self.conduct("GET", "/c1/squash/devtable/newrepo/latest", auth=auth) @@ -2380,7 +2406,9 @@ class SquashingTests(RegistryTestCaseMixin, V1RegistryPushMixin, LiveServerTestC class LoginTests(object): - """ Generic tests for registry login. """ + """ + Generic tests for registry login. + """ def test_invalid_username_knownrepo(self): self.do_login( @@ -2433,7 +2461,9 @@ class LoginTests(object): class V1LoginTests( V1RegistryLoginMixin, LoginTests, RegistryTestCaseMixin, BaseRegistryMixin, LiveServerTestCase ): - """ Tests for V1 login. """ + """ + Tests for V1 login. + """ pass # No additional tests. @@ -2441,7 +2471,9 @@ class V1LoginTests( class V2LoginTests( V2RegistryLoginMixin, LoginTests, RegistryTestCaseMixin, BaseRegistryMixin, LiveServerTestCase ): - """ Tests for V2 login. """ + """ + Tests for V2 login. 
+ """ def do_logincheck( self, username, password, scope, expected_actions=[], expect_success=True, **kwargs diff --git a/test/test_api_usage.py b/test/test_api_usage.py index 80c805ad8..0021b7329 100644 --- a/test/test_api_usage.py +++ b/test/test_api_usage.py @@ -186,8 +186,12 @@ CSRF_TOKEN_KEY = "_csrf_token" class AppConfigChange(object): - """ AppConfigChange takes a dictionary that overrides the global app config - for a given block of code. The values are restored on exit. """ + """ + AppConfigChange takes a dictionary that overrides the global app config for a given block of + code. + + The values are restored on exit. + """ def __init__(self, changes=None): self._changes = changes or {} diff --git a/test/test_external_jwt_authn.py b/test/test_external_jwt_authn.py index 292c3b540..1c9389f4e 100644 --- a/test/test_external_jwt_authn.py +++ b/test/test_external_jwt_authn.py @@ -22,13 +22,14 @@ _PORT_NUMBER = 5001 @contextmanager def fake_jwt(requires_email=True): - """ Context manager which instantiates and runs a webserver with a fake JWT implementation, - until the result is yielded. + """ + Context manager which instantiates and runs a webserver with a fake JWT implementation, until + the result is yielded. - Usage: - with fake_jwt() as jwt_auth: - # Make jwt_auth requests. - """ + Usage: + with fake_jwt() as jwt_auth: + # Make jwt_auth requests. + """ jwt_app, port, public_key = _create_app(requires_email) server_url = "http://" + jwt_app.config["SERVER_HOSTNAME"] @@ -172,7 +173,9 @@ def _create_app(emails=True): class JWTAuthTestMixin: - """ Mixin defining all the JWT auth tests. """ + """ + Mixin defining all the JWT auth tests. + """ maxDiff = None @@ -314,7 +317,9 @@ class JWTAuthTestMixin: class JWTAuthNoEmailTestCase(JWTAuthTestMixin, unittest.TestCase): - """ Test cases for JWT auth, with emails disabled. """ + """ + Test cases for JWT auth, with emails disabled. + """ @property def emails(self): @@ -322,7 +327,9 @@ class JWTAuthNoEmailTestCase(JWTAuthTestMixin, unittest.TestCase): class JWTAuthTestCase(JWTAuthTestMixin, unittest.TestCase): - """ Test cases for JWT auth, with emails enabled. """ + """ + Test cases for JWT auth, with emails enabled. + """ @property def emails(self): diff --git a/test/test_keystone_auth.py b/test/test_keystone_auth.py index 726672020..c344b47f7 100644 --- a/test/test_keystone_auth.py +++ b/test/test_keystone_auth.py @@ -16,13 +16,14 @@ _PORT_NUMBER = 5001 @contextmanager def fake_keystone(version=3, requires_email=True): - """ Context manager which instantiates and runs a webserver with a fake Keystone implementation, - until the result is yielded. + """ + Context manager which instantiates and runs a webserver with a fake Keystone implementation, + until the result is yielded. - Usage: - with fake_keystone(version) as keystone_auth: - # Make keystone_auth requests. - """ + Usage: + with fake_keystone(version) as keystone_auth: + # Make keystone_auth requests. + """ keystone_app, port = _create_app(requires_email) server_url = "http://" + keystone_app.config["SERVER_HOSTNAME"] endpoint_url = server_url + "/v3" diff --git a/test/test_secscan.py b/test/test_secscan.py index b12f38f15..c89f4255b 100644 --- a/test/test_secscan.py +++ b/test/test_secscan.py @@ -78,7 +78,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertTrue(security_scanner.has_layer(security_scanner.layer_id(parent))) def test_get_layer(self): - """ Test for basic retrieval of layers from the security scanner. 
""" + """ + Test for basic retrieval of layers from the security scanner. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True ) @@ -97,7 +99,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertEquals(result["Layer"]["Name"], security_scanner.layer_id(layer)) def test_analyze_layer_nodirectdownload_success(self): - """ Tests analyzing a layer when direct download is disabled. """ + """ + Tests analyzing a layer when direct download is disabled. + """ # Disable direct download in fake storage. storage.put_content(["local_us"], "supports_direct_download", "false") @@ -137,7 +141,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, True, 1) def test_analyze_layer_success(self): - """ Tests that analyzing a layer successfully marks it as analyzed. """ + """ + Tests that analyzing a layer successfully marks it as analyzed. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -153,7 +159,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, True, 1) def test_analyze_layer_failure(self): - """ Tests that failing to analyze a layer (because it 422s) marks it as analyzed but failed. """ + """ + Tests that failing to analyze a layer (because it 422s) marks it as analyzed but failed. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -171,7 +179,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, False, 1) def test_analyze_layer_internal_error(self): - """ Tests that failing to analyze a layer (because it 500s) marks it as not analyzed. """ + """ + Tests that failing to analyze a layer (because it 500s) marks it as not analyzed. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -190,7 +200,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, False, -1) def test_analyze_layer_error(self): - """ Tests that failing to analyze a layer (because it 400s) marks it as analyzed but failed. """ + """ + Tests that failing to analyze a layer (because it 400s) marks it as analyzed but failed. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -211,7 +223,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, False, 1) def test_analyze_layer_unexpected_status(self): - """ Tests that a response from a scanner with an unexpected status code fails correctly. """ + """ + Tests that a response from a scanner with an unexpected status code fails correctly. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -233,7 +247,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, False, -1) def test_analyze_layer_missing_parent_handled(self): - """ Tests that a missing parent causes an automatic reanalysis, which succeeds. """ + """ + Tests that a missing parent causes an automatic reanalysis, which succeeds. 
+ """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -265,9 +281,10 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, True, 1) def test_analyze_layer_invalid_parent(self): - """ Tests that trying to reanalyze a parent that is invalid causes the layer to be marked - as analyzed, but failed. - """ + """ + Tests that trying to reanalyze a parent that is invalid causes the layer to be marked as + analyzed, but failed. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -302,9 +319,10 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, False, 1) def test_analyze_layer_unsupported_parent(self): - """ Tests that attempting to analyze a layer whose parent is unanalyzable, results in the layer + """ + Tests that attempting to analyze a layer whose parent is unanalyzable, results in the layer being marked as analyzed, but failed. - """ + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True @@ -324,7 +342,9 @@ class TestSecurityScanner(unittest.TestCase): self.assertAnalyzed(layer, security_scanner, False, 1) def test_analyze_layer_missing_storage(self): - """ Tests trying to analyze a layer with missing storage. """ + """ + Tests trying to analyze a layer with missing storage. + """ layer = model.tag.get_tag_image( ADMIN_ACCESS_USER, SIMPLE_REPO, "latest", include_storage=True diff --git a/tools/invoices.py b/tools/invoices.py index 4332a4cb1..3bad8c03d 100644 --- a/tools/invoices.py +++ b/tools/invoices.py @@ -47,25 +47,27 @@ def _paginate_list(stripe_klass, num_days, **incoming_kwargs): def list_charges(num_days): - """ List all charges that have occurred in the past specified number of days. - """ + """ + List all charges that have occurred in the past specified number of days. + """ for charge in _paginate_list(stripe.Charge, num_days, expand=["data.invoice"]): yield charge def list_refunds(num_days): - """ List all refunds that have occurred in the past specified number of days. - """ + """ + List all refunds that have occurred in the past specified number of days. + """ expand = ["data.charge", "data.charge.invoice"] for refund in _paginate_list(stripe.Refund, num_days, expand=expand): yield refund def format_refund(refund): - """ Generator which will return one or more line items corresponding to the - specified refund. - """ + """ + Generator which will return one or more line items corresponding to the specified refund. + """ refund_period_start = None refund_period_end = None invoice_iterable = expand_invoice(refund.charge.invoice, refund.charge.amount) @@ -115,9 +117,10 @@ def expand_invoice(invoice, total_amount): def format_charge(charge): - """ Generator which will return one or more line items corresponding to the - line items for this charge. - """ + """ + Generator which will return one or more line items corresponding to the line items for this + charge. + """ ch_status = "Paid" if charge.failure_code is not None: ch_status = "Failed" @@ -228,9 +231,8 @@ def format_charge(charge): class _UnicodeWriter(object): """ - A CSV writer which will write rows to CSV file "f", - which is encoded in the given encoding. - """ + A CSV writer which will write rows to CSV file "f", which is encoded in the given encoding. 
+ """ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): # Redirect output to a queue @@ -259,9 +261,11 @@ class _UnicodeWriter(object): def _merge_row_streams(*row_generators): - """ Descending merge sort of multiple row streams in the form of (tx_date, [row data]). - Works recursively on an arbitrary number of row streams. - """ + """ + Descending merge sort of multiple row streams in the form of (tx_date, [row data]). + + Works recursively on an arbitrary number of row streams. + """ if len(row_generators) == 1: for only_candidate in row_generators[0]: yield only_candidate diff --git a/util/__init__.py b/util/__init__.py index abec1f5ec..1833eb310 100644 --- a/util/__init__.py +++ b/util/__init__.py @@ -1,14 +1,16 @@ def get_app_url(config): - """ Returns the application's URL, based on the given config. """ + """ + Returns the application's URL, based on the given config. + """ return "%s://%s" % (config["PREFERRED_URL_SCHEME"], config["SERVER_HOSTNAME"]) def slash_join(*args): """ - Joins together strings and guarantees there is only one '/' in between the - each string joined. Double slashes ('//') are assumed to be intentional and - are not deduplicated. - """ + Joins together strings and guarantees there is only one '/' in between the each string joined. + + Double slashes ('//') are assumed to be intentional and are not deduplicated. + """ def rmslash(path): path = path[1:] if len(path) > 0 and path[0] == "/" else path diff --git a/util/abchelpers.py b/util/abchelpers.py index 596ba982b..cd4b1c696 100644 --- a/util/abchelpers.py +++ b/util/abchelpers.py @@ -1,13 +1,16 @@ class NoopIsANoopException(TypeError): - """ Raised if the nooper decorator is unnecessary on a class. """ + """ + Raised if the nooper decorator is unnecessary on a class. + """ pass def nooper(cls): - """ Decorates a class that derives from an ABCMeta, filling in any unimplemented methods with - no-ops. - """ + """ + Decorates a class that derives from an ABCMeta, filling in any unimplemented methods with no- + ops. + """ def empty_func(*args, **kwargs): # pylint: disable=unused-argument diff --git a/util/asyncwrapper.py b/util/asyncwrapper.py index 135749e3b..760b2e237 100644 --- a/util/asyncwrapper.py +++ b/util/asyncwrapper.py @@ -6,17 +6,18 @@ from concurrent.futures import Executor, Future, CancelledError class AsyncExecutorWrapper(object): - """ This class will wrap a syncronous library transparently in a way which - will move all calls off to an asynchronous Executor, and will change all - returned values to be Future objects. - """ + """ + This class will wrap a syncronous library transparently in a way which will move all calls off + to an asynchronous Executor, and will change all returned values to be Future objects. + """ SYNC_FLAG_FIELD = "__AsyncExecutorWrapper__sync__" def __init__(self, delegate, executor): - """ Wrap the specified synchronous delegate instance, and submit() all - method calls to the specified Executor instance. - """ + """ + Wrap the specified synchronous delegate instance, and submit() all method calls to the + specified Executor instance. + """ self._delegate = delegate self._executor = executor @@ -48,9 +49,10 @@ class AsyncExecutorWrapper(object): @classmethod def sync(cls, f): - """ Annotate the given method to flag it as synchronous so that AsyncExecutorWrapper - will return the result immediately without submitting it to the executor. 
- """ + """ + Annotate the given method to flag it as synchronous so that AsyncExecutorWrapper will return + the result immediately without submitting it to the executor. + """ setattr(f, cls.SYNC_FLAG_FIELD, True) return f @@ -61,8 +63,9 @@ class NullExecutorCancelled(CancelledError): class NullExecutor(Executor): - """ Executor instance which always returns a Future completed with a - CancelledError exception. """ + """ + Executor instance which always returns a Future completed with a CancelledError exception. + """ def submit(self, _, *args, **kwargs): always_fail = Future() diff --git a/util/bytes.py b/util/bytes.py index 869398f2a..311788c25 100644 --- a/util/bytes.py +++ b/util/bytes.py @@ -1,7 +1,8 @@ class Bytes(object): - """ Wrapper around strings and unicode objects to ensure we are always using - the correct encoded or decoded data. - """ + """ + Wrapper around strings and unicode objects to ensure we are always using the correct encoded or + decoded data. + """ def __init__(self, data): assert isinstance(data, str) diff --git a/util/canonicaljson.py b/util/canonicaljson.py index 0c75459c5..deeb6f436 100644 --- a/util/canonicaljson.py +++ b/util/canonicaljson.py @@ -2,15 +2,15 @@ import collections def canonicalize(json_obj): - """This function canonicalizes a Python object that will be serialized as JSON. + """ + This function canonicalizes a Python object that will be serialized as JSON. - Args: - json_obj (object): the Python object that will later be serialized as JSON. + Args: + json_obj (object): the Python object that will later be serialized as JSON. - Returns: - object: json_obj now sorted to its canonical form. - - """ + Returns: + object: json_obj now sorted to its canonical form. + """ if isinstance(json_obj, collections.MutableMapping): sorted_obj = sorted({key: canonicalize(val) for key, val in json_obj.items()}.items()) return collections.OrderedDict(sorted_obj) diff --git a/util/config/__init__.py b/util/config/__init__.py index 85dfc248a..058e0d4ba 100644 --- a/util/config/__init__.py +++ b/util/config/__init__.py @@ -1,7 +1,8 @@ class URLSchemeAndHostname: """ - Immutable configuration for a given preferred url scheme (e.g. http or https), and a hostname (e.g. localhost:5000) - """ + Immutable configuration for a given preferred url scheme (e.g. http or https), and a hostname + (e.g. localhost:5000) + """ def __init__(self, url_scheme, hostname): self._url_scheme = url_scheme @@ -10,10 +11,11 @@ class URLSchemeAndHostname: @classmethod def from_app_config(cls, app_config): """ - Helper method to instantiate class from app config, a frequent pattern - :param app_config: - :return: - """ + Helper method to instantiate class from app config, a frequent pattern. + + :param app_config: + :return: + """ return cls(app_config["PREFERRED_URL_SCHEME"], app_config["SERVER_HOSTNAME"]) @property @@ -25,5 +27,7 @@ class URLSchemeAndHostname: return self._hostname def get_url(self): - """ Returns the application's URL, based on the given url scheme and hostname. """ + """ + Returns the application's URL, based on the given url scheme and hostname. + """ return "%s://%s" % (self._url_scheme, self._hostname) diff --git a/util/config/configdocs/configdoc.py b/util/config/configdocs/configdoc.py index 406933c52..04c9e5cdd 100644 --- a/util/config/configdocs/configdoc.py +++ b/util/config/configdocs/configdoc.py @@ -1,4 +1,6 @@ -""" Generates html documentation from JSON Schema """ +""" +Generates html documentation from JSON Schema. 
+""" import json @@ -11,7 +13,9 @@ from util.config.schema import CONFIG_SCHEMA def make_custom_sort(orders): - """ Sort in a specified order any dictionary nested in a complex structure """ + """ + Sort in a specified order any dictionary nested in a complex structure. + """ orders = [{k: -i for (i, k) in enumerate(reversed(order), 1)} for order in orders] diff --git a/util/config/configdocs/docsmodel.py b/util/config/configdocs/docsmodel.py index b30e144a8..ce5b7ca2b 100644 --- a/util/config/configdocs/docsmodel.py +++ b/util/config/configdocs/docsmodel.py @@ -3,10 +3,14 @@ import collections class ParsedItem(dict): - """ Parsed Schema item """ + """ + Parsed Schema item. + """ def __init__(self, json_object, name, required, level): - """Fills dict with basic item information""" + """ + Fills dict with basic item information. + """ super(ParsedItem, self).__init__() self["name"] = name self["title"] = json_object.get("title", "") @@ -21,21 +25,28 @@ class ParsedItem(dict): class DocsModel: - """ Documentation model and Schema Parser """ + """ + Documentation model and Schema Parser. + """ def __init__(self): self.__parsed_items = None def parse(self, json_object): - """ Returns multi-level list of recursively parsed items """ + """ + Returns multi-level list of recursively parsed items. + """ self.__parsed_items = list() self.__parse_schema(json_object, "root", True, 0) return self.__parsed_items def __parse_schema(self, schema, name, required, level): - """ Parses schema, which type is object, array or leaf. - Appends new ParsedItem to self.__parsed_items lis """ + """ + Parses schema, which type is object, array or leaf. + + Appends new ParsedItem to self.__parsed_items lis + """ parsed_item = ParsedItem(schema, name, required, level) self.__parsed_items.append(parsed_item) required = schema.get("required", []) @@ -51,12 +62,16 @@ class DocsModel: parse_leaf(parsed_item, schema) def __parse_object(self, parsed_item, schema, required, level): - """ Parses schema of type object """ + """ + Parses schema of type object. + """ for key, value in schema.get("properties", {}).items(): self.__parse_schema(value, key, key in required, level + 1) def __parse_array(self, parsed_item, schema, required, level): - """ Parses schema of type array """ + """ + Parses schema of type array. + """ items = schema.get("items") parsed_item["minItems"] = schema.get("minItems", None) parsed_item["maxItems"] = schema.get("maxItems", None) @@ -74,7 +89,9 @@ class DocsModel: def parse_leaf(parsed_item, schema): - """ Parses schema of a number and a string """ + """ + Parses schema of a number and a string. + """ if parsed_item["name"] != "root": parsed_item["description"] = schema.get("description", "") parsed_item["x-reference"] = schema.get("x-reference", "") diff --git a/util/config/configdocs/html_output.py b/util/config/configdocs/html_output.py index c1da712a0..379297b6f 100644 --- a/util/config/configdocs/html_output.py +++ b/util/config/configdocs/html_output.py @@ -1,11 +1,15 @@ class HtmlOutput: - """ Generates HTML from documentation model """ + """ + Generates HTML from documentation model. + """ def __init__(self): pass def generate_output(self, parsed_items): - """Returns generated HTML strin""" + """ + Returns generated HTML strin. 
+ """ return ( self.__get_html_begin() + self.__get_html_middle(parsed_items) + self.__get_html_end() ) diff --git a/util/config/configutil.py b/util/config/configutil.py index 11a2b6831..9abb99619 100644 --- a/util/config/configutil.py +++ b/util/config/configutil.py @@ -8,7 +8,9 @@ def generate_secret_key(): def add_enterprise_config_defaults(config_obj, current_secret_key): - """ Adds/Sets the config defaults for enterprise registry config. """ + """ + Adds/Sets the config defaults for enterprise registry config. + """ # These have to be false. config_obj["TESTING"] = False config_obj["USE_CDN"] = False diff --git a/util/config/database.py b/util/config/database.py index a6fdb6951..5fe41d4f0 100644 --- a/util/config/database.py +++ b/util/config/database.py @@ -4,7 +4,9 @@ from data.appr_model.models import NEW_MODELS def sync_database_with_config(config): - """ This ensures all implicitly required reference table entries exist in the database. """ + """ + This ensures all implicitly required reference table entries exist in the database. + """ location_names = config.get("DISTRIBUTED_STORAGE_CONFIG", {}).keys() if location_names: diff --git a/util/config/provider/__init__.py b/util/config/provider/__init__.py index f82758abb..baa2ee001 100644 --- a/util/config/provider/__init__.py +++ b/util/config/provider/__init__.py @@ -4,7 +4,9 @@ from util.config.provider.k8sprovider import KubernetesConfigProvider def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False): - """ Loads and returns the config provider for the current environment. """ + """ + Loads and returns the config provider for the current environment. + """ if testing: return TestConfigProvider() diff --git a/util/config/provider/basefileprovider.py b/util/config/provider/basefileprovider.py index 8503821f9..cf94adf4b 100644 --- a/util/config/provider/basefileprovider.py +++ b/util/config/provider/basefileprovider.py @@ -12,7 +12,9 @@ logger = logging.getLogger(__name__) class BaseFileProvider(BaseProvider): - """ Base implementation of the config provider that reads the data from the file system. """ + """ + Base implementation of the config provider that reads the data from the file system. + """ def __init__(self, config_volume, yaml_filename, py_filename): self.config_volume = config_volume diff --git a/util/config/provider/baseprovider.py b/util/config/provider/baseprovider.py index 737c7261d..959a5036e 100644 --- a/util/config/provider/baseprovider.py +++ b/util/config/provider/baseprovider.py @@ -12,13 +12,17 @@ logger = logging.getLogger(__name__) class CannotWriteConfigException(Exception): - """ Exception raised when the config cannot be written. """ + """ + Exception raised when the config cannot be written. + """ pass class SetupIncompleteException(Exception): - """ Exception raised when attempting to verify config that has not yet been setup. """ + """ + Exception raised when attempting to verify config that has not yet been setup. + """ pass @@ -63,8 +67,9 @@ def export_yaml(config_obj, config_file): @add_metaclass(ABCMeta) class BaseProvider(object): - """ A configuration provider helps to load, save, and handle config override in the application. - """ + """ + A configuration provider helps to load, save, and handle config override in the application. 
+ """ @property def provider_id(self): @@ -72,54 +77,78 @@ class BaseProvider(object): @abstractmethod def update_app_config(self, app_config): - """ Updates the given application config object with the loaded override config. """ + """ + Updates the given application config object with the loaded override config. + """ @abstractmethod def get_config(self): - """ Returns the contents of the config override file, or None if none. """ + """ + Returns the contents of the config override file, or None if none. + """ @abstractmethod def save_config(self, config_object): - """ Updates the contents of the config override file to those given. """ + """ + Updates the contents of the config override file to those given. + """ @abstractmethod def config_exists(self): - """ Returns true if a config override file exists in the config volume. """ + """ + Returns true if a config override file exists in the config volume. + """ @abstractmethod def volume_exists(self): - """ Returns whether the config override volume exists. """ + """ + Returns whether the config override volume exists. + """ @abstractmethod def volume_file_exists(self, relative_file_path): - """ Returns whether the file with the given relative path exists under the config override - volume. """ + """ + Returns whether the file with the given relative path exists under the config override + volume. + """ @abstractmethod def get_volume_file(self, relative_file_path, mode="r"): - """ Returns a Python file referring to the given path under the config override volume. """ + """ + Returns a Python file referring to the given path under the config override volume. + """ @abstractmethod def remove_volume_file(self, relative_file_path): - """ Removes the config override volume file with the given path. """ + """ + Removes the config override volume file with the given path. + """ @abstractmethod def list_volume_directory(self, path): - """ Returns a list of strings representing the names of the files found in the config override - directory under the given path. If the path doesn't exist, returns None. - """ + """ + Returns a list of strings representing the names of the files found in the config override + directory under the given path. + + If the path doesn't exist, returns None. + """ @abstractmethod def save_volume_file(self, flask_file, relative_file_path): - """ Saves the given flask file to the config override volume, with the given - relative path. - """ + """ + Saves the given flask file to the config override volume, with the given relative path. + """ @abstractmethod def get_volume_path(self, directory, filename): - """ Helper for constructing relative file paths, which may differ between providers. - For example, kubernetes can't have subfolders in configmaps """ + """ + Helper for constructing relative file paths, which may differ between providers. + + For example, kubernetes can't have subfolders in configmaps + """ @abstractmethod def get_config_root(self): - """ Returns the config root directory. """ + """ + Returns the config root directory. + """ diff --git a/util/config/provider/fileprovider.py b/util/config/provider/fileprovider.py index bb120620e..65de47771 100644 --- a/util/config/provider/fileprovider.py +++ b/util/config/provider/fileprovider.py @@ -9,7 +9,9 @@ logger = logging.getLogger(__name__) def _ensure_parent_dir(filepath): - """ Ensures that the parent directory of the given file path exists. """ + """ + Ensures that the parent directory of the given file path exists. 
+ """ try: parentpath = os.path.abspath(os.path.join(filepath, os.pardir)) if not os.path.isdir(parentpath): @@ -19,8 +21,9 @@ def _ensure_parent_dir(filepath): class FileConfigProvider(BaseFileProvider): - """ Implementation of the config provider that reads and writes the data - from/to the file system. """ + """ + Implementation of the config provider that reads and writes the data from/to the file system. + """ def __init__(self, config_volume, yaml_filename, py_filename): super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename) diff --git a/util/config/provider/k8sprovider.py b/util/config/provider/k8sprovider.py index e07a9d748..eebf48bf6 100644 --- a/util/config/provider/k8sprovider.py +++ b/util/config/provider/k8sprovider.py @@ -25,8 +25,10 @@ QE_CONFIG_SECRET = os.environ.get("QE_K8S_CONFIG_SECRET", "quay-enterprise-confi class KubernetesConfigProvider(BaseFileProvider): - """ Implementation of the config provider that reads and writes configuration - data from a Kubernetes Secret. """ + """ + Implementation of the config provider that reads and writes configuration data from a Kubernetes + Secret. + """ def __init__( self, diff --git a/util/config/provider/testprovider.py b/util/config/provider/testprovider.py index 65436d0e3..f4c8cf005 100644 --- a/util/config/provider/testprovider.py +++ b/util/config/provider/testprovider.py @@ -9,8 +9,11 @@ REAL_FILES = ["test/data/signing-private.gpg", "test/data/signing-public.gpg", " class TestConfigProvider(BaseProvider): - """ Implementation of the config provider for testing. Everything is kept in-memory instead on - the real file system. """ + """ + Implementation of the config provider for testing. + + Everything is kept in-memory instead on the real file system. + """ def get_config_root(self): raise Exception("Test Config does not have a config root") diff --git a/util/config/superusermanager.py b/util/config/superusermanager.py index 6bf2d761b..17c4a53d3 100644 --- a/util/config/superusermanager.py +++ b/util/config/superusermanager.py @@ -3,10 +3,11 @@ from util.validation import MAX_USERNAME_LENGTH class SuperUserManager(object): - """ In-memory helper class for quickly accessing (and updating) the valid - set of super users. This class communicates across processes to ensure - that the shared set is always the same. - """ + """ + In-memory helper class for quickly accessing (and updating) the valid set of super users. + + This class communicates across processes to ensure that the shared set is always the same. + """ def __init__(self, app): usernames = app.config.get("SUPER_USERS", []) @@ -17,14 +18,18 @@ class SuperUserManager(object): self._array.value = usernames_str def is_superuser(self, username): - """ Returns if the given username represents a super user. """ + """ + Returns if the given username represents a super user. + """ usernames = self._array.value.split(",") return username in usernames def register_superuser(self, username): - """ Registers a new username as a super user for the duration of the container. + """ + Registers a new username as a super user for the duration of the container. + Note that this does *not* change any underlying config files. - """ + """ usernames = self._array.value.split(",") usernames.append(username) new_string = ",".join(usernames) @@ -35,5 +40,7 @@ class SuperUserManager(object): raise Exception("Maximum superuser count reached. Please report this to support.") def has_superusers(self): - """ Returns whether there are any superusers defined. 
""" + """ + Returns whether there are any superusers defined. + """ return bool(self._array.value) diff --git a/util/config/validator.py b/util/config/validator.py index aa7f33b79..e167fd655 100644 --- a/util/config/validator.py +++ b/util/config/validator.py @@ -29,7 +29,9 @@ logger = logging.getLogger(__name__) class ConfigValidationException(Exception): - """ Exception raised when the configuration fails to validate for a known reason. """ + """ + Exception raised when the configuration fails to validate for a known reason. + """ pass @@ -71,7 +73,9 @@ VALIDATORS = { def validate_service_for_config(service, validator_context): - """ Attempts to validate the configuration for the given service. """ + """ + Attempts to validate the configuration for the given service. + """ if not service in VALIDATORS: return {"status": False} @@ -84,9 +88,10 @@ def validate_service_for_config(service, validator_context): def is_valid_config_upload_filename(filename): - """ Returns true if and only if the given filename is one which is supported for upload - from the configuration UI tool. - """ + """ + Returns true if and only if the given filename is one which is supported for upload from the + configuration UI tool. + """ if filename in CONFIG_FILENAMES: return True @@ -94,8 +99,9 @@ def is_valid_config_upload_filename(filename): class ValidatorContext(object): - """ Context to run validators in, with any additional runtime configuration they need - """ + """ + Context to run validators in, with any additional runtime configuration they need. + """ def __init__( self, @@ -143,17 +149,18 @@ class ValidatorContext(object): init_scripts_location=None, ): """ - Creates a ValidatorContext from an app config, with a given config to validate - :param app: the Flask app to pull configuration information from - :param config: the config to validate - :param user_password: request password - :param instance_keys: The instance keys handler - :param ip_resolver: an App - :param client: http client used to connect to services - :param config_provider: config provider used to access config volume(s) - :param init_scripts_location: location where initial load scripts are stored - :return: ValidatorContext - """ + Creates a ValidatorContext from an app config, with a given config to validate. + + :param app: the Flask app to pull configuration information from + :param config: the config to validate + :param user_password: request password + :param instance_keys: The instance keys handler + :param ip_resolver: an App + :param client: http client used to connect to services + :param config_provider: config provider used to access config volume(s) + :param init_scripts_location: location where initial load scripts are stored + :return: ValidatorContext + """ url_scheme_and_hostname = URLSchemeAndHostname.from_app_config(app.config) return cls( diff --git a/util/config/validators/__init__.py b/util/config/validators/__init__.py index aa69bbdc7..fbabfd878 100644 --- a/util/config/validators/__init__.py +++ b/util/config/validators/__init__.py @@ -3,7 +3,9 @@ from six import add_metaclass class ConfigValidationException(Exception): - """ Exception raised when the configuration fails to validate for a known reason. """ + """ + Exception raised when the configuration fails to validate for a known reason. + """ pass @@ -12,11 +14,15 @@ class ConfigValidationException(Exception): class BaseValidator(object): @abstractproperty def name(self): - """ The key for the validation API. """ + """ + The key for the validation API. 
+ """ pass @classmethod @abstractmethod def validate(cls, validator_context): - """ Raises Exception if failure to validate. """ + """ + Raises Exception if failure to validate. + """ pass diff --git a/util/config/validators/validate_bitbucket_trigger.py b/util/config/validators/validate_bitbucket_trigger.py index 14455e19d..2d02ff92f 100644 --- a/util/config/validators/validate_bitbucket_trigger.py +++ b/util/config/validators/validate_bitbucket_trigger.py @@ -8,7 +8,9 @@ class BitbucketTriggerValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the config for BitBucket. """ + """ + Validates the config for BitBucket. + """ config = validator_context.config trigger_config = config.get("BITBUCKET_TRIGGER_CONFIG") diff --git a/util/config/validators/validate_database.py b/util/config/validators/validate_database.py index af4bb21c9..bffea1b33 100644 --- a/util/config/validators/validate_database.py +++ b/util/config/validators/validate_database.py @@ -9,7 +9,9 @@ class DatabaseValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates connecting to the database. """ + """ + Validates connecting to the database. + """ config = validator_context.config try: diff --git a/util/config/validators/validate_github.py b/util/config/validators/validate_github.py index 7c6a21a64..4fc500c9c 100644 --- a/util/config/validators/validate_github.py +++ b/util/config/validators/validate_github.py @@ -8,7 +8,9 @@ class BaseGitHubValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the OAuth credentials and API endpoint for a Github service. """ + """ + Validates the OAuth credentials and API endpoint for a Github service. + """ config = validator_context.config client = validator_context.http_client url_scheme_and_hostname = validator_context.url_scheme_and_hostname diff --git a/util/config/validators/validate_gitlab_trigger.py b/util/config/validators/validate_gitlab_trigger.py index 925b8e9fc..151a852b4 100644 --- a/util/config/validators/validate_gitlab_trigger.py +++ b/util/config/validators/validate_gitlab_trigger.py @@ -7,7 +7,9 @@ class GitLabTriggerValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the OAuth credentials and API endpoint for a GitLab service. """ + """ + Validates the OAuth credentials and API endpoint for a GitLab service. + """ config = validator_context.config url_scheme_and_hostname = validator_context.url_scheme_and_hostname client = validator_context.http_client diff --git a/util/config/validators/validate_google_login.py b/util/config/validators/validate_google_login.py index c8a038563..1328704b9 100644 --- a/util/config/validators/validate_google_login.py +++ b/util/config/validators/validate_google_login.py @@ -7,7 +7,9 @@ class GoogleLoginValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the Google Login client ID and secret. """ + """ + Validates the Google Login client ID and secret. 
+ """ config = validator_context.config client = validator_context.http_client url_scheme_and_hostname = validator_context.url_scheme_and_hostname diff --git a/util/config/validators/validate_jwt.py b/util/config/validators/validate_jwt.py index 8dce41f94..cfd41afbb 100644 --- a/util/config/validators/validate_jwt.py +++ b/util/config/validators/validate_jwt.py @@ -8,7 +8,9 @@ class JWTAuthValidator(BaseValidator): @classmethod def validate(cls, validator_context, public_key_path=None): - """ Validates the JWT authentication system. """ + """ + Validates the JWT authentication system. + """ config = validator_context.config http_client = validator_context.http_client jwt_auth_max = validator_context.jwt_auth_max diff --git a/util/config/validators/validate_keystone.py b/util/config/validators/validate_keystone.py index d3606961f..b33fa78ca 100644 --- a/util/config/validators/validate_keystone.py +++ b/util/config/validators/validate_keystone.py @@ -7,7 +7,9 @@ class KeystoneValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the Keystone authentication system. """ + """ + Validates the Keystone authentication system. + """ config = validator_context.config if config.get("AUTHENTICATION_TYPE", "Database") != "Keystone": diff --git a/util/config/validators/validate_ldap.py b/util/config/validators/validate_ldap.py index 67100f990..e8923d5ea 100644 --- a/util/config/validators/validate_ldap.py +++ b/util/config/validators/validate_ldap.py @@ -12,7 +12,9 @@ class LDAPValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the LDAP connection. """ + """ + Validates the LDAP connection. + """ config = validator_context.config config_provider = validator_context.config_provider init_scripts_location = validator_context.init_scripts_location diff --git a/util/config/validators/validate_redis.py b/util/config/validators/validate_redis.py index 993283685..7944328d4 100644 --- a/util/config/validators/validate_redis.py +++ b/util/config/validators/validate_redis.py @@ -8,7 +8,9 @@ class RedisValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates connecting to redis. """ + """ + Validates connecting to redis. + """ config = validator_context.config redis_config = config.get("BUILDLOGS_REDIS", {}) diff --git a/util/config/validators/validate_secscan.py b/util/config/validators/validate_secscan.py index 1445d7df9..ea645b794 100644 --- a/util/config/validators/validate_secscan.py +++ b/util/config/validators/validate_secscan.py @@ -10,7 +10,9 @@ class SecurityScannerValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the configuration for talking to a Quay Security Scanner. """ + """ + Validates the configuration for talking to a Quay Security Scanner. + """ config = validator_context.config client = validator_context.http_client feature_sec_scanner = validator_context.feature_sec_scanner diff --git a/util/config/validators/validate_signer.py b/util/config/validators/validate_signer.py index 2a8b6d9ce..633c0f4c9 100644 --- a/util/config/validators/validate_signer.py +++ b/util/config/validators/validate_signer.py @@ -9,7 +9,9 @@ class SignerValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the GPG public+private key pair used for signing converted ACIs. """ + """ + Validates the GPG public+private key pair used for signing converted ACIs. 
+ """ config = validator_context.config config_provider = validator_context.config_provider diff --git a/util/config/validators/validate_ssl.py b/util/config/validators/validate_ssl.py index c28dc1eb4..5a438a791 100644 --- a/util/config/validators/validate_ssl.py +++ b/util/config/validators/validate_ssl.py @@ -9,7 +9,9 @@ class SSLValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the SSL configuration (if enabled). """ + """ + Validates the SSL configuration (if enabled). + """ config = validator_context.config config_provider = validator_context.config_provider @@ -64,7 +66,9 @@ class SSLValidator(BaseValidator): def _ssl_cn(server_hostname): - """ Return the common name (fully qualified host name) from the SERVER_HOSTNAME. """ + """ + Return the common name (fully qualified host name) from the SERVER_HOSTNAME. + """ host_port = server_hostname.rsplit(":", 1) # SERVER_HOSTNAME includes the port diff --git a/util/config/validators/validate_storage.py b/util/config/validators/validate_storage.py index eab0997e3..cd2a97e0a 100644 --- a/util/config/validators/validate_storage.py +++ b/util/config/validators/validate_storage.py @@ -7,7 +7,9 @@ class StorageValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates registry storage. """ + """ + Validates registry storage. + """ config = validator_context.config client = validator_context.http_client ip_resolver = validator_context.ip_resolver diff --git a/util/config/validators/validate_torrent.py b/util/config/validators/validate_torrent.py index 636fdb73b..5faa29031 100644 --- a/util/config/validators/validate_torrent.py +++ b/util/config/validators/validate_torrent.py @@ -13,7 +13,9 @@ class BittorrentValidator(BaseValidator): @classmethod def validate(cls, validator_context): - """ Validates the configuration for using BitTorrent for downloads. """ + """ + Validates the configuration for using BitTorrent for downloads. + """ config = validator_context.config client = validator_context.http_client diff --git a/util/dict_wrappers.py b/util/dict_wrappers.py index 8eb1b1450..c08002d97 100644 --- a/util/dict_wrappers.py +++ b/util/dict_wrappers.py @@ -3,14 +3,15 @@ from jsonpath_rw import parse class SafeDictSetter(object): - """ Specialized write-only dictionary wrapper class that allows for setting - nested keys via a path syntax. + """ + Specialized write-only dictionary wrapper class that allows for setting nested keys via a path + syntax. - Example: - sds = SafeDictSetter() - sds['foo.bar.baz'] = 'hello' # Sets 'foo' = {'bar': {'baz': 'hello'}} - sds['somekey'] = None # Does not set the key since the value is None - """ + Example: + sds = SafeDictSetter() + sds['foo.bar.baz'] = 'hello' # Sets 'foo' = {'bar': {'baz': 'hello'}} + sds['somekey'] = None # Does not set the key since the value is None + """ def __init__(self, initial_object=None): self._object = initial_object or {} @@ -19,7 +20,9 @@ class SafeDictSetter(object): self.set(path, value) def set(self, path, value, allow_none=False): - """ Sets the value of the given path to the given value. """ + """ + Sets the value of the given path to the given value. + """ if value is None and not allow_none: return @@ -37,26 +40,32 @@ class SafeDictSetter(object): current[pieces[-1]] = value def dict_value(self): - """ Returns the dict value built. """ + """ + Returns the dict value built. + """ return self._object def json_value(self): - """ Returns the JSON string value of the dictionary built. 
""" + """ + Returns the JSON string value of the dictionary built. + """ return json.dumps(self._object) class JSONPathDict(object): - """ Specialized read-only dictionary wrapper class that uses the jsonpath_rw library - to access keys via an X-Path-like syntax. + """ + Specialized read-only dictionary wrapper class that uses the jsonpath_rw library to access keys + via an X-Path-like syntax. - Example: - pd = JSONPathDict({'hello': {'hi': 'there'}}) - pd['hello.hi'] # Returns 'there' - """ + Example: + pd = JSONPathDict({'hello': {'hi': 'there'}}) + pd['hello.hi'] # Returns 'there' + """ def __init__(self, dict_value): - """ Init the helper with the JSON object. - """ + """ + Init the helper with the JSON object. + """ self._object = dict_value def __getitem__(self, path): @@ -69,7 +78,11 @@ class JSONPathDict(object): return self._object.iterkeys() def get(self, path, not_found_handler=None): - """ Returns the value found at the given path. Path is a json-path expression. """ + """ + Returns the value found at the given path. + + Path is a json-path expression. + """ if self._object == {} or self._object is None: return None jsonpath_expr = parse(path) diff --git a/util/dockerfileparse.py b/util/dockerfileparse.py index 6a5192d3c..af04c40bb 100644 --- a/util/dockerfileparse.py +++ b/util/dockerfileparse.py @@ -23,9 +23,10 @@ class ParsedDockerfile(object): @staticmethod def parse_image_identifier(image_identifier): - """ Parses a docker image identifier, and returns a tuple of image name and tag, where the tag + """ + Parses a docker image identifier, and returns a tuple of image name and tag, where the tag is filled in with "latest" if left unspecified. - """ + """ # Note: # Dockerfile images references can be of multiple forms: # server:port/some/path @@ -51,11 +52,15 @@ class ParsedDockerfile(object): return (":".join(parts[0:-1]), parts[-1]) def get_base_image(self): - """ Return the base image without the tag name. """ + """ + Return the base image without the tag name. + """ return self.get_image_and_tag()[0] def get_image_and_tag(self): - """ Returns the image and tag from the FROM line of the dockerfile. """ + """ + Returns the image and tag from the FROM line of the dockerfile. + """ image_identifier = self._get_from_image_identifier() if image_identifier is None: return (None, None) diff --git a/util/dynamic.py b/util/dynamic.py index 80f3f26ab..7a05adc42 100644 --- a/util/dynamic.py +++ b/util/dynamic.py @@ -1,5 +1,7 @@ def import_class(module_name, class_name): - """ Import a class given the specified module name and class name. """ + """ + Import a class given the specified module name and class name. + """ klass = __import__(module_name) class_segments = class_name.split(".") for segment in class_segments: diff --git a/util/expiresdict.py b/util/expiresdict.py index 1769da01b..88fb64357 100644 --- a/util/expiresdict.py +++ b/util/expiresdict.py @@ -4,7 +4,9 @@ from six import iteritems class ExpiresEntry(object): - """ A single entry under a ExpiresDict. """ + """ + A single entry under a ExpiresDict. + """ def __init__(self, value, expires=None): self.value = value @@ -19,10 +21,13 @@ class ExpiresEntry(object): class ExpiresDict(object): - """ ExpiresDict defines a dictionary-like class whose keys have expiration. The rebuilder is - a function that returns the full contents of the cached dictionary as a dict of the keys - and whose values are TTLEntry's. If the rebuilder is None, then no rebuilding is performed. 
- """ + """ + ExpiresDict defines a dictionary-like class whose keys have expiration. + + The rebuilder is a function that returns the full contents of the cached dictionary as a dict of + the keys and whose values are TTLEntry's. If the rebuilder is None, then no rebuilding is + performed. + """ def __init__(self, rebuilder=None): self._rebuilder = rebuilder diff --git a/util/failover.py b/util/failover.py index 556669561..9ebb4ecbe 100644 --- a/util/failover.py +++ b/util/failover.py @@ -7,9 +7,11 @@ logger = logging.getLogger(__name__) class FailoverException(Exception): - """ Exception raised when an operation should be retried by the failover decorator. - Wraps the exception of the initial failure. - """ + """ + Exception raised when an operation should be retried by the failover decorator. + + Wraps the exception of the initial failure. + """ def __init__(self, exception): super(FailoverException, self).__init__() diff --git a/util/greenlet_tracing.py b/util/greenlet_tracing.py index 38ecb0324..d1a288539 100644 --- a/util/greenlet_tracing.py +++ b/util/greenlet_tracing.py @@ -21,7 +21,9 @@ def enable_tracing(): def greenlet_callback(event, args): - """ This is a callback that is executed greenlet on all events. """ + """ + This is a callback that is executed greenlet on all events. + """ if event in ("switch", "throw"): # It's only safe to unpack args under these two events. (origin, _target) = args @@ -39,7 +41,9 @@ def greenlet_callback(event, args): def switch_callback(_args): - """ This is a callback that is executed specifically on greenlet switches. """ + """ + This is a callback that is executed specifically on greenlet switches. + """ global _latest_switch greenlet_switch.inc() @@ -54,7 +58,10 @@ def switch_callback(_args): def throw_callback(_args): - """ This is a callback that is executed on execeptions from origin to target. - This callback is running in the context of the target greenlet and any exceptions will - replace the original, as if target.throw() was used replacing the exception. """ + """ + This is a callback that is executed on execeptions from origin to target. + + This callback is running in the context of the target greenlet and any exceptions will replace + the original, as if target.throw() was used replacing the exception. + """ greenlet_throw.inc() diff --git a/util/headers.py b/util/headers.py index 067312803..86c8b4810 100644 --- a/util/headers.py +++ b/util/headers.py @@ -2,7 +2,9 @@ import base64 def parse_basic_auth(header_value): - """ Attempts to parse the given header value as a Base64-encoded Basic auth header. """ + """ + Attempts to parse the given header value as a Base64-encoded Basic auth header. + """ if not header_value: return None diff --git a/util/invoice.py b/util/invoice.py index e78e858e0..d6adab60d 100644 --- a/util/invoice.py +++ b/util/invoice.py @@ -14,7 +14,9 @@ env = Environment(**jinja_options) def renderInvoiceToPdf(invoice, user): - """ Renders a nice PDF display for the given invoice. """ + """ + Renders a nice PDF display for the given invoice. + """ sourceHtml = renderInvoiceToHtml(invoice, user) output = StringIO.StringIO() pisaStatus = pisa.CreatePDF(sourceHtml, dest=output) @@ -27,7 +29,9 @@ def renderInvoiceToPdf(invoice, user): def renderInvoiceToHtml(invoice, user): - """ Renders a nice HTML display for the given invoice. """ + """ + Renders a nice HTML display for the given invoice. 
+ """ from endpoints.api.billing import get_invoice_fields def get_price(price): diff --git a/util/ipresolver/__init__.py b/util/ipresolver/__init__.py index 187a53be4..5fa708b4b 100644 --- a/util/ipresolver/__init__.py +++ b/util/ipresolver/__init__.py @@ -42,26 +42,34 @@ def _get_aws_ip_ranges(): @add_metaclass(ABCMeta) class IPResolverInterface(object): - """ Helper class for resolving information about an IP address. """ + """ + Helper class for resolving information about an IP address. + """ @abstractmethod def resolve_ip(self, ip_address): - """ Attempts to return resolved information about the specified IP Address. If such an attempt - fails, returns None. - """ + """ + Attempts to return resolved information about the specified IP Address. + + If such an attempt fails, returns None. + """ pass @abstractmethod def is_ip_possible_threat(self, ip_address): - """ Attempts to return whether the given IP address is a possible abuser or spammer. + """ + Attempts to return whether the given IP address is a possible abuser or spammer. + Returns False if the IP address information could not be looked up. - """ + """ pass @nooper class NoopIPResolver(IPResolverInterface): - """ No-op version of the security scanner API. """ + """ + No-op version of the security scanner API. + """ pass @@ -116,9 +124,11 @@ class IPResolver(IPResolverInterface): return False def resolve_ip(self, ip_address): - """ Attempts to return resolved information about the specified IP Address. If such an attempt - fails, returns None. - """ + """ + Attempts to return resolved information about the specified IP Address. + + If such an attempt fails, returns None. + """ if not ip_address: return None diff --git a/util/itertoolrecipes.py b/util/itertoolrecipes.py index 0b57827e3..b36aaed61 100644 --- a/util/itertoolrecipes.py +++ b/util/itertoolrecipes.py @@ -2,5 +2,7 @@ from itertools import islice # From: https://docs.python.org/2/library/itertools.html def take(n, iterable): - """ Return first n items of the iterable as a list """ + """ + Return first n items of the iterable as a list. + """ return list(islice(iterable, n)) diff --git a/util/jsontemplate.py b/util/jsontemplate.py index 00fc2b259..ccb6368ec 100644 --- a/util/jsontemplate.py +++ b/util/jsontemplate.py @@ -8,11 +8,15 @@ INLINE_PATH_PATTERN = r"\$\{([^}]*)\}" class JSONTemplateParseException(Exception): - """ Exception raised if a JSON template could not be parsed. """ + """ + Exception raised if a JSON template could not be parsed. + """ class JSONTemplate(object): - """ Represents a parsed template for producing JSON. """ + """ + Represents a parsed template for producing JSON. + """ def __init__(self, template_string): try: diff --git a/util/label_validator.py b/util/label_validator.py index 782d97bfb..96fcf90a3 100644 --- a/util/label_validator.py +++ b/util/label_validator.py @@ -1,5 +1,7 @@ class LabelValidator(object): - """ Helper class for validating that labels meet prefix requirements. """ + """ + Helper class for validating that labels meet prefix requirements. + """ def __init__(self, app): self.app = app @@ -15,7 +17,9 @@ class LabelValidator(object): self.reserved_prefixed_set = set(default_prefixes + overridden_prefixes) def has_reserved_prefix(self, label_key): - """ Validates that the provided label key does not match any reserved prefixes. """ + """ + Validates that the provided label key does not match any reserved prefixes. 
+ """ for prefix in self.reserved_prefixed_set: if label_key.startswith(prefix): return True diff --git a/util/locking.py b/util/locking.py index d4f13dd80..06a755620 100644 --- a/util/locking.py +++ b/util/locking.py @@ -9,13 +9,18 @@ logger = logging.getLogger(__name__) class LockNotAcquiredException(Exception): - """ Exception raised if a GlobalLock could not be acquired. """ + """ + Exception raised if a GlobalLock could not be acquired. + """ class GlobalLock(object): - """ A lock object that blocks globally via Redis. Note that Redis is not considered a tier-1 - service, so this lock should not be used for any critical code paths. - """ + """ + A lock object that blocks globally via Redis. + + Note that Redis is not considered a tier-1 service, so this lock should not be used for any + critical code paths. + """ def __init__(self, name, lock_ttl=600): self._lock_name = name diff --git a/util/log.py b/util/log.py index 45e61987b..8df8b0971 100644 --- a/util/log.py +++ b/util/log.py @@ -5,12 +5,13 @@ from _init import CONF_DIR def logfile_path(jsonfmt=False, debug=False): """ Returns the a logfileconf path following this rules: + - conf/logging_debug_json.conf # jsonfmt=true, debug=true - conf/logging_json.conf # jsonfmt=true, debug=false - conf/logging_debug.conf # jsonfmt=false, debug=true - conf/logging.conf # jsonfmt=false, debug=false Can be parametrized via envvars: JSONLOG=true, DEBUGLOG=true - """ + """ _json = "" _debug = "" diff --git a/util/metrics/prometheus.py b/util/metrics/prometheus.py index 0b40c09c8..73515e8e6 100644 --- a/util/metrics/prometheus.py +++ b/util/metrics/prometheus.py @@ -29,8 +29,10 @@ ONE_DAY_IN_SECONDS = 60 * 60 * 24 @lru_cache(maxsize=1) def process_grouping_key(): - """ Implements a grouping key based on the last argument used to run the current process. - https://github.com/prometheus/client_python#exporting-to-a-pushgateway + """ + Implements a grouping key based on the last argument used to run the current process. + + https://github.com/prometheus/client_python#exporting-to-a-pushgateway """ return { "host": socket.gethostname(), @@ -40,7 +42,9 @@ def process_grouping_key(): class PrometheusPlugin(object): - """ Application plugin for reporting metrics to Prometheus. """ + """ + Application plugin for reporting metrics to Prometheus. + """ def __init__(self, app=None): self.app = app @@ -104,7 +108,9 @@ class ThreadPusher(threading.Thread): def timed_blueprint(bp): - """ Decorates a blueprint to have its request duration tracked by Prometheus. """ + """ + Decorates a blueprint to have its request duration tracked by Prometheus. + """ def _time_before_request(): g._request_start_time = time.time() diff --git a/util/migrate/__init__.py b/util/migrate/__init__.py index 23840949b..cd3086829 100644 --- a/util/migrate/__init__.py +++ b/util/migrate/__init__.py @@ -8,11 +8,12 @@ logger = logging.getLogger(__name__) class UTF8LongText(TypeDecorator): - """ Platform-independent UTF-8 LONGTEXT type. + """ + Platform-independent UTF-8 LONGTEXT type. - Uses MySQL's LongText with charset utf8mb4, otherwise uses TEXT, because - other engines default to UTF-8 and have longer TEXT fields. - """ + Uses MySQL's LongText with charset utf8mb4, otherwise uses TEXT, because other engines default + to UTF-8 and have longer TEXT fields. + """ impl = Text @@ -26,11 +27,12 @@ class UTF8LongText(TypeDecorator): class UTF8CharField(TypeDecorator): - """ Platform-independent UTF-8 Char type. + """ + Platform-independent UTF-8 Char type. 
diff --git a/util/metrics/prometheus.py b/util/metrics/prometheus.py
index 0b40c09c8..73515e8e6 100644
--- a/util/metrics/prometheus.py
+++ b/util/metrics/prometheus.py
@@ -29,8 +29,10 @@ ONE_DAY_IN_SECONDS = 60 * 60 * 24
@lru_cache(maxsize=1)
def process_grouping_key():
- """ Implements a grouping key based on the last argument used to run the current process.
- https://github.com/prometheus/client_python#exporting-to-a-pushgateway
+ """
+ Implements a grouping key based on the last argument used to run the current process.
+
+ https://github.com/prometheus/client_python#exporting-to-a-pushgateway
"""
return {
"host": socket.gethostname(),
@@ -40,7 +42,9 @@ def process_grouping_key():

class PrometheusPlugin(object):
- """ Application plugin for reporting metrics to Prometheus. """
+ """
+ Application plugin for reporting metrics to Prometheus.
+ """
def __init__(self, app=None):
self.app = app
@@ -104,7 +108,9 @@ class ThreadPusher(threading.Thread):

def timed_blueprint(bp):
- """ Decorates a blueprint to have its request duration tracked by Prometheus. """
+ """
+ Decorates a blueprint to have its request duration tracked by Prometheus.
+ """
def _time_before_request():
g._request_start_time = time.time()
diff --git a/util/migrate/__init__.py b/util/migrate/__init__.py
index 23840949b..cd3086829 100644
--- a/util/migrate/__init__.py
+++ b/util/migrate/__init__.py
@@ -8,11 +8,12 @@ logger = logging.getLogger(__name__)

class UTF8LongText(TypeDecorator):
- """ Platform-independent UTF-8 LONGTEXT type.
+ """
+ Platform-independent UTF-8 LONGTEXT type.

- Uses MySQL's LongText with charset utf8mb4, otherwise uses TEXT, because
- other engines default to UTF-8 and have longer TEXT fields.
- """
+ Uses MySQL's LongText with charset utf8mb4, otherwise uses TEXT, because other engines default
+ to UTF-8 and have longer TEXT fields.
+ """

impl = Text

@@ -26,11 +27,12 @@

class UTF8CharField(TypeDecorator):
- """ Platform-independent UTF-8 Char type.
+ """
+ Platform-independent UTF-8 Char type.

- Uses MySQL's VARCHAR with charset utf8mb4, otherwise uses String, because
- other engines default to UTF-8.
- """
+ Uses MySQL's VARCHAR with charset utf8mb4, otherwise uses String, because other engines default
+ to UTF-8.
+ """

impl = String
diff --git a/util/migrate/allocator.py b/util/migrate/allocator.py
index 416ec2c26..cdd9e9cba 100644
--- a/util/migrate/allocator.py
+++ b/util/migrate/allocator.py
@@ -136,12 +136,14 @@ class CompletedKeys(object):

def yield_random_entries(batch_query, primary_key_field, batch_size, max_id, min_id=0):
- """ This method will yield items from random blocks in the database. We will track metadata
- about which keys are available for work, and we will complete the backfill when there is no
- more work to be done. The method yields tuples of (candidate, Event), and if the work was
- already done by another worker, the caller should set the event. Batch candidates must have
- an "id" field which can be inspected.
- """
+ """
+ This method will yield items from random blocks in the database.
+
+ We will track metadata about which keys are available for work, and we will complete the
+ backfill when there is no more work to be done. The method yields tuples of (candidate, Event),
+ and if the work was already done by another worker, the caller should set the event. Batch
+ candidates must have an "id" field which can be inspected.
+ """
min_id = max(min_id, 0)
max_id = max(max_id, 1)
diff --git a/util/migrate/cleanup_old_robots.py b/util/migrate/cleanup_old_robots.py
index 72b230239..a556fb9df 100644
--- a/util/migrate/cleanup_old_robots.py
+++ b/util/migrate/cleanup_old_robots.py
@@ -9,7 +9,9 @@ logger.setLevel(logging.INFO)

def cleanup_old_robots(page_size=50, force=False):
- """ Deletes any robots that live under namespaces that no longer exist. """
+ """
+ Deletes any robots that live under namespaces that no longer exist.
+ """
if not force and not app.config.get("SETUP_COMPLETE", False):
return
diff --git a/util/morecollections.py b/util/morecollections.py
index 343ea711d..df0ba5a83 100644
--- a/util/morecollections.py
+++ b/util/morecollections.py
@@ -13,9 +13,10 @@ class AttrDict(dict):

class FastIndexList(object):
- """ List which keeps track of the indicies of its items in a fast manner, and allows for
- quick removal of items.
- """
+ """
+ List which keeps track of the indices of its items in a fast manner, and allows for quick
+ removal of items.
+ """
def __init__(self):
self._list = []
@@ -24,17 +25,23 @@ class FastIndexList(object):
self._counter = 0
def add(self, item):
- """ Adds an item to the index list. """
+ """
+ Adds an item to the index list.
+ """
self._list.append(item)
self._index_map[item] = self._counter
self._counter = self._counter + 1
def values(self):
- """ Returns an iterable stream of all the values in the list. """
+ """
+ Returns an iterable stream of all the values in the list.
+ """
return list(self._list)
def index(self, value):
- """ Returns the index of the given item in the list or None if none. """
+ """
+ Returns the index of the given item in the list or None if none.
+ """
found = self._index_map.get(value, None)
if found is None:
return None
@@ -42,7 +49,9 @@ class FastIndexList(object):
return found - self._index_offset
def pop_until(self, index_inclusive):
- """ Pops off any items in the list until the given index, inclusive, and returns them. """
+ """
+ Pops off any items in the list until the given index, inclusive, and returns them.
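# Hedged caller sketch, not part of the patch, for yield_random_entries
# (earlier in this hunk): it yields (candidate, Event) tuples, and a worker
# that finds the row already handled sets the event so the allocator can
# retire that block. The model, query shape, and helper names here are all
# illustrative assumptions.
for candidate, abort_event in yield_random_entries(
    lambda: MyModel.select(), MyModel.id, batch_size=100, max_id=highest_id
):
    if already_backfilled(candidate):  # hypothetical check
        abort_event.set()              # another worker got here first
        continue
    backfill(candidate)                # hypothetical work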
+ """ values = self._list[0 : index_inclusive + 1] for value in values: self._index_map.pop(value, None) @@ -53,15 +62,16 @@ class FastIndexList(object): class IndexedStreamingDiffTracker(object): - """ Helper class which tracks the difference between two streams of strings, - calling the `added` callback for strings when they are successfully verified - as being present in the first stream and not present in the second stream. - Unlike StreamingDiffTracker, this class expects each string value to have an - associated `index` value, which must be the same for equal values in both - streams and *must* be in order. This allows us to be a bit more efficient - in clearing up items that we know won't be present. The `index` is *not* - assumed to start at 0 or be contiguous, merely increasing. - """ + """ + Helper class which tracks the difference between two streams of strings, calling the `added` + callback for strings when they are successfully verified as being present in the first stream + and not present in the second stream. + + Unlike StreamingDiffTracker, this class expects each string value to have an associated `index` + value, which must be the same for equal values in both streams and *must* be in order. This + allows us to be a bit more efficient in clearing up items that we know won't be present. The + `index` is *not* assumed to start at 0 or be contiguous, merely increasing. + """ def __init__(self, reporter, result_per_stream): self._reporter = reporter @@ -76,8 +86,9 @@ class IndexedStreamingDiffTracker(object): self._old_stream_map = {} def push_new(self, stream_tuples): - """ Pushes a list of values for the `New` stream. - """ + """ + Pushes a list of values for the `New` stream. + """ stream_tuples_list = list(stream_tuples) assert len(stream_tuples_list) <= self._reports_per_stream @@ -94,8 +105,9 @@ class IndexedStreamingDiffTracker(object): self._process() def push_old(self, stream_tuples): - """ Pushes a list of values for the `Old` stream. - """ + """ + Pushes a list of values for the `Old` stream. + """ if self._new_stream_finished and not self._new_stream: # Nothing more to do. return @@ -152,12 +164,14 @@ class IndexedStreamingDiffTracker(object): class StreamingDiffTracker(object): - """ Helper class which tracks the difference between two streams of strings, calling the - `added` callback for strings when they are successfully verified as being present in - the first stream and not present in the second stream. This class requires that the - streams of strings be consistently ordered *in some way common to both* (but the - strings themselves do not need to be sorted). - """ + """ + Helper class which tracks the difference between two streams of strings, calling the `added` + callback for strings when they are successfully verified as being present in the first stream + and not present in the second stream. + + This class requires that the streams of strings be consistently ordered *in some way common to + both* (but the strings themselves do not need to be sorted). + """ def __init__(self, reporter, result_per_stream): self._reporter = reporter @@ -172,8 +186,9 @@ class StreamingDiffTracker(object): self.push_new([]) def push_new(self, stream_values): - """ Pushes a list of values for the `New` stream. - """ + """ + Pushes a list of values for the `New` stream. + """ # Add all the new values to the list. 
counter = 0 @@ -199,8 +214,9 @@ class StreamingDiffTracker(object): self._new_stream.pop_until(self._new_stream.index(value)) def push_old(self, stream_values): - """ Pushes a stream of values for the `Old` stream. - """ + """ + Pushes a stream of values for the `Old` stream. + """ if self._old_stream_finished: return diff --git a/util/names.py b/util/names.py index b1278bd80..4384bd60c 100644 --- a/util/names.py +++ b/util/names.py @@ -18,14 +18,17 @@ TAG_ERROR = ( class ImplicitLibraryNamespaceNotAllowed(Exception): - """ Exception raised if the implicit library namespace was specified but is - not allowed. """ + """ + Exception raised if the implicit library namespace was specified but is not allowed. + """ pass def escape_tag(tag, default="latest"): - """ Escapes a Docker tag, ensuring it matches the tag regular expression. """ + """ + Escapes a Docker tag, ensuring it matches the tag regular expression. + """ if not tag: return default @@ -73,9 +76,10 @@ def parse_robot_username(robot_username): def parse_urn(urn): - """ Parses a URN, returning a pair that contains a list of URN - namespace parts, followed by the URN's unique ID. - """ + """ + Parses a URN, returning a pair that contains a list of URN namespace parts, followed by the + URN's unique ID. + """ if not urn.startswith("urn:"): return None @@ -84,9 +88,10 @@ def parse_urn(urn): def parse_single_urn(urn): - """ Parses a URN, returning a pair that contains the first - namespace part, followed by the URN's unique ID. - """ + """ + Parses a URN, returning a pair that contains the first namespace part, followed by the URN's + unique ID. + """ result = parse_urn(urn) if result is None or not len(result[0]): return None diff --git a/util/registry/aufs.py b/util/registry/aufs.py index 67d2b6e1b..e6a6f98e0 100644 --- a/util/registry/aufs.py +++ b/util/registry/aufs.py @@ -6,15 +6,18 @@ AUFS_WHITEOUT_PREFIX_LENGTH = len(AUFS_WHITEOUT) def is_aufs_metadata(absolute): - """ Returns whether the given absolute references an AUFS metadata file. """ + """ + Returns whether the given absolute references an AUFS metadata file. + """ filename = os.path.basename(absolute) return filename.startswith(AUFS_METADATA) or absolute.startswith(AUFS_METADATA) def get_deleted_filename(absolute): - """ Returns the name of the deleted file referenced by the AUFS whiteout file at - the given path or None if the file path does not reference a whiteout file. - """ + """ + Returns the name of the deleted file referenced by the AUFS whiteout file at the given path or + None if the file path does not reference a whiteout file. + """ filename = os.path.basename(absolute) if not filename.startswith(AUFS_WHITEOUT): return None @@ -23,9 +26,10 @@ def get_deleted_filename(absolute): def get_deleted_prefix(absolute): - """ Returns the path prefix of the deleted file referenced by the AUFS whiteout file at - the given path or None if the file path does not reference a whiteout file. - """ + """ + Returns the path prefix of the deleted file referenced by the AUFS whiteout file at the given + path or None if the file path does not reference a whiteout file. 
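# Sketch, not part of the patch, of the AUFS whiteout convention the helpers
# above decode: a deleted file is represented by a marker file whose basename
# carries the ".wh." prefix. The prefix value here is an assumption based on
# standard AUFS naming; the real constants live at the top of util/registry/aufs.py.
import os

def _deleted_filename_sketch(absolute, whiteout_prefix=".wh."):
    filename = os.path.basename(absolute)
    if not filename.startswith(whiteout_prefix):
        return None
    return filename[len(whiteout_prefix):]

assert _deleted_filename_sketch("/etc/.wh.passwd") == "passwd"
assert _deleted_filename_sketch("/etc/passwd") is None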
+ """ deleted_filename = get_deleted_filename(absolute) if deleted_filename is None: return None diff --git a/util/registry/dockerver.py b/util/registry/dockerver.py index 04c6e353f..852a4cda6 100644 --- a/util/registry/dockerver.py +++ b/util/registry/dockerver.py @@ -8,11 +8,12 @@ _ONE_FIVE_ZERO = "1.5.0" def docker_version(user_agent_string): - """ Extract the Docker version from the user agent, taking special care to - handle the case of a 1.5 client requesting an auth token, which sends - a broken user agent. If we can not positively identify a version, return - None. - """ + """ + Extract the Docker version from the user agent, taking special care to handle the case of a 1.5 + client requesting an auth token, which sends a broken user agent. + + If we can not positively identify a version, return None. + """ # First search for a well defined semver portion in the UA header. found_semver = _USER_AGENT_SEARCH_REGEX.search(user_agent_string) diff --git a/util/registry/filelike.py b/util/registry/filelike.py index 7db0ec3a1..2e33366d8 100644 --- a/util/registry/filelike.py +++ b/util/registry/filelike.py @@ -71,9 +71,10 @@ def wrap_with_handler(in_fp, handler): class FilelikeStreamConcat(object): - """ A file-like object which concats all the file-like objects in the specified generator into - a single stream. - """ + """ + A file-like object which concats all the file-like objects in the specified generator into a + single stream. + """ def __init__(self, file_generator): self._file_generator = file_generator @@ -111,9 +112,12 @@ class FilelikeStreamConcat(object): class StreamSlice(BaseStreamFilelike): - """ A file-like object which returns a file-like object that represents a slice of the data in - the specified file obj. All methods will act as if the slice is its own file. - """ + """ + A file-like object which returns a file-like object that represents a slice of the data in the + specified file obj. + + All methods will act as if the slice is its own file. + """ def __init__(self, fileobj, start_offset=0, end_offset_exclusive=READ_UNTIL_END): super(StreamSlice, self).__init__(fileobj) @@ -153,10 +157,12 @@ class StreamSlice(BaseStreamFilelike): class LimitingStream(StreamSlice): - """ A file-like object which mimics the specified file stream being limited to the given number - of bytes. All calls after that limit (if specified) will act as if the file has no additional - data. - """ + """ + A file-like object which mimics the specified file stream being limited to the given number of + bytes. + + All calls after that limit (if specified) will act as if the file has no additional data. + """ def __init__(self, fileobj, read_limit=READ_UNTIL_END, seekable=True): super(LimitingStream, self).__init__(fileobj, 0, read_limit) diff --git a/util/registry/generatorfile.py b/util/registry/generatorfile.py index aee6b8f71..6a4266907 100644 --- a/util/registry/generatorfile.py +++ b/util/registry/generatorfile.py @@ -4,9 +4,11 @@ def _complain_ifclosed(closed): class GeneratorFile(object): - """ File-like object which wraps a Python generator to produce the file contents. - Modeled on StringIO and comments on the file-like interface copied from there. - """ + """ + File-like object which wraps a Python generator to produce the file contents. + + Modeled on StringIO and comments on the file-like interface copied from there. 
+ """ def __init__(self, generator): self._generator = generator @@ -18,17 +20,20 @@ class GeneratorFile(object): return self def tell(self): - """Return the file's current position, like stdio's ftell().""" + """ + Return the file's current position, like stdio's ftell(). + """ _complain_ifclosed(self._closed) return self._position def next(self): - """A file object is its own iterator, for example iter(f) returns f - (unless f is closed). When a file is used as an iterator, typically - in a for loop (for example, for line in f: print line), the next() - method is called repeatedly. This method returns the next input line, - or raises StopIteration when EOF is hit. - """ + """ + A file object is its own iterator, for example iter(f) returns f (unless f is closed). + + When a file is used as an iterator, typically in a for loop (for example, for line in f: + print line), the next() method is called repeatedly. This method returns the next input + line, or raises StopIteration when EOF is hit. + """ _complain_ifclosed(self._closed) r = self.read() if not r: @@ -50,13 +55,14 @@ class GeneratorFile(object): _complain_ifclosed(self._closed) def read(self, size=-1): - """Read at most size bytes from the file - (less if the read hits EOF before obtaining size bytes). + """ + Read at most size bytes from the file (less if the read hits EOF before obtaining size + bytes). - If the size argument is negative or omitted, read all data until EOF - is reached. The bytes are returned as a string object. An empty - string is returned when EOF is encountered immediately. - """ + If the size argument is negative or omitted, read all data until EOF is reached. The bytes + are returned as a string object. An empty string is returned when EOF is encountered + immediately. + """ _complain_ifclosed(self._closed) buf = self._buf while size < 0 or len(buf) < size: diff --git a/util/registry/gzipinputstream.py b/util/registry/gzipinputstream.py index e063811dc..5b918c890 100644 --- a/util/registry/gzipinputstream.py +++ b/util/registry/gzipinputstream.py @@ -10,20 +10,20 @@ WINDOW_BUFFER_SIZE = 16 + zlib.MAX_WBITS class GzipInputStream(object): """ - Simple class that allow streaming reads from GZip files. + Simple class that allow streaming reads from GZip files. - Python 2.x gzip.GZipFile relies on .seek() and .tell(), so it - doesn't support this (@see: http://bo4.me/YKWSsL). + Python 2.x gzip.GZipFile relies on .seek() and .tell(), so it + doesn't support this (@see: http://bo4.me/YKWSsL). - Adapted from: https://gist.github.com/beaufour/4205533 - """ + Adapted from: https://gist.github.com/beaufour/4205533 + """ def __init__(self, fileobj): """ - Initialize with the given file-like object. + Initialize with the given file-like object. - @param fileobj: file-like object, - """ + @param fileobj: file-like object, + """ self._file = fileobj self._zip = zlib.decompressobj(WINDOW_BUFFER_SIZE) self._offset = 0 # position in unzipped stream @@ -31,10 +31,10 @@ class GzipInputStream(object): def __fill(self, num_bytes): """ - Fill the internal buffer with 'num_bytes' of data. + Fill the internal buffer with 'num_bytes' of data. 
diff --git a/util/registry/gzipstream.py b/util/registry/gzipstream.py
index 026ae8edb..d12106412 100644
--- a/util/registry/gzipstream.py
+++ b/util/registry/gzipstream.py
@@ -21,9 +21,11 @@ class SizeInfo(object):

def calculate_size_handler():
- """ Returns an object and a SocketReader handler. The handler will gunzip the data it receives,
- adding the size found to the object.
- """
+ """
+ Returns an object and a SocketReader handler.
+
+ The handler will gunzip the data it receives, adding the size found to the object.
+ """
size_info = SizeInfo()
decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
diff --git a/util/registry/queuefile.py b/util/registry/queuefile.py
index 7525c391f..5ba7392f9 100644
--- a/util/registry/queuefile.py
+++ b/util/registry/queuefile.py
@@ -2,9 +2,10 @@ from multiprocessing.queues import Empty, Queue

class QueueFile(object):
- """ Class which implements a file-like interface and reads QueueResult's from a blocking
- multiprocessing queue.
- """
+ """
+ Class which implements a file-like interface and reads QueueResult's from a blocking
+ multiprocessing queue.
+ """
def __init__(self, queue, name=None, timeout=None):
self._queue = queue
diff --git a/util/registry/queueprocess.py b/util/registry/queueprocess.py
index 63ebec99c..43cb1b349 100644
--- a/util/registry/queueprocess.py
+++ b/util/registry/queueprocess.py
@@ -13,9 +13,9 @@ logger.setLevel(logging.INFO)

class QueueProcess(object):
- """ Helper class which invokes a worker in a process to produce
- data for one (or more) queues.
- """
+ """
+ Helper class which invokes a worker in a process to produce data for one (or more) queues.
+ """
def __init__(self, get_producer, chunk_size, max_size, args, finished=None):
self._get_producer = get_producer
@@ -26,9 +26,11 @@
self._finished = finished
def create_queue(self):
- """ Adds a multiprocessing queue to the list of queues. Any queues added
- will have the data produced appended.
- """
+ """
+ Adds a multiprocessing queue to the list of queues.
+
+ Any queue added will have the produced data appended to it.
+ """
queue = Queue(self._max_size / self._chunk_size)
self._queues.append(queue)
return queue
diff --git a/util/registry/replication.py b/util/registry/replication.py
index a17020646..e7b55cec8 100644
--- a/util/registry/replication.py
+++ b/util/registry/replication.py
@@ -12,10 +12,12 @@ DEFAULT_BATCH_SIZE = 1000
@contextmanager
def queue_replication_batch(namespace, batch_size=DEFAULT_BATCH_SIZE):
"""
- Context manager implementation which returns a target callable that takes the storage
- to queue for replication. When the the context block exits the items generated by
- the callable will be bulk inserted into the queue with the specified batch size.
- """
+ Context manager implementation which returns a target callable that takes the storage to queue
+ for replication.
+
+ When the context block exits, the items generated by the callable will be bulk inserted into
+ the queue with the specified batch size.
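# Generic sketch, not part of the patch, of the pattern
# queue_replication_batch (above) implements: a context manager that yields a
# callable and bulk-flushes what the callable collected when the block exits.
# Names are illustrative; the real code delegates to
# image_replication_queue.batch_insert.
from contextlib import contextmanager

@contextmanager
def _batching_sketch(flush, batch_size):
    pending = []

    def put(item):
        pending.append(item)
        if len(pending) >= batch_size:
            flush(list(pending))
            del pending[:]

    try:
        yield put
    finally:
        if pending:
            flush(list(pending))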
+ """ namespace_user = model.user.get_namespace_user(namespace) with image_replication_queue.batch_insert(batch_size) as queue_put: @@ -33,6 +35,8 @@ def queue_replication_batch(namespace, batch_size=DEFAULT_BATCH_SIZE): def queue_storage_replication(namespace, storage): - """ Queues replication for the given image storage under the given namespace (if enabled). """ + """ + Queues replication for the given image storage under the given namespace (if enabled). + """ with queue_replication_batch(namespace, 1) as batch_spawn: batch_spawn(storage) diff --git a/util/registry/streamlayerformat.py b/util/registry/streamlayerformat.py index 494a65aed..e32d3109d 100644 --- a/util/registry/streamlayerformat.py +++ b/util/registry/streamlayerformat.py @@ -8,7 +8,9 @@ from util.registry.tarlayerformat import TarLayerFormat class StreamLayerMerger(TarLayerFormat): - """ Class which creates a generator of the combined TAR data for a set of Docker layers. """ + """ + Class which creates a generator of the combined TAR data for a set of Docker layers. + """ def __init__(self, get_tar_stream_iterator, path_prefix=None, reporter=None): super(StreamLayerMerger, self).__init__( diff --git a/util/registry/tarlayerformat.py b/util/registry/tarlayerformat.py index 781dd1e26..1452d03c9 100644 --- a/util/registry/tarlayerformat.py +++ b/util/registry/tarlayerformat.py @@ -10,7 +10,9 @@ from util.abchelpers import nooper class TarLayerReadException(Exception): - """ Exception raised when reading a layer has failed. """ + """ + Exception raised when reading a layer has failed. + """ pass @@ -23,7 +25,9 @@ CHUNK_SIZE = 1024 * 1024 * 9 class TarLayerFormatterReporter(object): @abstractmethod def report_pass(self, stream_count): - """ Reports a formatting pass. """ + """ + Reports a formatting pass. + """ pass @@ -34,7 +38,9 @@ class NoopReporter(TarLayerFormatterReporter): @add_metaclass(ABCMeta) class TarLayerFormat(object): - """ Class which creates a generator of the combined TAR data. """ + """ + Class which creates a generator of the combined TAR data. + """ def __init__(self, tar_stream_getter_iterator, path_prefix=None, reporter=None): self.tar_stream_getter_iterator = tar_stream_getter_iterator @@ -146,20 +152,23 @@ class TarLayerFormat(object): @abstractmethod def is_skipped_file(self, filename): - """ Returns true if the file with the given name will be skipped during append. - """ + """ + Returns true if the file with the given name will be skipped during append. + """ pass @abstractmethod def should_append_file(self, filename): - """ Returns true if the file with the given name should be appended when producing - the new TAR. - """ + """ + Returns true if the file with the given name should be appended when producing the new TAR. + """ pass @abstractmethod def after_tar_layer(self): - """ Invoked after a TAR layer is added, to do any post-add work. """ + """ + Invoked after a TAR layer is added, to do any post-add work. + """ pass @staticmethod diff --git a/util/registry/torrent.py b/util/registry/torrent.py index 8eecdd823..1fc8bb579 100644 --- a/util/registry/torrent.py +++ b/util/registry/torrent.py @@ -30,18 +30,20 @@ class TorrentConfiguration(object): def _jwt_from_infodict(torrent_config, infodict): - """ Returns an encoded JWT for the given BitTorrent info dict, signed by the local instance's - private key. - """ + """ + Returns an encoded JWT for the given BitTorrent info dict, signed by the local instance's + private key. 
+ """ digest = hashlib.sha1() digest.update(bencode.bencode(infodict)) return jwt_from_infohash(torrent_config, digest.digest()) def jwt_from_infohash(torrent_config, infohash_digest): - """ Returns an encoded JWT for the given BitTorrent infohash, signed by the local instance's - private key. - """ + """ + Returns an encoded JWT for the given BitTorrent infohash, signed by the local instance's private + key. + """ token_data = { "iss": torrent_config.instance_keys.service_name, "aud": torrent_config.announce_url, @@ -78,21 +80,28 @@ def make_torrent(torrent_config, name, webseed, length, piece_length, pieces): def public_torrent_filename(blob_uuid): - """ Returns the filename for the given blob UUID in a public image. """ + """ + Returns the filename for the given blob UUID in a public image. + """ return hashlib.sha256(blob_uuid).hexdigest() def per_user_torrent_filename(torrent_config, user_uuid, blob_uuid): - """ Returns the filename for the given blob UUID for a private image. """ + """ + Returns the filename for the given blob UUID for a private image. + """ joined = torrent_config.filename_pepper + "||" + blob_uuid + "||" + user_uuid return hashlib.sha256(joined).hexdigest() class PieceHasher(object): - """ Utility for computing torrent piece hashes as the data flows through the update - method of this class. Users should get the final value by calling final_piece_hashes - since new chunks are allocated lazily. - """ + """ + Utility for computing torrent piece hashes as the data flows through the update method of this + class. + + Users should get the final value by calling final_piece_hashes since new chunks are allocated + lazily. + """ def __init__( self, diff --git a/util/repomirror/api.py b/util/repomirror/api.py index 1c713047c..68f7b658f 100644 --- a/util/repomirror/api.py +++ b/util/repomirror/api.py @@ -19,21 +19,28 @@ logger = logging.getLogger(__name__) class RepoMirrorException(Exception): - """ Exception raised when a layer fails to analyze due to a request issue. """ + """ + Exception raised when a layer fails to analyze due to a request issue. + """ class RepoMirrorRetryException(Exception): - """ Exception raised when a layer fails to analyze due to a request issue, and the request should - be retried. - """ + """ + Exception raised when a layer fails to analyze due to a request issue, and the request should be + retried. + """ class APIRequestFailure(Exception): - """ Exception raised when there is a failure to conduct an API request. """ + """ + Exception raised when there is a failure to conduct an API request. + """ class Non200ResponseException(Exception): - """ Exception raised when the upstream API returns a non-200 HTTP status code. """ + """ + Exception raised when the upstream API returns a non-200 HTTP status code. + """ def __init__(self, response): super(Non200ResponseException, self).__init__() @@ -45,7 +52,9 @@ _API_METHOD_PING = "metrics" class RepoMirrorAPI(object): - """ Helper class for talking to the Repository Mirror service (usually Skopeo). """ + """ + Helper class for talking to the Repository Mirror service (usually Skopeo). + """ def __init__(self, config, server_hostname=None, skip_validation=False, instance_keys=None): feature_enabled = config.get("FEATURE_REPO_MIRROR", False) @@ -67,38 +76,53 @@ class RepoMirrorAPI(object): @add_metaclass(ABCMeta) class RepoMirrorAPIInterface(object): - """ Helper class for talking to the Repository Mirror service (usually Skopeo Worker). 
""" + """ + Helper class for talking to the Repository Mirror service (usually Skopeo Worker). + """ @abstractmethod def ping(self): - """ Calls GET on the metrics endpoint of the repo mirror to ensure it is running - and properly configured. Returns the HTTP response. - """ + """ + Calls GET on the metrics endpoint of the repo mirror to ensure it is running and properly + configured. + + Returns the HTTP response. + """ pass @abstractmethod def repository_mirror(self, repository): - """ Posts the given repository to the repo mirror for processing, blocking until complete. + """ + Posts the given repository to the repo mirror for processing, blocking until complete. + Returns the analysis version on success or raises an exception deriving from AnalyzeLayerException on failure. Callers should handle all cases of AnalyzeLayerException. - """ + """ pass @abstractmethod def get_repository_data(self, repository): - """ Returns the layer data for the specified layer. On error, returns None. """ + """ + Returns the layer data for the specified layer. + + On error, returns None. + """ pass @nooper class NoopRepoMirrorAPI(RepoMirrorAPIInterface): - """ No-op version of the repo mirror API. """ + """ + No-op version of the repo mirror API. + """ pass class ImplementedRepoMirrorAPI(RepoMirrorAPIInterface): - """ Helper class for talking to the repo mirror service. """ + """ + Helper class for talking to the repo mirror service. + """ def __init__(self, config, server_hostname, client=None, instance_keys=None): self._config = config @@ -107,9 +131,12 @@ class ImplementedRepoMirrorAPI(RepoMirrorAPIInterface): self._server_hostname = server_hostname def repository_mirror(self, repository): - """ Posts the given repository and config information to the mirror endpoint, blocking until complete. + """ + Posts the given repository and config information to the mirror endpoint, blocking until + complete. + Returns the results on success or raises an exception. - """ + """ def _response_json(request, response): try: @@ -123,13 +150,20 @@ class ImplementedRepoMirrorAPI(RepoMirrorAPIInterface): return def get_repository_data(self, repository): - """ Returns the layer data for the specified layer. On error, returns None. """ + """ + Returns the layer data for the specified layer. + + On error, returns None. + """ return None def ping(self): - """ Calls GET on the metrics endpoint of the repository mirror to ensure it is running - and properly configured. Returns the HTTP response. - """ + """ + Calls GET on the metrics endpoint of the repository mirror to ensure it is running and + properly configured. + + Returns the HTTP response. + """ try: return self._call("GET", _API_METHOD_PING) except requests.exceptions.Timeout as tie: diff --git a/util/repomirror/skopeomirror.py b/util/repomirror/skopeomirror.py index cbba0bb6c..a958d4425 100644 --- a/util/repomirror/skopeomirror.py +++ b/util/repomirror/skopeomirror.py @@ -60,9 +60,11 @@ class SkopeoMirror(object): verbose_logs=False, ): """ - Unless a specific tag is known, 'skopeo inspect' won't work. Here first 'latest' is checked - and then the tag expression, split at commas, is each checked until one works. - """ + Unless a specific tag is known, 'skopeo inspect' won't work. + + Here first 'latest' is checked and then the tag expression, split at commas, is each checked + until one works. 
+ """ args = ["/usr/bin/skopeo"] if verbose_logs: diff --git a/util/repomirror/validator.py b/util/repomirror/validator.py index aea790301..2fe1f9a89 100644 --- a/util/repomirror/validator.py +++ b/util/repomirror/validator.py @@ -5,7 +5,9 @@ logger = logging.getLogger(__name__) class RepoMirrorConfigValidator(object): - """ Helper class for validating the repository mirror configuration. """ + """ + Helper class for validating the repository mirror configuration. + """ def __init__(self, feature_repo_mirror): self._feature_repo_mirror = feature_repo_mirror diff --git a/util/request.py b/util/request.py index 7a939236b..27f86824a 100644 --- a/util/request.py +++ b/util/request.py @@ -4,7 +4,9 @@ from flask import request def get_request_ip(): - """ Returns the IP address of the client making the current Flask request or None if none. """ + """ + Returns the IP address of the client making the current Flask request or None if none. + """ remote_addr = request.remote_addr or None if os.getenv("TEST", "false").lower() == "true": remote_addr = request.headers.get("X-Override-Remote-Addr-For-Testing", remote_addr) diff --git a/util/saas/cloudwatch.py b/util/saas/cloudwatch.py index 7b08e7a1e..ecede40d3 100644 --- a/util/saas/cloudwatch.py +++ b/util/saas/cloudwatch.py @@ -20,8 +20,8 @@ FAILED_SEND_SLEEP_SECS = 15 def start_cloudwatch_sender(metrics, app): """ - Starts sending from metrics to a new CloudWatchSender. - """ + Starts sending from metrics to a new CloudWatchSender. + """ access_key = app.config.get("CLOUDWATCH_AWS_ACCESS_KEY") secret_key = app.config.get("CLOUDWATCH_AWS_SECRET_KEY") namespace = app.config.get("CLOUDWATCH_NAMESPACE") @@ -36,8 +36,9 @@ def start_cloudwatch_sender(metrics, app): class CloudWatchSender(Thread): """ - CloudWatchSender loops indefinitely and pulls metrics off of a queue then sends it to CloudWatch. - """ + CloudWatchSender loops indefinitely and pulls metrics off of a queue then sends it to + CloudWatch. + """ def __init__(self, metrics, aws_access_key, aws_secret_key, namespace): Thread.__init__(self) diff --git a/util/saas/useranalytics.py b/util/saas/useranalytics.py index f4ee506af..f50797e96 100644 --- a/util/saas/useranalytics.py +++ b/util/saas/useranalytics.py @@ -27,15 +27,15 @@ def build_error_callback(message_when_exception): class _MarketoAnalyticsClient(object): - """ User analytics implementation which will report user changes to the - Marketo API. - """ + """ + User analytics implementation which will report user changes to the Marketo API. + """ def __init__(self, marketo_client, munchkin_private_key, lead_source): - """ Instantiate with the given marketorestpython.client, the Marketo - Munchkin Private Key, and the Lead Source that we want to set when we - create new lead records in Marketo. - """ + """ + Instantiate with the given marketorestpython.client, the Marketo Munchkin Private Key, and + the Lead Source that we want to set when we create new lead records in Marketo. + """ self._marketo = marketo_client self._munchkin_private_key = munchkin_private_key self._lead_source = lead_source @@ -114,9 +114,10 @@ class _MarketoAnalyticsClient(object): @AsyncExecutorWrapper.sync def get_user_analytics_metadata(self, user_obj): - """ Return a list of properties that should be added to the user object to allow - analytics associations. - """ + """ + Return a list of properties that should be added to the user object to allow analytics + associations. 
+ """ if not self._munchkin_private_key: return dict() diff --git a/util/secscan/analyzer.py b/util/secscan/analyzer.py index 5fff26396..83cc8cab9 100644 --- a/util/secscan/analyzer.py +++ b/util/secscan/analyzer.py @@ -24,21 +24,26 @@ logger = logging.getLogger(__name__) class PreemptedException(Exception): - """ Exception raised if another worker analyzed the image before this worker was able to do so. - """ + """ + Exception raised if another worker analyzed the image before this worker was able to do so. + """ class LayerAnalyzer(object): - """ Helper class to perform analysis of a layer via the security scanner. """ + """ + Helper class to perform analysis of a layer via the security scanner. + """ def __init__(self, config, api): self._api = api self._target_version = config.get("SECURITY_SCANNER_ENGINE_VERSION_TARGET", 2) def analyze_recursively(self, layer): - """ Analyzes a layer and all its parents. Raises a PreemptedException if the analysis was - preempted by another worker. - """ + """ + Analyzes a layer and all its parents. + + Raises a PreemptedException if the analysis was preempted by another worker. + """ try: self._analyze_recursively_and_check(layer) except MissingParentLayerException: @@ -51,9 +56,10 @@ class LayerAnalyzer(object): raise PreemptedException def _analyze_recursively_and_check(self, layer, force_parents=False): - """ Analyzes a layer and all its parents, optionally forcing parents to be reanalyzed, - and checking for various exceptions that can occur during analysis. - """ + """ + Analyzes a layer and all its parents, optionally forcing parents to be reanalyzed, and + checking for various exceptions that can occur during analysis. + """ try: self._analyze_recursively(layer, force_parents=force_parents) except InvalidLayerException: @@ -96,13 +102,14 @@ class LayerAnalyzer(object): self._analyze(layer, force_parents=force_parents) def _analyze(self, layer, force_parents=False): - """ Analyzes a single layer. + """ + Analyzes a single layer. Return a tuple of two bools: - The first one tells us if we should evaluate its children. - The second one is set to False when another worker pre-empted the candidate's analysis for us. - """ + """ # If the parent couldn't be analyzed with the target version or higher, we can't analyze # this image. Mark it as failed with the current target version. if not force_parents and ( diff --git a/util/secscan/api.py b/util/secscan/api.py index 4f87e0363..f9cab80b2 100644 --- a/util/secscan/api.py +++ b/util/secscan/api.py @@ -31,29 +31,40 @@ logger = logging.getLogger(__name__) class AnalyzeLayerException(Exception): - """ Exception raised when a layer fails to analyze due to a request issue. """ + """ + Exception raised when a layer fails to analyze due to a request issue. + """ class AnalyzeLayerRetryException(Exception): - """ Exception raised when a layer fails to analyze due to a request issue, and the request should - be retried. - """ + """ + Exception raised when a layer fails to analyze due to a request issue, and the request should be + retried. + """ class MissingParentLayerException(AnalyzeLayerException): - """ Exception raised when the parent of the layer is missing from the security scanner. """ + """ + Exception raised when the parent of the layer is missing from the security scanner. + """ class InvalidLayerException(AnalyzeLayerException): - """ Exception raised when the layer itself cannot be handled by the security scanner. 
""" + """ + Exception raised when the layer itself cannot be handled by the security scanner. + """ class APIRequestFailure(Exception): - """ Exception raised when there is a failure to conduct an API request. """ + """ + Exception raised when there is a failure to conduct an API request. + """ class Non200ResponseException(Exception): - """ Exception raised when the upstream API returns a non-200 HTTP status code. """ + """ + Exception raised when the upstream API returns a non-200 HTTP status code. + """ def __init__(self, response): super(Non200ResponseException, self).__init__() @@ -69,7 +80,9 @@ _API_METHOD_PING = "metrics" def compute_layer_id(layer): - """ Returns the ID for the layer in the security scanner. """ + """ + Returns the ID for the layer in the security scanner. + """ # NOTE: this is temporary until we switch to Clair V3. if isinstance(layer, ManifestDataType): if layer._is_tag_manifest: @@ -89,7 +102,9 @@ def compute_layer_id(layer): class SecurityScannerAPI(object): - """ Helper class for talking to the Security Scan service (usually Clair). """ + """ + Helper class for talking to the Security Scan service (usually Clair). + """ def __init__( self, @@ -128,69 +143,91 @@ class SecurityScannerAPI(object): @add_metaclass(ABCMeta) class SecurityScannerAPIInterface(object): - """ Helper class for talking to the Security Scan service (usually Clair). """ + """ + Helper class for talking to the Security Scan service (usually Clair). + """ @abstractmethod def cleanup_layers(self, layers): - """ Callback invoked by garbage collection to cleanup any layers that no longer - need to be stored in the security scanner. - """ + """ + Callback invoked by garbage collection to cleanup any layers that no longer need to be + stored in the security scanner. + """ pass @abstractmethod def ping(self): - """ Calls GET on the metrics endpoint of the security scanner to ensure it is running - and properly configured. Returns the HTTP response. - """ + """ + Calls GET on the metrics endpoint of the security scanner to ensure it is running and + properly configured. + + Returns the HTTP response. + """ pass @abstractmethod def delete_layer(self, layer): - """ Calls DELETE on the given layer in the security scanner, removing it from - its database. - """ + """ + Calls DELETE on the given layer in the security scanner, removing it from its database. + """ pass @abstractmethod def analyze_layer(self, layer): - """ Posts the given layer to the security scanner for analysis, blocking until complete. + """ + Posts the given layer to the security scanner for analysis, blocking until complete. + Returns the analysis version on success or raises an exception deriving from AnalyzeLayerException on failure. Callers should handle all cases of AnalyzeLayerException. - """ + """ pass @abstractmethod def check_layer_vulnerable(self, layer_id, cve_name): - """ Checks to see if the layer with the given ID is vulnerable to the specified CVE. """ + """ + Checks to see if the layer with the given ID is vulnerable to the specified CVE. + """ pass @abstractmethod def get_notification(self, notification_name, layer_limit=100, page=None): - """ Gets the data for a specific notification, with optional page token. + """ + Gets the data for a specific notification, with optional page token. + Returns a tuple of the data (None on failure) and whether to retry. - """ + """ pass @abstractmethod def mark_notification_read(self, notification_name): - """ Marks a security scanner notification as read. 
""" + """ + Marks a security scanner notification as read. + """ pass @abstractmethod def get_layer_data(self, layer, include_features=False, include_vulnerabilities=False): - """ Returns the layer data for the specified layer. On error, returns None. """ + """ + Returns the layer data for the specified layer. + + On error, returns None. + """ pass @nooper class NoopSecurityScannerAPI(SecurityScannerAPIInterface): - """ No-op version of the security scanner API. """ + """ + No-op version of the security scanner API. + """ pass class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): - """ Helper class for talking to the Security Scan service (Clair). """ + """ + Helper class for talking to the Security Scan service (Clair). + """ # TODO refactor this to not take an app config, and instead just the things it needs as a config object def __init__( @@ -206,10 +243,12 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): self._uri_creator = uri_creator def _get_image_url_and_auth(self, image): - """ Returns a tuple of the url and the auth header value that must be used - to fetch the layer data itself. If the image can't be addressed, we return - None. - """ + """ + Returns a tuple of the url and the auth header value that must be used to fetch the layer + data itself. + + If the image can't be addressed, we return None. + """ if self._instance_keys is None: raise Exception("No Instance keys provided to Security Scanner API") @@ -251,9 +290,11 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): return uri, auth_header def _new_analyze_request(self, layer): - """ Create the request body to submit the given layer for analysis. If the layer's URL cannot - be found, returns None. - """ + """ + Create the request body to submit the given layer for analysis. + + If the layer's URL cannot be found, returns None. + """ layer_id = compute_layer_id(layer) if layer_id is None: return None @@ -282,16 +323,20 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): } def cleanup_layers(self, layers): - """ Callback invoked by garbage collection to cleanup any layers that no longer - need to be stored in the security scanner. - """ + """ + Callback invoked by garbage collection to cleanup any layers that no longer need to be + stored in the security scanner. + """ for layer in layers: self.delete_layer(layer) def ping(self): - """ Calls GET on the metrics endpoint of the security scanner to ensure it is running - and properly configured. Returns the HTTP response. - """ + """ + Calls GET on the metrics endpoint of the security scanner to ensure it is running and + properly configured. + + Returns the HTTP response. + """ try: return self._call("GET", _API_METHOD_PING) except requests.exceptions.Timeout as tie: @@ -311,9 +356,9 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): raise Exception(msg) def delete_layer(self, layer): - """ Calls DELETE on the given layer in the security scanner, removing it from - its database. - """ + """ + Calls DELETE on the given layer in the security scanner, removing it from its database. + """ layer_id = compute_layer_id(layer) if layer_id is None: return None @@ -335,10 +380,12 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): return False def analyze_layer(self, layer): - """ Posts the given layer to the security scanner for analysis, blocking until complete. + """ + Posts the given layer to the security scanner for analysis, blocking until complete. 
+ Returns the analysis version on success or raises an exception deriving from AnalyzeLayerException on failure. Callers should handle all cases of AnalyzeLayerException. - """ + """ def _response_json(request, response): try: @@ -398,7 +445,9 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): return _response_json(request, response)["Layer"]["IndexedByVersion"] def check_layer_vulnerable(self, layer_id, cve_name): - """ Checks to see if the layer with the given ID is vulnerable to the specified CVE. """ + """ + Checks to see if the layer with the given ID is vulnerable to the specified CVE. + """ layer_data = self._get_layer_data(layer_id, include_vulnerabilities=True) if layer_data is None or "Layer" not in layer_data or "Features" not in layer_data["Layer"]: return False @@ -411,9 +460,11 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): return False def get_notification(self, notification_name, layer_limit=100, page=None): - """ Gets the data for a specific notification, with optional page token. + """ + Gets the data for a specific notification, with optional page token. + Returns a tuple of the data (None on failure) and whether to retry. - """ + """ try: params = {"limit": layer_limit} @@ -441,7 +492,9 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): return json_response, False def mark_notification_read(self, notification_name): - """ Marks a security scanner notification as read. """ + """ + Marks a security scanner notification as read. + """ try: self._call("DELETE", _API_METHOD_MARK_NOTIFICATION_READ % notification_name) return True @@ -452,7 +505,11 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): return False def get_layer_data(self, layer, include_features=False, include_vulnerabilities=False): - """ Returns the layer data for the specified layer. On error, returns None. """ + """ + Returns the layer data for the specified layer. + + On error, returns None. + """ layer_id = compute_layer_id(layer) if layer_id is None: return None @@ -509,7 +566,9 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): raise APIRequestFailure() def _request(self, method, endpoint, path, body, params, timeout): - """ Issues an HTTP request to the security endpoint. """ + """ + Issues an HTTP request to the security endpoint. + """ url = _join_api_url(endpoint, self._config.get("SECURITY_SCANNER_API_VERSION", "v1"), path) signer_proxy_url = self._config.get("JWTPROXY_SIGNER", "localhost:8081") @@ -529,9 +588,10 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): return resp def _call(self, method, path, params=None, body=None): - """ Issues an HTTP request to the security endpoint handling the logic of using an alternative + """ + Issues an HTTP request to the security endpoint handling the logic of using an alternative BATCH endpoint for non-GET requests and failover for GET requests. - """ + """ timeout = self._config.get("SECURITY_SCANNER_API_TIMEOUT_SECONDS", 1) endpoint = self._config["SECURITY_SCANNER_ENDPOINT"] @@ -564,7 +624,9 @@ def _join_api_url(endpoint, api_version, path): @failover def _failover_read_request(request_fn, endpoint, path, body, params, timeout): - """ This function auto-retries read-only requests until they return a 2xx status code. """ + """ + This function auto-retries read-only requests until they return a 2xx status code. 
+ """ try: return request_fn("GET", endpoint, path, body, params, timeout) except (requests.exceptions.RequestException, Non200ResponseException) as ex: diff --git a/util/secscan/fake.py b/util/secscan/fake.py index ef9257b2b..e63af9bf5 100644 --- a/util/secscan/fake.py +++ b/util/secscan/fake.py @@ -11,18 +11,22 @@ from util.secscan.api import UNKNOWN_PARENT_LAYER_ERROR_MSG, compute_layer_id @contextmanager def fake_security_scanner(hostname="fakesecurityscanner"): - """ Context manager which yields a fake security scanner. All requests made to the given - hostname (default: fakesecurityscanner) will be handled by the fake. - """ + """ + Context manager which yields a fake security scanner. + + All requests made to the given hostname (default: fakesecurityscanner) will be handled by the + fake. + """ scanner = FakeSecurityScanner(hostname) with HTTMock(*(scanner.get_endpoints())): yield scanner class FakeSecurityScanner(object): - """ Implements a fake security scanner (with somewhat real responses) for testing API calls and - responses. - """ + """ + Implements a fake security scanner (with somewhat real responses) for testing API calls and + responses. + """ def __init__(self, hostname, index_version=1): self.hostname = hostname @@ -38,41 +42,52 @@ class FakeSecurityScanner(object): self.unexpected_status_layer_id = None def set_ok_layer_id(self, ok_layer_id): - """ Sets a layer ID that, if encountered when the analyze call is made, causes a 200 - to be immediately returned. - """ + """ + Sets a layer ID that, if encountered when the analyze call is made, causes a 200 to be + immediately returned. + """ self.ok_layer_id = ok_layer_id def set_fail_layer_id(self, fail_layer_id): - """ Sets a layer ID that, if encountered when the analyze call is made, causes a 422 - to be raised. - """ + """ + Sets a layer ID that, if encountered when the analyze call is made, causes a 422 to be + raised. + """ self.fail_layer_id = fail_layer_id def set_internal_error_layer_id(self, internal_error_layer_id): - """ Sets a layer ID that, if encountered when the analyze call is made, causes a 500 - to be raised. - """ + """ + Sets a layer ID that, if encountered when the analyze call is made, causes a 500 to be + raised. + """ self.internal_error_layer_id = internal_error_layer_id def set_error_layer_id(self, error_layer_id): - """ Sets a layer ID that, if encountered when the analyze call is made, causes a 400 - to be raised. - """ + """ + Sets a layer ID that, if encountered when the analyze call is made, causes a 400 to be + raised. + """ self.error_layer_id = error_layer_id def set_unexpected_status_layer_id(self, layer_id): - """ Sets a layer ID that, if encountered when the analyze call is made, causes an HTTP 600 - to be raised. This is useful in testing the robustness of the to unknown status codes. - """ + """ + Sets a layer ID that, if encountered when the analyze call is made, causes an HTTP 600 to be + raised. + + This is useful in testing the robustness of the to unknown status codes. + """ self.unexpected_status_layer_id = layer_id def has_layer(self, layer_id): - """ Returns true if the layer with the given ID has been analyzed. """ + """ + Returns true if the layer with the given ID has been analyzed. + """ return layer_id in self.layers def has_notification(self, notification_id): - """ Returns whether a notification with the given ID is found in the scanner. """ + """ + Returns whether a notification with the given ID is found in the scanner. 
+ """ return notification_id in self.notifications def add_notification( @@ -85,9 +100,10 @@ class FakeSecurityScanner(object): indexed_old_layer_ids=None, indexed_new_layer_ids=None, ): - """ Adds a new notification over the given sets of layer IDs and vulnerability information, + """ + Adds a new notification over the given sets of layer IDs and vulnerability information, returning the structural data of the notification created. - """ + """ notification_id = str(uuid.uuid4()) if old_vuln is None: old_vuln = dict(new_vuln) @@ -105,11 +121,15 @@ class FakeSecurityScanner(object): return self._get_notification_data(notification_id, 0, 100) def layer_id(self, layer): - """ Returns the Quay Security Scanner layer ID for the given layer (Image row). """ + """ + Returns the Quay Security Scanner layer ID for the given layer (Image row). + """ return compute_layer_id(layer) def add_layer(self, layer_id): - """ Adds a layer to the security scanner, with no features or vulnerabilities. """ + """ + Adds a layer to the security scanner, with no features or vulnerabilities. + """ self.layers[layer_id] = { "Name": layer_id, "Format": "Docker", @@ -117,11 +137,15 @@ class FakeSecurityScanner(object): } def remove_layer(self, layer_id): - """ Removes a layer from the security scanner. """ + """ + Removes a layer from the security scanner. + """ self.layers.pop(layer_id, None) def set_vulns(self, layer_id, vulns): - """ Sets the vulnerabilities for the layer with the given ID to those given. """ + """ + Sets the vulnerabilities for the layer with the given ID to those given. + """ self.layer_vulns[layer_id] = vulns # Since this call may occur before the layer is "anaylzed", we only add the data @@ -139,8 +163,10 @@ class FakeSecurityScanner(object): ) def _get_notification_data(self, notification_id, page, limit): - """ Returns the structural data for the notification with the given ID, paginated using - the given page and limit. """ + """ + Returns the structural data for the notification with the given ID, paginated using the + given page and limit. + """ notification = self.notifications[notification_id] limit = min(limit, notification["max_per_page"]) @@ -193,7 +219,9 @@ class FakeSecurityScanner(object): return notification_data def get_endpoints(self): - """ Returns the HTTMock endpoint definitions for the fake security scanner. """ + """ + Returns the HTTMock endpoint definitions for the fake security scanner. + """ @urlmatch(netloc=r"(.*\.)?" + self.hostname, path=r"/v1/layers/(.+)", method="GET") def get_layer_mock(url, request): diff --git a/util/secscan/notifier.py b/util/secscan/notifier.py index 216a34565..d817d8a53 100644 --- a/util/secscan/notifier.py +++ b/util/secscan/notifier.py @@ -21,14 +21,14 @@ class ProcessNotificationPageResult(Enum): class SecurityNotificationHandler(object): - """ Class to process paginated notifications from the security scanner and issue - Quay vulnerability_found notifications for all necessary tags. Callers should - initialize, call process_notification_page_data for each page until it returns - FINISHED_PROCESSING or FAILED and, if succeeded, then call send_notifications - to send out the notifications queued. + """ + Class to process paginated notifications from the security scanner and issue Quay + vulnerability_found notifications for all necessary tags. 
Callers should initialize, call + process_notification_page_data for each page until it returns FINISHED_PROCESSING or FAILED and, + if succeeded, then call send_notifications to send out the notifications queued. - NOTE: This is legacy code and should be removed once we're fully moved to Clair V4. - """ + NOTE: This is legacy code and should be removed once we're fully moved to Clair V4. + """ def __init__(self, legacy_secscan_api, results_per_stream): self.tags_by_repository_map = defaultdict(set) @@ -42,7 +42,9 @@ class SecurityNotificationHandler(object): self.vulnerability_info = None def send_notifications(self): - """ Sends all queued up notifications. """ + """ + Sends all queued up notifications. + """ if self.vulnerability_info is None: return @@ -70,9 +72,12 @@ class SecurityNotificationHandler(object): ) def process_notification_page_data(self, notification_page_data): - """ Processes the given notification page data to spawn vulnerability notifications as - necessary. Returns the status of the processing. - """ + """ + Processes the given notification page data to spawn vulnerability notifications as + necessary. + + Returns the status of the processing. + """ if not "New" in notification_page_data: return self._done() diff --git a/util/secscan/secscan_util.py b/util/secscan/secscan_util.py index 237c554a0..dbd8a3ecd 100644 --- a/util/secscan/secscan_util.py +++ b/util/secscan/secscan_util.py @@ -5,16 +5,18 @@ from flask import url_for def get_blob_download_uri_getter(context, url_scheme_and_hostname): """ - Returns a function with context to later generate the uri for a download blob - :param context: Flask RequestContext - :param url_scheme_and_hostname: URLSchemeAndHostname class instance - :return: function (repository_and_namespace, checksum) -> uri - """ + Returns a function with context to later generate the uri for a download blob. + + :param context: Flask RequestContext + :param url_scheme_and_hostname: URLSchemeAndHostname class instance + :return: function (repository_and_namespace, checksum) -> uri + """ def create_uri(repository_and_namespace, checksum): """ - Creates a uri for a download blob from a repository, namespace, and checksum from earlier context - """ + Creates a uri for a download blob from a repository, namespace, and checksum from earlier + context. + """ with context: relative_layer_url = url_for( "v2.download_blob", repository=repository_and_namespace, digest=checksum diff --git a/util/secscan/validator.py b/util/secscan/validator.py index 00023235c..2c8940e85 100644 --- a/util/secscan/validator.py +++ b/util/secscan/validator.py @@ -5,7 +5,9 @@ logger = logging.getLogger(__name__) class SecurityConfigValidator(object): - """ Helper class for validating the security scanner configuration. """ + """ + Helper class for validating the security scanner configuration. + """ def __init__(self, feature_sec_scan, sec_scan_endpoint): self._feature_sec_scan = feature_sec_scan diff --git a/util/security/aes.py b/util/security/aes.py index ee56f247e..5017615e2 100644 --- a/util/security/aes.py +++ b/util/security/aes.py @@ -7,10 +7,11 @@ from Crypto.Cipher import AES class AESCipher(object): - """ Helper class for encrypting and decrypting data via AES. + """ + Helper class for encrypting and decrypting data via AES. 
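The `SecurityNotificationHandler` docstring above prescribes a specific calling protocol; here is a caller-side sketch of that loop, where the page-fetching helper and the notification name are hypothetical stand-ins:

```python
handler = SecurityNotificationHandler(legacy_secscan_api, results_per_stream=100)

for page_data in fetch_notification_pages(notification_name):  # hypothetical helper
    result = handler.process_notification_page_data(page_data)
    if result == ProcessNotificationPageResult.FAILED:
        break
    if result == ProcessNotificationPageResult.FINISHED_PROCESSING:
        handler.send_notifications()
        break
```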
- Copied From: http://stackoverflow.com/a/21928790 - """ + Copied From: http://stackoverflow.com/a/21928790 + """ def __init__(self, key): self.bs = 32 diff --git a/util/security/crypto.py b/util/security/crypto.py index 1c51e14d2..ff1648d48 100644 --- a/util/security/crypto.py +++ b/util/security/crypto.py @@ -4,13 +4,21 @@ from cryptography.fernet import Fernet, InvalidToken def encrypt_string(string, key): - """ Encrypts a string with the specified key. The key must be 32 raw bytes. """ + """ + Encrypts a string with the specified key. + + The key must be 32 raw bytes. + """ f = Fernet(key) return f.encrypt(string) def decrypt_string(string, key, ttl=None): - """ Decrypts an encrypted string with the specified key. The key must be 32 raw bytes. """ + """ + Decrypts an encrypted string with the specified key. + + The key must be 32 raw bytes. + """ f = Fernet(key) try: return f.decrypt(str(string), ttl=ttl) diff --git a/util/security/fingerprint.py b/util/security/fingerprint.py index bd7fc1c8b..dd56bbc31 100644 --- a/util/security/fingerprint.py +++ b/util/security/fingerprint.py @@ -5,13 +5,13 @@ from util.canonicaljson import canonicalize def canonical_kid(jwk): - """This function returns the SHA256 hash of a canonical JWK. + """ + This function returns the SHA256 hash of a canonical JWK. - Args: - jwk (object): the JWK for which a kid will be generated. + Args: + jwk (object): the JWK for which a kid will be generated. - Returns: - string: the unique kid for the given JWK. - - """ + Returns: + string: the unique kid for the given JWK. + """ return sha256(json.dumps(canonicalize(jwk), separators=(",", ":"))).hexdigest() diff --git a/util/security/instancekeys.py b/util/security/instancekeys.py index 3be36ab77..f20e6e29f 100644 --- a/util/security/instancekeys.py +++ b/util/security/instancekeys.py @@ -22,17 +22,21 @@ class CachingKey(object): class InstanceKeys(object): - """ InstanceKeys defines a helper class for interacting with the Quay instance service keys - used for JWT signing of registry tokens as well as requests from Quay to other services - such as Clair. Each container will have a single registered instance key. - """ + """ + InstanceKeys defines a helper class for interacting with the Quay instance service keys used for + JWT signing of registry tokens as well as requests from Quay to other services such as Clair. + + Each container will have a single registered instance key. + """ def __init__(self, app): self.app = app self.instance_keys = ExpiresDict(self._load_instance_keys) def clear_cache(self): - """ Clears the cache of instance keys. """ + """ + Clears the cache of instance keys. + """ self.instance_keys = ExpiresDict(self._load_instance_keys) def _load_instance_keys(self): @@ -45,28 +49,38 @@ class InstanceKeys(object): @property def service_name(self): - """ Returns the name of the instance key's service (i.e. 'quay'). """ + """ + Returns the name of the instance key's service (i.e. 'quay'). + """ return self.app.config["INSTANCE_SERVICE_KEY_SERVICE"] @property def service_key_expiration(self): - """ Returns the defined expiration for instance service keys, in minutes. """ + """ + Returns the defined expiration for instance service keys, in minutes. + """ return self.app.config.get("INSTANCE_SERVICE_KEY_EXPIRATION", 120) @property @lru_cache(maxsize=1) def local_key_id(self): - """ Returns the ID of the local instance service key. """ + """ + Returns the ID of the local instance service key. 
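A quick sketch of the Fernet-based helpers above; key generation uses the standard `cryptography` API (the docstrings' "32 raw bytes" refers to the decoded key material behind the url-safe base64 encoding):

```python
from cryptography.fernet import Fernet

from util.security.crypto import encrypt_string, decrypt_string

key = Fernet.generate_key()  # url-safe base64 encoding of 32 random bytes
token = encrypt_string(b"registry secret", key)
assert decrypt_string(token, key) == b"registry secret"
assert decrypt_string(token, key, ttl=60) == b"registry secret"  # rejects tokens older than 60s
```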
+ """ return _load_file_contents(self.app.config["INSTANCE_SERVICE_KEY_KID_LOCATION"]) @property @lru_cache(maxsize=1) def local_private_key(self): - """ Returns the private key of the local instance service key. """ + """ + Returns the private key of the local instance service key. + """ return _load_file_contents(self.app.config["INSTANCE_SERVICE_KEY_LOCATION"]) def get_service_key_public_key(self, kid): - """ Returns the public key associated with the given instance service key or None if none. """ + """ + Returns the public key associated with the given instance service key or None if none. + """ caching_key = self.instance_keys.get(kid) if caching_key is None: return None @@ -75,6 +89,8 @@ class InstanceKeys(object): def _load_file_contents(path): - """ Returns the contents of the specified file path. """ + """ + Returns the contents of the specified file path. + """ with open(path) as f: return f.read() diff --git a/util/security/jwtutil.py b/util/security/jwtutil.py index 2209c4449..b8609bdc1 100644 --- a/util/security/jwtutil.py +++ b/util/security/jwtutil.py @@ -30,7 +30,9 @@ ALGORITHM_WHITELIST = ["rs256"] class _StrictJWT(PyJWT): - """ _StrictJWT defines a JWT decoder with extra checks. """ + """ + _StrictJWT defines a JWT decoder with extra checks. + """ @staticmethod def _get_default_options(): @@ -84,7 +86,9 @@ class _StrictJWT(PyJWT): def decode(jwt, key="", verify=True, algorithms=None, options=None, **kwargs): - """ Decodes a JWT. """ + """ + Decodes a JWT. + """ if not algorithms: raise InvalidAlgorithmError("algorithms must be specified") @@ -101,14 +105,18 @@ def decode(jwt, key="", verify=True, algorithms=None, options=None, **kwargs): def exp_max_s_option(max_exp_s): - """ Returns an options dictionary that sets the maximum expiration seconds for a JWT. """ + """ + Returns an options dictionary that sets the maximum expiration seconds for a JWT. + """ return { "exp_max_s": max_exp_s, } def jwk_dict_to_public_key(jwk): - """ Converts the specified JWK into a public key. """ + """ + Converts the specified JWK into a public key. + """ jwkest_key = keyrep(jwk) if isinstance(jwkest_key, RSAKey): pycrypto_key = jwkest_key.key diff --git a/util/security/registry_jwt.py b/util/security/registry_jwt.py index cfd2b4af8..2110cb175 100644 --- a/util/security/registry_jwt.py +++ b/util/security/registry_jwt.py @@ -38,9 +38,12 @@ class InvalidBearerTokenException(Exception): def decode_bearer_header(bearer_header, instance_keys, config): - """ decode_bearer_header decodes the given bearer header that contains an encoded JWT with both - a Key ID as well as the signed JWT and returns the decoded and validated JWT. On any error, - raises an InvalidBearerTokenException with the reason for failure. """ + """ + decode_bearer_header decodes the given bearer header that contains an encoded JWT with both a + Key ID as well as the signed JWT and returns the decoded and validated JWT. + + On any error, raises an InvalidBearerTokenException with the reason for failure. + """ # Extract the jwt token from the header match = jwtutil.TOKEN_REGEX.match(bearer_header) if match is None: @@ -53,9 +56,9 @@ def decode_bearer_header(bearer_header, instance_keys, config): def observe_decode(): """ - Decorates `decode_bearer_tokens` to record a metric into Prometheus such that any exceptions - raised get recorded as a failure and the return of a payload is considered a success. 
- """ + Decorates `decode_bearer_tokens` to record a metric into Prometheus such that any exceptions + raised get recorded as a failure and the return of a payload is considered a success. + """ def decorator(func): @wraps(func) @@ -75,9 +78,12 @@ def observe_decode(): @observe_decode() def decode_bearer_token(bearer_token, instance_keys, config): - """ decode_bearer_token decodes the given bearer token that contains both a Key ID as well as the - encoded JWT and returns the decoded and validated JWT. On any error, raises an - InvalidBearerTokenException with the reason for failure. """ + """ + decode_bearer_token decodes the given bearer token that contains both a Key ID as well as the + encoded JWT and returns the decoded and validated JWT. + + On any error, raises an InvalidBearerTokenException with the reason for failure. + """ # Decode the key ID. try: headers = jwt.get_unverified_header(bearer_token) @@ -124,9 +130,10 @@ def decode_bearer_token(bearer_token, instance_keys, config): def generate_bearer_token(audience, subject, context, access, lifetime_s, instance_keys): - """ Generates a registry bearer token (without the 'Bearer ' portion) based on the given - information. - """ + """ + Generates a registry bearer token (without the 'Bearer ' portion) based on the given + information. + """ return _generate_jwt_object( audience, subject, @@ -142,7 +149,9 @@ def generate_bearer_token(audience, subject, context, access, lifetime_s, instan def _generate_jwt_object( audience, subject, context, access, lifetime_s, issuer, key_id, private_key ): - """ Generates a compact encoded JWT with the values specified. """ + """ + Generates a compact encoded JWT with the values specified. + """ token_data = { "iss": issuer, "aud": audience, @@ -162,8 +171,10 @@ def _generate_jwt_object( def build_context_and_subject(auth_context=None, tuf_roots=None): - """ Builds the custom context field for the JWT signed token and returns it, - along with the subject for the JWT signed token. """ + """ + Builds the custom context field for the JWT signed token and returns it, along with the subject + for the JWT signed token. + """ # Serialize to a dictionary. context = auth_context.to_signed_dict() if auth_context else {} diff --git a/util/security/secret.py b/util/security/secret.py index 9a23e4bf7..0c6578278 100644 --- a/util/security/secret.py +++ b/util/security/secret.py @@ -3,8 +3,9 @@ import uuid def convert_secret_key(config_secret_key): - """ Converts the secret key from the app config into a secret key that is usable by AES - Cipher. """ + """ + Converts the secret key from the app config into a secret key that is usable by AES Cipher. + """ secret_key = None # First try parsing the key as an int. diff --git a/util/security/signing.py b/util/security/signing.py index 7023de2c6..7fd7f3904 100644 --- a/util/security/signing.py +++ b/util/security/signing.py @@ -9,7 +9,9 @@ from StringIO import StringIO class GPG2Signer(object): - """ Helper class for signing data using GPG2. """ + """ + Helper class for signing data using GPG2. + """ def __init__(self, config, config_provider): if not config.get("GPG2_PRIVATE_KEY_NAME"): @@ -41,7 +43,9 @@ class GPG2Signer(object): return self._config_provider.get_volume_file(self._public_key_filename, mode="rb") def detached_sign(self, stream): - """ Signs the given stream, returning the signature. """ + """ + Signs the given stream, returning the signature. 
+ """ ctx = self._ctx try: ctx.signers = [ctx.get_key(self._private_key_name)] diff --git a/util/security/ssh.py b/util/security/ssh.py index d227dd8e9..6dc0598a5 100644 --- a/util/security/ssh.py +++ b/util/security/ssh.py @@ -5,8 +5,8 @@ from Crypto.PublicKey import RSA def generate_ssh_keypair(): """ - Generates a new 2048 bit RSA public key in OpenSSH format and private key in PEM format. - """ + Generates a new 2048 bit RSA public key in OpenSSH format and private key in PEM format. + """ key = RSA.generate(2048) public_key = key.publickey().exportKey("OpenSSH") private_key = key.exportKey("PEM") diff --git a/util/security/ssl.py b/util/security/ssl.py index cf637b413..fd6c9f409 100644 --- a/util/security/ssl.py +++ b/util/security/ssl.py @@ -4,21 +4,26 @@ import OpenSSL class CertInvalidException(Exception): - """ Exception raised when a certificate could not be parsed/loaded. """ + """ + Exception raised when a certificate could not be parsed/loaded. + """ pass class KeyInvalidException(Exception): - """ Exception raised when a key could not be parsed/loaded or successfully applied to a cert. """ + """ + Exception raised when a key could not be parsed/loaded or successfully applied to a cert. + """ pass def load_certificate(cert_contents): - """ Loads the certificate from the given contents and returns it or raises a CertInvalidException - on failure. - """ + """ + Loads the certificate from the given contents and returns it or raises a CertInvalidException on + failure. + """ try: cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_contents) return SSLCertificate(cert) @@ -30,15 +35,19 @@ _SUBJECT_ALT_NAME = "subjectAltName" class SSLCertificate(object): - """ Helper class for easier working with SSL certificates. """ + """ + Helper class for easier working with SSL certificates. + """ def __init__(self, openssl_cert): self.openssl_cert = openssl_cert def validate_private_key(self, private_key_path): - """ Validates that the private key found at the given file path applies to this certificate. + """ + Validates that the private key found at the given file path applies to this certificate. + Raises a KeyInvalidException on failure. - """ + """ context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD) context.use_certificate(self.openssl_cert) @@ -49,7 +58,9 @@ class SSLCertificate(object): raise KeyInvalidException(ex.args[0][0][2]) def matches_name(self, check_name): - """ Returns true if this SSL certificate matches the given DNS hostname. """ + """ + Returns true if this SSL certificate matches the given DNS hostname. + """ for dns_name in self.names: if fnmatch(check_name, dns_name): return True @@ -58,17 +69,25 @@ class SSLCertificate(object): @property def expired(self): - """ Returns whether the SSL certificate has expired. """ + """ + Returns whether the SSL certificate has expired. + """ return self.openssl_cert.has_expired() @property def common_name(self): - """ Returns the defined common name for the certificate, if any. """ + """ + Returns the defined common name for the certificate, if any. + """ return self.openssl_cert.get_subject().commonName @property def names(self): - """ Returns all the DNS named to which the certificate applies. May be empty. """ + """ + Returns all the DNS named to which the certificate applies. + + May be empty. 
+ """ dns_names = set() common_name = self.common_name if common_name is not None: diff --git a/util/security/test/test_ssl_util.py b/util/security/test/test_ssl_util.py index df9737abf..7e7b06f3b 100644 --- a/util/security/test/test_ssl_util.py +++ b/util/security/test/test_ssl_util.py @@ -8,7 +8,9 @@ from util.security.ssl import load_certificate, CertInvalidException, KeyInvalid def generate_test_cert(hostname="somehostname", san_list=None, expires=1000000): - """ Generates a test SSL certificate and returns the certificate data and private key data. """ + """ + Generates a test SSL certificate and returns the certificate data and private key data. + """ # Based on: http://blog.richardknop.com/2012/08/create-a-self-signed-x509-certificate-in-python/ # Create a key pair. diff --git a/util/streamingjsonencoder.py b/util/streamingjsonencoder.py index 8ddccdcd2..a38244ac7 100644 --- a/util/streamingjsonencoder.py +++ b/util/streamingjsonencoder.py @@ -36,19 +36,19 @@ from types import GeneratorType class StreamingJSONEncoder(json.JSONEncoder): def iterencode(self, o, _one_shot=False): - """Encode the given object and yield each string - representation as available. + """ + Encode the given object and yield each string representation as available. - For example:: + For example:: - for chunk in StreamingJSONEncoder().iterencode(bigobject): - mysocket.write(chunk) + for chunk in StreamingJSONEncoder().iterencode(bigobject): + mysocket.write(chunk) - This method is a verbatim copy of - :meth:`json.JSONEncoder.iterencode`. It is - needed because we need to call our patched - :func:`streamingjsonencoder._make_iterencode`. - """ + This method is a verbatim copy of + :meth:`json.JSONEncoder.iterencode`. It is + needed because we need to call our patched + :func:`streamingjsonencoder._make_iterencode`. + """ if self.check_circular: markers = {} else: diff --git a/util/test/test_failover.py b/util/test/test_failover.py index cbe796af6..970cee78b 100644 --- a/util/test/test_failover.py +++ b/util/test/test_failover.py @@ -4,11 +4,15 @@ from util.failover import failover, FailoverException class FinishedException(Exception): - """ Exception raised at the end of every iteration to force failover. """ + """ + Exception raised at the end of every iteration to force failover. + """ class Counter(object): - """ Wraps a counter in an object so that it'll be passed by reference. """ + """ + Wraps a counter in an object so that it'll be passed by reference. + """ def __init__(self): self.calls = 0 @@ -19,7 +23,9 @@ class Counter(object): @failover def my_failover_func(i, should_raise=None): - """ Increments a counter and raises an exception when told. """ + """ + Increments a counter and raises an exception when told. + """ i.increment() if should_raise is not None: raise should_raise() @@ -28,10 +34,10 @@ def my_failover_func(i, should_raise=None): @pytest.mark.parametrize("stop_on,exception", [(10, None), (5, IndexError),]) def test_readonly_failover(stop_on, exception): - """ Generates failover arguments and checks against a counter to ensure that - the failover function has been called the proper amount of times and stops - at unhandled exceptions. - """ + """ + Generates failover arguments and checks against a counter to ensure that the failover function + has been called the proper amount of times and stops at unhandled exceptions. 
+ """ counter = Counter() arg_sets = [] for i in xrange(stop_on): diff --git a/util/tufmetadata/api.py b/util/tufmetadata/api.py index 115275aa8..df0b214c9 100644 --- a/util/tufmetadata/api.py +++ b/util/tufmetadata/api.py @@ -27,13 +27,17 @@ logger = logging.getLogger(__name__) class InvalidMetadataException(Exception): - """ Exception raised when the upstream API metadata that doesn't parse correctly. """ + """ + Exception raised when the upstream API metadata that doesn't parse correctly. + """ pass class Non200ResponseException(Exception): - """ Exception raised when the upstream API returns a non-200 HTTP status code. """ + """ + Exception raised when the upstream API returns a non-200 HTTP status code. + """ def __init__(self, response): super(Non200ResponseException, self).__init__() @@ -41,7 +45,9 @@ class Non200ResponseException(Exception): class TUFMetadataAPI(object): - """ Helper class for talking to the TUF Metadata service (Apostille). """ + """ + Helper class for talking to the TUF Metadata service (Apostille). + """ def __init__(self, app, config, client=None): feature_enabled = config.get("FEATURE_SIGNING", False) @@ -56,22 +62,24 @@ class TUFMetadataAPI(object): @add_metaclass(ABCMeta) class TUFMetadataAPIInterface(object): - """ Helper class for talking to the TUF Metadata service (Apostille). """ + """ + Helper class for talking to the TUF Metadata service (Apostille). + """ @abstractmethod def get_default_tags_with_expiration(self, namespace, repository, targets_file=None): """ - Gets the tag -> sha mappings for a repo, as well as the expiration of the signatures. - Does not verify the metadata, this is purely for display purposes. + Gets the tag -> sha mappings for a repo, as well as the expiration of the signatures. Does + not verify the metadata, this is purely for display purposes. - Args: - namespace: namespace containing the repository - repository: the repo to get tags for - targets_file: the specific delegation to read from. Default: targets/releases.json + Args: + namespace: namespace containing the repository + repository: the repo to get tags for + targets_file: the specific delegation to read from. Default: targets/releases.json - Returns: - targets, expiration or None, None - """ + Returns: + targets, expiration or None, None + """ pass @abstractmethod @@ -79,37 +87,39 @@ class TUFMetadataAPIInterface(object): self, namespace, repository, targets_file=None, targets_map=None ): """ - Gets the tag -> sha mappings of all delegations for a repo, as well as the expiration of the signatures. - Does not verify the metadata, this is purely for display purposes. - - Args: - namespace: namespace containing the repository - repository: the repo to get tags for - targets_file: the specific target or delegation to read from. Default: targets.json - - Returns: - targets - """ + Gets the tag -> sha mappings of all delegations for a repo, as well as the expiration of the + signatures. Does not verify the metadata, this is purely for display purposes. + + Args: + namespace: namespace containing the repository + repository: the repo to get tags for + targets_file: the specific target or delegation to read from. Default: targets.json + + Returns: + targets + """ pass @abstractmethod def delete_metadata(self, namespace, repository): """ - Deletes the TUF metadata for a repo + Deletes the TUF metadata for a repo. 
- Args: - namespace: namespace containing the repository - repository: the repo to delete metadata for + Args: + namespace: namespace containing the repository + repository: the repo to delete metadata for - Returns: - True if successful, False otherwise - """ + Returns: + True if successful, False otherwise + """ pass @nooper class NoopTUFMetadataAPI(TUFMetadataAPIInterface): - """ No-op version of the TUF API. """ + """ + No-op version of the TUF API. + """ pass @@ -124,17 +134,17 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): def get_default_tags_with_expiration(self, namespace, repository, targets_file=None): """ - Gets the tag -> sha mappings for a repo, as well as the expiration of the signatures. - Does not verify the metadata, this is purely for display purposes. + Gets the tag -> sha mappings for a repo, as well as the expiration of the signatures. Does + not verify the metadata, this is purely for display purposes. - Args: - namespace: namespace containing the repository - repository: the repo to get tags for - targets_file: the specific delegation to read from. Default: targets/releases.json + Args: + namespace: namespace containing the repository + repository: the repo to get tags for + targets_file: the specific delegation to read from. Default: targets/releases.json - Returns: - targets, expiration or None, None - """ + Returns: + targets, expiration or None, None + """ if not targets_file: targets_file = "targets/releases.json" @@ -149,17 +159,17 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): self, namespace, repository, targets_file=None, targets_map=None ): """ - Gets the tag -> sha mappings of all delegations for a repo, as well as the expiration of the signatures. - Does not verify the metadata, this is purely for display purposes. + Gets the tag -> sha mappings of all delegations for a repo, as well as the expiration of the + signatures. Does not verify the metadata, this is purely for display purposes. - Args: - namespace: namespace containing the repository - repository: the repo to get tags for - targets_file: the specific target or delegation to read from. Default: targets.json + Args: + namespace: namespace containing the repository + repository: the repo to get tags for + targets_file: the specific target or delegation to read from. Default: targets.json - Returns: - targets - """ + Returns: + targets + """ if not targets_file: targets_file = "targets.json" @@ -193,15 +203,15 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): def delete_metadata(self, namespace, repository): """ - Deletes the TUF metadata for a repo + Deletes the TUF metadata for a repo. - Args: - namespace: namespace containing the repository - repository: the repo to delete metadata for + Args: + namespace: namespace containing the repository + repository: the repo to delete metadata for - Returns: - True if successful, False otherwise - """ + Returns: + True if successful, False otherwise + """ gun = self._gun(namespace, repository) try: self._delete(gun) @@ -242,7 +252,9 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): return None def _parse_signed(self, json_response): - """ Attempts to parse the targets from a metadata response """ + """ + Attempts to parse the targets from a metadata response. 
+ """ signed = json_response.get("signed") if not signed: raise InvalidMetadataException( @@ -251,7 +263,9 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): return signed def _auth_header(self, gun, actions): - """ Generate a registry auth token for apostille""" + """ + Generate a registry auth token for apostille. + """ access = [{"type": "repository", "name": gun, "actions": actions,}] context, subject = build_context_and_subject( auth_context=None, tuf_roots={gun: SIGNER_TUF_ROOT} @@ -279,7 +293,9 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): ) def _request(self, method, endpoint, path, body, headers, params, timeout): - """ Issues an HTTP request to the signing endpoint. """ + """ + Issues an HTTP request to the signing endpoint. + """ url = urljoin(endpoint, path) logger.debug("%sing signing URL %s", method.upper(), url) @@ -292,8 +308,9 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): return resp def _call(self, method, path, params=None, body=None, headers=None): - """ Issues an HTTP request to signing service and handles failover for GET requests. - """ + """ + Issues an HTTP request to signing service and handles failover for GET requests. + """ timeout = self._config.get("TUF_API_TIMEOUT_SECONDS", 1) endpoint = self._config["TUF_SERVER"] @@ -314,7 +331,9 @@ class ImplementedTUFMetadataAPI(TUFMetadataAPIInterface): @failover def _failover_read_request(request_fn, endpoint, path, body, headers, params, timeout): - """ This function auto-retries read-only requests until they return a 2xx status code. """ + """ + This function auto-retries read-only requests until they return a 2xx status code. + """ try: return request_fn("GET", endpoint, path, body, headers, params, timeout) except (requests.exceptions.RequestException, Non200ResponseException) as ex: diff --git a/util/useremails.py b/util/useremails.py index 6836b9fd3..444685704 100644 --- a/util/useremails.py +++ b/util/useremails.py @@ -22,7 +22,9 @@ class CannotSendEmailException(Exception): class GmailAction(object): - """ Represents an action that can be taken in Gmail in response to the email. """ + """ + Represents an action that can be taken in Gmail in response to the email. + """ def __init__(self, metadata): self.metadata = metadata diff --git a/util/vendor/paxtarfile.py b/util/vendor/paxtarfile.py index 5eb748ae8..2c40091ee 100644 --- a/util/vendor/paxtarfile.py +++ b/util/vendor/paxtarfile.py @@ -31,7 +31,8 @@ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # -"""Read from and write to tar format archives. +""" +Read from and write to tar format archives. """ __version__ = "$Revision: 85213 $" @@ -179,13 +180,15 @@ if ENCODING is None: def stn(s, length): - """Convert a python string to a null-terminated string buffer. + """ + Convert a python string to a null-terminated string buffer. """ return s[:length] + (length - len(s)) * NUL def nts(s): - """Convert a null-terminated string field to a python string. + """ + Convert a null-terminated string field to a python string. """ # Use the string up to the first null char. p = s.find("\0") @@ -195,7 +198,8 @@ def nts(s): def nti(s): - """Convert a number field to a python number. + """ + Convert a number field to a python number. """ # There are two possible encodings for a number field, see # itn() below. @@ -213,7 +217,8 @@ def nti(s): def itn(n, digits=8, format=DEFAULT_FORMAT): - """Convert a python number to a number field. + """ + Convert a python number to a number field. 
""" # POSIX 1003.1-1988 requires numbers to be encoded as a string of # octal digits followed by a null-byte, this allows values up to @@ -241,7 +246,8 @@ def itn(n, digits=8, format=DEFAULT_FORMAT): def uts(s, encoding, errors): - """Convert a unicode object to a string. + """ + Convert a unicode object to a string. """ if errors == "utf-8": # An extra error handler similar to the -o invalid=UTF-8 option @@ -262,13 +268,13 @@ def uts(s, encoding, errors): def calc_chksums(buf): - """Calculate the checksum for a member's header by summing up all - characters except for the chksum field which is treated as if - it was filled with spaces. According to the GNU tar sources, - some tars (Sun and NeXT) calculate chksum with signed char, - which will be different if there are chars in the buffer with - the high bit set. So we calculate two checksums, unsigned and - signed. + """ + Calculate the checksum for a member's header by summing up all characters except for the chksum + field which is treated as if it was filled with spaces. + + According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with the high bit set. So we calculate + two checksums, unsigned and signed. """ unsigned_chksum = 256 + sum( struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]) @@ -280,8 +286,10 @@ def calc_chksums(buf): def copyfileobj(src, dst, length=None): - """Copy length bytes from fileobj src to fileobj dst. - If length is None, copy the entire content. + """ + Copy length bytes from fileobj src to fileobj dst. + + If length is None, copy the entire content. """ if length == 0: return @@ -327,9 +335,11 @@ filemode_table = ( def filemode(mode): - """Convert a file's mode to a string of the form - -rwxrwxrwx. - Used by TarFile.list() + """ + Convert a file's mode to a string of the form. + + -rwxrwxrwx. + Used by TarFile.list() """ perm = [] for table in filemode_table: @@ -343,67 +353,89 @@ def filemode(mode): class TarError(Exception): - """Base exception.""" + """ + Base exception. + """ pass class ExtractError(TarError): - """General exception for extract errors.""" + """ + General exception for extract errors. + """ pass class ReadError(TarError): - """Exception for unreadable tar archives.""" + """ + Exception for unreadable tar archives. + """ pass class CompressionError(TarError): - """Exception for unavailable compression methods.""" + """ + Exception for unavailable compression methods. + """ pass class StreamError(TarError): - """Exception for unsupported operations on stream-like TarFiles.""" + """ + Exception for unsupported operations on stream-like TarFiles. + """ pass class HeaderError(TarError): - """Base exception for header errors.""" + """ + Base exception for header errors. + """ pass class EmptyHeaderError(HeaderError): - """Exception for empty headers.""" + """ + Exception for empty headers. + """ pass class TruncatedHeaderError(HeaderError): - """Exception for truncated headers.""" + """ + Exception for truncated headers. + """ pass class EOFHeaderError(HeaderError): - """Exception for end of file headers.""" + """ + Exception for end of file headers. + """ pass class InvalidHeaderError(HeaderError): - """Exception for invalid headers.""" + """ + Exception for invalid headers. + """ pass class SubsequentHeaderError(HeaderError): - """Exception for missing and invalid extended headers.""" + """ + Exception for missing and invalid extended headers. 
+ """ pass @@ -412,9 +444,10 @@ class SubsequentHeaderError(HeaderError): # internal stream interface # --------------------------- class _LowLevelFile: - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. + """ + Low-level file object. + + Supports reading and writing. It is used instead of a regular file object for streaming access. """ def __init__(self, name, mode): @@ -434,18 +467,18 @@ class _LowLevelFile: class _Stream: - """Class that serves as an adapter between TarFile and - a stream-like object. The stream-like object only - needs to have a read() or write() method and is accessed - blockwise. Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin, - sys.stdout, a socket, a tape device etc. + """ + Class that serves as an adapter between TarFile and a stream-like object. The stream-like + object only needs to have a read() or write() method and is accessed blockwise. Use of gzip or + bzip2 compression is possible. A stream-like object could be for example: sys.stdin, sys.stdout, + a socket, a tape device etc. - _Stream is intended to be used only internally. + _Stream is intended to be used only internally. """ def __init__(self, name, mode, comptype, fileobj, bufsize): - """Construct a _Stream object. + """ + Construct a _Stream object. """ self._extfileobj = True if fileobj is None: @@ -501,7 +534,8 @@ class _Stream: self.close() def _init_write_gz(self): - """Initialize for writing with gzip compression. + """ + Initialize for writing with gzip compression. """ self.cmp = self.zlib.compressobj( 9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0 @@ -515,7 +549,8 @@ class _Stream: self.__write(self.name + NUL) def write(self, s): - """Write string s to the stream. + """ + Write string s to the stream. """ if self.comptype == "gz": self.crc = self.zlib.crc32(s, self.crc) & 0xFFFFFFFFL @@ -525,8 +560,8 @@ class _Stream: self.__write(s) def __write(self, s): - """Write string s to the stream if a whole new block - is ready to be written. + """ + Write string s to the stream if a whole new block is ready to be written. """ self.buf += s while len(self.buf) > self.bufsize: @@ -534,8 +569,10 @@ class _Stream: self.buf = self.buf[self.bufsize :] def close(self): - """Close the _Stream object. No operation should be - done on it afterwards. + """ + Close the _Stream object. + + No operation should be done on it afterwards. """ if self.closed: return @@ -562,7 +599,8 @@ class _Stream: self.fileobj.close() def _init_read_gz(self): - """Initialize for reading a gzip compressed fileobj. + """ + Initialize for reading a gzip compressed fileobj. """ self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) self.dbuf = "" @@ -593,13 +631,16 @@ class _Stream: self.__read(2) def tell(self): - """Return the stream's file pointer position. + """ + Return the stream's file pointer position. """ return self.pos def seek(self, pos=0): - """Set the stream's file pointer to pos. Negative seeking - is forbidden. + """ + Set the stream's file pointer to pos. + + Negative seeking is forbidden. """ if pos - self.pos >= 0: blocks, remainder = divmod(pos - self.pos, self.bufsize) @@ -611,9 +652,10 @@ class _Stream: return self.pos def read(self, size=None): - """Return the next size number of bytes from the stream. - If size is not defined, return all bytes of the stream - up to EOF. + """ + Return the next size number of bytes from the stream. 
+ + If size is not defined, return all bytes of the stream up to EOF. """ if size is None: t = [] @@ -629,7 +671,8 @@ class _Stream: return buf def _read(self, size): - """Return size bytes from the stream. + """ + Return size bytes from the stream. """ if self.comptype == "tar": return self.__read(size) @@ -651,8 +694,10 @@ class _Stream: return t[:size] def __read(self, size): - """Return size bytes from stream. If internal buffer is empty, - read another block from the stream. + """ + Return size bytes from stream. + + If internal buffer is empty, read another block from the stream. """ c = len(self.buf) t = [self.buf] @@ -671,8 +716,9 @@ class _Stream: class _StreamProxy(object): - """Small proxy class that enables transparent compression - detection for the Stream interface (mode 'r|*'). + """ + Small proxy class that enables transparent compression detection for the Stream interface (mode + 'r|*'). """ def __init__(self, fileobj): @@ -698,11 +744,11 @@ class _StreamProxy(object): class _BZ2Proxy(object): - """Small proxy class that enables external file object - support for "r:bz2" and "w:bz2" modes. This is actually - a workaround for a limitation in bz2 module's BZ2File - class which (unlike gzip.GzipFile) has no support for - a file object argument. + """ + Small proxy class that enables external file object support for "r:bz2" and "w:bz2" modes. + + This is actually a workaround for a limitation in bz2 module's BZ2File class which (unlike + gzip.GzipFile) has no support for a file object argument. """ blocksize = 16 * 1024 @@ -766,9 +812,9 @@ class _BZ2Proxy(object): # Extraction file object # ------------------------ class _FileInFile(object): - """A thin wrapper around an existing file object that - provides a part of its data as an individual file - object. + """ + A thin wrapper around an existing file object that provides a part of its data as an individual + file object. """ def __init__(self, fileobj, offset, size, sparse=None): @@ -779,17 +825,20 @@ class _FileInFile(object): self.position = 0 def tell(self): - """Return the current file position. + """ + Return the current file position. """ return self.position def seek(self, position): - """Seek to a position in the file. + """ + Seek to a position in the file. """ self.position = position def read(self, size=None): - """Read data from the file. + """ + Read data from the file. """ if size is None: size = self.size - self.position @@ -808,14 +857,16 @@ class _FileInFile(object): return buf def readnormal(self, size): - """Read operation for regular files. + """ + Read operation for regular files. """ self.fileobj.seek(self.offset + self.position) self.position += size return self.__read(size) def readsparse(self, size): - """Read operation for sparse files. + """ + Read operation for sparse files. """ data = [] while size > 0: @@ -827,7 +878,8 @@ class _FileInFile(object): return "".join(data) def readsparsesection(self, size): - """Read a single section of a sparse file. + """ + Read a single section of a sparse file. """ section = self.sparse.find(self.position) @@ -850,8 +902,10 @@ class _FileInFile(object): class ExFileObject(object): - """File-like object for reading an archive member. - Is returned by TarFile.extractfile(). + """ + File-like object for reading an archive member. + + Is returned by TarFile.extractfile(). """ blocksize = 1024 @@ -869,8 +923,10 @@ class ExFileObject(object): self.buffer = "" def read(self, size=None): - """Read at most size bytes from the file. 
If size is not - present or None, read all data until EOF is reached. + """ + Read at most size bytes from the file. + + If size is not present or None, read all data until EOF is reached. """ if self.closed: raise ValueError("I/O operation on closed file") @@ -893,9 +949,11 @@ class ExFileObject(object): return buf def readline(self, size=-1): - """Read one entire line from the file. If size is present - and non-negative, return a string with at most that - size, which may be an incomplete line. + """ + Read one entire line from the file. + + If size is present and non-negative, return a string with at most that size, which may be an + incomplete line. """ if self.closed: raise ValueError("I/O operation on closed file") @@ -924,7 +982,8 @@ class ExFileObject(object): return buf def readlines(self): - """Return a list with all remaining lines. + """ + Return a list with all remaining lines. """ result = [] while True: @@ -935,7 +994,8 @@ class ExFileObject(object): return result def tell(self): - """Return the current file position. + """ + Return the current file position. """ if self.closed: raise ValueError("I/O operation on closed file") @@ -943,7 +1003,8 @@ class ExFileObject(object): return self.position def seek(self, pos, whence=os.SEEK_SET): - """Seek to a position in the file. + """ + Seek to a position in the file. """ if self.closed: raise ValueError("I/O operation on closed file") @@ -964,12 +1025,14 @@ class ExFileObject(object): self.fileobj.seek(self.position) def close(self): - """Close the file object. + """ + Close the file object. """ self.closed = True def __iter__(self): - """Get an iterator over the file's lines. + """ + Get an iterator over the file's lines. """ while True: line = self.readline() @@ -984,16 +1047,18 @@ class ExFileObject(object): # Exported Classes # ------------------ class TarInfo(object): - """Informational class which holds the details about an - archive member given by a tar header block. - TarInfo objects are returned by TarFile.getmember(), - TarFile.getmembers() and TarFile.gettarinfo() and are - usually created internally. + """ + Informational class which holds the details about an archive member given by a tar header block. + + TarInfo objects are returned by TarFile.getmember(), TarFile.getmembers() and + TarFile.gettarinfo() and are usually created internally. """ def __init__(self, name=""): - """Construct a TarInfo object. name is the optional name - of the member. + """ + Construct a TarInfo object. + + name is the optional name of the member. """ self.name = name # member name self.mode = 0644 # file permissions @@ -1036,7 +1101,8 @@ class TarInfo(object): return "<%s %r at %#x>" % (self.__class__.__name__, self.name, id(self)) def get_info(self, encoding, errors): - """Return the TarInfo's attributes as a dictionary. + """ + Return the TarInfo's attributes as a dictionary. """ info = { "name": self.name, @@ -1064,7 +1130,8 @@ class TarInfo(object): return info def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"): - """Return a tar header as a string of 512 byte blocks. + """ + Return a tar header as a string of 512 byte blocks. """ info = self.get_info(encoding, errors) @@ -1078,7 +1145,8 @@ class TarInfo(object): raise ValueError("invalid format") def create_ustar_header(self, info): - """Return the object as a ustar header block. + """ + Return the object as a ustar header block. 
""" info["magic"] = POSIX_MAGIC @@ -1091,7 +1159,8 @@ class TarInfo(object): return self._create_header(info, USTAR_FORMAT) def create_gnu_header(self, info): - """Return the object as a GNU header block sequence. + """ + Return the object as a GNU header block sequence. """ info["magic"] = GNU_MAGIC @@ -1105,9 +1174,11 @@ class TarInfo(object): return buf + self._create_header(info, GNU_FORMAT) def create_pax_header(self, info, encoding, errors): - """Return the object as a ustar header block. If it cannot be - represented this way, prepend a pax extended header sequence - with supplement information. + """ + Return the object as a ustar header block. + + If it cannot be represented this way, prepend a pax extended header sequence with supplement + information. """ info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() @@ -1160,13 +1231,14 @@ class TarInfo(object): @classmethod def create_pax_global_header(cls, pax_headers): - """Return the object as a pax global header block sequence. + """ + Return the object as a pax global header block sequence. """ return cls._create_pax_generic_header(pax_headers, type=XGLTYPE) def _posix_split_name(self, name): - """Split a name longer than 100 chars into a prefix - and a name part. + """ + Split a name longer than 100 chars into a prefix and a name part. """ prefix = name[: LENGTH_PREFIX + 1] while prefix and prefix[-1] != "/": @@ -1181,8 +1253,10 @@ class TarInfo(object): @staticmethod def _create_header(info, format): - """Return a header block. info is a dictionary with file - information, format must be one of the *_FORMAT constants. + """ + Return a header block. + + info is a dictionary with file information, format must be one of the *_FORMAT constants. """ parts = [ stn(info.get("name", ""), 100), @@ -1209,8 +1283,8 @@ class TarInfo(object): @staticmethod def _create_payload(payload): - """Return the string payload filled with zero bytes - up to the next 512 byte border. + """ + Return the string payload filled with zero bytes up to the next 512 byte border. """ blocks, remainder = divmod(len(payload), BLOCKSIZE) if remainder > 0: @@ -1219,8 +1293,8 @@ class TarInfo(object): @classmethod def _create_gnu_long_header(cls, name, type): - """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence - for name. + """ + Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name. """ name += NUL @@ -1235,9 +1309,11 @@ class TarInfo(object): @classmethod def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE): - """Return a POSIX.1-2001 extended or global header sequence - that contains a list of keyword, value pairs. The values - must be unicode objects. + """ + Return a POSIX.1-2001 extended or global header sequence that contains a list of keyword, + value pairs. + + The values must be unicode objects. """ records = [] for keyword, value in pax_headers.iteritems(): @@ -1266,7 +1342,8 @@ class TarInfo(object): @classmethod def frombuf(cls, buf): - """Construct a TarInfo object from a 512 byte string buffer. + """ + Construct a TarInfo object from a 512 byte string buffer. """ if len(buf) == 0: raise EmptyHeaderError("empty header") @@ -1312,8 +1389,8 @@ class TarInfo(object): @classmethod def fromtarfile(cls, tarfile): - """Return the next TarInfo object from TarFile object - tarfile. + """ + Return the next TarInfo object from TarFile object tarfile. """ buf = tarfile.fileobj.read(BLOCKSIZE) obj = cls.frombuf(buf) @@ -1332,8 +1409,8 @@ class TarInfo(object): # begin. # 3. Return self or another valid TarInfo object. 
def _proc_member(self, tarfile): - """Choose the right processing method depending on - the type and call it. + """ + Choose the right processing method depending on the type and call it. """ if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): return self._proc_gnulong(tarfile) @@ -1345,8 +1422,8 @@ class TarInfo(object): return self._proc_builtin(tarfile) def _proc_builtin(self, tarfile): - """Process a builtin type or an unknown type which - will be treated as a regular file. + """ + Process a builtin type or an unknown type which will be treated as a regular file. """ self.offset_data = tarfile.fileobj.tell() offset = self.offset_data @@ -1362,8 +1439,8 @@ class TarInfo(object): return self def _proc_gnulong(self, tarfile): - """Process the blocks that hold a GNU longname - or longlink member. + """ + Process the blocks that hold a GNU longname or longlink member. """ buf = tarfile.fileobj.read(self._block(self.size)) @@ -1384,7 +1461,8 @@ class TarInfo(object): return next def _proc_sparse(self, tarfile): - """Process a GNU sparse header plus extra headers. + """ + Process a GNU sparse header plus extra headers. """ buf = self.buf sp = _ringbuffer() @@ -1440,8 +1518,8 @@ class TarInfo(object): return self def _proc_pax(self, tarfile): - """Process an extended or global header as described in - POSIX.1-2001. + """ + Process an extended or global header as described in POSIX.1-2001. """ # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) @@ -1505,8 +1583,8 @@ class TarInfo(object): return next def _apply_pax_info(self, pax_headers, encoding, errors): - """Replace fields with supplemental information from a previous - pax extended or global header. + """ + Replace fields with supplemental information from a previous pax extended or global header. """ for keyword, value in pax_headers.iteritems(): if keyword not in PAX_FIELDS: @@ -1528,8 +1606,8 @@ class TarInfo(object): self.pax_headers = pax_headers.copy() def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. + """ + Round up a byte count by BLOCKSIZE and return it, e.g. _block(834) => 1024. """ blocks, remainder = divmod(count, BLOCKSIZE) if remainder: @@ -1571,7 +1649,8 @@ class TarInfo(object): class TarFile(object): - """The TarFile Class provides an interface to tar archives. + """ + The TarFile Class provides an interface to tar archives. """ debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) @@ -1611,13 +1690,13 @@ class TarFile(object): debug=None, errorlevel=None, ): - """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when TarFile is closed. + """ + Open an (uncompressed) tar archive `name'. + + `mode' is either 'r' to read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' defaults to 'r'. If + `fileobj' is given, it is used for reading or writing data. If it can be determined, `mode' + is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. 
""" modes = {"r": "rb", "a": "r+b", "w": "wb"} if mode not in modes: @@ -1738,26 +1817,26 @@ class TarFile(object): @classmethod def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): - """Open a tar archive for reading, writing or appending. Return - an appropriate TarFile class. + """ + Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. - mode: - 'r' or 'r:*' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'a' or 'a:' open for appending, creating the file if necessary - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression - 'r|*' open a stream of tar blocks with transparent compression - 'r|' open an uncompressed stream of tar blocks for reading - 'r|gz' open a gzip compressed stream of tar blocks - 'r|bz2' open a bzip2 compressed stream of tar blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing """ if not name and not fileobj: @@ -1814,7 +1893,8 @@ class TarFile(object): @classmethod def taropen(cls, name, mode="r", fileobj=None, **kwargs): - """Open uncompressed tar archive name for reading or writing. + """ + Open uncompressed tar archive name for reading or writing. """ if mode not in ("r", "a", "w"): raise ValueError("mode must be 'r', 'a' or 'w'") @@ -1822,8 +1902,10 @@ class TarFile(object): @classmethod def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open gzip compressed tar archive name for reading or writing. - Appending is not allowed. + """ + Open gzip compressed tar archive name for reading or writing. + + Appending is not allowed. """ if mode not in ("r", "w"): raise ValueError("mode must be 'r' or 'w'") @@ -1857,8 +1939,10 @@ class TarFile(object): @classmethod def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open bzip2 compressed tar archive name for reading or writing. - Appending is not allowed. + """ + Open bzip2 compressed tar archive name for reading or writing. + + Appending is not allowed. """ if mode not in ("r", "w"): raise ValueError("mode must be 'r' or 'w'.") @@ -1897,8 +1981,10 @@ class TarFile(object): # The public methods which TarFile provides: def close(self): - """Close the TarFile. In write-mode, two finishing zero blocks are - appended to the archive. + """ + Close the TarFile. + + In write-mode, two finishing zero blocks are appended to the archive. 
""" if self.closed: return @@ -1918,10 +2004,11 @@ class TarFile(object): self.fileobj.close() def getmember(self, name): - """Return a TarInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. If a member occurs more - than once in the archive, its last occurrence is assumed to be the - most up-to-date version. + """ + Return a TarInfo object for member `name'. + + If `name' can not be found in the archive, KeyError is raised. If a member occurs more than + once in the archive, its last occurrence is assumed to be the most up-to-date version. """ tarinfo = self._getmember(name) if tarinfo is None: @@ -1929,8 +2016,10 @@ class TarFile(object): return tarinfo def getmembers(self): - """Return the members of the archive as a list of TarInfo objects. The - list has the same order as the members in the archive. + """ + Return the members of the archive as a list of TarInfo objects. + + The list has the same order as the members in the archive. """ self._check() if not self._loaded: # if we want to obtain a list of @@ -1939,17 +2028,20 @@ class TarFile(object): return self.members def getnames(self): - """Return the members of the archive as a list of their names. It has - the same order as the list returned by getmembers(). + """ + Return the members of the archive as a list of their names. + + It has the same order as the list returned by getmembers(). """ return [tarinfo.name for tarinfo in self.getmembers()] def gettarinfo(self, name=None, arcname=None, fileobj=None): - """Create a TarInfo object for either the file `name' or the file - object `fileobj' (using os.fstat on its file descriptor). You can - modify some of the TarInfo's attributes before you add it using - addfile(). If given, `arcname' specifies an alternative name for the - file in the archive. + """ + Create a TarInfo object for either the file `name' or the file object `fileobj' (using + os.fstat on its file descriptor). + + You can modify some of the TarInfo's attributes before you add it using addfile(). If given, + `arcname' specifies an alternative name for the file in the archive. """ self._check("aw") @@ -2047,9 +2139,11 @@ class TarFile(object): return tarinfo def list(self, verbose=True): - """Print a table of contents to sys.stdout. If `verbose' is False, only - the names of the members are printed. If it is True, an `ls -l'-like - output is produced. + """ + Print a table of contents to sys.stdout. + + If `verbose' is False, only the names of the members are printed. If it is True, an `ls + -l'-like output is produced. """ self._check() @@ -2073,15 +2167,15 @@ class TarFile(object): print def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): - """Add the file `name' to the archive. `name' may be any type of file - (directory, fifo, symbolic link, etc.). If given, `arcname' - specifies an alternative name for the file in the archive. - Directories are added recursively by default. This can be avoided by - setting `recursive' to False. `exclude' is a function that should - return True for each filename to be excluded. `filter' is a function - that expects a TarInfo object argument and returns the changed - TarInfo object, if it returns None the TarInfo object will be - excluded from the archive. + """ + Add the file `name' to the archive. + + `name' may be any type of file (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. Directories are added recursively + by default. 
This can be avoided by setting `recursive' to False. `exclude' is a function
+        that should return True for each filename to be excluded. `filter' is a function that
+        expects a TarInfo object argument and returns the changed TarInfo object, if it returns None
+        the TarInfo object will be excluded from the archive.
         """
         self._check("aw")

@@ -2135,11 +2229,12 @@ class TarFile(object):
         self.addfile(tarinfo)

     def addfile(self, tarinfo, fileobj=None):
-        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
-           given, tarinfo.size bytes are read from it and added to the archive.
-           You can create TarInfo objects using gettarinfo().
-           On Windows platforms, `fileobj' should always be opened with mode
-           'rb' to avoid irritation about the file size.
+        """
+        Add the TarInfo object `tarinfo' to the archive.
+
+        If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can
+        create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be
+        opened with mode 'rb' to avoid irritation about the file size.
         """
         self._check("aw")

@@ -2161,11 +2256,12 @@ class TarFile(object):
         self.members.append(tarinfo)

     def extractall(self, path=".", members=None):
-        """Extract all members from the archive to the current working
-           directory and set owner, modification time and permissions on
-           directories afterwards. `path' specifies a different directory
-           to extract to. `members' is optional and must be a subset of the
-           list returned by getmembers().
+        """
+        Extract all members from the archive to the current working directory and set owner,
+        modification time and permissions on directories afterwards.
+
+        `path' specifies a different directory to extract to. `members' is optional and must be a
+        subset of the list returned by getmembers().
         """
         directories = []

@@ -2198,10 +2294,11 @@ class TarFile(object):
         self._dbg(1, "tarfile: %s" % e)

     def extract(self, member, path=""):
-        """Extract a member from the archive to the current working directory,
-           using its full name. Its file information is extracted as accurately
-           as possible. `member' may be a filename or a TarInfo object. You can
-           specify a different directory using `path'.
+        """
+        Extract a member from the archive to the current working directory, using its full name.
+
+        Its file information is extracted as accurately as possible. `member' may be a filename or a
+        TarInfo object. You can specify a different directory using `path'.
         """
         self._check("r")

@@ -2231,13 +2328,16 @@ class TarFile(object):
         self._dbg(1, "tarfile: %s" % e)

     def extractfile(self, member):
-        """Extract a member from the archive as a file object. `member' may be
-           a filename or a TarInfo object. If `member' is a regular file, a
-           file-like object is returned. If `member' is a link, a file-like
-           object is constructed from the link's target. If `member' is none of
-           the above, None is returned.
-           The file-like object is read-only and provides the following
-           methods: read(), readline(), readlines(), seek() and tell()
+        """
+        Extract a member from the archive as a file object.
+
+        `member' may be a filename or a TarInfo object. If `member' is a regular
+        file, a file-like object is returned. If `member' is a link, a file-like
+        object is constructed from the link's target. If `member' is none of the
+        above, None is returned.
+
+        The file-like object is read-only and provides the following methods:
+        read(), readline(), readlines(), seek() and tell().
         """
         self._check("r")

@@ -2269,8 +2369,8 @@ class TarFile(object):
             return None

     def _extract_member(self, tarinfo, targetpath):
-        """Extract the TarInfo object tarinfo to a physical
-           file called targetpath.
+        """
+        Extract the TarInfo object tarinfo to a physical file called targetpath.
         """
         # Fetch the TarInfo object for the given name
         # and build the destination pathname, replacing
@@ -2316,7 +2416,8 @@ class TarFile(object):
     # subclass to implement other functionality.

     def makedir(self, tarinfo, targetpath):
-        """Make a directory called targetpath.
+        """
+        Make a directory called targetpath.
         """
         try:
             # Use a safe mode for the directory, the real mode is set
@@ -2327,7 +2428,8 @@ class TarFile(object):
             raise

     def makefile(self, tarinfo, targetpath):
-        """Make a file called targetpath.
+        """
+        Make a file called targetpath.
         """
         source = self.extractfile(tarinfo)
         try:
@@ -2337,14 +2439,15 @@ class TarFile(object):
             source.close()

     def makeunknown(self, tarinfo, targetpath):
-        """Make a file from a TarInfo object with an unknown type
-           at targetpath.
+        """
+        Make a file from a TarInfo object with an unknown type at targetpath.
         """
         self.makefile(tarinfo, targetpath)
         self._dbg(1, "tarfile: Unknown file type %r, " "extracted as regular file." % tarinfo.type)

     def makefifo(self, tarinfo, targetpath):
-        """Make a fifo called targetpath.
+        """
+        Make a fifo called targetpath.
         """
         if hasattr(os, "mkfifo"):
             os.mkfifo(targetpath)
@@ -2352,7 +2455,8 @@ class TarFile(object):
             raise ExtractError("fifo not supported by system")

     def makedev(self, tarinfo, targetpath):
-        """Make a character or block device called targetpath.
+        """
+        Make a character or block device called targetpath.
         """
         if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
             raise ExtractError("special devices not supported by system")
@@ -2366,9 +2470,11 @@ class TarFile(object):
         os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor))

     def makelink(self, tarinfo, targetpath):
-        """Make a (symbolic) link called targetpath. If it cannot be created
-           (platform limitation), we try to make a copy of the referenced file
-           instead of a link.
+        """
+        Make a (symbolic) link called targetpath.
+
+        If it cannot be created (platform limitation), we try to make a copy of the referenced file
+        instead of a link.
         """
         if hasattr(os, "symlink") and hasattr(os, "link"):
             # For systems that support symbolic and hard links.
@@ -2391,7 +2497,8 @@ class TarFile(object):
             raise ExtractError("unable to resolve link inside archive")

     def chown(self, tarinfo, targetpath):
-        """Set owner of targetpath according to tarinfo.
+        """
+        Set owner of targetpath according to tarinfo.
         """
         if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
             # We have to be root to do so.
@@ -2413,7 +2520,8 @@ class TarFile(object):
             raise ExtractError("could not change owner")

     def chmod(self, tarinfo, targetpath):
-        """Set file permissions of targetpath according to tarinfo.
+        """
+        Set file permissions of targetpath according to tarinfo.
         """
         if hasattr(os, "chmod"):
             try:
@@ -2422,7 +2530,8 @@ class TarFile(object):
             raise ExtractError("could not change mode")

     def utime(self, tarinfo, targetpath):
-        """Set modification time of targetpath according to tarinfo.
+        """
+        Set modification time of targetpath according to tarinfo.
""" if not hasattr(os, "utime"): return @@ -2433,9 +2542,11 @@ class TarFile(object): # -------------------------------------------------------------------------- def next(self): - """Return the next member of the archive as a TarInfo object, when - TarFile is opened for reading. Return None if there is no more - available. + """ + Return the next member of the archive as a TarInfo object, when TarFile is opened for + reading. + + Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: @@ -2487,8 +2598,10 @@ class TarFile(object): # Little helper methods: def _getmember(self, name, tarinfo=None, normalize=False): - """Find an archive member by name from bottom to top. - If tarinfo is given, it is used as the starting point. + """ + Find an archive member by name from bottom to top. + + If tarinfo is given, it is used as the starting point. """ # Ensure that all members have been loaded. members = self.getmembers() @@ -2510,8 +2623,8 @@ class TarFile(object): return member def _load(self): - """Read through the entire archive file and look for readable - members. + """ + Read through the entire archive file and look for readable members. """ while True: tarinfo = self.next() @@ -2520,8 +2633,8 @@ class TarFile(object): self._loaded = True def _check(self, mode=None): - """Check if TarFile is still open, and if the operation's mode - corresponds to TarFile's mode. + """ + Check if TarFile is still open, and if the operation's mode corresponds to TarFile's mode. """ if self.closed: raise IOError("%s is closed" % self.__class__.__name__) @@ -2529,8 +2642,8 @@ class TarFile(object): raise IOError("bad operation for mode %r" % self.mode) def _find_link_target(self, tarinfo): - """Find the target member of a symlink or hardlink member in the - archive. + """ + Find the target member of a symlink or hardlink member in the archive. """ if tarinfo.issym(): # Always search the entire archive. @@ -2548,7 +2661,8 @@ class TarFile(object): return member def __iter__(self): - """Provide an iterator object. + """ + Provide an iterator object. """ if self._loaded: return iter(self.members) @@ -2556,7 +2670,8 @@ class TarFile(object): return TarIter(self) def _dbg(self, level, msg): - """Write debugging output to sys.stderr. + """ + Write debugging output to sys.stderr. """ if level <= self.debug: print >> sys.stderr, msg @@ -2580,26 +2695,30 @@ class TarFile(object): class TarIter: - """Iterator Class. + """ + Iterator Class. - for tarinfo in TarFile(...): - suite... + for tarinfo in TarFile(...): suite... """ def __init__(self, tarfile): - """Construct a TarIter object. + """ + Construct a TarIter object. """ self.tarfile = tarfile self.index = 0 def __iter__(self): - """Return iterator object. + """ + Return iterator object. """ return self def next(self): - """Return the next item using TarFile's next() method. - When all members have been read, set TarFile as _loaded. + """ + Return the next item using TarFile's next() method. + + When all members have been read, set TarFile as _loaded. """ # Fix for SF #1100429: Under rare circumstances it can # happen that getmembers() is called during iteration, @@ -2622,7 +2741,8 @@ class TarIter: # Helper classes for sparse file support class _section: - """Base class for _data and _hole. + """ + Base class for _data and _hole. """ def __init__(self, offset, size): @@ -2634,7 +2754,8 @@ class _section: class _data(_section): - """Represent a data section in a sparse file. + """ + Represent a data section in a sparse file. 
""" def __init__(self, offset, size, realpos): @@ -2643,15 +2764,16 @@ class _data(_section): class _hole(_section): - """Represent a hole section in a sparse file. + """ + Represent a hole section in a sparse file. """ pass class _ringbuffer(list): - """Ringbuffer class which increases performance - over a regular list. + """ + Ringbuffer class which increases performance over a regular list. """ def __init__(self): @@ -2681,8 +2803,8 @@ TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED class TarFileCompat: - """TarFile class compatible with standard module zipfile's - ZipFile class. + """ + TarFile class compatible with standard module zipfile's ZipFile class. """ def __init__(self, file, mode="r", compression=TAR_PLAIN): @@ -2745,8 +2867,8 @@ class TarFileCompat: # exported functions # -------------------- def is_tarfile(name): - """Return True if name points to a tar archive that we - are able to handle, else return False. + """ + Return True if name points to a tar archive that we are able to handle, else return False. """ try: t = open(name) diff --git a/util/verifyplacements.py b/util/verifyplacements.py index bdc9243cf..b7fceecab 100644 --- a/util/verifyplacements.py +++ b/util/verifyplacements.py @@ -1,9 +1,9 @@ """ -Usage (from the root in the container): venv/bin/python -m util.verifyplacements +Usage (from the root in the container): venv/bin/python -m util.verifyplacements. This script verifies that if a blob is listed as being in a specific storage location, the file -actually exists there. If the file is not found in that storage location, the placement entry in -the database is removed. +actually exists there. If the file is not found in that storage location, the placement entry in the +database is removed. """ import logging diff --git a/util/workers.py b/util/workers.py index c092647e8..76dcbc448 100644 --- a/util/workers.py +++ b/util/workers.py @@ -3,10 +3,10 @@ import psutil def get_worker_count(worker_kind_name, multiplier, minimum=None, maximum=None): - """ Returns the number of gunicorn workers to run for the given worker kind, - based on a combination of environment variable, multiplier, minimum (if any), - and number of accessible CPU cores. - """ + """ + Returns the number of gunicorn workers to run for the given worker kind, based on a combination + of environment variable, multiplier, minimum (if any), and number of accessible CPU cores. + """ minimum = minimum or multiplier maximum = maximum or (multiplier * multiplier) diff --git a/workers/blobuploadcleanupworker/blobuploadcleanupworker.py b/workers/blobuploadcleanupworker/blobuploadcleanupworker.py index 23c2a2075..50d6ea015 100644 --- a/workers/blobuploadcleanupworker/blobuploadcleanupworker.py +++ b/workers/blobuploadcleanupworker/blobuploadcleanupworker.py @@ -21,7 +21,9 @@ class BlobUploadCleanupWorker(Worker): self.add_operation(self._cleanup_uploads, BLOBUPLOAD_CLEANUP_FREQUENCY) def _cleanup_uploads(self): - """ Performs garbage collection on the blobupload table. """ + """ + Performs garbage collection on the blobupload table. + """ while True: # Find all blob uploads older than the threshold (typically a week) and delete them. 
with UseThenDisconnect(app.config): diff --git a/workers/blobuploadcleanupworker/models_interface.py b/workers/blobuploadcleanupworker/models_interface.py index 3fcfe8117..18cd33ef5 100644 --- a/workers/blobuploadcleanupworker/models_interface.py +++ b/workers/blobuploadcleanupworker/models_interface.py @@ -5,33 +5,44 @@ from six import add_metaclass class BlobUpload(namedtuple("BlobUpload", ["uuid", "storage_metadata", "location_name"])): """ - BlobUpload represents a single upload of a blob in progress or previously started. - """ + BlobUpload represents a single upload of a blob in progress or previously started. + """ @add_metaclass(ABCMeta) class BlobUploadCleanupWorkerDataInterface(object): """ - Interface that represents all data store interactions required by the blob upload cleanup worker. - """ + Interface that represents all data store interactions required by the blob upload cleanup + worker. + """ @abstractmethod def get_stale_blob_upload(self, stale_threshold): - """ Returns a BlobUpload that was created on or before the current date/time minus the - stale threshold. If none, returns None. """ + """ + Returns a BlobUpload that was created on or before the current date/time minus the stale + threshold. + + If none, returns None. + """ pass @abstractmethod def delete_blob_upload(self, blob_upload): - """ Deletes a blob upload from the database. """ + """ + Deletes a blob upload from the database. + """ pass @abstractmethod def create_stale_upload_for_testing(self): - """ Creates a new stale blob upload for testing. """ + """ + Creates a new stale blob upload for testing. + """ pass @abstractmethod def blob_upload_exists(self, upload_uuid): - """ Returns True if a blob upload with the given UUID exists. """ + """ + Returns True if a blob upload with the given UUID exists. + """ pass diff --git a/workers/buildlogsarchiver/buildlogsarchiver.py b/workers/buildlogsarchiver/buildlogsarchiver.py index 605f8b4c2..51493dacc 100644 --- a/workers/buildlogsarchiver/buildlogsarchiver.py +++ b/workers/buildlogsarchiver/buildlogsarchiver.py @@ -24,8 +24,11 @@ class ArchiveBuildLogsWorker(Worker): self.add_operation(self._archive_redis_buildlogs, POLL_PERIOD_SECONDS) def _archive_redis_buildlogs(self): - """ Archive a single build, choosing a candidate at random. This process must be idempotent to - avoid needing two-phase commit. """ + """ + Archive a single build, choosing a candidate at random. + + This process must be idempotent to avoid needing two-phase commit. + """ # Get a random build to archive to_archive = model.get_archivable_build() if to_archive is None: diff --git a/workers/buildlogsarchiver/models_interface.py b/workers/buildlogsarchiver/models_interface.py index 940e87e0d..ec0bdb48f 100644 --- a/workers/buildlogsarchiver/models_interface.py +++ b/workers/buildlogsarchiver/models_interface.py @@ -5,34 +5,45 @@ from six import add_metaclass class Build(namedtuple("Build", ["uuid", "logs_archived"])): """ - Build represents a single build in the build system. - """ + Build represents a single build in the build system. + """ @add_metaclass(ABCMeta) class BuildLogsArchiverWorkerDataInterface(object): """ - Interface that represents all data store interactions required by the build logs archiver worker. - """ + Interface that represents all data store interactions required by the build logs archiver + worker. + """ @abstractmethod def get_archivable_build(self): - """ Returns a build whose logs are available for archiving. If none, returns None. 
""" + """ + Returns a build whose logs are available for archiving. + + If none, returns None. + """ pass @abstractmethod def get_build(self, build_uuid): - """ Returns the build with the matching UUID or None if none. """ + """ + Returns the build with the matching UUID or None if none. + """ pass @abstractmethod def mark_build_archived(self, build_uuid): - """ Marks the build with the given UUID as having its logs archived. Returns False if - the build was already marked as archived. - """ + """ + Marks the build with the given UUID as having its logs archived. + + Returns False if the build was already marked as archived. + """ pass @abstractmethod def create_build_for_testing(self): - """ Creates an unarchived build for testing of archiving. """ + """ + Creates an unarchived build for testing of archiving. + """ pass diff --git a/workers/chunkcleanupworker.py b/workers/chunkcleanupworker.py index d8f6c520f..5dde25ea9 100644 --- a/workers/chunkcleanupworker.py +++ b/workers/chunkcleanupworker.py @@ -12,9 +12,11 @@ POLL_PERIOD_SECONDS = 10 class ChunkCleanupWorker(QueueWorker): - """ Worker which cleans up chunks enqueued by the storage engine(s). This is typically used to - cleanup empty chunks which are no longer needed. - """ + """ + Worker which cleans up chunks enqueued by the storage engine(s). + + This is typically used to cleanup empty chunks which are no longer needed. + """ def process_queue_item(self, job_details): logger.debug("Got chunk cleanup queue item: %s", job_details) diff --git a/workers/expiredappspecifictokenworker.py b/workers/expiredappspecifictokenworker.py index 9217b944b..4d8d909d1 100644 --- a/workers/expiredappspecifictokenworker.py +++ b/workers/expiredappspecifictokenworker.py @@ -27,8 +27,9 @@ class ExpiredAppSpecificTokenWorker(Worker): self.add_operation(self._gc_expired_tokens, POLL_PERIOD_SECONDS) def _gc_expired_tokens(self): - """ Garbage collects any expired app specific tokens outside of the configured - window. """ + """ + Garbage collects any expired app specific tokens outside of the configured window. + """ logger.debug( "Garbage collecting expired app specific tokens with window: %s", self.expiration_window ) diff --git a/workers/exportactionlogsworker.py b/workers/exportactionlogsworker.py index 498c3f190..9a2332d24 100644 --- a/workers/exportactionlogsworker.py +++ b/workers/exportactionlogsworker.py @@ -43,9 +43,10 @@ class ExportResult(Enum): class ExportActionLogsWorker(QueueWorker): - """ Worker which exports action logs for a namespace or a repository based on - a queued request from the API. - """ + """ + Worker which exports action logs for a namespace or a repository based on a queued request from + the API. + """ def process_queue_item(self, job_details): return self._process_queue_item(job_details, app_storage) diff --git a/workers/gc/gcworker.py b/workers/gc/gcworker.py index c38fe7f38..e3bf2f02a 100644 --- a/workers/gc/gcworker.py +++ b/workers/gc/gcworker.py @@ -21,7 +21,9 @@ class GarbageCollectionWorker(Worker): ) def _garbage_collection_repos(self): - """ Performs garbage collection on repositories. """ + """ + Performs garbage collection on repositories. 
+ """ with UseThenDisconnect(app.config): policy = get_random_gc_policy() if policy is None: diff --git a/workers/globalpromstats/globalpromstats.py b/workers/globalpromstats/globalpromstats.py index 768b71772..1c8cca3e8 100644 --- a/workers/globalpromstats/globalpromstats.py +++ b/workers/globalpromstats/globalpromstats.py @@ -23,8 +23,9 @@ WORKER_FREQUENCY = app.config.get("GLOBAL_PROMETHEUS_STATS_FREQUENCY", 60 * 60) class GlobalPrometheusStatsWorker(Worker): - """ Worker which reports global stats (# of users, orgs, repos, etc) to Prometheus periodically. - """ + """ + Worker which reports global stats (# of users, orgs, repos, etc) to Prometheus periodically. + """ def __init__(self): super(GlobalPrometheusStatsWorker, self).__init__() diff --git a/workers/globalpromstats/models_interface.py b/workers/globalpromstats/models_interface.py index 713512bb7..878699d15 100644 --- a/workers/globalpromstats/models_interface.py +++ b/workers/globalpromstats/models_interface.py @@ -5,25 +5,33 @@ from six import add_metaclass @add_metaclass(ABCMeta) class GlobalPromStatsWorkerDataInterface(object): """ - Interface that represents all data store interactions required by the global prom stats worker. - """ + Interface that represents all data store interactions required by the global prom stats worker. + """ @abstractmethod def get_repository_count(self): - """ Returns the number of repositories in the database. """ + """ + Returns the number of repositories in the database. + """ pass @abstractmethod def get_active_user_count(self): - """ Returns the number of active users in the database. """ + """ + Returns the number of active users in the database. + """ pass @abstractmethod def get_active_org_count(self): - """ Returns the number of active organizations in the database. """ + """ + Returns the number of active organizations in the database. + """ pass @abstractmethod def get_robot_count(self): - """ Returns the number of robots in the database. """ + """ + Returns the number of robots in the database. + """ pass diff --git a/workers/logrotateworker.py b/workers/logrotateworker.py index 8092d9900..d91c3d173 100644 --- a/workers/logrotateworker.py +++ b/workers/logrotateworker.py @@ -33,7 +33,9 @@ SAVE_LOCATION = app.config.get("ACTION_LOG_ARCHIVE_LOCATION") class LogRotateWorker(Worker): - """ Worker used to rotate old logs out the database and into storage. """ + """ + Worker used to rotate old logs out the database and into storage. + """ def __init__(self): super(LogRotateWorker, self).__init__() @@ -74,7 +76,9 @@ class LogRotateWorker(Worker): def log_dict(log): - """ Pretty prints a LogEntry in JSON. """ + """ + Pretty prints a LogEntry in JSON. + """ try: metadata_json = json.loads(str(log.metadata_json)) except ValueError: diff --git a/workers/namespacegcworker.py b/workers/namespacegcworker.py index 1af7cea81..1b2a81c09 100644 --- a/workers/namespacegcworker.py +++ b/workers/namespacegcworker.py @@ -13,8 +13,9 @@ NAMESPACE_GC_TIMEOUT = 60 * 15 # 15 minutes class NamespaceGCWorker(QueueWorker): - """ Worker which cleans up namespaces enqueued to be GCed. - """ + """ + Worker which cleans up namespaces enqueued to be GCed. 
+ """ def process_queue_item(self, job_details): logger.debug("Got namespace GC queue item: %s", job_details) diff --git a/workers/notificationworker/models_interface.py b/workers/notificationworker/models_interface.py index df033e178..6a421625f 100644 --- a/workers/notificationworker/models_interface.py +++ b/workers/notificationworker/models_interface.py @@ -5,8 +5,8 @@ from six import add_metaclass class Repository(namedtuple("Repository", ["namespace_name", "name"])): """ - Repository represents a repository. - """ + Repository represents a repository. + """ class Notification( @@ -23,39 +23,49 @@ class Notification( ) ): """ - Notification represents a registered notification of some kind. - """ + Notification represents a registered notification of some kind. + """ @add_metaclass(ABCMeta) class NotificationWorkerDataInterface(object): """ - Interface that represents all data store interactions required by the notification worker. - """ + Interface that represents all data store interactions required by the notification worker. + """ @abstractmethod def get_enabled_notification(self, notification_uuid): - """ Returns an *enabled* notification with the given UUID, or None if none. """ + """ + Returns an *enabled* notification with the given UUID, or None if none. + """ pass @abstractmethod def reset_number_of_failures_to_zero(self, notification): - """ Resets the number of failures for the given notification back to zero. """ + """ + Resets the number of failures for the given notification back to zero. + """ pass @abstractmethod def increment_notification_failure_count(self, notification): - """ Increments the number of failures on the given notification. """ + """ + Increments the number of failures on the given notification. + """ pass @abstractmethod def create_notification_for_testing( self, target_username, method_name=None, method_config=None ): - """ Creates a notification for testing. """ + """ + Creates a notification for testing. + """ pass @abstractmethod def user_has_local_notifications(self, target_username): - """ Returns whether there are any Quay-local notifications for the given user. """ + """ + Returns whether there are any Quay-local notifications for the given user. + """ pass diff --git a/workers/notificationworker/models_pre_oci.py b/workers/notificationworker/models_pre_oci.py index 2802d52ca..15fb912da 100644 --- a/workers/notificationworker/models_pre_oci.py +++ b/workers/notificationworker/models_pre_oci.py @@ -9,7 +9,9 @@ from workers.notificationworker.models_interface import ( def notification(notification_row): - """ Converts the given notification row into a notification tuple. """ + """ + Converts the given notification row into a notification tuple. + """ return Notification( uuid=notification_row.uuid, event_name=notification_row.event.name, diff --git a/workers/queuecleanupworker.py b/workers/queuecleanupworker.py index 6af2501aa..b35c4f1dc 100644 --- a/workers/queuecleanupworker.py +++ b/workers/queuecleanupworker.py @@ -23,7 +23,9 @@ class QueueCleanupWorker(Worker): self.add_operation(self._cleanup_queue, QUEUE_CLEANUP_FREQUENCY) def _cleanup_queue(self): - """ Performs garbage collection on the queueitem table. """ + """ + Performs garbage collection on the queueitem table. + """ with UseThenDisconnect(app.config): while True: # Find all queue items older than the threshold (typically a week) and delete them. 
diff --git a/workers/queueworker.py b/workers/queueworker.py index 0c49090d6..3f7ffd4cb 100644 --- a/workers/queueworker.py +++ b/workers/queueworker.py @@ -12,17 +12,24 @@ logger = logging.getLogger(__name__) class JobException(Exception): - """ A job exception is an exception that is caused by something being malformed in the job. When - a worker raises this exception the job will be terminated and the retry will not be returned - to the queue. """ + """ + A job exception is an exception that is caused by something being malformed in the job. + + When a worker raises this exception the job will be terminated and the retry will not be + returned to the queue. + """ pass class WorkerUnhealthyException(Exception): - """ When this exception is raised, the worker is no longer healthy and will not accept any more - work. When this is raised while processing a queue item, the item should be returned to the - queue along with another retry. """ + """ + When this exception is raised, the worker is no longer healthy and will not accept any more + work. + + When this is raised while processing a queue item, the item should be returned to the queue + along with another retry. + """ pass @@ -56,15 +63,20 @@ class QueueWorker(Worker): self.add_operation(self.run_watchdog, self._watchdog_period_seconds) def process_queue_item(self, job_details): - """ Processes the work for the given job. If the job fails and should be retried, - this method should raise a WorkerUnhealthyException. If the job should be marked - as permanently failed, it should raise a JobException. Otherwise, a successful return - of this method will remove the job from the queue as completed. - """ + """ + Processes the work for the given job. + + If the job fails and should be retried, this method should raise a WorkerUnhealthyException. + If the job should be marked as permanently failed, it should raise a JobException. + Otherwise, a successful return of this method will remove the job from the queue as + completed. + """ raise NotImplementedError("Workers must implement run.") def watchdog(self): - """ Function that gets run once every watchdog_period_seconds. """ + """ + Function that gets run once every watchdog_period_seconds. + """ pass def extend_processing(self, seconds_from_now, updated_data=None): diff --git a/workers/repomirrorworker/__init__.py b/workers/repomirrorworker/__init__.py index f9171ec7b..9a8bb4f7c 100644 --- a/workers/repomirrorworker/__init__.py +++ b/workers/repomirrorworker/__init__.py @@ -32,13 +32,15 @@ unmirrored_repositories = Gauge( class PreemptedException(Exception): - """ Exception raised if another worker analyzed the image before this worker was able to do so. - """ + """ + Exception raised if another worker analyzed the image before this worker was able to do so. + """ class RepoMirrorSkopeoException(Exception): - """ Exception from skopeo - """ + """ + Exception from skopeo. + """ def __init__(self, message, stdout, stderr): self.message = message @@ -47,9 +49,11 @@ class RepoMirrorSkopeoException(Exception): def process_mirrors(skopeo, token=None): - """ Performs mirroring of repositories whose last sync time is greater than sync interval. - If a token is provided, scanning will begin where the token indicates it previously completed. - """ + """ + Performs mirroring of repositories whose last sync time is greater than sync interval. + + If a token is provided, scanning will begin where the token indicates it previously completed. 
+ """ if not features.REPO_MIRROR: logger.debug("Repository mirror disabled; skipping RepoMirrorWorker process_mirrors") @@ -79,7 +83,9 @@ def process_mirrors(skopeo, token=None): def perform_mirror(skopeo, mirror): - """Run mirror on all matching tags of remote repository.""" + """ + Run mirror on all matching tags of remote repository. + """ if os.getenv("DEBUGLOG", "false").lower() == "true": verbose_logs = True @@ -302,11 +308,11 @@ def get_all_tags(skopeo, mirror): def _skopeo_inspect_failure(result): """ - Custom processing of skopeo error messages for user friendly description + Custom processing of skopeo error messages for user friendly description. - :param result: SkopeoResults object - :return: Message to display - """ + :param result: SkopeoResults object + :return: Message to display + """ lines = result.stderr.split("\n") for line in lines: diff --git a/workers/repomirrorworker/models_interface.py b/workers/repomirrorworker/models_interface.py index 81093c94f..37cf693aa 100644 --- a/workers/repomirrorworker/models_interface.py +++ b/workers/repomirrorworker/models_interface.py @@ -6,11 +6,12 @@ from six import add_metaclass class RepoMirrorToken(namedtuple("NextRepoMirrorToken", ["min_id"])): """ - RepoMirrorToken represents an opaque token that can be passed between runs of the repository - mirror worker to continue mirroring whereever the previous run left off. Note that the data of the - token is *opaque* to the repository mirror worker, and the worker should *not* pull any data out - or modify the token in any way. - """ + RepoMirrorToken represents an opaque token that can be passed between runs of the repository + mirror worker to continue mirroring whereever the previous run left off. + + Note that the data of the token is *opaque* to the repository mirror worker, and the worker + should *not* pull any data out or modify the token in any way. + """ @add_metaclass(ABCMeta) @@ -18,8 +19,9 @@ class RepoMirrorWorkerDataInterface(object): @abstractmethod def repositories_to_mirror(self, target_time, start_token=None): """ - Returns a tuple consisting of an iterator of all the candidates to scan and a NextScanToken. - The iterator returns a tuple for each iteration consisting of the candidate Repository, the abort - signal, and the number of remaining candidates. If the iterator returned is None, there are - no candidates to process. - """ + Returns a tuple consisting of an iterator of all the candidates to scan and a NextScanToken. + + The iterator returns a tuple for each iteration consisting of the candidate Repository, the + abort signal, and the number of remaining candidates. If the iterator returned is None, + there are no candidates to process. + """ diff --git a/workers/repomirrorworker/test/test_repomirrorworker.py b/workers/repomirrorworker/test/test_repomirrorworker.py index aceb6741a..cbdacaad2 100644 --- a/workers/repomirrorworker/test/test_repomirrorworker.py +++ b/workers/repomirrorworker/test/test_repomirrorworker.py @@ -70,8 +70,8 @@ def _create_tag(repo, name): @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_successful_mirror(run_skopeo_mock, initialized_db, app): """ - Basic test of successful mirror - """ + Basic test of successful mirror. 
+ """ mirror, repo = create_mirror_repo_robot(["latest", "7.1"]) @@ -124,8 +124,8 @@ def test_successful_mirror(run_skopeo_mock, initialized_db, app): @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_successful_disabled_sync_now(run_skopeo_mock, initialized_db, app): """ - Disabled mirrors still allow "sync now" - """ + Disabled mirrors still allow "sync now". + """ mirror, repo = create_mirror_repo_robot(["latest", "7.1"]) mirror.is_enabled = False @@ -181,8 +181,8 @@ def test_successful_disabled_sync_now(run_skopeo_mock, initialized_db, app): @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_successful_mirror_verbose_logs(run_skopeo_mock, initialized_db, app, monkeypatch): """ - Basic test of successful mirror with verbose logs turned on - """ + Basic test of successful mirror with verbose logs turned on. + """ mirror, repo = create_mirror_repo_robot(["latest", "7.1"]) @@ -238,11 +238,12 @@ def test_successful_mirror_verbose_logs(run_skopeo_mock, initialized_db, app, mo @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_rollback(run_skopeo_mock, initialized_db, app): """ - Tags in the repo: - "updated" - this tag will be updated during the mirror - "removed" - this tag will be removed during the mirror - "created" - this tag will be created during the mirror - """ + Tags in the repo: + + "updated" - this tag will be updated during the mirror + "removed" - this tag will be removed during the mirror + "created" - this tag will be created during the mirror + """ mirror, repo = create_mirror_repo_robot(["updated", "created", "zzerror"]) _create_tag(repo, "updated") @@ -331,9 +332,11 @@ def test_rollback(run_skopeo_mock, initialized_db, app): def test_remove_obsolete_tags(initialized_db): """ - As part of the mirror, the set of tags on the remote repository is compared to the local - existing tags. Those not present on the remote are removed locally. - """ + As part of the mirror, the set of tags on the remote repository is compared to the local + existing tags. + + Those not present on the remote are removed locally. + """ mirror, repository = create_mirror_repo_robot(["updated", "created"], repo_name="removed") manifest = Manifest.get() @@ -352,8 +355,8 @@ def test_remove_obsolete_tags(initialized_db): @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_mirror_config_server_hostname(run_skopeo_mock, initialized_db, app, monkeypatch): """ - Set REPO_MIRROR_SERVER_HOSTNAME to override SERVER_HOSTNAME config - """ + Set REPO_MIRROR_SERVER_HOSTNAME to override SERVER_HOSTNAME config. + """ mirror, repo = create_mirror_repo_robot(["latest", "7.1"]) @@ -412,8 +415,8 @@ def test_mirror_config_server_hostname(run_skopeo_mock, initialized_db, app, mon @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_quote_params(run_skopeo_mock, initialized_db, app): """ - Basic test of successful mirror - """ + Basic test of successful mirror. + """ mirror, repo = create_mirror_repo_robot(["latest", "7.1"]) mirror.external_reference = "& rm -rf /;/namespace/repository" @@ -473,8 +476,8 @@ def test_quote_params(run_skopeo_mock, initialized_db, app): @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_quote_params_password(run_skopeo_mock, initialized_db, app): """ - Basic test of successful mirror - """ + Basic test of successful mirror. 
+ """ mirror, repo = create_mirror_repo_robot(["latest", "7.1"]) mirror.external_reference = "& rm -rf /;/namespace/repository" @@ -535,9 +538,11 @@ def test_quote_params_password(run_skopeo_mock, initialized_db, app): @mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo") def test_inspect_error_mirror(run_skopeo_mock, initialized_db, app): """ - Test for no tag for skopeo inspect. The mirror is processed four times, asserting that the remaining syncs - decrement until next sync is bumped to the future, confirming the fourth is never processed. - """ + Test for no tag for skopeo inspect. + + The mirror is processed four times, asserting that the remaining syncs decrement until next sync + is bumped to the future, confirming the fourth is never processed. + """ def skopeo_test(args, proxy): try: diff --git a/workers/repositoryactioncounter.py b/workers/repositoryactioncounter.py index db833c128..b825145de 100644 --- a/workers/repositoryactioncounter.py +++ b/workers/repositoryactioncounter.py @@ -21,8 +21,9 @@ class RepositoryActionCountWorker(Worker): @with_exponential_backoff(backoff_multiplier=10, max_backoff=3600, max_retries=10) def _count_repository_actions(self): - """ Counts actions and aggregates search scores for a random repository for the - previous day. """ + """ + Counts actions and aggregates search scores for a random repository for the previous day. + """ to_count = model.repositoryactioncount.find_uncounted_repository() if to_count is None: logger.debug("No further repositories to count") diff --git a/workers/repositorygcworker.py b/workers/repositorygcworker.py index e765023f9..6059c918a 100644 --- a/workers/repositorygcworker.py +++ b/workers/repositorygcworker.py @@ -13,8 +13,9 @@ REPOSITORY_GC_TIMEOUT = 6 # 0 * 15 # 15 minutes class RepositoryGCWorker(QueueWorker): - """ Worker which cleans up repositories enqueued to be GCed. - """ + """ + Worker which cleans up repositories enqueued to be GCed. + """ def process_queue_item(self, job_details): logger.debug("Got repository GC queue item: %s", job_details) diff --git a/workers/security_notification_worker.py b/workers/security_notification_worker.py index f3052a8ce..27d1b1cb1 100644 --- a/workers/security_notification_worker.py +++ b/workers/security_notification_worker.py @@ -26,10 +26,13 @@ class SecurityNotificationWorker(QueueWorker): self.perform_notification_work(data) def perform_notification_work(self, data, layer_limit=_LAYER_LIMIT): - """ Performs the work for handling a security notification as referenced by the given data - object. Returns True on successful handling, False on non-retryable failure and raises - a JobException on retryable failure. - """ + """ + Performs the work for handling a security notification as referenced by the given data + object. + + Returns True on successful handling, False on non-retryable failure and raises a + JobException on retryable failure. + """ secscan_api = secscan_model.legacy_api_handler notification_name = data["Name"] diff --git a/workers/servicekeyworker/models_interface.py b/workers/servicekeyworker/models_interface.py index 26284ffa6..bf7c98164 100644 --- a/workers/servicekeyworker/models_interface.py +++ b/workers/servicekeyworker/models_interface.py @@ -5,24 +5,30 @@ from six import add_metaclass @add_metaclass(ABCMeta) class ServiceKeyWorkerDataInterface(object): """ - Interface that represents all data store interactions required by the service key worker. 
- """ + Interface that represents all data store interactions required by the service key worker. + """ @abstractmethod def set_key_expiration(self, key_id, expiration_date): - """ Sets the expiration date of the service key with the given key ID to that given. """ + """ + Sets the expiration date of the service key with the given key ID to that given. + """ pass @abstractmethod def create_service_key_for_testing(self, expiration): - """ Creates a service key for testing with the given expiration. Returns the KID for - key. - """ + """ + Creates a service key for testing with the given expiration. + + Returns the KID for key. + """ pass @abstractmethod def get_service_key_expiration(self, key_id): - """ Returns the expiration date for the key with the given ID. If the key doesn't exist or - does not have an expiration, returns None. - """ + """ + Returns the expiration date for the key with the given ID. + + If the key doesn't exist or does not have an expiration, returns None. + """ pass diff --git a/workers/servicekeyworker/servicekeyworker.py b/workers/servicekeyworker/servicekeyworker.py index acec590e0..b5ec2a480 100644 --- a/workers/servicekeyworker/servicekeyworker.py +++ b/workers/servicekeyworker/servicekeyworker.py @@ -26,7 +26,9 @@ class ServiceKeyWorker(Worker): ) def _refresh_service_key(self): - """ Refreshes the instance's active service key so it doesn't get garbage collected. """ + """ + Refreshes the instance's active service key so it doesn't get garbage collected. + """ expiration_time = timedelta(minutes=instance_keys.service_key_expiration) new_expiration = datetime.utcnow() + expiration_time diff --git a/workers/tagbackfillworker.py b/workers/tagbackfillworker.py index 6eeff217d..e1408c39c 100644 --- a/workers/tagbackfillworker.py +++ b/workers/tagbackfillworker.py @@ -50,9 +50,11 @@ WORKER_TIMEOUT = app.config.get("BACKFILL_TAGS_TIMEOUT", 6000) class BrokenManifest(ManifestInterface): - """ Implementation of the ManifestInterface for "broken" manifests. This allows us to add the - new manifest row while not adding any additional rows for it. - """ + """ + Implementation of the ManifestInterface for "broken" manifests. + + This allows us to add the new manifest row while not adding any additional rows for it. + """ def __init__(self, digest, payload): self._digest = digest diff --git a/workers/teamsyncworker/teamsyncworker.py b/workers/teamsyncworker/teamsyncworker.py index 4adb6f146..31a69bb02 100644 --- a/workers/teamsyncworker/teamsyncworker.py +++ b/workers/teamsyncworker/teamsyncworker.py @@ -18,8 +18,9 @@ STALE_CUTOFF = convert_to_timedelta(app.config.get("TEAM_RESYNC_STALE_TIME", "30 class TeamSynchronizationWorker(Worker): - """ Worker which synchronizes teams with their backing groups in LDAP/Keystone/etc. - """ + """ + Worker which synchronizes teams with their backing groups in LDAP/Keystone/etc. + """ def __init__(self): super(TeamSynchronizationWorker, self).__init__() diff --git a/workers/test/test_tagbackfillworker.py b/workers/test/test_tagbackfillworker.py index 2a32c373e..83de90493 100644 --- a/workers/test/test_tagbackfillworker.py +++ b/workers/test/test_tagbackfillworker.py @@ -170,7 +170,9 @@ def test_manifestbackfillworker_broken_manifest(clear_rows, initialized_db): def test_manifestbackfillworker_mislinked_manifest(clear_rows, initialized_db): - """ Tests that a manifest whose image is mislinked will have its storages relinked properly. """ + """ + Tests that a manifest whose image is mislinked will have its storages relinked properly. 
+ """ # Delete existing tag manifest so we can reuse the tag. TagManifestLabel.delete().execute() TagManifest.delete().execute() @@ -205,8 +207,10 @@ def test_manifestbackfillworker_mislinked_manifest(clear_rows, initialized_db): def test_manifestbackfillworker_mislinked_invalid_manifest(clear_rows, initialized_db): - """ Tests that a manifest whose image is mislinked will attempt to have its storages relinked - properly. """ + """ + Tests that a manifest whose image is mislinked will attempt to have its storages relinked + properly. + """ # Delete existing tag manifest so we can reuse the tag. TagManifestLabel.delete().execute() TagManifest.delete().execute() @@ -239,7 +243,9 @@ def test_manifestbackfillworker_mislinked_invalid_manifest(clear_rows, initializ def test_manifestbackfillworker_repeat_digest(clear_rows, initialized_db): - """ Tests that a manifest with a shared digest will be properly linked. """ + """ + Tests that a manifest with a shared digest will be properly linked. + """ # Delete existing tag manifest so we can reuse the tag. TagManifestLabel.delete().execute() TagManifest.delete().execute() @@ -271,7 +277,9 @@ def test_manifestbackfillworker_repeat_digest(clear_rows, initialized_db): def test_manifest_backfill_broken_tag(clear_rows, initialized_db): - """ Tests backfilling a broken tag. """ + """ + Tests backfilling a broken tag. + """ # Delete existing tag manifest so we can reuse the tag. TagManifestLabel.delete().execute() TagManifest.delete().execute() diff --git a/workers/worker.py b/workers/worker.py index a55544ed8..116c98bb2 100644 --- a/workers/worker.py +++ b/workers/worker.py @@ -21,10 +21,12 @@ logger = logging.getLogger(__name__) def with_exponential_backoff(backoff_multiplier=10, max_backoff=3600, max_retries=10): def inner(func): - """ Decorator to retry the operation with exponential backoff if it raised an exception. + """ + Decorator to retry the operation with exponential backoff if it raised an exception. + Waits 2^attempts * `backoff_multiplier`, up to `max_backoff`, up to `max_retries` number of time, then re-raise the exception. - """ + """ def wrapper(*args, **kwargs): attempts = 0 @@ -50,7 +52,9 @@ def with_exponential_backoff(backoff_multiplier=10, max_backoff=3600, max_retrie class Worker(object): - """ Base class for workers which perform some work periodically. """ + """ + Base class for workers which perform some work periodically. + """ def __init__(self): self._sched = BackgroundScheduler() @@ -70,7 +74,9 @@ class Worker(object): return self._terminated.is_set() def ungracefully_terminated(self): - """ Method called when the worker has been terminated in an ungraceful fashion. """ + """ + Method called when the worker has been terminated in an ungraceful fashion. + """ pass def add_operation(self, operation_func, operation_sec):