mirror of https://github.com/quay/quay.git synced 2026-01-27 18:42:52 +03:00
quay/data/logs_model/inmemory_model.py
Kurtis Mullins 38be6d05d0 Python 3 (#153)
* Convert all Python2 to Python3 syntax.

* Removes oauth2lib dependency

* Replace mockredis with fakeredis

* byte/str conversions

* Removes nonexisting __nonzero__ in Python3
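
In Python 3 the truth-testing hook is __bool__, not __nonzero__. A minimal
sketch of the rename (class name is illustrative):

    class Page:
        def __init__(self, items):
            self.items = items

        # Python 2 called __nonzero__ for truth testing; Python 3
        # only calls __bool__, so the old name is silently ignored.
        def __bool__(self):
            return bool(self.items)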

* Python3 Dockerfile and related

* [PROJQUAY-98] Replace resumablehashlib with rehash
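
For context, rehash's selling point is that its hashers can be pickled
mid-stream, which is what resumable uploads need. A hedged sketch, assuming
rehash's advertised picklable-hasher API:

    import pickle

    import rehash

    hasher = rehash.sha256()
    hasher.update(b"first chunk")

    # Serialize the partially-updated hasher and resume it later.
    resumed = pickle.loads(pickle.dumps(hasher))
    resumed.update(b"second chunk")
    print(resumed.hexdigest())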

* PROJQUAY-123 - replace gpgme with python3-gpg

* [PROJQUAY-135] Fix unhashable class error

* Update external dependencies for Python 3

- Move github.com/app-registry/appr to github.com/quay/appr
- github.com/coderanger/supervisor-stdout
- github.com/DevTable/container-cloud-config
- Update to latest mockldap with changes applied from coreos/mockldap
- Update dependencies in requirements.txt and requirements-dev.txt

* Default FLOAT_REPR function to str in json encoder and removes keyword assignment

True, False, and str were not keywords in Python2...

* [PROJQUAY-165] Replace package `bencode` with `bencode.py`

- Bencode is not compatible with Python 3.x and is no longer
  maintained. Bencode.py appears to be a drop-in replacement/fork
  that is compatible with Python 3.

* Make sure monkey.patch is called before anything else

* Removes anunidecode dependency and replaces it with text_unidecode

* Base64 encode/decode pickle dumps/loads when storing value in DB

Base64 encodes/decodes the serialized values when storing them in the
DB. Also makes sure to return a Python 3 string instead of bytes when
coercing for the DB; otherwise, Postgres' TEXT field will convert the
value into a hex representation when storing it.
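
A minimal sketch of that pattern (helper names are illustrative, not the
actual field implementation):

    import base64
    import pickle

    def dumps_for_db(value):
        # Base64 keeps the serialized value printable; returning str (not
        # bytes) avoids Postgres TEXT coercing it to a hex representation.
        return base64.b64encode(pickle.dumps(value)).decode("ascii")

    def loads_from_db(text):
        return pickle.loads(base64.b64decode(text.encode("ascii")))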

* Implement __hash__ on Digest class

In Python 3, if a class defines __eq__() but not __hash__(), its
instances will not be usable as items in hashable collections (e.g. sets).
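
For example (a sketch, not the actual Digest class):

    class Digest:
        def __init__(self, algorithm, hex_value):
            self.algorithm = algorithm
            self.hex_value = hex_value

        def __eq__(self, other):
            return (self.algorithm, self.hex_value) == (other.algorithm, other.hex_value)

        # Python 3 sets __hash__ to None when __eq__ is defined, so it
        # must be restored explicitly for sets and dict keys to work.
        def __hash__(self):
            return hash((self.algorithm, self.hex_value))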

* Remove basestring check

* Fix expected message in credentials tests

* Fix usage of Cryptography.Fernet for Python3 (#219)

- Specifically, this addresses the issue where Byte<->String
  conversions weren't being applied correctly.
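
The fix amounts to encoding str plaintext to bytes before encrypting and
decoding the decrypted bytes back to str; a minimal sketch:

    from cryptography.fernet import Fernet

    f = Fernet(Fernet.generate_key())

    # Fernet accepts and returns bytes only in Python 3.
    token = f.encrypt("secret value".encode("utf-8"))
    assert f.decrypt(token).decode("utf-8") == "secret value"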

* Fix utils

- tar+stream layer format utils
- filelike util

* Fix storage tests

* Fix endpoint tests

* Fix workers tests

* Fix docker's empty layer bytes

* Fix registry tests

* Appr

* Enable CI for Python 3.6

* Skip buildman tests

Skip buildman tests while buildman is being rewritten, to allow CI to pass.

* Install swig for CI

* Update expected exception type in redis validation test

* Fix gpg signing calls

Fix gpg calls for updated gpg wrapper, and add signing tests.

* Convert / to // for Python3 integer division
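
For example:

    assert 7 / 2 == 3.5  # Python 3: / is always true division
    assert 7 // 2 == 3   # // keeps Python 2's integer-division result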

* WIP: Update buildman to use asyncio instead of trollius.

This dependency is considered deprecated/abandoned and was only used as
an implementation/backport of asyncio on Python 2.x. This is a work in
progress, and is included in the PR just to get the rest of the tests
passing. The builder is actually being rewritten.
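
The mechanical part of the conversion looks roughly like this (names are
illustrative):

    # Before, with trollius on Python 2:
    #     @trollius.coroutine
    #     def heartbeat(self):
    #         yield trollius.From(self._send_heartbeat())

    # After, with native asyncio syntax on Python 3:
    import asyncio

    async def heartbeat(send_heartbeat, interval=1.0):
        await send_heartbeat()
        await asyncio.sleep(interval)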

* Target Python 3.8

* Removes unused files

- Removes unused files that were added accidentally while rebasing
- Small fixes/cleanup
- TODO tasks comments

* Add TODO to verify rehash backward compat with resumablehashlib

* Revert "[PROJQUAY-135] Fix unhashable class error" and implements __hash__ instead.

This reverts commit 735e38e3c1d072bf50ea864bc7e119a55d3a8976.
Instead, defines __hash__ for the encrypted fields class, using the parent
field's implementation.

* Remove some unused files and imports

Co-authored-by: Kenny Lee Sin Cheong <kenny.lee@redhat.com>
Co-authored-by: Tom McKay <thomasmckay@redhat.com>
2020-06-05 16:50:13 -04:00

326 lines · 9.8 KiB · Python

import logging
import json

from collections import namedtuple
from datetime import datetime

from tzlocal import get_localzone
from dateutil.relativedelta import relativedelta

from data import model
from data.logs_model.datatypes import AggregatedLogCount, LogEntriesPage, Log
from data.logs_model.interface import (
    ActionLogsDataInterface,
    LogRotationContextInterface,
    LogsIterationTimeout,
)

logger = logging.getLogger(__name__)

LogAndRepository = namedtuple("LogAndRepository", ["log", "stored_log", "repository"])

StoredLog = namedtuple(
    "StoredLog",
    ["kind_id", "account_id", "performer_id", "ip", "metadata_json", "repository_id", "datetime"],
)


class InMemoryModel(ActionLogsDataInterface):
    """
    InMemoryModel implements the data model for logs in-memory.

    FOR TESTING ONLY.
    """

    def __init__(self):
        self.logs = []

    def _filter_logs(
        self,
        start_datetime,
        end_datetime,
        performer_name=None,
        repository_name=None,
        namespace_name=None,
        filter_kinds=None,
    ):
        if filter_kinds is not None:
            assert all(isinstance(kind_name, str) for kind_name in filter_kinds)

        for log_and_repo in self.logs:
            if (
                log_and_repo.log.datetime < start_datetime
                or log_and_repo.log.datetime > end_datetime
            ):
                continue

            if performer_name and log_and_repo.log.performer_username != performer_name:
                continue

            if repository_name and (
                not log_and_repo.repository or log_and_repo.repository.name != repository_name
            ):
                continue

            if namespace_name and log_and_repo.log.account_username != namespace_name:
                continue

            if filter_kinds:
                kind_map = model.log.get_log_entry_kinds()
                ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
                if log_and_repo.log.kind_id in ignore_ids:
                    continue

            yield log_and_repo

    def _filter_latest_logs(
        self, performer_name=None, repository_name=None, namespace_name=None, filter_kinds=None
    ):
        if filter_kinds is not None:
            assert all(isinstance(kind_name, str) for kind_name in filter_kinds)

        for log_and_repo in sorted(self.logs, key=lambda t: t.log.datetime, reverse=True):
            if performer_name and log_and_repo.log.performer_username != performer_name:
                continue

            if repository_name and (
                not log_and_repo.repository or log_and_repo.repository.name != repository_name
            ):
                continue

            if namespace_name and log_and_repo.log.account_username != namespace_name:
                continue

            if filter_kinds:
                kind_map = model.log.get_log_entry_kinds()
                ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
                if log_and_repo.log.kind_id in ignore_ids:
                    continue

            yield log_and_repo

    def lookup_logs(
        self,
        start_datetime,
        end_datetime,
        performer_name=None,
        repository_name=None,
        namespace_name=None,
        filter_kinds=None,
        page_token=None,
        max_page_count=None,
    ):
        logs = []
        for log_and_repo in self._filter_logs(
            start_datetime,
            end_datetime,
            performer_name,
            repository_name,
            namespace_name,
            filter_kinds,
        ):
            logs.append(log_and_repo.log)

        return LogEntriesPage(logs, None)

    def lookup_latest_logs(
        self,
        performer_name=None,
        repository_name=None,
        namespace_name=None,
        filter_kinds=None,
        size=20,
    ):
        latest_logs = []
        for log_and_repo in self._filter_latest_logs(
            performer_name, repository_name, namespace_name, filter_kinds
        ):
            if size is not None and len(latest_logs) == size:
                break

            latest_logs.append(log_and_repo.log)

        return latest_logs

    def get_aggregated_log_counts(
        self,
        start_datetime,
        end_datetime,
        performer_name=None,
        repository_name=None,
        namespace_name=None,
        filter_kinds=None,
    ):
        entries = {}
        for log_and_repo in self._filter_logs(
            start_datetime,
            end_datetime,
            performer_name,
            repository_name,
            namespace_name,
            filter_kinds,
        ):
            entry = log_and_repo.log

            # Build a date with the entry's day-of-month in the month of
            # start_datetime; counts are aggregated per (kind, day) pair.
            synthetic_date = datetime(
                start_datetime.year,
                start_datetime.month,
                int(entry.datetime.day),
                tzinfo=get_localzone(),
            )

            # A day-of-month earlier than start_datetime's belongs to the
            # following month of the queried range, so roll forward.
            if synthetic_date.day < start_datetime.day:
                synthetic_date = synthetic_date + relativedelta(months=1)

            key = "%s-%s" % (entry.kind_id, entry.datetime.day)
            if key in entries:
                entries[key] = AggregatedLogCount(
                    entry.kind_id, entries[key].count + 1, synthetic_date
                )
            else:
                entries[key] = AggregatedLogCount(entry.kind_id, 1, synthetic_date)

        return list(entries.values())

    def count_repository_actions(self, repository, day):
        count = 0
        for log_and_repo in self.logs:
            if log_and_repo.repository != repository:
                continue

            if log_and_repo.log.datetime.day != day.day:
                continue

            count += 1

        return count

    def queue_logs_export(
        self,
        start_datetime,
        end_datetime,
        export_action_logs_queue,
        namespace_name=None,
        repository_name=None,
        callback_url=None,
        callback_email=None,
        filter_kinds=None,
    ):
        raise NotImplementedError

    def log_action(
        self,
        kind_name,
        namespace_name=None,
        performer=None,
        ip=None,
        metadata=None,
        repository=None,
        repository_name=None,
        timestamp=None,
        is_free_namespace=False,
    ):
        timestamp = timestamp or datetime.today()

        if not repository and repository_name and namespace_name:
            repository = model.repository.get_repository(namespace_name, repository_name)

        account = None
        account_id = None
        performer_id = None
        repository_id = None

        if namespace_name is not None:
            account = model.user.get_namespace_user(namespace_name)
            account_id = account.id

        if performer is not None:
            performer_id = performer.id

        if repository is not None:
            repository_id = repository.id

        metadata_json = json.dumps(metadata or {})
        kind_id = model.log.get_log_entry_kinds()[kind_name]

        stored_log = StoredLog(
            kind_id, account_id, performer_id, ip, metadata_json, repository_id, timestamp
        )

        log = Log(
            metadata_json=metadata,
            ip=ip,
            datetime=timestamp,
            performer_email=performer.email if performer else None,
            performer_username=performer.username if performer else None,
            performer_robot=performer.robot if performer else None,
            account_organization=account.organization if account else None,
            account_username=account.username if account else None,
            account_email=account.email if account else None,
            account_robot=account.robot if account else None,
            kind_id=kind_id,
        )

        self.logs.append(LogAndRepository(log, stored_log, repository))

    def yield_logs_for_export(
        self,
        start_datetime,
        end_datetime,
        repository_id=None,
        namespace_id=None,
        max_query_time=None,
    ):
        # Just for testing.
        if max_query_time is not None:
            raise LogsIterationTimeout()

        logs = []
        for log_and_repo in self._filter_logs(start_datetime, end_datetime):
            if repository_id and (
                not log_and_repo.repository or log_and_repo.repository.id != repository_id
            ):
                continue

            if namespace_id:
                if log_and_repo.log.account_username is None:
                    continue

                namespace = model.user.get_namespace_user(log_and_repo.log.account_username)
                if namespace.id != namespace_id:
                    continue

            logs.append(log_and_repo.log)

        yield logs

    def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
        expired_logs = [
            log_and_repo for log_and_repo in self.logs if log_and_repo.log.datetime <= cutoff_date
        ]
        while True:
            if not expired_logs:
                break

            context = InMemoryLogRotationContext(expired_logs[:min_logs_per_rotation], self.logs)
            expired_logs = expired_logs[min_logs_per_rotation:]
            yield context


class InMemoryLogRotationContext(LogRotationContextInterface):
    def __init__(self, expired_logs, all_logs):
        self.expired_logs = expired_logs
        self.all_logs = all_logs

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # Only drop the expired logs if the batch was processed cleanly.
        if ex_type is None and ex_value is None and ex_traceback is None:
            for log in self.expired_logs:
                self.all_logs.remove(log)

    def yield_logs_batch(self):
        """
        Yield a batch of logs and a filename for that batch.
        """
        filename = "inmemory_model_filename_placeholder"
        filename = ".".join((filename, "txt.gz"))
        yield [log_and_repo.stored_log for log_and_repo in self.expired_logs], filename
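
For reference, a minimal, hypothetical usage sketch of this model in a test
(assuming an initialized test database, as in Quay's fixtures, since
log_action resolves kind names and namespaces through data.model; field
names follow data.logs_model.datatypes):

    from datetime import datetime, timedelta

    from data.logs_model.inmemory_model import InMemoryModel

    logs_model = InMemoryModel()
    logs_model.log_action("push_repo", namespace_name="devtable", repository_name="simple")

    page = logs_model.lookup_logs(datetime.now() - timedelta(days=1), datetime.now())
    assert len(page.logs) == 1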