+ Save Configuration Changes
+ Configuration Saved
+ {{ configform.$error['required'].length }} configuration fields remaining
+ Invalid configuration field
+ {{ serviceInfo.service.title }}
+ {{ serviceInfo.errorMessage }}
diff --git a/config_app/js/core-config-setup/core-config-setup.js b/config_app/js/core-config-setup/core-config-setup.js
new file mode 100644
index 000000000..fe5fc512c
--- /dev/null
+++ b/config_app/js/core-config-setup/core-config-setup.js
@@ -0,0 +1,1454 @@
+import * as URI from 'urijs';
+import * as angular from 'angular';
+const templateUrl = require('./config-setup-tool.html');
+const urlParsedField = require('../config-field-templates/config-parsed-field.html');
+const urlVarField = require('../config-field-templates/config-variable-field.html');
+const urlListField = require('../config-field-templates/config-list-field.html');
+const urlFileField = require('../config-field-templates/config-file-field.html');
+const urlBoolField = require('../config-field-templates/config-bool-field.html');
+const urlNumericField = require('../config-field-templates/config-numeric-field.html');
+const urlContactField = require('../config-field-templates/config-contact-field.html');
+const urlContactsField = require('../config-field-templates/config-contacts-field.html');
+const urlMapField = require('../config-field-templates/config-map-field.html');
+const urlServiceKeyField = require('../config-field-templates/config-service-key-field.html');
+const urlStringField = require('../config-field-templates/config-string-field.html');
+const urlPasswordField = require('../config-field-templates/config-password-field.html');
+
+const urlStringListField = require('../config-field-templates/config-string-list-field.html');
+const urlCertField = require('../config-field-templates/config-certificates-field.html');
+
+
+angular.module("quay-config")
+ .directive('configSetupTool', () => {
+ var directiveDefinitionObject = {
+ priority: 1,
+ templateUrl,
+ replace: true,
+ transclude: true,
+ restrict: 'C',
+ scope: {
+ 'isActive': '=isActive',
+ 'configurationSaved': '&configurationSaved',
+ 'setupCompleted': '&setupCompleted',
+ },
+ controller: function($rootScope, $scope, $element, $timeout, ApiService) {
+ var authPassword = null;
+
+ $scope.HOSTNAME_REGEX = '^[a-zA-Z0-9\\-\\.]+(:[0-9]+)?$';
+ $scope.GITHOST_REGEX = '^https?://([a-zA-Z0-9]+\\.?/?)+$';
+
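+ // Services that can be validated during setup. Entries with a 'condition' only
+ // appear when the corresponding feature flag or setting is enabled in the config.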
+ $scope.SERVICES = [
+ {'id': 'redis', 'title': 'Redis'},
+
+ {'id': 'registry-storage', 'title': 'Registry Storage'},
+
+ {'id': 'time-machine', 'title': 'Time Machine'},
+
+ {'id': 'access', 'title': 'Access Settings'},
+
+ {'id': 'ssl', 'title': 'SSL certificate and key', 'condition': function(config) {
+ return config.PREFERRED_URL_SCHEME == 'https';
+ }},
+
+ {'id': 'ldap', 'title': 'LDAP Authentication', 'condition': function(config) {
+ return config.AUTHENTICATION_TYPE == 'LDAP';
+ }},
+
+ {'id': 'jwt', 'title': 'JWT Authentication', 'condition': function(config) {
+ return config.AUTHENTICATION_TYPE == 'JWT';
+ }},
+
+ {'id': 'keystone', 'title': 'Keystone Authentication', 'condition': function(config) {
+ return config.AUTHENTICATION_TYPE == 'Keystone';
+ }},
+
+ {'id': 'apptoken-auth', 'title': 'App Token Authentication', 'condition': function(config) {
+ return config.AUTHENTICATION_TYPE == 'AppToken';
+ }},
+
+ {'id': 'signer', 'title': 'ACI Signing', 'condition': function(config) {
+ return config.FEATURE_ACI_CONVERSION;
+ }},
+
+ {'id': 'github-login', 'title': 'GitHub (Enterprise) Authentication', 'condition': function(config) {
+ return config.FEATURE_GITHUB_LOGIN;
+ }},
+
+ {'id': 'google-login', 'title': 'Google Authentication', 'condition': function(config) {
+ return config.FEATURE_GOOGLE_LOGIN;
+ }},
+
+ {'id': 'github-trigger', 'title': 'GitHub (Enterprise) Build Triggers', 'condition': function(config) {
+ return config.FEATURE_GITHUB_BUILD;
+ }},
+
+ {'id': 'bitbucket-trigger', 'title': 'BitBucket Build Triggers', 'condition': function(config) {
+ return config.FEATURE_BITBUCKET_BUILD;
+ }},
+
+ {'id': 'gitlab-trigger', 'title': 'GitLab Build Triggers', 'condition': function(config) {
+ return config.FEATURE_GITLAB_BUILD;
+ }},
+
+ {'id': 'security-scanner', 'title': 'Quay Security Scanner', 'condition': function(config) {
+ return config.FEATURE_SECURITY_SCANNER;
+ }},
+
+ {'id': 'bittorrent', 'title': 'BitTorrent downloads', 'condition': function(config) {
+ return config.FEATURE_BITTORRENT;
+ }},
+
+ {'id': 'oidc-login', 'title': 'OIDC Login(s)', 'condition': function(config) {
+ return $scope.getOIDCProviders(config).length > 0;
+ }},
+
+ {'id': 'actionlogarchiving', 'title': 'Action Log Rotation', 'condition': function(config) {
+ return config.FEATURE_ACTION_LOG_ROTATION;
+ }},
+
+ {'id': 'repomirroring', 'title': 'Repository Mirroring', 'condition': function(config) {
+ return config.FEATURE_REPOSITORY_MIRRORING;
+ }},
+ ];
+
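+ // Form field descriptors for each supported storage engine. These drive the storage
+ // section of the setup form and are also used to strip fields that do not apply to
+ // the selected engine.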
+ $scope.STORAGE_CONFIG_FIELDS = {
+ 'LocalStorage': [
+ {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/some/directory', 'kind': 'text'}
+ ],
+
+ 'S3Storage': [
+ {'name': 's3_bucket', 'title': 'S3 Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'},
+ {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'},
+ {'name': 's3_access_key', 'title': 'AWS Access Key (optional if using IAM)', 'placeholder': 'accesskeyhere', 'kind': 'text', 'optional': true},
+ {'name': 's3_secret_key', 'title': 'AWS Secret Key (optional if using IAM)', 'placeholder': 'secretkeyhere', 'kind': 'password', 'optional': true},
+ {'name': 'host', 'title': 'S3 Host', 'placeholder': 's3.amazonaws.com', 'kind': 'text', 'optional': true},
+ {'name': 'port', 'title': 'S3 Port', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true}
+ ],
+
+ 'AzureStorage': [
+ {'name': 'azure_container', 'title': 'Azure Storage Container', 'placeholder': 'container', 'kind': 'text'},
+ {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/container', 'kind': 'text'},
+ {'name': 'azure_account_name', 'title': 'Azure Account Name', 'placeholder': 'accountnamehere', 'kind': 'text'},
+ {'name': 'azure_account_key', 'title': 'Azure Account Key', 'placeholder': 'accountkeyhere', 'kind': 'text', 'optional': true},
+ {'name': 'sas_token', 'title': 'Azure SAS Token', 'placeholder': 'sastokenhere', 'kind': 'text', 'optional': true},
+ ],
+
+ 'GoogleCloudStorage': [
+ {'name': 'access_key', 'title': 'Cloud Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text'},
+ {'name': 'secret_key', 'title': 'Cloud Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'},
+ {'name': 'bucket_name', 'title': 'GCS Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'},
+ {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'}
+ ],
+
+ 'RHOCSStorage': [
+ {'name': 'hostname', 'title': 'NooBaa Server Hostname', 'placeholder': 'my.noobaa.hostname', 'kind': 'text'},
+ {'name': 'port', 'title': 'Custom Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true},
+ {'name': 'is_secure', 'title': 'Is Secure', 'placeholder': 'Require SSL', 'kind': 'bool'},
+ {'name': 'access_key', 'title': 'Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text'},
+ {'name': 'secret_key', 'title': 'Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'},
+ {'name': 'bucket_name', 'title': 'Bucket Name', 'placeholder': 'my-cool-bucket', 'kind': 'text'},
+ {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'}
+ ],
+
+ 'RadosGWStorage': [
+ {'name': 'hostname', 'title': 'Rados Server Hostname', 'placeholder': 'my.rados.hostname', 'kind': 'text'},
+ {'name': 'port', 'title': 'Custom Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true},
+ {'name': 'is_secure', 'title': 'Is Secure', 'placeholder': 'Require SSL', 'kind': 'bool'},
+ {'name': 'access_key', 'title': 'Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text', 'help_url': 'http://ceph.com/docs/master/radosgw/admin/'},
+ {'name': 'secret_key', 'title': 'Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'},
+ {'name': 'bucket_name', 'title': 'Bucket Name', 'placeholder': 'my-cool-bucket', 'kind': 'text'},
+ {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'}
+ ],
+
+ 'SwiftStorage': [
+ {'name': 'auth_version', 'title': 'Swift Auth Version', 'kind': 'option', 'values': [1, 2, 3]},
+ {'name': 'auth_url', 'title': 'Swift Auth URL', 'placeholder': 'http://swiftdomain/auth/v1.0', 'kind': 'text'},
+ {'name': 'swift_container', 'title': 'Swift Container Name', 'placeholder': 'mycontainer', 'kind': 'text',
+ 'help_text': 'The swift container for all objects. Must already exist inside Swift.'},
+
+ {'name': 'storage_path', 'title': 'Storage Path', 'placeholder': '/path/inside/container', 'kind': 'text'},
+
+ {'name': 'swift_user', 'title': 'Username', 'placeholder': 'accesskeyhere', 'kind': 'text',
+ 'help_text': 'Note: For Swift V1, this is "username:password" (-U on the CLI).'},
+ {'name': 'swift_password', 'title': 'Key/Password', 'placeholder': 'secretkeyhere', 'kind': 'text',
+ 'help_text': 'Note: For Swift V1, this is the API token (-K on the CLI).'},
+
+ {'name': 'ca_cert_path', 'title': 'CA Cert Filename', 'placeholder': 'conf/stack/swift.cert', 'kind': 'text', 'optional': true},
+
+ {'name': 'temp_url_key', 'title': 'Temp URL Key (optional)', 'placeholder': 'key-here', 'kind': 'text', 'optional': true,
+ 'help_url': 'https://coreos.com/products/enterprise-registry/docs/latest/swift-temp-url.html',
+ 'help_text': 'If enabled, will allow for faster pulls directly from Swift.'},
+
+ {'name': 'os_options', 'title': 'OS Options', 'kind': 'map',
+ 'keys': ['tenant_id', 'auth_token', 'service_type', 'endpoint_type', 'tenant_name', 'object_storage_url', 'region_name',
+ 'project_id', 'project_name', 'project_domain_name', 'user_domain_name', 'user_domain_id']}
+ ],
+
+ 'CloudFrontedS3Storage': [
+ {'name': 's3_bucket', 'title': 'S3 Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'},
+ {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'},
+ {'name': 's3_access_key', 'title': 'AWS Access Key (optional if using IAM)', 'placeholder': 'accesskeyhere', 'kind': 'text', 'optional': true},
+ {'name': 's3_secret_key', 'title': 'AWS Secret Key (optional if using IAM)', 'placeholder': 'secretkeyhere', 'kind': 'text', 'optional': true},
+ {'name': 'host', 'title': 'S3 Host', 'placeholder': 's3.amazonaws.com', 'kind': 'text', 'optional': true},
+ {'name': 'port', 'title': 'S3 Port', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true},
+
+ {'name': 'cloudfront_distribution_domain', 'title': 'CloudFront Distribution Domain Name', 'placeholder': 'somesubdomain.cloudfront.net', 'pattern': '^([0-9a-zA-Z]+\\.)+[0-9a-zA-Z]+$', 'kind': 'text'},
+ {'name': 'cloudfront_key_id', 'title': 'CloudFront Key ID', 'placeholder': 'APKATHISISAKEYID', 'kind': 'text'},
+ {'name': 'cloudfront_privatekey_filename', 'title': 'CloudFront Private Key', 'filesuffix': 'cloudfront-signing-key.pem', 'kind': 'file'},
+ ],
+ };
+
+ $scope.enableFeature = function(config, feature) {
+ config[feature] = true;
+ };
+
+ $scope.validateHostname = function(hostname) {
+ if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) {
+ return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.'
+ }
+
+ return null;
+ };
+
+ $scope.config = null;
+ $scope.mapped = {
+ '$hasChanges': false
+ };
+
+ $scope.hasfile = {};
+ $scope.validating = null;
+ $scope.savingConfiguration = false;
+
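+ // OIDC providers are stored in the config under keys of the form <ID>_LOGIN_CONFIG;
+ // the helpers below add, remove, and enumerate those keys.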
+ $scope.removeOIDCProvider = function(provider) {
+ delete $scope.config[provider];
+ };
+
+ $scope.addOIDCProvider = () => {
+ bootbox.prompt('Enter an ID for the OIDC provider', function(result) {
+ if (!result) {
+ return;
+ }
+
+ result = result.toUpperCase();
+
+ if (!result.match(/^[A-Z0-9]+$/)) {
+ bootbox.alert('Invalid ID for OIDC provider: must be alphanumeric');
+ return;
+ }
+
+ if (result == 'GITHUB' || result == 'GOOGLE') {
+ bootbox.alert('Invalid ID for OIDC provider: cannot be a reserved name');
+ return;
+ }
+
+ var key = result + '_LOGIN_CONFIG';
+ if ($scope.config[key]) {
+ bootbox.alert('Invalid ID for OIDC provider: already exists');
+ return;
+ }
+
+ $scope.config[key] = {};
+ });
+ };
+
+ $scope.getOIDCProviderId = function(key) {
+ var index = key.indexOf('_LOGIN_CONFIG');
+ if (index <= 0) {
+ return null;
+ }
+
+ return key.substr(0, index).toLowerCase();
+ };
+
+ $scope.getOIDCProviders = function(config) {
+ var keys = Object.keys(config || {});
+ return keys.filter(function(key) {
+ if (key == 'GITHUB_LOGIN_CONFIG' || key == 'GOOGLE_LOGIN_CONFIG') {
+ // Has custom UI and config.
+ return false;
+ }
+
+ return !!$scope.getOIDCProviderId(key);
+ });
+ };
+
+ $scope.getServices = function(config) {
+ var services = [];
+ if (!config) { return services; }
+
+ for (var i = 0; i < $scope.SERVICES.length; ++i) {
+ var service = $scope.SERVICES[i];
+ if (!service.condition || service.condition(config)) {
+ services.push({
+ 'service': service,
+ 'status': 'validating'
+ });
+ }
+ }
+
+ return services;
+ };
+
+ $scope.validationStatus = function(serviceInfos) {
+ if (!serviceInfos) { return 'validating'; }
+
+ var hasError = false;
+ for (var i = 0; i < serviceInfos.length; ++i) {
+ if (serviceInfos[i].status == 'validating') {
+ return 'validating';
+ }
+ if (serviceInfos[i].status == 'error') {
+ hasError = true;
+ }
+ }
+
+ return hasError ? 'failed' : 'success';
+ };
+
+ $scope.cancelValidation = function() {
+ $('#validateAndSaveModal').modal('hide');
+ $scope.validating = null;
+ $scope.savingConfiguration = false;
+ };
+
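+ // Validates a single service's configuration against the server, recording the
+ // resulting status and any error message on the serviceInfo entry.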
+ $scope.validateService = function(serviceInfo, opt_password) {
+ var params = {
+ 'service': serviceInfo.service.id
+ };
+
+ var data = {
+ 'config': $scope.config,
+ 'password': opt_password || ''
+ };
+
+ var errorDisplay = ApiService.errorDisplay(
+ 'Could not validate configuration. Please report this error.',
+ function() {
+ authPassword = null;
+ });
+
+ ApiService.scValidateConfig(data, params).then(function(resp) {
+ serviceInfo.status = resp.status ? 'success' : 'error';
+ serviceInfo.errorMessage = $.trim(resp.reason || '');
+
+ if (!resp.status) {
+ authPassword = null;
+ }
+
+ }, errorDisplay);
+ };
+
+ $scope.checkValidateAndSave = function() {
+ if ($scope.configform.$valid) {
+ saveStorageConfig();
+ $scope.validateAndSave();
+ return;
+ }
+
+ var query = $element.find("input.ng-invalid:first");
+
+ if (query && query.length) {
+ query[0].scrollIntoView();
+ query.focus();
+ }
+ };
+
+ $scope.validateAndSave = function() {
+ $scope.validating = $scope.getServices($scope.config);
+
+ $scope.performValidateAndSave();
+ };
+
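+ // Opens the validation modal and validates every applicable service against the
+ // current config (and the optional password).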
+ $scope.performValidateAndSave = function(opt_password) {
+ $scope.savingConfiguration = false;
+ $scope.validating = $scope.getServices($scope.config);
+
+ authPassword = opt_password;
+
+ $('#validateAndSaveModal').modal({
+ keyboard: false,
+ backdrop: 'static'
+ });
+
+ for (var i = 0; i < $scope.validating.length; ++i) {
+ var serviceInfo = $scope.validating[i];
+ $scope.validateService(serviceInfo, opt_password);
+ }
+ };
+
+ $scope.saveConfiguration = function() {
+ $scope.savingConfiguration = true;
+
+ // Make sure to note that fully verified setup is completed. We use this as a signal
+ // in the setup tool.
+ $scope.config['SETUP_COMPLETE'] = true;
+
+ var data = {
+ 'config': $scope.config,
+ 'hostname': window.location.host,
+ 'password': authPassword || ''
+ };
+
+ var errorDisplay = ApiService.errorDisplay(
+ 'Could not save configuration. Please report this error.',
+ function() {
+ authPassword = null;
+ });
+
+ ApiService.scUpdateConfig(data).then(function(resp) {
+ authPassword = null;
+
+ $scope.savingConfiguration = false;
+ $scope.mapped.$hasChanges = false;
+
+ $('#validateAndSaveModal').modal('hide');
+
+ $scope.setupCompleted();
+ }, errorDisplay);
+ };
+
+ // Convert storage config to an array
+ var initializeStorageConfig = function($scope) {
+ var config = $scope.config.DISTRIBUTED_STORAGE_CONFIG || {};
+ var defaultLocations = $scope.config.DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS || [];
+ var preference = $scope.config.DISTRIBUTED_STORAGE_PREFERENCE || [];
+
+ $scope.serverStorageConfig = angular.copy(config);
+ $scope.storageConfig = [];
+
+ Object.keys(config).forEach(function(location) {
+ $scope.storageConfig.push({
+ location: location,
+ defaultLocation: defaultLocations.indexOf(location) >= 0,
+ data: angular.copy(config[location]),
+ error: {},
+ });
+ });
+
+ if (!$scope.storageConfig.length) {
+ $scope.addStorageConfig('default');
+ return;
+ }
+
+ // Sort to match DISTRIBUTED_STORAGE_PREFERENCE order first; remaining
+ // locations are ordered by Unicode code point value.
+ $scope.storageConfig.sort(function(a, b) {
+ var indexA = preference.indexOf(a.location);
+ var indexB = preference.indexOf(b.location);
+
+ if (indexA > -1 && indexB > -1) return indexA < indexB ? -1 : 1;
+ if (indexA > -1) return -1;
+ if (indexB > -1) return 1;
+
+ return a.location < b.location ? -1 : 1;
+ });
+ };
+
+ $scope.allowChangeLocationStorageConfig = function(location) {
+ if (!$scope.serverStorageConfig[location]) { return true; }
+
+ // allow user to change location ID if another exists with the same ID
+ return $scope.storageConfig.filter(function(sc) {
+ return sc.location === location;
+ }).length >= 2;
+ };
+
+ $scope.allowRemoveStorageConfig = function(location) {
+ return $scope.storageConfig.length > 1 && $scope.allowChangeLocationStorageConfig(location);
+ };
+
+ $scope.canAddStorageConfig = function() {
+ return $scope.config &&
+ $scope.config.FEATURE_STORAGE_REPLICATION &&
+ $scope.storageConfig &&
+ (!$scope.storageConfig.length || $scope.storageConfig.length < 10);
+ };
+
+ $scope.addStorageConfig = function(location) {
+ var storageType = 'LocalStorage';
+
+ // Use last storage type by default
+ if ($scope.storageConfig.length) {
+ storageType = $scope.storageConfig[$scope.storageConfig.length-1].data[0];
+ }
+
+ $scope.storageConfig.push({
+ location: location || '',
+ defaultLocation: false,
+ data: [storageType, {}],
+ error: {},
+ });
+ };
+
+ $scope.removeStorageConfig = function(sc) {
+ $scope.storageConfig.splice($scope.storageConfig.indexOf(sc), 1);
+ };
+
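+ // Flattens the storage config array back into the DISTRIBUTED_STORAGE_* keys on the config object.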
+ var saveStorageConfig = function() {
+ var config = {};
+ var defaultLocations = [];
+ var preference = [];
+
+ $scope.storageConfig.forEach(function(sc) {
+ config[sc.location] = sc.data;
+ if (sc.defaultLocation) defaultLocations.push(sc.location);
+ preference.push(sc.location);
+ });
+
+ $scope.config.DISTRIBUTED_STORAGE_CONFIG = config;
+ $scope.config.DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = defaultLocations;
+ $scope.config.DISTRIBUTED_STORAGE_PREFERENCE = preference;
+ };
+
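+ // Returns a watcher that switches the GitLab endpoint between gitlab.com ('hosted')
+ // and a blank endpoint ('enterprise').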
+ var gitlabSelector = function(key) {
+ return function(value) {
+ if (!value || !$scope.config) { return; }
+
+ if (!$scope.config[key]) {
+ $scope.config[key] = {};
+ }
+
+ if (value == 'enterprise') {
+ if ($scope.config[key]['GITLAB_ENDPOINT'] == 'https://gitlab.com/') {
+ $scope.config[key]['GITLAB_ENDPOINT'] = '';
+ }
+ } else if (value == 'hosted') {
+ $scope.config[key]['GITLAB_ENDPOINT'] = 'https://gitlab.com/';
+ }
+ };
+ };
+
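+ // Returns a watcher that switches the GitHub endpoints between github.com ('hosted')
+ // and blank enterprise endpoints.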
+ var githubSelector = function(key) {
+ return function(value) {
+ if (!value || !$scope.config) { return; }
+
+ if (!$scope.config[key]) {
+ $scope.config[key] = {};
+ }
+
+ if (value == 'enterprise') {
+ if ($scope.config[key]['GITHUB_ENDPOINT'] == 'https://github.com/') {
+ $scope.config[key]['GITHUB_ENDPOINT'] = '';
+ }
+ delete $scope.config[key]['API_ENDPOINT'];
+ } else if (value == 'hosted') {
+ $scope.config[key]['GITHUB_ENDPOINT'] = 'https://github.com/';
+ $scope.config[key]['API_ENDPOINT'] = 'https://api.github.com/';
+ }
+ };
+ };
+
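+ // Looks up a dotted path (e.g. 'BUILDLOGS_REDIS.host') in the config object,
+ // returning null if any segment is missing.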
+ var getKey = function(config, path) {
+ if (!config) {
+ return null;
+ }
+
+ var parts = path.split('.');
+ var current = config;
+ for (var i = 0; i < parts.length; ++i) {
+ var part = parts[i];
+ if (!current[part]) { return null; }
+ current = current[part];
+ }
+ return current;
+ };
+
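+ // Derives the UI-only 'mapped' values (GitHub/GitLab kinds, Redis settings, TLS setting)
+ // from the loaded config.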
+ var initializeMappedLogic = function(config) {
+ var gle = getKey(config, 'GITHUB_LOGIN_CONFIG.GITHUB_ENDPOINT');
+ var gte = getKey(config, 'GITHUB_TRIGGER_CONFIG.GITHUB_ENDPOINT');
+
+ $scope.mapped['GITHUB_LOGIN_KIND'] = gle == 'https://github.com/' ? 'hosted' : 'enterprise';
+ $scope.mapped['GITHUB_TRIGGER_KIND'] = gte == 'https://github.com/' ? 'hosted' : 'enterprise';
+
+ var glabe = getKey(config, 'GITLAB_TRIGGER_CONFIG.GITLAB_ENDPOINT');
+ $scope.mapped['GITLAB_TRIGGER_KIND'] = glabe == 'https://gitlab.com/' ? 'hosted' : 'enterprise';
+
+ $scope.mapped['redis'] = {};
+ $scope.mapped['redis']['host'] = getKey(config, 'BUILDLOGS_REDIS.host') || getKey(config, 'USER_EVENTS_REDIS.host');
+ $scope.mapped['redis']['port'] = getKey(config, 'BUILDLOGS_REDIS.port') || getKey(config, 'USER_EVENTS_REDIS.port');
+ $scope.mapped['redis']['password'] = getKey(config, 'BUILDLOGS_REDIS.password') || getKey(config, 'USER_EVENTS_REDIS.password');
+
+ $scope.mapped['TLS_SETTING'] = 'none';
+ if (config['PREFERRED_URL_SCHEME'] == 'https') {
+ if (config['EXTERNAL_TLS_TERMINATION'] === true) {
+ $scope.mapped['TLS_SETTING'] = 'external-tls';
+ } else {
+ $scope.mapped['TLS_SETTING'] = 'internal-tls';
+ }
+ }
+ };
+
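+ // Applies the mapped TLS setting back onto PREFERRED_URL_SCHEME and EXTERNAL_TLS_TERMINATION.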
+ var tlsSetter = function(value) {
+ if (value == null || !$scope.config) { return; }
+
+ switch (value) {
+ case 'none':
+ $scope.config['PREFERRED_URL_SCHEME'] = 'http';
+ delete $scope.config['EXTERNAL_TLS_TERMINATION'];
+ return;
+
+ case 'external-tls':
+ $scope.config['PREFERRED_URL_SCHEME'] = 'https';
+ $scope.config['EXTERNAL_TLS_TERMINATION'] = true;
+ return;
+
+ case 'internal-tls':
+ $scope.config['PREFERRED_URL_SCHEME'] = 'https';
+ delete $scope.config['EXTERNAL_TLS_TERMINATION'];
+ return;
+ }
+ };
+
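+ // Returns a watcher that mirrors a single Redis setting into both BUILDLOGS_REDIS
+ // and USER_EVENTS_REDIS.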
+ var redisSetter = function(keyname) {
+ return function(value) {
+ if (value == null || !$scope.config) { return; }
+
+ if (!$scope.config['BUILDLOGS_REDIS']) {
+ $scope.config['BUILDLOGS_REDIS'] = {};
+ }
+
+ if (!$scope.config['USER_EVENTS_REDIS']) {
+ $scope.config['USER_EVENTS_REDIS'] = {};
+ }
+
+ if (!value) {
+ delete $scope.config['BUILDLOGS_REDIS'][keyname];
+ delete $scope.config['USER_EVENTS_REDIS'][keyname];
+ return;
+ }
+
+ $scope.config['BUILDLOGS_REDIS'][keyname] = value;
+ $scope.config['USER_EVENTS_REDIS'][keyname] = value;
+ };
+ };
+
+ // Add mapped logic.
+ $scope.$watch('mapped.GITHUB_LOGIN_KIND', githubSelector('GITHUB_LOGIN_CONFIG'));
+ $scope.$watch('mapped.GITHUB_TRIGGER_KIND', githubSelector('GITHUB_TRIGGER_CONFIG'));
+ $scope.$watch('mapped.GITLAB_TRIGGER_KIND', gitlabSelector('GITLAB_TRIGGER_CONFIG'));
+ $scope.$watch('mapped.TLS_SETTING', tlsSetter);
+
+ $scope.$watch('mapped.redis.host', redisSetter('host'));
+ $scope.$watch('mapped.redis.port', redisSetter('port'));
+ $scope.$watch('mapped.redis.password', redisSetter('password'));
+
+ // Remove extra fields (which are not allowed) from the storage config.
+ var updateFields = function(sc) {
+ var type = sc.data[0];
+ var configObject = sc.data[1];
+ var allowedFields = $scope.STORAGE_CONFIG_FIELDS[type];
+
+ // Remove any fields not allowed.
+ for (var fieldName in configObject) {
+ if (!configObject.hasOwnProperty(fieldName)) {
+ continue;
+ }
+
+ var isValidField = $.grep(allowedFields, function(field) {
+ return field.name == fieldName;
+ }).length > 0;
+
+ if (!isValidField) {
+ delete configObject[fieldName];
+ }
+ }
+
+ // Set any missing boolean fields to false.
+ for (var i = 0; i < allowedFields.length; ++i) {
+ if (allowedFields[i].kind == 'bool') {
+ configObject[allowedFields[i].name] = configObject[allowedFields[i].name] || false;
+ }
+ }
+ };
+
+ // Validate and update storage config on update.
+ var refreshStorageConfig = function() {
+ if (!$scope.config || !$scope.storageConfig) return;
+
+ var locationCounts = {};
+ var errors = [];
+ var valid = true;
+
+ $scope.storageConfig.forEach(function(sc) {
+ // remove extra fields from storage config
+ updateFields(sc);
+
+ if (!locationCounts[sc.location]) locationCounts[sc.location] = 0;
+ locationCounts[sc.location]++;
+ });
+
+ // validate storage config
+ $scope.storageConfig.forEach(function(sc) {
+ var error = {};
+
+ if ($scope.config.FEATURE_STORAGE_REPLICATION && sc.data[0] === 'LocalStorage') {
+ error.engine = 'Replication to a locally mounted directory is unsupported as it is only accessible on a single machine.';
+ valid = false;
+ }
+
+ if (locationCounts[sc.location] > 1) {
+ error.location = 'Location ID must be unique.';
+ valid = false;
+ }
+
+ errors.push(error);
+ });
+
+ $scope.storageConfigError = errors;
+ $scope.configform.$setValidity('storageConfig', valid);
+ };
+
+ $scope.$watch('config.INTERNAL_OIDC_SERVICE_ID', function(service_id) {
+ if (service_id) {
+ $scope.config['FEATURE_DIRECT_LOGIN'] = false;
+ }
+ });
+
+ $scope.$watch('config.FEATURE_STORAGE_REPLICATION', function() {
+ refreshStorageConfig();
+ });
+
+ $scope.$watch('config.FEATURE_USER_CREATION', function(value) {
+ if (!value) {
+ $scope.config['FEATURE_INVITE_ONLY_USER_CREATION'] = false;
+ }
+ });
+
+ $scope.$watch('storageConfig', function() {
+ refreshStorageConfig();
+ }, true);
+
+ $scope.$watch('config', function(value) {
+ $scope.mapped['$hasChanges'] = true;
+ }, true);
+
+ $scope.$watch('isActive', function(value) {
+ if (!value) { return; }
+
+ ApiService.scGetConfig().then(function(resp) {
+ $scope.config = resp['config'] || {};
+ initializeMappedLogic($scope.config);
+ initializeStorageConfig($scope);
+ $scope.mapped['$hasChanges'] = false;
+ }, ApiService.errorDisplay('Could not load config'));
+ });
+ }
+ };
+
+ return directiveDefinitionObject;
+ })
+
+ .directive('configParsedField', function ($timeout) {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlParsedField,
+ replace: false,
+ transclude: true,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding',
+ 'parser': '&parser',
+ 'serializer': '&serializer'
+ },
+ controller: function($scope, $element, $transclude) {
+ $scope.childScope = null;
+
+ $transclude(function(clone, scope) {
+ $scope.childScope = scope;
+ $scope.childScope['fields'] = {};
+ $element.append(clone);
+ });
+
+ $scope.childScope.$watch('fields', function(value) {
+ // Note: We need the timeout here because Angular starts the digest of the
+ // parent scope AFTER the child scope, which means it can end up one action
+ // behind. The timeout ensures that the parent scope will be fully digest-ed
+ // and then we update the binding. Yes, this is a hack :-/.
+ $timeout(function() {
+ $scope.binding = $scope.serializer({'fields': value});
+ });
+ }, true);
+
+ $scope.$watch('binding', function(value) {
+ var parsed = $scope.parser({'value': value});
+ for (var key in parsed) {
+ if (parsed.hasOwnProperty(key)) {
+ $scope.childScope['fields'][key] = parsed[key];
+ }
+ }
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configVariableField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlVarField,
+ replace: false,
+ transclude: true,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding'
+ },
+ controller: function($scope, $element) {
+ $scope.sections = {};
+ $scope.currentSection = null;
+
+ $scope.setSection = function(section) {
+ $scope.binding = section.value;
+ };
+
+ this.addSection = function(section, element) {
+ $scope.sections[section.value] = {
+ 'title': section.valueTitle,
+ 'value': section.value,
+ 'element': element
+ };
+
+ element.hide();
+
+ if (!$scope.binding) {
+ $scope.binding = section.value;
+ }
+ };
+
+ $scope.$watch('binding', function(binding) {
+ if (!binding) { return; }
+
+ if ($scope.currentSection) {
+ $scope.currentSection.element.hide();
+ }
+
+ if ($scope.sections[binding]) {
+ $scope.sections[binding].element.show();
+ $scope.currentSection = $scope.sections[binding];
+ }
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('variableSection', function () {
+ var directiveDefinitionObject = {
+ priority: 1,
+ templateUrl: urlVarField,
+ require: '^configVariableField',
+ replace: false,
+ transclude: true,
+ restrict: 'C',
+ scope: {
+ 'value': '@value',
+ 'valueTitle': '@valueTitle'
+ },
+ controller: function($scope, $element) {
+ var parentCtrl = $element.parent().controller('configVariableField');
+ parentCtrl.addSection($scope, $element);
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configListField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlListField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding',
+ 'placeholder': '@placeholder',
+ 'defaultValue': '@defaultValue',
+ 'itemTitle': '@itemTitle',
+ 'itemPattern': '@itemPattern'
+ },
+ controller: function($scope, $element) {
+ $scope.removeItem = function(item) {
+ var index = $scope.binding.indexOf(item);
+ if (index >= 0) {
+ $scope.binding.splice(index, 1);
+ }
+ };
+
+ $scope.addItem = function() {
+ if (!$scope.newItemName) {
+ return;
+ }
+
+ if (!$scope.binding) {
+ $scope.binding = [];
+ }
+
+ if ($scope.binding.indexOf($scope.newItemName) >= 0) {
+ return;
+ }
+
+ $scope.binding.push($scope.newItemName);
+ $scope.newItemName = null;
+ };
+
+ $scope.patternMap = {};
+
+ $scope.getRegexp = function(pattern) {
+ if (!pattern) {
+ pattern = '.*';
+ }
+
+ if ($scope.patternMap[pattern]) {
+ return $scope.patternMap[pattern];
+ }
+
+ return $scope.patternMap[pattern] = new RegExp(pattern);
+ };
+
+ $scope.$watch('binding', function(binding) {
+ if (!binding && $scope.defaultValue) {
+ $scope.binding = eval($scope.defaultValue);
+ }
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configFileField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlFileField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'filename': '@filename',
+ 'skipCheckFile': '@skipCheckFile',
+ 'hasFile': '=hasFile',
+ 'binding': '=?binding'
+ },
+ controller: function($scope, $element, Restangular, $upload) {
+ $scope.hasFile = false;
+
+ var setHasFile = function(hasFile) {
+ $scope.hasFile = hasFile;
+ $scope.binding = hasFile ? $scope.filename : null;
+ };
+
+ $scope.onFileSelect = function(files) {
+ if (files.length < 1) {
+ setHasFile(false);
+ return;
+ }
+
+ $scope.uploadProgress = 0;
+ $scope.upload = $upload.upload({
+ url: '/api/v1/superuser/config/file/' + $scope.filename,
+ method: 'POST',
+ data: {'_csrf_token': window.__token},
+ file: files[0],
+ }).progress(function(evt) {
+ $scope.uploadProgress = parseInt(100.0 * evt.loaded / evt.total);
+ if ($scope.uploadProgress == 100) {
+ $scope.uploadProgress = null;
+ setHasFile(true);
+ }
+ }).success(function(data, status, headers, config) {
+ $scope.uploadProgress = null;
+ setHasFile(true);
+ });
+ };
+
+ var loadStatus = function(filename) {
+ Restangular.one('superuser/config/file/' + filename).get().then(function(resp) {
+ setHasFile(false);
+ });
+ };
+
+ if ($scope.filename && $scope.skipCheckFile != "true") {
+ loadStatus($scope.filename);
+ }
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configBoolField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlBoolField,
+ replace: false,
+ transclude: true,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding'
+ },
+ controller: function($scope, $element) {
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configNumericField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlNumericField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding',
+ 'placeholder': '@placeholder',
+ 'defaultValue': '@defaultValue',
+ },
+ controller: function($scope, $element) {
+ $scope.bindinginternal = 0;
+
+ $scope.$watch('binding', function(binding) {
+ if ($scope.binding == 0 && $scope.defaultValue) {
+ $scope.binding = $scope.defaultValue * 1;
+ }
+
+ $scope.bindinginternal = $scope.binding;
+ });
+
+ $scope.$watch('bindinginternal', function(binding) {
+ var newValue = $scope.bindinginternal * 1;
+ if (isNaN(newValue)) {
+ newValue = 0;
+ }
+ $scope.binding = newValue;
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configContactsField', function () {
+ var directiveDefinitionObject = {
+ priority: 1,
+ templateUrl: urlContactsField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding'
+ },
+ controller: function($scope, $element) {
+ var padItems = function(items) {
+ // Remove the last item if both it and the second to last items are empty.
+ if (items.length > 1 && !items[items.length - 2].value && !items[items.length - 1].value) {
+ items.splice(items.length - 1, 1);
+ return;
+ }
+
+ // If the last item is non-empty, add a new item.
+ if (items.length == 0 || items[items.length - 1].value) {
+ items.push({'value': ''});
+ return;
+ }
+ };
+
+ $scope.itemHash = null;
+ $scope.$watch('items', function(items) {
+ if (!items) { return; }
+ padItems(items);
+
+ var itemHash = '';
+ var binding = [];
+ for (var i = 0; i < items.length; ++i) {
+ var item = items[i];
+ if (item.value && (URI(item.value).host() || URI(item.value).path())) {
+ binding.push(item.value);
+ itemHash += item.value;
+ }
+ }
+
+ $scope.itemHash = itemHash;
+ $scope.binding = binding;
+ }, true);
+
+ $scope.$watch('binding', function(binding) {
+ var current = binding || [];
+ var items = [];
+ var itemHash = '';
+ for (var i = 0; i < current.length; ++i) {
+ items.push({'value': current[i]})
+ itemHash += current[i];
+ }
+
+ if ($scope.itemHash != itemHash) {
+ $scope.items = items;
+ }
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configContactField', function () {
+ var directiveDefinitionObject = {
+ priority: 1,
+ templateUrl: urlContactField,
+ replace: false,
+ transclude: true,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding'
+ },
+ controller: function($scope, $element) {
+ $scope.kind = null;
+ $scope.value = null;
+
+ var updateBinding = function() {
+ if ($scope.value == null) { return; }
+ var value = $scope.value || '';
+
+ switch ($scope.kind) {
+ case 'mailto':
+ $scope.binding = 'mailto:' + value;
+ return;
+
+ case 'tel':
+ $scope.binding = 'tel:' + value;
+ return;
+
+ case 'irc':
+ $scope.binding = 'irc://' + value;
+ return;
+
+ default:
+ $scope.binding = value;
+ return;
+ }
+ };
+
+ $scope.$watch('kind', updateBinding);
+ $scope.$watch('value', updateBinding);
+
+ $scope.$watch('binding', function(value) {
+ if (!value) {
+ $scope.kind = null;
+ $scope.value = null;
+ return;
+ }
+
+ var uri = URI(value);
+ $scope.kind = uri.scheme();
+
+ switch ($scope.kind) {
+ case 'mailto':
+ case 'tel':
+ $scope.value = uri.path();
+ break;
+
+ case 'irc':
+ $scope.value = value.substr('irc://'.length);
+ break;
+
+ default:
+ $scope.kind = 'http';
+ $scope.value = value;
+ break;
+ }
+ });
+
+ $scope.getPlaceholder = function(kind) {
+ switch (kind) {
+ case 'mailto':
+ return 'some@example.com';
+
+ case 'tel':
+ return '555-555-5555';
+
+ case 'irc':
+ return 'myserver:port/somechannel';
+
+ default:
+ return 'http://some/url';
+ }
+ };
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configMapField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlMapField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding',
+ 'keys': '=keys'
+ },
+ controller: function($scope, $element) {
+ $scope.newKey = null;
+ $scope.newValue = null;
+
+ $scope.hasValues = function(binding) {
+ return binding && Object.keys(binding).length;
+ };
+
+ $scope.removeKey = function(key) {
+ delete $scope.binding[key];
+ };
+
+ $scope.addEntry = function() {
+ if (!$scope.newKey || !$scope.newValue) { return; }
+
+ $scope.binding = $scope.binding || {};
+ $scope.binding[$scope.newKey] = $scope.newValue;
+ $scope.newKey = null;
+ $scope.newValue = null;
+ }
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configServiceKeyField', function (ApiService) {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlServiceKeyField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'serviceName': '@serviceName',
+ },
+ controller: function($scope, $element) {
+ $scope.foundKeys = [];
+ $scope.loading = false;
+ $scope.loadError = false;
+ $scope.hasValidKey = false;
+ $scope.hasValidKeyStr = null;
+
+ $scope.updateKeys = function() {
+ $scope.foundKeys = [];
+ $scope.loading = true;
+
+ ApiService.listServiceKeys().then(function(resp) {
+ $scope.loading = false;
+ $scope.loadError = false;
+
+ resp['keys'].forEach(function(key) {
+ if (key['service'] == $scope.serviceName) {
+ $scope.foundKeys.push(key);
+ }
+ });
+
+ $scope.hasValidKey = checkValidKey($scope.foundKeys);
+ $scope.hasValidKeyStr = $scope.hasValidKey ? 'true' : '';
+ }, function() {
+ $scope.loading = false;
+ $scope.loadError = true;
+ });
+ };
+
+ // Perform initial loading of the keys.
+ $scope.updateKeys();
+
+ $scope.isKeyExpired = function(key) {
+ if (key.expiration_date != null) {
+ var expiration_date = moment(key.expiration_date);
+ return moment().isAfter(expiration_date);
+ }
+ return false;
+ };
+
+ $scope.showRequestServiceKey = function(opt_newKey) {
+ $scope.requestKeyInfo = {
+ 'service': $scope.serviceName,
+ 'newKey': opt_newKey
+ };
+ };
+
+ $scope.handleKeyCreated = function() {
+ $scope.updateKeys();
+ };
+
+ var checkValidKey = function(keys) {
+ for (var i = 0; i < keys.length; ++i) {
+ var key = keys[i];
+ if (!key.approval) {
+ continue;
+ }
+
+ if ($scope.isKeyExpired(key)) {
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+ };
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configStringField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlStringField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding',
+ 'placeholder': '@placeholder',
+ 'pattern': '@pattern',
+ 'defaultValue': '@defaultValue',
+ 'validator': '&validator',
+ 'isOptional': '=isOptional'
+ },
+ controller: function($scope, $element) {
+ var firstSet = true;
+
+ $scope.patternMap = {};
+
+ $scope.getRegexp = function(pattern) {
+ if (!pattern) {
+ pattern = '.*';
+ }
+
+ if ($scope.patternMap[pattern]) {
+ return $scope.patternMap[pattern];
+ }
+
+ return $scope.patternMap[pattern] = new RegExp(pattern);
+ };
+
+ $scope.$watch('binding', function(binding) {
+ if (firstSet && !binding && $scope.defaultValue) {
+ $scope.binding = $scope.defaultValue;
+ firstSet = false;
+ }
+
+ $scope.errorMessage = $scope.validator({'value': binding || ''});
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configPasswordField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlPasswordField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding',
+ 'placeholder': '@placeholder',
+ 'defaultValue': '@defaultValue',
+ 'validator': '&validator',
+ 'isOptional': '=isOptional'
+ },
+ controller: function($scope, $element) {
+ var firstSet = true;
+
+ $scope.$watch('binding', function(binding) {
+ if (firstSet && !binding && $scope.defaultValue) {
+ $scope.binding = $scope.defaultValue;
+ firstSet = false;
+ }
+ $scope.errorMessage = $scope.validator({'value': binding || ''});
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configStringListField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlStringListField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ 'binding': '=binding',
+ 'itemTitle': '@itemTitle',
+ 'itemDelimiter': '@itemDelimiter',
+ 'placeholder': '@placeholder',
+ 'isOptional': '=isOptional'
+ },
+ controller: function($scope, $element) {
+ $scope.$watch('internalBinding', function(value) {
+ if (value) {
+ $scope.binding = value.split($scope.itemDelimiter);
+ }
+ });
+
+ $scope.$watch('binding', function(value) {
+ if (value) {
+ $scope.internalBinding = value.join($scope.itemDelimiter);
+ }
+ });
+ }
+ };
+ return directiveDefinitionObject;
+ })
+
+ .directive('configCertificatesField', function () {
+ var directiveDefinitionObject = {
+ priority: 0,
+ templateUrl: urlCertField,
+ replace: false,
+ transclude: false,
+ restrict: 'C',
+ scope: {
+ },
+ controller: function($scope, $element, $upload, ApiService, UserService) {
+ $scope.resetUpload = 0;
+ $scope.certsUploading = false;
+
+ var loadCertificates = function() {
+ $scope.certificatesResource = ApiService.getCustomCertificatesAsResource().get(function(resp) {
+ $scope.certInfo = resp;
+ $scope.certsUploading = false;
+ });
+ };
+
+ loadCertificates();
+
+ $scope.handleCertsSelected = function(files, callback) {
+ $scope.certsUploading = true;
+ $upload.upload({
+ url: '/api/v1/superuser/customcerts/' + files[0].name,
+ method: 'POST',
+ data: {'_csrf_token': window.__token},
+ file: files[0]
+ }).success(function() {
+ callback(true);
+ $scope.resetUpload++;
+ loadCertificates();
+ }).error(function(r) {
+ bootbox.alert('Could not upload certificate')
+ callback(false);
+ $scope.resetUpload++;
+ loadCertificates();
+ });
+ };
+
+ $scope.deleteCert = function(path) {
+ var errorDisplay = ApiService.errorDisplay('Could not delete certificate');
+ var params = {
+ 'certpath': path
+ };
+
+ ApiService.deleteCustomCertificate(null, params).then(loadCertificates, errorDisplay);
+ };
+ }
+ };
+ return directiveDefinitionObject;
+ });
diff --git a/config_app/js/main.ts b/config_app/js/main.ts
new file mode 100644
index 000000000..cdd326001
--- /dev/null
+++ b/config_app/js/main.ts
@@ -0,0 +1,37 @@
+// imports shims, etc
+import 'core-js';
+
+import * as angular from 'angular';
+import { ConfigAppModule } from './config-app.module';
+import { bundle } from 'ng-metadata/core';
+
+// load all app dependencies
+require('../static/lib/angular-file-upload.min.js');
+require('../../static/js/tar');
+
+const ng1QuayModule: string = bundle(ConfigAppModule, []).name;
+angular.module('quay-config', [ng1QuayModule])
+ .run(() => {
+ });
+
+declare var require: any;
+function requireAll(r) {
+ r.keys().forEach(r);
+}
+
+// load all services
+requireAll(require.context('./services', true, /\.js$/));
+
+
+// load all the components after services
+requireAll(require.context('./setup', true, /\.js$/));
+requireAll(require.context('./core-config-setup', true, /\.js$/));
+requireAll(require.context('./components', true, /\.js$/));
+
+// load config-app specific css
+requireAll(require.context('../static/css', true, /\.css$/));
+
+
+// Load all the main quay css
+requireAll(require.context('../../static/css', true, /\.css$/));
+requireAll(require.context('../../static/lib', true, /\.css$/));
diff --git a/config_app/js/services/angular-poll-channel.js b/config_app/js/services/angular-poll-channel.js
new file mode 100644
index 000000000..697a04e15
--- /dev/null
+++ b/config_app/js/services/angular-poll-channel.js
@@ -0,0 +1,107 @@
+/**
+ * Specialized class for conducting an HTTP poll, while properly preventing multiple calls.
+ */
+angular.module('quay-config').factory('AngularPollChannel',
+ ['ApiService', '$timeout', 'DocumentVisibilityService', 'CORE_EVENT', '$rootScope',
+ function(ApiService, $timeout, DocumentVisibilityService, CORE_EVENT, $rootScope) {
+ var _PollChannel = function(scope, requester, opt_sleeptime) {
+ this.scope_ = scope;
+ this.requester_ = requester;
+ this.sleeptime_ = opt_sleeptime || (60 * 1000 /* 60s */);
+ this.timer_ = null;
+
+ this.working = false;
+ this.polling = false;
+ this.skipping = false;
+
+ var that = this;
+
+ var visibilityHandler = $rootScope.$on(CORE_EVENT.DOC_VISIBILITY_CHANGE, function() {
+ // If the poll channel was skipping because the document was hidden, poll again immediately.
+ if (that.skipping && !DocumentVisibilityService.isHidden()) {
+ that.call_();
+ }
+ });
+
+ scope.$on('$destroy', function() {
+ that.stop();
+ visibilityHandler();
+ });
+ };
+
+ _PollChannel.prototype.setSleepTime = function(sleepTime) {
+ this.sleeptime_ = sleepTime;
+ this.stop();
+ this.start(true);
+ };
+
+ _PollChannel.prototype.stop = function() {
+ if (this.timer_) {
+ $timeout.cancel(this.timer_);
+ this.timer_ = null;
+ this.polling = false;
+ }
+
+ this.skipping = false;
+ this.working = false;
+ };
+
+ _PollChannel.prototype.start = function(opt_skipFirstCall) {
+ // Make sure we invoke call outside the normal digest cycle, since
+ // we'll call $scope.$apply ourselves.
+ var that = this;
+ setTimeout(function() {
+ if (opt_skipFirstCall) {
+ that.setupTimer_();
+ return;
+ }
+
+ that.call_();
+ }, 0);
+ };
+
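+ // Performs a single poll. If the document is hidden the call is skipped (and retried later);
+ // otherwise the requester runs and polling continues only if it reports success.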
+ _PollChannel.prototype.call_ = function() {
+ if (this.working) { return; }
+
+ // If the document is currently hidden, skip the call.
+ if (DocumentVisibilityService.isHidden()) {
+ this.skipping = true;
+ this.setupTimer_();
+ return;
+ }
+
+ var that = this;
+ this.working = true;
+
+ $timeout(function() {
+ that.requester_(function(status) {
+ if (status) {
+ that.working = false;
+ that.skipping = false;
+ that.setupTimer_();
+ } else {
+ that.stop();
+ }
+ });
+ }, 0);
+ };
+
+ _PollChannel.prototype.setupTimer_ = function() {
+ if (this.timer_) { return; }
+
+ var that = this;
+ this.polling = true;
+ this.timer_ = $timeout(function() {
+ that.timer_ = null;
+ that.call_();
+ }, this.sleeptime_)
+ };
+
+ var service = {
+ 'create': function(scope, requester, opt_sleeptime) {
+ return new _PollChannel(scope, requester, opt_sleeptime);
+ }
+ };
+
+ return service;
+}]);
diff --git a/config_app/js/services/api-service.js b/config_app/js/services/api-service.js
new file mode 100644
index 000000000..814e25a45
--- /dev/null
+++ b/config_app/js/services/api-service.js
@@ -0,0 +1,335 @@
+/**
+ * Service which exposes the server-defined API as a nice set of helper methods and automatic
+ * callbacks. Any method defined on the server is exposed here as an equivalent method. Also
+ * defines some helper functions for working with API responses.
+ */
+angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilService', function(Restangular, $q, UtilService) {
+ var apiService = {};
+
+ if (!window.__endpoints) {
+ return apiService;
+ }
+
+ var getResource = function(getMethod, operation, opt_parameters, opt_background) {
+ var resource = {};
+ resource.withOptions = function(options) {
+ this.options = options;
+ return this;
+ };
+
+ resource.get = function(processor, opt_errorHandler) {
+ var options = this.options;
+ var result = {
+ 'loading': true,
+ 'value': null,
+ 'hasError': false
+ };
+
+ getMethod(options, opt_parameters, opt_background, true).then(function(resp) {
+ result.value = processor(resp);
+ result.loading = false;
+ }, function(resp) {
+ result.hasError = true;
+ result.loading = false;
+ if (opt_errorHandler) {
+ opt_errorHandler(resp);
+ }
+ });
+
+ return result;
+ };
+
+ return resource;
+ };
+
+ var buildUrl = function(path, parameters) {
+ // We already have /api/v1/ on the URLs, so remove them from the paths.
+ path = path.substr('/api/v1/'.length, path.length);
+
+ // Build the path, adjusted with the inline parameters.
+ var used = {};
+ var url = '';
+ for (var i = 0; i < path.length; ++i) {
+ var c = path[i];
+ if (c == '{') {
+ var end = path.indexOf('}', i);
+ var varName = path.substr(i + 1, end - i - 1);
+
+ if (!parameters[varName]) {
+ throw new Error('Missing parameter: ' + varName);
+ }
+
+ used[varName] = true;
+ url += parameters[varName];
+ i = end;
+ continue;
+ }
+
+ url += c;
+ }
+
+ // Append any query parameters.
+ var isFirst = true;
+ for (var paramName in parameters) {
+ if (!parameters.hasOwnProperty(paramName)) { continue; }
+ if (used[paramName]) { continue; }
+
+ var value = parameters[paramName];
+ if (value) {
+ url += isFirst ? '?' : '&';
+ url += paramName + '=' + encodeURIComponent(value)
+ isFirst = false;
+ }
+ }
+
+ return url;
+ };
+
+ var getGenericOperationName = function(userOperationName) {
+ return userOperationName.replace('User', '');
+ };
+
+ var getMatchingUserOperationName = function(orgOperationName, method, userRelatedResource) {
+ if (userRelatedResource) {
+ if (userRelatedResource[method.toLowerCase()]) {
+ return userRelatedResource[method.toLowerCase()]['operationId'];
+ }
+ }
+
+ throw new Error('Could not find user operation matching org operation: ' + orgOperationName);
+ };
+
+ var freshLoginInProgress = [];
+ var reject = function(msg) {
+ for (var i = 0; i < freshLoginInProgress.length; ++i) {
+ freshLoginInProgress[i].deferred.reject({'data': {'message': msg}});
+ }
+ freshLoginInProgress = [];
+ };
+
+ var retry = function() {
+ for (var i = 0; i < freshLoginInProgress.length; ++i) {
+ freshLoginInProgress[i].retry();
+ }
+ freshLoginInProgress = [];
+ };
+
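+ // Wraps an operation's failure path: when the server responds 401 with fresh_login_required,
+ // prompts the user to re-verify their password and retries the pending operation(s).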
+ var freshLoginFailCheck = function(opName, opArgs) {
+ return function(resp) {
+ var deferred = $q.defer();
+
+ // If the error is a fresh login required, show the dialog.
+ // TODO: remove error_type (old style error)
+ var fresh_login_required = resp.data['title'] == 'fresh_login_required' || resp.data['error_type'] == 'fresh_login_required';
+ if (resp.status == 401 && fresh_login_required) {
+ var retryOperation = function() {
+ apiService[opName].apply(apiService, opArgs).then(function(resp) {
+ deferred.resolve(resp);
+ }, function(resp) {
+ deferred.reject(resp);
+ });
+ };
+
+ var verifyNow = function() {
+ if (!$('#freshPassword').val()) {
+ return;
+ }
+
+ var info = {
+ 'password': $('#freshPassword').val()
+ };
+
+ $('#freshPassword').val('');
+
+ // Conduct the sign in of the user.
+ apiService.verifyUser(info).then(function() {
+ // On success, retry the pending operations. If they succeed, resolve the
+ // deferred promise with the result; otherwise, reject it.
+ retry();
+ }, function(resp) {
+ // Reject with the sign in error.
+ reject('Invalid verification credentials');
+ });
+ };
+
+ // Add the retry call to the in progress list. If there is more than a single
+ // in progress call, we skip showing the dialog (since it has already been
+ // shown).
+ freshLoginInProgress.push({
+ 'deferred': deferred,
+ 'retry': retryOperation
+ })
+
+ if (freshLoginInProgress.length > 1) {
+ return deferred.promise;
+ }
+
+ var box = bootbox.dialog({
+ "message": 'It has been more than a few minutes since you last logged in, ' +
+ 'so please verify your password to perform this sensitive operation:' +
+ '<form><input id="freshPassword" class="form-control" type="password" placeholder="Password"></form>',
+ "title": 'Please Verify',
+ "buttons": {
+ "verify": {
+ "label": "Verify",
+ "className": "btn-success btn-continue",
+ "callback": verifyNow
+ },
+ "close": {
+ "label": "Cancel",
+ "className": "btn-default",
+ "callback": function() {
+ reject('Verification canceled')
+ }
+ }
+ }
+ });
+
+ box.bind('shown.bs.modal', function(){
+ box.find("input").focus();
+ box.find("form").submit(function() {
+ if (!$('#freshPassword').val()) { return; }
+
+ box.modal('hide');
+ verifyNow();
+ });
+ });
+
+ // Return a new promise. We'll accept or reject it based on the result
+ // of the login.
+ return deferred.promise;
+ }
+
+ // Otherwise, we just 'raise' the error via the reject method on the promise.
+ return $q.reject(resp);
+ };
+ };
+
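+ // Builds an ApiService method for a single API operation; GET operations also
+ // receive an '<operation>AsResource' variant.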
+ var buildMethodsForOperation = function(operation, method, path, resourceMap) {
+ var operationName = operation['operationId'];
+ var urlPath = path['x-path'];
+
+ // Add the operation itself.
+ apiService[operationName] = function(opt_options, opt_parameters, opt_background, opt_forceget, opt_responseType) {
+ var one = Restangular.one(buildUrl(urlPath, opt_parameters));
+
+ if (opt_background || opt_responseType) {
+ let httpConfig = {};
+
+ if (opt_background) {
+ httpConfig['ignoreLoadingBar'] = true;
+ }
+ if (opt_responseType) {
+ httpConfig['responseType'] = opt_responseType;
+ }
+
+ one.withHttpConfig(httpConfig);
+ }
+
+ var opObj = one[opt_forceget ? 'get' : 'custom' + method.toUpperCase()](opt_options);
+
+ // If the operation requires_fresh_login, then add a specialized error handler that
+ // will defer the operation's result if sudo is requested.
+ if (operation['x-requires-fresh-login']) {
+ opObj = opObj.catch(freshLoginFailCheck(operationName, arguments));
+ }
+ return opObj;
+ };
+
+ // If the method for the operation is a GET, add an operationAsResource method.
+ if (method == 'get') {
+ apiService[operationName + 'AsResource'] = function(opt_parameters, opt_background) {
+ var getMethod = apiService[operationName];
+ return getResource(getMethod, operation, opt_parameters, opt_background);
+ };
+ }
+
+ // If the operation has a user-related counterpart, create a generic method that dispatches
+ // to either the organization or the user version of the operation, depending on whether an
+ // organization name is given.
+ if (path['x-user-related']) {
+ var userOperationName = getMatchingUserOperationName(operationName, method, resourceMap[path['x-user-related']]);
+ var genericOperationName = getGenericOperationName(userOperationName);
+ apiService[genericOperationName] = function(orgname, opt_options, opt_parameters, opt_background) {
+ if (orgname) {
+ if (orgname.name) {
+ orgname = orgname.name;
+ }
+
+ var params = jQuery.extend({'orgname' : orgname}, opt_parameters || {}, opt_background);
+ return apiService[operationName](opt_options, params);
+ } else {
+ return apiService[userOperationName](opt_options, opt_parameters, opt_background);
+ }
+ };
+ }
+ };
+
+
+ var allowedMethods = ['get', 'post', 'put', 'delete'];
+ var resourceMap = {};
+ var forEachOperation = function(callback) {
+ for (var path in window.__endpoints) {
+ if (!window.__endpoints.hasOwnProperty(path)) {
+ continue;
+ }
+
+ for (var method in window.__endpoints[path]) {
+ if (!window.__endpoints[path].hasOwnProperty(method)) {
+ continue;
+ }
+
+ if (allowedMethods.indexOf(method.toLowerCase()) < 0) { continue; }
+ callback(window.__endpoints[path][method], method, window.__endpoints[path]);
+ }
+ }
+ };
+
+ // Build the map of resource names to their objects.
+ forEachOperation(function(operation, method, path) {
+ resourceMap[path['x-name']] = path;
+ });
+
+ // Construct the methods for each API endpoint.
+ forEachOperation(function(operation, method, path) {
+ buildMethodsForOperation(operation, method, path, resourceMap);
+ });
+
+ apiService.getErrorMessage = function(resp, defaultMessage) {
+ var message = defaultMessage;
+ if (resp && resp['data']) {
+ //TODO: remove error_message and error_description (old style error)
+ message = resp['data']['detail'] || resp['data']['error_message'] || resp['data']['message'] || resp['data']['error_description'] || message;
+ }
+
+ return message;
+ };
+
+ apiService.errorDisplay = function(defaultMessage, opt_handler) {
+ return function(resp) {
+ var message = apiService.getErrorMessage(resp, defaultMessage);
+ if (opt_handler) {
+ var handlerMessage = opt_handler(resp);
+ if (handlerMessage) {
+ message = handlerMessage;
+ }
+ }
+
+ message = UtilService.stringToHTML(message);
+ bootbox.dialog({
+ "message": message,
+ "title": defaultMessage || 'Request Failure',
+ "buttons": {
+ "close": {
+ "label": "Close",
+ "className": "btn-primary"
+ }
+ }
+ });
+ };
+ };
+
+ return apiService;
+}]);
diff --git a/config_app/js/services/container-service.js b/config_app/js/services/container-service.js
new file mode 100644
index 000000000..31b495176
--- /dev/null
+++ b/config_app/js/services/container-service.js
@@ -0,0 +1,43 @@
+/**
+ * Helper service for working with the registry's container. Only works in enterprise.
+ */
+angular.module('quay-config')
+ .factory('ContainerService', ['ApiService', '$timeout', 'Restangular',
+ function(ApiService, $timeout, Restangular) {
+ var containerService = {};
+ containerService.restartContainer = function(callback) {
+ ApiService.errorDisplay('Removed Endpoint. This error should never be seen.')();
+ };
+
+ containerService.scheduleStatusCheck = function(callback, opt_config) {
+ $timeout(function() {
+ containerService.checkStatus(callback, opt_config);
+ }, 2000);
+ };
+
+ containerService.checkStatus = function(callback, opt_config) {
+ var errorHandler = function(resp) {
+ if (resp.status == 404 || resp.status == 502 || resp.status == -1) {
+ // Container has not yet come back up, so we schedule another check.
+ containerService.scheduleStatusCheck(callback, opt_config);
+ return;
+ }
+
+ return ApiService.errorDisplay('Cannot load status. Please report this to support')(resp);
+ };
+
+ // If config is specified, override the API base URL from this point onward.
+ // TODO: Find a better way than this. This is safe, since this will only be called
+ // for a restart, but it is still ugly.
+ if (opt_config && opt_config['SERVER_HOSTNAME']) {
+ var scheme = opt_config['PREFERRED_URL_SCHEME'] || 'http';
+ var baseUrl = scheme + '://' + opt_config['SERVER_HOSTNAME'] + '/api/v1/';
+ Restangular.setBaseUrl(baseUrl);
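+        // For example (hypothetical values), {'PREFERRED_URL_SCHEME': 'https',
+        // 'SERVER_HOSTNAME': 'quay.example.com'} yields the base URL
+        // 'https://quay.example.com/api/v1/'.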
+ }
+
+ ApiService.scRegistryStatus(null, null, /* background */true)
+ .then(callback, errorHandler);
+ };
+
+ return containerService;
+ }]);
diff --git a/config_app/js/services/cookie-service.js b/config_app/js/services/cookie-service.js
new file mode 100644
index 000000000..af904124a
--- /dev/null
+++ b/config_app/js/services/cookie-service.js
@@ -0,0 +1,23 @@
+/**
+ * Helper service for working with cookies.
+ */
+angular.module('quay-config').factory('CookieService', ['$cookies', function($cookies) {
+ var cookieService = {};
+ cookieService.putPermanent = function(name, value) {
+ document.cookie = escape(name) + "=" + escape(value) + "; expires=Fri, 31 Dec 9999 23:59:59 GMT; path=/";
+ };
+
+ cookieService.putSession = function(name, value) {
+ $cookies.put(name, value);
+ };
+
+ cookieService.clear = function(name) {
+ $cookies.remove(name);
+ };
+
+ cookieService.get = function(name) {
+ return $cookies.get(name);
+ };
+
+ return cookieService;
+}]);
diff --git a/config_app/js/services/document-visibility-service.js b/config_app/js/services/document-visibility-service.js
new file mode 100644
index 000000000..59d935d8b
--- /dev/null
+++ b/config_app/js/services/document-visibility-service.js
@@ -0,0 +1,60 @@
+/**
+ * Helper service which fires off events when the document's visibility changes, as well as allowing
+ * other Angular code to query the state of the document's visibility directly.
+ */
+angular.module('quay-config').constant('CORE_EVENT', {
+ DOC_VISIBILITY_CHANGE: 'core.event.doc_visibility_change'
+});
+
+angular.module('quay-config').factory('DocumentVisibilityService', ['$rootScope', '$document', 'CORE_EVENT',
+ function($rootScope, $document, CORE_EVENT) {
+ var document = $document[0],
+ features,
+ detectedFeature;
+
+ function broadcastChangeEvent() {
+ $rootScope.$broadcast(CORE_EVENT.DOC_VISIBILITY_CHANGE,
+ document[detectedFeature.propertyName]);
+ }
+
+ features = {
+ standard: {
+ eventName: 'visibilitychange',
+ propertyName: 'hidden'
+ },
+ moz: {
+ eventName: 'mozvisibilitychange',
+ propertyName: 'mozHidden'
+ },
+ ms: {
+ eventName: 'msvisibilitychange',
+ propertyName: 'msHidden'
+ },
+ webkit: {
+ eventName: 'webkitvisibilitychange',
+ propertyName: 'webkitHidden'
+ }
+ };
+
+ Object.keys(features).some(function(feature) {
+ if (document[features[feature].propertyName] !== undefined) {
+ detectedFeature = features[feature];
+ return true;
+ }
+ });
+
+ if (detectedFeature) {
+ $document.on(detectedFeature.eventName, broadcastChangeEvent);
+ }
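+  // Consumers can react to changes by listening for the broadcast, e.g.
+  // (illustrative):
+  //   $scope.$on(CORE_EVENT.DOC_VISIBILITY_CHANGE, function(event, hidden) { ... });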
+
+ return {
+ /**
+ * Is the window currently hidden or not.
+ */
+ isHidden: function() {
+ if (detectedFeature) {
+ return document[detectedFeature.propertyName];
+ }
+ }
+ };
+}]);
\ No newline at end of file
diff --git a/config_app/js/services/features-config.js b/config_app/js/services/features-config.js
new file mode 100644
index 000000000..e655f32bf
--- /dev/null
+++ b/config_app/js/services/features-config.js
@@ -0,0 +1,91 @@
+/**
+ * Feature flags.
+ */
+angular.module('quay-config').factory('Features', [function() {
+ if (!window.__features) {
+ return {};
+ }
+
+ var features = window.__features;
+ features.getFeature = function(name, opt_defaultValue) {
+ var value = features[name];
+ if (value == null) {
+ return opt_defaultValue;
+ }
+ return value;
+ };
+
+ features.hasFeature = function(name) {
+ return !!features.getFeature(name);
+ };
+
+ features.matchesFeatures = function(list) {
+ for (var i = 0; i < list.length; ++i) {
+ var value = features.getFeature(list[i]);
+ if (!value) {
+ return false;
+ }
+ }
+ return true;
+ };
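+  // For example, Features.matchesFeatures(['BILLING', 'SUPER_USERS']) is true
+  // only when every listed flag is enabled.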
+
+ return features;
+}]);
+
+/**
+ * Application configuration.
+ */
+angular.module('quay-config').factory('Config', ['Features', function(Features) {
+ if (!window.__config) {
+ return {};
+ }
+
+ var config = window.__config;
+ config.getDomain = function() {
+ return config['SERVER_HOSTNAME'];
+ };
+
+ config.getHost = function(opt_auth) {
+ var auth = opt_auth;
+ if (auth) {
+ auth = auth + '@';
+ }
+
+ return config['PREFERRED_URL_SCHEME'] + '://' + auth + config['SERVER_HOSTNAME'];
+ };
+
+ config.getHttp = function() {
+ return config['PREFERRED_URL_SCHEME'];
+ };
+
+ config.getUrl = function(opt_path) {
+ var path = opt_path || '';
+ return config['PREFERRED_URL_SCHEME'] + '://' + config['SERVER_HOSTNAME'] + path;
+ };
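+  // For example, with PREFERRED_URL_SCHEME 'https' and SERVER_HOSTNAME
+  // 'quay.example.com' (hypothetical), getUrl('/api/v1/') returns
+  // 'https://quay.example.com/api/v1/'.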
+
+ config.getValue = function(name, opt_defaultValue) {
+ var value = config[name];
+ if (value == null) {
+ return opt_defaultValue;
+ }
+ return value;
+ };
+
+ config.getEnterpriseLogo = function(opt_defaultValue) {
+ if (!config.ENTERPRISE_LOGO_URL) {
+ if (opt_defaultValue) {
+ return opt_defaultValue;
+ }
+
+ if (Features.BILLING) {
+ return '/static/img/quay-horizontal-color.svg';
+ } else {
+ return '/static/img/QuayEnterprise_horizontal_color.svg';
+ }
+ }
+
+ return config.ENTERPRISE_LOGO_URL;
+ };
+
+ return config;
+}]);
\ No newline at end of file
diff --git a/config_app/js/services/services.types.ts b/config_app/js/services/services.types.ts
new file mode 100644
index 000000000..217824f6b
--- /dev/null
+++ b/config_app/js/services/services.types.ts
@@ -0,0 +1,15 @@
+export interface AngularPollChannel {
+ create: PollConstructor
+}
+
+type PollConstructor = (scope: MockAngularScope, requester: ShouldContinueCallback, opt_sleeptime?: number) => PollHandle;
+type MockAngularScope = {
+ '$on': Function
+};
+type ShouldContinueCallback = (shouldContinue: boolean) => void;
+
+export interface PollHandle {
+ start(opt_skipFirstCall?: boolean): void,
+ stop(): void,
+ setSleepTime(sleepTime: number): void,
+}
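+
+// Illustrative usage (names are assumptions, not part of this file): a
+// controller creates a handle that re-polls every 30s while the scope is alive:
+//   const handle: PollHandle = pollChannel.create($scope, fetchStatus, 30 * 1000);
+//   handle.start();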
diff --git a/config_app/js/services/user-service.js b/config_app/js/services/user-service.js
new file mode 100644
index 000000000..8c222b955
--- /dev/null
+++ b/config_app/js/services/user-service.js
@@ -0,0 +1,177 @@
+import * as Raven from 'raven-js';
+
+
+/**
+ * Service which monitors the current user session and provides methods for returning information
+ * about the user.
+ */
+angular.module('quay-config')
+ .factory('UserService', ['ApiService', 'CookieService', '$rootScope', 'Config', '$location', '$timeout',
+
+function(ApiService, CookieService, $rootScope, Config, $location, $timeout) {
+ var userResponse = {
+ verified: false,
+ anonymous: true,
+ username: null,
+ email: null,
+ organizations: [],
+ logins: [],
+ beforeload: true
+ };
+
+ var userService = {};
+
+ userService.hasEverLoggedIn = function() {
+ return CookieService.get('quay.loggedin') == 'true';
+ };
+
+ userService.updateUserIn = function(scope, opt_callback) {
+ scope.$watch(function () { return userService.currentUser(); }, function (currentUser) {
+ if (currentUser) {
+ $timeout(function(){
+ scope.user = currentUser;
+ if (opt_callback) {
+ opt_callback(currentUser);
+ }
+ }, 0, false);
+      }
+ }, true);
+ };
+
+ userService.load = function(opt_callback) {
+ var handleUserResponse = function(loadedUser) {
+ userResponse = loadedUser;
+
+ if (!userResponse.anonymous) {
+ if (Config.MIXPANEL_KEY) {
+ try {
+ mixpanel.identify(userResponse.username);
+ mixpanel.people.set({
+ '$email': userResponse.email,
+ '$username': userResponse.username,
+ 'verified': userResponse.verified
+ });
+ mixpanel.people.set_once({
+ '$created': new Date()
+ })
+ } catch (e) {
+ window.console.log(e);
+ }
+ }
+
+ if (Config.MARKETO_MUNCHKIN_ID && userResponse['marketo_user_hash']) {
+ var associateLeadBody = {'Email': userResponse.email};
+ if (window.Munchkin !== undefined) {
+ try {
+ Munchkin.munchkinFunction(
+ 'associateLead',
+ associateLeadBody,
+ userResponse['marketo_user_hash']
+ );
+ } catch (e) {
+ }
+ } else {
+ window.__quay_munchkin_queue.push([
+ 'associateLead',
+ associateLeadBody,
+ userResponse['marketo_user_hash']
+ ]);
+ }
+ }
+
+ if (window.Raven !== undefined) {
+ try {
+ Raven.setUser({
+ email: userResponse.email,
+ id: userResponse.username
+ });
+ } catch (e) {
+ window.console.log(e);
+ }
+ }
+
+ CookieService.putPermanent('quay.loggedin', 'true');
+ } else {
+ if (window.Raven !== undefined) {
+ Raven.setUser();
+ }
+ }
+
+ // If the loaded user has a prompt, redirect them to the update page.
+ if (loadedUser.prompts && loadedUser.prompts.length) {
+ $location.path('/updateuser');
+ return;
+ }
+
+ if (opt_callback) {
+ opt_callback(loadedUser);
+ }
+ };
+
+ ApiService.getLoggedInUser().then(function(loadedUser) {
+ handleUserResponse(loadedUser);
+ }, function() {
+ handleUserResponse({'anonymous': true});
+ });
+ };
+
+ userService.isOrganization = function(name) {
+ return !!userService.getOrganization(name);
+ };
+
+ userService.getOrganization = function(name) {
+ if (!userResponse || !userResponse.organizations) { return null; }
+ for (var i = 0; i < userResponse.organizations.length; ++i) {
+ var org = userResponse.organizations[i];
+ if (org.name == name) {
+ return org;
+ }
+ }
+
+ return null;
+ };
+
+ userService.isNamespaceAdmin = function(namespace) {
+ if (namespace == userResponse.username) {
+ return true;
+ }
+
+ var org = userService.getOrganization(namespace);
+ if (!org) {
+ return false;
+ }
+
+ return org.is_org_admin;
+ };
+
+ userService.isKnownNamespace = function(namespace) {
+ if (namespace == userResponse.username) {
+ return true;
+ }
+
+ var org = userService.getOrganization(namespace);
+ return !!org;
+ };
+
+ userService.getNamespace = function(namespace) {
+ var org = userService.getOrganization(namespace);
+ if (org) {
+ return org;
+ }
+
+ if (namespace == userResponse.username) {
+ return userResponse;
+ }
+
+ return null;
+ };
+
+ userService.currentUser = function() {
+ return userResponse;
+ };
+
+ // Update the user in the root scope.
+ userService.updateUserIn($rootScope);
+
+ return userService;
+}]);
diff --git a/config_app/js/services/util-service.js b/config_app/js/services/util-service.js
new file mode 100644
index 000000000..34f0a4191
--- /dev/null
+++ b/config_app/js/services/util-service.js
@@ -0,0 +1,83 @@
+/**
+ * Service which exposes various utility methods.
+ */
+angular.module('quay-config').factory('UtilService', ['$sanitize',
+ function($sanitize) {
+ var utilService = {};
+
+ var adBlockEnabled = null;
+
+ utilService.isAdBlockEnabled = function(callback) {
+ if (adBlockEnabled !== null) {
+ callback(adBlockEnabled);
+ return;
+ }
+
+ if(typeof blockAdBlock === 'undefined') {
+ callback(true);
+ return;
+ }
+
+ var bab = new BlockAdBlock({
+ checkOnLoad: false,
+ resetOnEnd: true
+ });
+
+ bab.onDetected(function() { adBlockEnabled = true; callback(true); });
+ bab.onNotDetected(function() { adBlockEnabled = false; callback(false); });
+ bab.check();
+ };
+
+ utilService.isEmailAddress = function(val) {
+    var emailRegex = /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
+ return emailRegex.test(val);
+ };
+
+ utilService.escapeHtmlString = function(text) {
+ var textStr = (text || '').toString();
+    var adjusted = textStr.replace(/&/g, "&amp;")
+      .replace(/</g, "&lt;")
+      .replace(/>/g, "&gt;")
+      .replace(/"/g, "&quot;")
+      .replace(/'/g, "&#39;");
+
+ return adjusted;
+ };
+
+ utilService.stringToHTML = function(text) {
+ text = utilService.escapeHtmlString(text);
+    text = text.replace(/\n/g, '<br>');
+ return text;
+ };
+
+ utilService.getRestUrl = function(args) {
+ var url = '';
+ for (var i = 0; i < arguments.length; ++i) {
+ if (i > 0) {
+ url += '/';
+ }
+ url += encodeURI(arguments[i])
+ }
+ return url;
+ };
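+  // For example, getRestUrl('superuser', 'config') returns 'superuser/config';
+  // each argument is URI-encoded and the pieces are joined with '/'.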
+
+ utilService.textToSafeHtml = function(text) {
+ return $sanitize(utilService.escapeHtmlString(text));
+ };
+
+ return utilService;
+ }])
+ .factory('CoreDialog', [() => {
+ var service = {};
+ service['fatal'] = function(title, message) {
+ bootbox.dialog({
+ "title": title,
+      "message": "" + message,
+ "buttons": {},
+ "className": "co-dialog fatal-error",
+ "closeButton": false
+ });
+ };
+
+ return service;
+ }]);
diff --git a/config_app/js/setup/setup.component.js b/config_app/js/setup/setup.component.js
new file mode 100644
index 000000000..65a076f31
--- /dev/null
+++ b/config_app/js/setup/setup.component.js
@@ -0,0 +1,319 @@
+import * as URI from 'urijs';
+const templateUrl = require('./setup.html');
+
+(function() {
+ /**
+   * The Setup page provides a guided GUI walkthrough for setting up Red Hat Quay.
+ */
+
+ angular.module('quay-config').directive('setup', () => {
+ const directiveDefinitionObject = {
+ priority: 1,
+ templateUrl,
+ replace: true,
+ transclude: true,
+ restrict: 'C',
+ scope: {
+ 'isActive': '=isActive',
+ 'configurationSaved': '&configurationSaved',
+ 'setupCompleted': '&setupCompleted',
+ },
+ controller: SetupCtrl,
+ };
+
+ return directiveDefinitionObject;
+ })
+
+ function SetupCtrl($scope, $timeout, ApiService, Features, UserService, ContainerService, CoreDialog) {
+ // if (!Features.SUPER_USERS) {
+ // return;
+ // }
+
+ $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9_\.\-]+(:[0-9]+)?$';
+
+ $scope.validateHostname = function(hostname) {
+ if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) {
+ return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.'
+ }
+
+ return null;
+ };
+
+ // Note: The values of the enumeration are important for isStepFamily. For example,
+ // *all* states under the "configuring db" family must start with "config-db".
+ $scope.States = {
+ // Loading the state of the product.
+ 'LOADING': 'loading',
+
+ // The configuration directory is missing.
+ 'MISSING_CONFIG_DIR': 'missing-config-dir',
+
+ // The config.yaml exists but it is invalid.
+ 'INVALID_CONFIG': 'config-invalid',
+
+ // DB is being configured.
+ 'CONFIG_DB': 'config-db',
+
+ // DB information is being validated.
+ 'VALIDATING_DB': 'config-db-validating',
+
+ // DB information is being saved to the config.
+ 'SAVING_DB': 'config-db-saving',
+
+ // A validation error occurred with the database.
+ 'DB_ERROR': 'config-db-error',
+
+ // Database is being setup.
+ 'DB_SETUP': 'setup-db',
+
+ // An error occurred when setting up the database.
+ 'DB_SETUP_ERROR': 'setup-db-error',
+
+ // A superuser is being configured.
+ 'CREATE_SUPERUSER': 'create-superuser',
+
+ // The superuser is being created.
+ 'CREATING_SUPERUSER': 'create-superuser-creating',
+
+ // An error occurred when setting up the superuser.
+ 'SUPERUSER_ERROR': 'create-superuser-error',
+
+ // The superuser was created successfully.
+ 'SUPERUSER_CREATED': 'create-superuser-created',
+
+ // General configuration is being setup.
+ 'CONFIG': 'config',
+
+ // The configuration is fully valid.
+ 'VALID_CONFIG': 'valid-config',
+
+ // The product is ready for use.
+ 'READY': 'ready'
+ }
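+    // For example, isStepFamily(States.VALIDATING_DB, States.CONFIG_DB) is true
+    // because 'config-db-validating' starts with 'config-db'.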
+
+ $scope.csrf_token = window.__token;
+ $scope.currentStep = $scope.States.LOADING;
+ $scope.errors = {};
+ $scope.stepProgress = [];
+ $scope.hasSSL = false;
+ $scope.hostname = null;
+ $scope.currentConfig = null;
+
+ $scope.currentState = {
+ 'hasDatabaseSSLCert': false
+ };
+
+ $scope.$watch('currentStep', function(currentStep) {
+ $scope.stepProgress = $scope.getProgress(currentStep);
+
+ switch (currentStep) {
+ case $scope.States.CONFIG:
+ $('#setupModal').modal('hide');
+ break;
+
+ case $scope.States.MISSING_CONFIG_DIR:
+ $scope.showMissingConfigDialog();
+ break;
+
+ case $scope.States.INVALID_CONFIG:
+ $scope.showInvalidConfigDialog();
+ break;
+
+ case $scope.States.DB_SETUP:
+ $scope.performDatabaseSetup();
+ // Fall-through.
+
+ case $scope.States.CREATE_SUPERUSER:
+ case $scope.States.CONFIG_DB:
+ case $scope.States.VALID_CONFIG:
+ case $scope.States.READY:
+ $('#setupModal').modal({
+ keyboard: false,
+ backdrop: 'static'
+ });
+ break;
+ }
+ });
+
+ $scope.restartContainer = function(state) {
+ $scope.currentStep = state;
+ ContainerService.restartContainer(function() {
+ $scope.checkStatus()
+ });
+ };
+
+ $scope.showSuperuserPanel = function() {
+ $('#setupModal').modal('hide');
+ var prefix = $scope.hasSSL ? 'https' : 'http';
+ var hostname = $scope.hostname;
+ if (!hostname) {
+ hostname = document.location.hostname;
+ if (document.location.port) {
+ hostname = hostname + ':' + document.location.port;
+ }
+ }
+
+ window.location = prefix + '://' + hostname + '/superuser';
+ };
+
+ $scope.configurationSaved = function(config) {
+ $scope.hasSSL = config['PREFERRED_URL_SCHEME'] == 'https';
+ $scope.hostname = config['SERVER_HOSTNAME'];
+ $scope.currentConfig = config;
+
+ $scope.currentStep = $scope.States.VALID_CONFIG;
+ };
+
+ $scope.getProgress = function(step) {
+ var isStep = $scope.isStep;
+ var isStepFamily = $scope.isStepFamily;
+ var States = $scope.States;
+
+ return [
+ isStepFamily(step, States.CONFIG_DB),
+ isStepFamily(step, States.DB_SETUP),
+ isStepFamily(step, States.CREATE_SUPERUSER),
+ isStep(step, States.CONFIG),
+ isStep(step, States.VALID_CONFIG),
+ isStep(step, States.READY)
+ ];
+ };
+
+ $scope.isStepFamily = function(step, family) {
+ if (!step) { return false; }
+ return step.indexOf(family) == 0;
+ };
+
+ $scope.isStep = function(step) {
+ for (var i = 1; i < arguments.length; ++i) {
+ if (arguments[i] == step) {
+ return true;
+ }
+ }
+ return false;
+ };
+
+ $scope.beginSetup = function() {
+ $scope.currentStep = $scope.States.CONFIG_DB;
+ };
+
+ $scope.showInvalidConfigDialog = function() {
+      var message = "The <code>config.yaml</code> file found in <code>conf/stack</code> could not be parsed.";
+ var title = "Invalid configuration file";
+ CoreDialog.fatal(title, message);
+ };
+
+
+ $scope.showMissingConfigDialog = function() {
+      var message = "A volume should be mounted into the container at <code>/conf/stack</code>: " +
+                    "<pre>docker run -v /path/to/config:/conf/stack</pre>" +
+                    "Once fixed, restart the container. For more information, " +
+                    "Read the Setup Guide";
+
+ var title = "Missing configuration volume";
+ CoreDialog.fatal(title, message);
+ };
+
+ $scope.parseDbUri = function(value) {
+ if (!value) { return null; }
+
+      // Format: mysql+pymysql://<username>:<password>@<server>/<database>
+ var uri = URI(value);
+ return {
+ 'kind': uri.protocol(),
+ 'username': uri.username(),
+ 'password': uri.password(),
+ 'server': uri.host(),
+ 'database': uri.path() ? uri.path().substr(1) : ''
+ };
+ };
+
+ $scope.serializeDbUri = function(fields) {
+ if (!fields['server']) { return ''; }
+ if (!fields['database']) { return ''; }
+
+ var uri = URI();
+ try {
+ uri = uri && uri.host(fields['server']);
+ uri = uri && uri.protocol(fields['kind']);
+ uri = uri && uri.username(fields['username']);
+ uri = uri && uri.password(fields['password']);
+ uri = uri && uri.path('/' + (fields['database'] || ''));
+ uri = uri && uri.toString();
+ } catch (ex) {
+ return '';
+ }
+
+ return uri;
+ };
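+    // Worked example (hypothetical values): {'kind': 'mysql+pymysql',
+    // 'username': 'quay', 'password': 'secret', 'server': 'db.example.com:3306',
+    // 'database': 'quay'} serializes to
+    // 'mysql+pymysql://quay:secret@db.example.com:3306/quay'; parseDbUri splits
+    // it back into the same fields.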
+
+ $scope.createSuperUser = function() {
+ $scope.currentStep = $scope.States.CREATING_SUPERUSER;
+ ApiService.scCreateInitialSuperuser($scope.superUser, null).then(function(resp) {
+ $scope.checkStatus();
+ }, function(resp) {
+ $scope.currentStep = $scope.States.SUPERUSER_ERROR;
+ $scope.errors.SuperuserCreationError = ApiService.getErrorMessage(resp, 'Could not create superuser');
+ });
+ };
+
+ $scope.performDatabaseSetup = function() {
+ $scope.currentStep = $scope.States.DB_SETUP;
+ ApiService.scSetupDatabase(null, null).then(function(resp) {
+ if (resp['error']) {
+ $scope.currentStep = $scope.States.DB_SETUP_ERROR;
+ $scope.errors.DatabaseSetupError = resp['error'];
+ } else {
+ $scope.currentStep = $scope.States.CREATE_SUPERUSER;
+ }
+      }, ApiService.errorDisplay('Could not set up the database. Please report this to support.'));
+ };
+
+ $scope.validateDatabase = function() {
+ $scope.currentStep = $scope.States.VALIDATING_DB;
+ $scope.databaseInvalid = null;
+
+ var data = {
+ 'config': {
+ 'DB_URI': $scope.databaseUri
+ },
+ };
+
+ if ($scope.currentState.hasDatabaseSSLCert) {
+ data['config']['DB_CONNECTION_ARGS'] = {
+ 'ssl': {
+ 'ca': 'conf/stack/database.pem'
+ }
+ };
+ }
+
+ var params = {
+ 'service': 'database'
+ };
+
+ ApiService.scValidateConfig(data, params).then(function(resp) {
+ var status = resp.status;
+
+ if (status) {
+ $scope.currentStep = $scope.States.SAVING_DB;
+ ApiService.scUpdateConfig(data, null).then(function(resp) {
+ $scope.checkStatus();
+ }, ApiService.errorDisplay('Cannot update config. Please report this to support'));
+ } else {
+ $scope.currentStep = $scope.States.DB_ERROR;
+ $scope.errors.DatabaseValidationError = resp.reason;
+ }
+ }, ApiService.errorDisplay('Cannot validate database. Please report this to support'));
+ };
+
+ $scope.checkStatus = function() {
+ ContainerService.checkStatus(function(resp) {
+ $scope.currentStep = resp['status'];
+ }, $scope.currentConfig);
+ };
+
+ // Load the initial status.
+ $scope.checkStatus();
+ };
+})();
diff --git a/config_app/js/setup/setup.html b/config_app/js/setup/setup.html
new file mode 100644
index 000000000..7b989804f
--- /dev/null
+++ b/config_app/js/setup/setup.html
@@ -0,0 +1,307 @@
+
+
+
+
+
+
+ Red Hat Quay Setup
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Almost done!
+
Configure your Redis database and other settings below
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ is currently being restarted
+
+ This can take several minutes. If the container does not restart on its own,
+ please re-execute the docker run
command.
+
+
+
+
+
Installation and setup of is complete
+ You can now invite users to join, create organizations and start pushing and pulling
+ repositories.
+
+
+ Note: SSL is enabled. Please make sure to visit with
+ an https prefix
+
+
+
+
+
+
All configuration has been validated and saved
+ The container must be restarted to apply the configuration changes.
+
+
+
+
+
The database has been setup and is ready
+ The container must be restarted to apply the configuration changes.
+
+
+
+
+
+
+ is currently setting up its database
+ schema
+
+ This can take several minutes.
+
+
+
+
+
+ Please enter the connection details for your empty database. The schema will be created in the following step.
+
+
+
+
+
+ Database Type:
+
+
+ MySQL
+ Postgres
+
+
+
+
+ Database Server:
+
+ >
+
+ The server (and optionally, custom port) where the database lives
+
+
+
+
+ Username:
+
+
+ This user must have full access to the database
+
+
+
+ Password:
+
+
+
+
+
+ Database Name:
+
+
+
+
+
+ SSL Certificate:
+
+
+ Optional SSL certificate (in PEM format) to use to connect to the database
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/config_app/static/css/config-setup-app-component.css b/config_app/static/css/config-setup-app-component.css
new file mode 100644
index 000000000..2d41544d9
--- /dev/null
+++ b/config_app/static/css/config-setup-app-component.css
@@ -0,0 +1,93 @@
+.config-setup-wrapper {
+ display: flex;
+}
+
+.config-setup_option {
+ font-size: 22px;
+ height: 250px;
+ display: flex;
+ flex: 1;
+ flex-direction: column;
+ align-items: center;
+ padding: 15px;
+ margin: 15px;
+ justify-content: space-evenly;
+}
+
+.config-setup_option i {
+ padding-bottom: 10px;
+}
+
+.config-setup_option div {
+ text-align: center;
+ min-height: 100px;
+}
+
+.config-setup_option:hover {
+ background-color: #dddddd;
+ text-decoration: none;
+}
+
+/* Overrides for fixing old quay styles*/
+
+.quay-config-app .alert-danger {
+ padding: 25px;
+ display: flex;
+}
+
+.quay-config-app .alert-danger:before {
+ content: "\f071";
+ font-family: Font Awesome\ 5 Free;
+ font-weight: 900;
+ font-size: 30px;
+ padding-right: 15px;
+ color: #c53c3f;
+ text-align: center;
+}
+
+.quay-config-app .co-alert.co-alert-success {
+ padding: 25px;
+ display: flex;
+ margin-bottom: 0;
+ text-align: left;
+}
+
+.quay-config-app .co-alert.co-alert-success:before {
+ font-family: Font Awesome\ 5 Free;
+ font-weight: 900;
+ font-size: 30px;
+ padding-right: 15px;
+ color: green;
+ text-align: center;
+ position: static;
+}
+
+.co-alert.co-alert-danger:after {
+ /* Ignore the exclamation mark, it also messes with spacing elements */
+ content: none;
+}
+
+/* Fixes the transition to font awesome 5 */
+.quay-config-app .co-alert.co-alert-warning::before {
+ font-family: Font Awesome\ 5 Free;
+ font-weight: 900;
+}
+
+.quay-config-app .co-alert.co-alert-info::before {
+ font-family: Font Awesome\ 5 Free;
+ font-weight: 900;
+}
+
+.quay-config-app .co-alert.co-alert-danger::after {
+ font-family: Font Awesome\ 5 Free;
+ font-weight: 900;
+ /* Font Awesome 5's icons are slightly bigger, so we have to adjust this one because it is inside another icon */
+ font-size: 12px;
+ top: 18px;
+ left: 20.75px;
+}
+
+.quay-config-app .co-modify-link::after {
+ font-family: Font Awesome\ 5 Free;
+ font-weight: 900;
+}
diff --git a/config_app/static/css/cor-option.css b/config_app/static/css/cor-option.css
new file mode 100644
index 000000000..97ae7887d
--- /dev/null
+++ b/config_app/static/css/cor-option.css
@@ -0,0 +1,8 @@
+.cor-options-menu .fa-cog {
+ color: #999;
+ cursor: pointer;
+}
+
+.open .fa-cog {
+ color: #428BCA;
+}
diff --git a/config_app/static/css/cor-title.css b/config_app/static/css/cor-title.css
new file mode 100644
index 000000000..ef199785a
--- /dev/null
+++ b/config_app/static/css/cor-title.css
@@ -0,0 +1,4 @@
+.cor-title {
+ display: flex;
+ justify-content: center;
+}
diff --git a/config_app/static/img/RH_Logo_Quay_Black_UX-horizontal.svg b/config_app/static/img/RH_Logo_Quay_Black_UX-horizontal.svg
new file mode 100644
index 000000000..ae73f2568
--- /dev/null
+++ b/config_app/static/img/RH_Logo_Quay_Black_UX-horizontal.svg
@@ -0,0 +1,116 @@
+
+
+
+image/svg+xml
\ No newline at end of file
diff --git a/config_app/static/img/network-tile.png b/config_app/static/img/network-tile.png
new file mode 100644
index 000000000..c27deaff2
Binary files /dev/null and b/config_app/static/img/network-tile.png differ
diff --git a/config_app/static/img/quay-logo.png b/config_app/static/img/quay-logo.png
new file mode 100644
index 000000000..031087b1d
Binary files /dev/null and b/config_app/static/img/quay-logo.png differ
diff --git a/config_app/static/img/quay_favicon.png b/config_app/static/img/quay_favicon.png
new file mode 100644
index 000000000..a1ae74d8b
Binary files /dev/null and b/config_app/static/img/quay_favicon.png differ
diff --git a/config_app/static/img/redis-small.png b/config_app/static/img/redis-small.png
new file mode 100644
index 000000000..c330a720e
Binary files /dev/null and b/config_app/static/img/redis-small.png differ
diff --git a/config_app/static/img/rocket.png b/config_app/static/img/rocket.png
new file mode 100644
index 000000000..b9ffddf39
Binary files /dev/null and b/config_app/static/img/rocket.png differ
diff --git a/config_app/static/lib/angular-file-upload.min.js b/config_app/static/lib/angular-file-upload.min.js
new file mode 100644
index 000000000..b9d0196f7
--- /dev/null
+++ b/config_app/static/lib/angular-file-upload.min.js
@@ -0,0 +1,2 @@
+/*! 1.4.0 */
+!function(){var a=angular.module("angularFileUpload",[]);a.service("$upload",["$http","$timeout",function(a,b){function c(c){c.method=c.method||"POST",c.headers=c.headers||{},c.transformRequest=c.transformRequest||function(b,c){return window.ArrayBuffer&&b instanceof window.ArrayBuffer?b:a.defaults.transformRequest[0](b,c)},window.XMLHttpRequest.__isShim&&(c.headers.__setXHR_=function(){return function(a){a&&(c.__XHR=a,c.xhrFn&&c.xhrFn(a),a.upload.addEventListener("progress",function(a){c.progress&&b(function(){c.progress&&c.progress(a)})},!1),a.upload.addEventListener("load",function(a){a.lengthComputable&&c.progress&&c.progress(a)},!1))}});var d=a(c);return d.progress=function(a){return c.progress=a,d},d.abort=function(){return c.__XHR&&b(function(){c.__XHR.abort()}),d},d.xhr=function(a){return c.xhrFn=a,d},d.then=function(a,b){return function(d,e,f){c.progress=f||c.progress;var g=b.apply(a,[d,e,f]);return g.abort=a.abort,g.progress=a.progress,g.xhr=a.xhr,g.then=a.then,g}}(d,d.then),d}this.upload=function(b){b.headers=b.headers||{},b.headers["Content-Type"]=void 0,b.transformRequest=b.transformRequest||a.defaults.transformRequest;var d=new FormData,e=b.transformRequest,f=b.data;return b.transformRequest=function(a,c){if(f)if(b.formDataAppender)for(var d in f){var g=f[d];b.formDataAppender(a,d,g)}else for(var d in f){var g=f[d];if("function"==typeof e)g=e(g,c);else for(var h=0;h0||navigator.msMaxTouchPoints>0)&&d.bind("touchend",function(a){a.preventDefault(),a.target.click()})}}]),a.directive("ngFileDropAvailable",["$parse","$timeout",function(a,b){return function(c,d,e){if("draggable"in document.createElement("span")){var f=a(e.ngFileDropAvailable);b(function(){f(c)})}}}]),a.directive("ngFileDrop",["$parse","$timeout",function(a,b){return function(c,d,e){function f(a,b){if(b.isDirectory){var c=b.createReader();i++,c.readEntries(function(b){for(var c=0;c0&&j[0].webkitGetAsEntry)for(var k=0;k
+
+
+
+
+ {% for style_url in external_styles %}
+
+ {% endfor %}
+
+ {% for script_url in external_scripts %}
+
+ {% endfor %}
+
+ {% for script_path in main_scripts %}
+
+ {% endfor %}
+
+ Config app
+
+
+
+
+
+
+
+
diff --git a/config_app/webpack.config.js b/config_app/webpack.config.js
new file mode 100644
index 000000000..be3cbd931
--- /dev/null
+++ b/config_app/webpack.config.js
@@ -0,0 +1,75 @@
+const webpack = require('webpack');
+const path = require('path');
+const TerserPlugin = require('terser-webpack-plugin');
+
+let config = {
+ entry: {
+ configapp: "./js/main.ts"
+ },
+ output: {
+ path: path.resolve(__dirname, "static/build"),
+ filename: '[name]-quay-frontend.bundle.js',
+ chunkFilename: '[name]-quay-frontend.chunk.js'
+ },
+ resolve: {
+ extensions: [".ts", ".js"],
+ modules: [
+ // Allows us to use the top-level node modules
+ path.resolve(__dirname, '../node_modules'),
+ path.resolve(__dirname, '../static/css/')
+ ]
+ },
+ externals: {
+ angular: "angular",
+ jquery: "$",
+ },
+ module: {
+ rules: [
+ {
+ test: /\.ts$/,
+ use: ["ts-loader"],
+ exclude: /node_modules/
+ },
+ {
+ test: /\.css$/,
+ use: [
+ "style-loader",
+ "css-loader?minimize=true",
+ ],
+ },
+ {
+ test: /\.html$/,
+ use: [
+ 'ngtemplate-loader?relativeTo=' + (path.resolve(__dirname)),
+ 'html-loader',
+ ]
+ },
+ ]
+ },
+ optimization: {},
+ plugins: [
+ // Replace references to global variables with associated modules
+ new webpack.ProvidePlugin({
+ FileSaver: 'file-saver',
+ angular: "angular",
+ $: "jquery",
+ }),
+ ],
+ devtool: "cheap-module-source-map",
+};
+
+/**
+ * Production settings
+ */
+if (process.env.NODE_ENV === 'production') {
+ config.optimization.minimizer = [
+ new TerserPlugin({
+ // Disable mangle to prevent AngularJS errors
+ terserOptions: {mangle: false},
+ sourceMap: true,
+ }),
+ ];
+ config.output.filename = '[name]-quay-frontend-[hash].bundle.js';
+}
+
+module.exports = config;
diff --git a/data/__init__.py b/data/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/data/appr_model/__init__.py b/data/appr_model/__init__.py
new file mode 100644
index 000000000..7c9620864
--- /dev/null
+++ b/data/appr_model/__init__.py
@@ -0,0 +1,9 @@
+from data.appr_model import (
+ blob,
+ channel,
+ manifest,
+ manifest_list,
+ package,
+ release,
+ tag,
+)
diff --git a/data/appr_model/blob.py b/data/appr_model/blob.py
new file mode 100644
index 000000000..d340a7491
--- /dev/null
+++ b/data/appr_model/blob.py
@@ -0,0 +1,76 @@
+import logging
+
+from peewee import IntegrityError
+
+from data.model import db_transaction
+
+logger = logging.getLogger(__name__)
+
+def _ensure_sha256_header(digest):
+ if digest.startswith('sha256:'):
+ return digest
+ return 'sha256:' + digest
+
+
+def get_blob(digest, models_ref):
+ """ Find a blob by its digest. """
+ Blob = models_ref.Blob
+ return Blob.select().where(Blob.digest == _ensure_sha256_header(digest)).get()
+
+
+def get_or_create_blob(digest, size, media_type_name, locations, models_ref):
+ """ Try to find a blob by its digest or create it. """
+ Blob = models_ref.Blob
+ BlobPlacement = models_ref.BlobPlacement
+
+  # Get or create the blob entry for the digest.
+ try:
+ blob = get_blob(digest, models_ref)
+ logger.debug('Retrieved blob with digest %s', digest)
+ except Blob.DoesNotExist:
+ blob = Blob.create(digest=_ensure_sha256_header(digest),
+ media_type_id=Blob.media_type.get_id(media_type_name),
+ size=size)
+ logger.debug('Created blob with digest %s', digest)
+
+ # Add the locations to the blob.
+ for location_name in locations:
+ location_id = BlobPlacement.location.get_id(location_name)
+ try:
+ BlobPlacement.create(blob=blob, location=location_id)
+ except IntegrityError:
+ logger.debug('Location %s already existing for blob %s', location_name, blob.id)
+
+ return blob
+
+
+def get_blob_locations(digest, models_ref):
+ """ Find all locations names for a blob. """
+ Blob = models_ref.Blob
+ BlobPlacement = models_ref.BlobPlacement
+ BlobPlacementLocation = models_ref.BlobPlacementLocation
+
+ return [x.name for x in
+ BlobPlacementLocation
+ .select()
+ .join(BlobPlacement)
+ .join(Blob)
+ .where(Blob.digest == _ensure_sha256_header(digest))]
+
+
+def ensure_blob_locations(models_ref, *names):
+ BlobPlacementLocation = models_ref.BlobPlacementLocation
+
+ with db_transaction():
+ locations = BlobPlacementLocation.select().where(BlobPlacementLocation.name << names)
+
+ insert_names = list(names)
+
+ for location in locations:
+ insert_names.remove(location.name)
+
+ if not insert_names:
+ return
+
+ data = [{'name': name} for name in insert_names]
+ BlobPlacementLocation.insert_many(data).execute()
diff --git a/data/appr_model/channel.py b/data/appr_model/channel.py
new file mode 100644
index 000000000..3631d97a5
--- /dev/null
+++ b/data/appr_model/channel.py
@@ -0,0 +1,64 @@
+from data.appr_model import tag as tag_model
+
+
+def get_channel_releases(repo, channel, models_ref):
+  """ Return all tags that were previously linked to the channel.
+      This works based upon Tag lifetimes: only channel entries whose lifetime has ended are returned.
+  """
+ Channel = models_ref.Channel
+ Tag = models_ref.Tag
+
+ tag_kind_id = Channel.tag_kind.get_id('channel')
+ channel_name = channel.name
+ return (Tag
+ .select(Tag, Channel)
+ .join(Channel, on=(Tag.id == Channel.linked_tag))
+ .where(Channel.repository == repo,
+ Channel.name == channel_name,
+ Channel.tag_kind == tag_kind_id, Channel.lifetime_end != None)
+ .order_by(Tag.lifetime_end))
+
+
+def get_channel(repo, channel_name, models_ref):
+ """ Find a Channel by name. """
+ channel = tag_model.get_tag(repo, channel_name, models_ref, "channel")
+ return channel
+
+
+def get_tag_channels(repo, tag_name, models_ref, active=True):
+ """ Find the Channels associated with a Tag. """
+ Tag = models_ref.Tag
+
+ tag = tag_model.get_tag(repo, tag_name, models_ref, "release")
+ query = tag.tag_parents
+
+ if active:
+ query = tag_model.tag_is_alive(query, Tag)
+
+ return query
+
+
+def delete_channel(repo, channel_name, models_ref):
+ """ Delete a channel by name. """
+ return tag_model.delete_tag(repo, channel_name, models_ref, "channel")
+
+
+def create_or_update_channel(repo, channel_name, tag_name, models_ref):
+ """ Creates or updates a channel to include a particular tag. """
+ tag = tag_model.get_tag(repo, tag_name, models_ref, 'release')
+ return tag_model.create_or_update_tag(repo, channel_name, models_ref, linked_tag=tag,
+ tag_kind="channel")
+
+
+def get_repo_channels(repo, models_ref):
+  """ Returns all live channels for a repository. """
+ Channel = models_ref.Channel
+ Tag = models_ref.Tag
+
+ tag_kind_id = Channel.tag_kind.get_id('channel')
+ query = (Channel
+ .select(Channel, Tag)
+ .join(Tag, on=(Tag.id == Channel.linked_tag))
+ .where(Channel.repository == repo,
+ Channel.tag_kind == tag_kind_id))
+ return tag_model.tag_is_alive(query, Channel)
diff --git a/data/appr_model/manifest.py b/data/appr_model/manifest.py
new file mode 100644
index 000000000..f08be8d9b
--- /dev/null
+++ b/data/appr_model/manifest.py
@@ -0,0 +1,67 @@
+import logging
+import hashlib
+import json
+
+from cnr.models.package_base import get_media_type
+
+from data.database import db_transaction, MediaType
+from data.appr_model import tag as tag_model
+
+
+logger = logging.getLogger(__name__)
+
+
+def _ensure_sha256_header(digest):
+ if digest.startswith('sha256:'):
+ return digest
+ return 'sha256:' + digest
+
+
+def _digest(manifestjson):
+ return _ensure_sha256_header(hashlib.sha256(json.dumps(manifestjson, sort_keys=True)).hexdigest())
+
+
+def get_manifest_query(digest, media_type, models_ref):
+ Manifest = models_ref.Manifest
+ return Manifest.select().where(Manifest.digest == _ensure_sha256_header(digest),
+ Manifest.media_type == Manifest.media_type.get_id(media_type))
+
+
+def get_manifest_with_blob(digest, media_type, models_ref):
+ Blob = models_ref.Blob
+ query = get_manifest_query(digest, media_type, models_ref)
+ return query.join(Blob).get()
+
+
+def get_or_create_manifest(manifest_json, media_type_name, models_ref):
+ Manifest = models_ref.Manifest
+ digest = _digest(manifest_json)
+ try:
+ manifest = get_manifest_query(digest, media_type_name, models_ref).get()
+ except Manifest.DoesNotExist:
+ with db_transaction():
+ manifest = Manifest.create(digest=digest,
+ manifest_json=manifest_json,
+ media_type=Manifest.media_type.get_id(media_type_name))
+ return manifest
+
+def get_manifest_types(repo, models_ref, release=None):
+  """ Returns the set of manifest media types for a repo, optionally filtered by release tag. """
+ Tag = models_ref.Tag
+ ManifestListManifest = models_ref.ManifestListManifest
+
+ query = tag_model.tag_is_alive(Tag
+ .select(MediaType.name)
+ .join(ManifestListManifest,
+ on=(ManifestListManifest.manifest_list == Tag.manifest_list))
+ .join(MediaType,
+ on=(ManifestListManifest.media_type == MediaType.id))
+ .where(Tag.repository == repo,
+ Tag.tag_kind == Tag.tag_kind.get_id('release')), Tag)
+ if release:
+ query = query.where(Tag.name == release)
+
+ manifests = set()
+ for m in query.distinct().tuples():
+ manifests.add(get_media_type(m[0]))
+ return manifests
diff --git a/data/appr_model/manifest_list.py b/data/appr_model/manifest_list.py
new file mode 100644
index 000000000..92b10be6e
--- /dev/null
+++ b/data/appr_model/manifest_list.py
@@ -0,0 +1,67 @@
+import logging
+import hashlib
+import json
+
+from data.database import db_transaction
+
+
+logger = logging.getLogger(__name__)
+
+
+def _ensure_sha256_header(digest):
+ if digest.startswith('sha256:'):
+ return digest
+ return 'sha256:' + digest
+
+
+def _digest(manifestjson):
+ return _ensure_sha256_header(hashlib.sha256(json.dumps(manifestjson, sort_keys=True)).hexdigest())
+
+
+def get_manifest_list(digest, models_ref):
+ ManifestList = models_ref.ManifestList
+ return ManifestList.select().where(ManifestList.digest == _ensure_sha256_header(digest)).get()
+
+
+def get_or_create_manifest_list(manifest_list_json, media_type_name, schema_version, models_ref):
+ ManifestList = models_ref.ManifestList
+
+ digest = _digest(manifest_list_json)
+ media_type_id = ManifestList.media_type.get_id(media_type_name)
+
+ try:
+ return get_manifest_list(digest, models_ref)
+ except ManifestList.DoesNotExist:
+ with db_transaction():
+ manifestlist = ManifestList.create(digest=digest, manifest_list_json=manifest_list_json,
+ schema_version=schema_version, media_type=media_type_id)
+ return manifestlist
+
+
+def create_manifestlistmanifest(manifestlist, manifest_ids, manifest_list_json, models_ref):
+  """ Given a manifest list, the manifest ids, and the manifest list JSON,
+      create the ManifestListManifest entry for each manifest if it does not already exist. """
+ for pos in xrange(len(manifest_ids)):
+ manifest_id = manifest_ids[pos]
+ manifest_json = manifest_list_json[pos]
+ get_or_create_manifestlistmanifest(manifest=manifest_id,
+ manifestlist=manifestlist,
+ media_type_name=manifest_json['mediaType'],
+ models_ref=models_ref)
+
+
+def get_or_create_manifestlistmanifest(manifest, manifestlist, media_type_name, models_ref):
+ ManifestListManifest = models_ref.ManifestListManifest
+
+ media_type_id = ManifestListManifest.media_type.get_id(media_type_name)
+ try:
+ ml = (ManifestListManifest
+ .select()
+ .where(ManifestListManifest.manifest == manifest,
+ ManifestListManifest.media_type == media_type_id,
+ ManifestListManifest.manifest_list == manifestlist)).get()
+
+ except ManifestListManifest.DoesNotExist:
+ ml = ManifestListManifest.create(manifest_list=manifestlist, media_type=media_type_id,
+ manifest=manifest)
+ return ml
diff --git a/data/appr_model/models.py b/data/appr_model/models.py
new file mode 100644
index 000000000..0fde7d83c
--- /dev/null
+++ b/data/appr_model/models.py
@@ -0,0 +1,15 @@
+from collections import namedtuple
+
+from data.database import (ApprTag, ApprTagKind, ApprBlobPlacementLocation, ApprManifestList,
+ ApprManifestBlob, ApprBlob, ApprManifestListManifest, ApprManifest,
+ ApprBlobPlacement, ApprChannel)
+
+ModelsRef = namedtuple('ModelsRef', ['Tag', 'TagKind', 'BlobPlacementLocation', 'ManifestList',
+ 'ManifestBlob', 'Blob', 'ManifestListManifest', 'Manifest',
+ 'BlobPlacement', 'Channel', 'manifestlistmanifest_set_name',
+ 'tag_set_prefetch_name'])
+
+NEW_MODELS = ModelsRef(ApprTag, ApprTagKind, ApprBlobPlacementLocation, ApprManifestList,
+ ApprManifestBlob, ApprBlob, ApprManifestListManifest, ApprManifest,
+ ApprBlobPlacement, ApprChannel, 'apprmanifestlistmanifest_set',
+ 'apprtag_set')
diff --git a/data/appr_model/package.py b/data/appr_model/package.py
new file mode 100644
index 000000000..97ea9f791
--- /dev/null
+++ b/data/appr_model/package.py
@@ -0,0 +1,67 @@
+from cnr.models.package_base import get_media_type, manifest_media_type
+from peewee import prefetch
+
+
+from data import model
+from data.database import Repository, Namespace
+from data.appr_model import tag as tag_model
+
+
+def list_packages_query(models_ref, namespace=None, media_type=None, search_query=None,
+ username=None):
+  """ List repositories of kind 'application', optionally filtered by namespace, media type, or search query. """
+ Tag = models_ref.Tag
+
+ if username and not search_query:
+ repositories = model.repository.get_visible_repositories(username,
+ kind_filter='application',
+ include_public=True,
+ namespace=namespace,
+ limit=50)
+ if not repositories:
+ return []
+
+ repo_query = (Repository
+ .select(Repository, Namespace.username)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.id << [repo.rid for repo in repositories]))
+
+ if namespace:
+ repo_query = (repo_query
+ .where(Namespace.username == namespace))
+ else:
+ if search_query is not None:
+ fields = [model.repository.SEARCH_FIELDS.name.name]
+ repositories = model.repository.get_app_search(search_query,
+ username=username,
+ search_fields=fields,
+ limit=50)
+ if not repositories:
+ return []
+
+ repo_query = (Repository
+ .select(Repository, Namespace.username)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.id << [repo.id for repo in repositories]))
+ else:
+ repo_query = (Repository
+ .select(Repository, Namespace.username)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.visibility == model.repository.get_public_repo_visibility(),
+ Repository.kind == Repository.kind.get_id('application')))
+
+ if namespace:
+ repo_query = (repo_query
+ .where(Namespace.username == namespace))
+
+ tag_query = (Tag
+ .select()
+ .where(Tag.tag_kind == Tag.tag_kind.get_id('release'))
+ .order_by(Tag.lifetime_start))
+
+ if media_type:
+ tag_query = tag_model.filter_tags_by_media_type(tag_query, media_type, models_ref)
+
+ tag_query = tag_model.tag_is_alive(tag_query, Tag)
+ query = prefetch(repo_query, tag_query)
+ return query
diff --git a/data/appr_model/release.py b/data/appr_model/release.py
new file mode 100644
index 000000000..dcfa455d0
--- /dev/null
+++ b/data/appr_model/release.py
@@ -0,0 +1,152 @@
+import bisect
+
+from cnr.exception import PackageAlreadyExists
+from cnr.models.package_base import manifest_media_type
+
+from data.database import db_transaction, get_epoch_timestamp
+from data.appr_model import (blob as blob_model, manifest as manifest_model,
+ manifest_list as manifest_list_model,
+ tag as tag_model)
+
+
+LIST_MEDIA_TYPE = 'application/vnd.cnr.manifest.list.v0.json'
+SCHEMA_VERSION = 'v0'
+
+
+def _ensure_sha256_header(digest):
+ if digest.startswith('sha256:'):
+ return digest
+ return 'sha256:' + digest
+
+
+def get_app_release(repo, tag_name, media_type, models_ref):
+  """ Returns (tag, manifest, blob) given a repo object, tag_name, and media_type. """
+ ManifestListManifest = models_ref.ManifestListManifest
+ Manifest = models_ref.Manifest
+ Blob = models_ref.Blob
+ ManifestBlob = models_ref.ManifestBlob
+ manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name
+
+ tag = tag_model.get_tag(repo, tag_name, models_ref, tag_kind='release')
+ media_type_id = ManifestListManifest.media_type.get_id(manifest_media_type(media_type))
+ manifestlistmanifest = (getattr(tag.manifest_list, manifestlistmanifest_set_name)
+ .join(Manifest)
+ .where(ManifestListManifest.media_type == media_type_id).get())
+ manifest = manifestlistmanifest.manifest
+ blob = Blob.select().join(ManifestBlob).where(ManifestBlob.manifest == manifest).get()
+ return (tag, manifest, blob)
+
+
+def delete_app_release(repo, tag_name, media_type, models_ref):
+  """ Terminate a Tag/media-type pair.
+      It finds the corresponding tag/manifest and removes the manifest from the manifest list:
+      1. it terminates the current tag (in all cases)
+      2. if the new manifest list is not empty, it creates a new tag for it
+  """
+ ManifestListManifest = models_ref.ManifestListManifest
+ manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name
+
+ media_type_id = ManifestListManifest.media_type.get_id(manifest_media_type(media_type))
+
+ with db_transaction():
+ tag = tag_model.get_tag(repo, tag_name, models_ref)
+ manifest_list = tag.manifest_list
+ list_json = manifest_list.manifest_list_json
+ mlm_query = (ManifestListManifest
+ .select()
+ .where(ManifestListManifest.manifest_list == tag.manifest_list))
+ list_manifest_ids = sorted([mlm.manifest_id for mlm in mlm_query])
+ manifestlistmanifest = (getattr(tag.manifest_list, manifestlistmanifest_set_name)
+ .where(ManifestListManifest.media_type == media_type_id).get())
+ index = list_manifest_ids.index(manifestlistmanifest.manifest_id)
+ list_manifest_ids.pop(index)
+ list_json.pop(index)
+
+ if not list_json:
+ tag.lifetime_end = get_epoch_timestamp()
+ tag.save()
+ else:
+ manifestlist = manifest_list_model.get_or_create_manifest_list(list_json, LIST_MEDIA_TYPE,
+ SCHEMA_VERSION, models_ref)
+ manifest_list_model.create_manifestlistmanifest(manifestlist, list_manifest_ids,
+ list_json, models_ref)
+ tag = tag_model.create_or_update_tag(repo, tag_name, models_ref, manifest_list=manifestlist,
+ tag_kind="release")
+ return tag
+
+
+def create_app_release(repo, tag_name, manifest_data, digest, models_ref, force=False):
+  """ Create a new application release; this includes creating a new Tag, ManifestList,
+      ManifestListManifest, Manifest, and ManifestBlob.
+
+      To deduplicate the ManifestList, the manifest_list_json is kept ordered by manifest.id.
+      To find the insert point in the ManifestList, bisect is used on the list of manifest ids.
+  """
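+  # Worked example (illustrative): if the list currently holds manifests with
+  # ids [4, 9, 15] and a new manifest has id 11, bisect_left returns index 2, so
+  # both manifest_list_json and the id list receive the new entry at position 2,
+  # keeping the JSON ordered by manifest id for deduplication.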
+ ManifestList = models_ref.ManifestList
+ ManifestListManifest = models_ref.ManifestListManifest
+ Blob = models_ref.Blob
+ ManifestBlob = models_ref.ManifestBlob
+
+ with db_transaction():
+ # Create/get the package manifest
+ manifest = manifest_model.get_or_create_manifest(manifest_data, manifest_data['mediaType'],
+ models_ref)
+ # get the tag
+ tag = tag_model.get_or_initialize_tag(repo, tag_name, models_ref)
+
+ if tag.manifest_list is None:
+ tag.manifest_list = ManifestList(media_type=ManifestList.media_type.get_id(LIST_MEDIA_TYPE),
+ schema_version=SCHEMA_VERSION,
+ manifest_list_json=[], )
+
+ elif tag_model.tag_media_type_exists(tag, manifest.media_type, models_ref):
+ if force:
+ delete_app_release(repo, tag_name, manifest.media_type.name, models_ref)
+ return create_app_release(repo, tag_name, manifest_data, digest, models_ref, force=False)
+ else:
+ raise PackageAlreadyExists("package exists already")
+
+ list_json = tag.manifest_list.manifest_list_json
+ mlm_query = (ManifestListManifest
+ .select()
+ .where(ManifestListManifest.manifest_list == tag.manifest_list))
+ list_manifest_ids = sorted([mlm.manifest_id for mlm in mlm_query])
+ insert_point = bisect.bisect_left(list_manifest_ids, manifest.id)
+ list_json.insert(insert_point, manifest.manifest_json)
+ list_manifest_ids.insert(insert_point, manifest.id)
+ manifestlist = manifest_list_model.get_or_create_manifest_list(list_json, LIST_MEDIA_TYPE,
+ SCHEMA_VERSION, models_ref)
+ manifest_list_model.create_manifestlistmanifest(manifestlist, list_manifest_ids, list_json,
+ models_ref)
+
+ tag = tag_model.create_or_update_tag(repo, tag_name, models_ref, manifest_list=manifestlist,
+ tag_kind="release")
+ blob_digest = digest
+
+ try:
+ (ManifestBlob
+ .select()
+ .join(Blob)
+ .where(ManifestBlob.manifest == manifest,
+ Blob.digest == _ensure_sha256_header(blob_digest)).get())
+ except ManifestBlob.DoesNotExist:
+ blob = blob_model.get_blob(blob_digest, models_ref)
+ ManifestBlob.create(manifest=manifest, blob=blob)
+ return tag
+
+def get_release_objs(repo, models_ref, media_type=None):
+ """ Returns an array of Tag for a repo, with optional filtering by media_type. """
+ Tag = models_ref.Tag
+
+ release_query = (Tag
+ .select()
+ .where(Tag.repository == repo,
+ Tag.tag_kind == Tag.tag_kind.get_id("release")))
+ if media_type:
+ release_query = tag_model.filter_tags_by_media_type(release_query, media_type, models_ref)
+
+ return tag_model.tag_is_alive(release_query, Tag)
+
+def get_releases(repo, model_refs, media_type=None):
+  """ Returns the tag names for a repo, optionally filtered by media_type. """
+ return [t.name for t in get_release_objs(repo, model_refs, media_type)]
diff --git a/data/appr_model/tag.py b/data/appr_model/tag.py
new file mode 100644
index 000000000..4903a4572
--- /dev/null
+++ b/data/appr_model/tag.py
@@ -0,0 +1,99 @@
+import logging
+
+from cnr.models.package_base import manifest_media_type
+from peewee import IntegrityError
+
+from data.model import (db_transaction, TagAlreadyCreatedException)
+from data.database import get_epoch_timestamp_ms, db_for_update
+
+
+logger = logging.getLogger(__name__)
+
+
+def tag_is_alive(query, cls, now_ts=None):
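+  """ Restrict the query to live tags: lifetime_end is unset (NULL) or later than now_ts. """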
+ return query.where((cls.lifetime_end >> None) |
+ (cls.lifetime_end > now_ts))
+
+
+def tag_media_type_exists(tag, media_type, models_ref):
+ ManifestListManifest = models_ref.ManifestListManifest
+ manifestlistmanifest_set_name = models_ref.manifestlistmanifest_set_name
+ return (getattr(tag.manifest_list, manifestlistmanifest_set_name)
+ .where(ManifestListManifest.media_type == media_type).count() > 0)
+
+
+def create_or_update_tag(repo, tag_name, models_ref, manifest_list=None, linked_tag=None,
+ tag_kind="release"):
+ Tag = models_ref.Tag
+
+ now_ts = get_epoch_timestamp_ms()
+ tag_kind_id = Tag.tag_kind.get_id(tag_kind)
+ with db_transaction():
+ try:
+ tag = db_for_update(tag_is_alive(Tag
+ .select()
+ .where(Tag.repository == repo,
+ Tag.name == tag_name,
+ Tag.tag_kind == tag_kind_id), Tag, now_ts)).get()
+ if tag.manifest_list == manifest_list and tag.linked_tag == linked_tag:
+ return tag
+ tag.lifetime_end = now_ts
+ tag.save()
+ except Tag.DoesNotExist:
+ pass
+
+ try:
+ return Tag.create(repository=repo, manifest_list=manifest_list, linked_tag=linked_tag,
+ name=tag_name, lifetime_start=now_ts, lifetime_end=None,
+ tag_kind=tag_kind_id)
+ except IntegrityError:
+ msg = 'Tag with name %s and lifetime start %s under repository %s/%s already exists'
+ raise TagAlreadyCreatedException(msg % (tag_name, now_ts, repo.namespace_user, repo.name))
+
+
+def get_or_initialize_tag(repo, tag_name, models_ref, tag_kind="release"):
+ Tag = models_ref.Tag
+
+ try:
+ return tag_is_alive(Tag.select().where(Tag.repository == repo, Tag.name == tag_name), Tag).get()
+ except Tag.DoesNotExist:
+ return Tag(repo=repo, name=tag_name, tag_kind=Tag.tag_kind.get_id(tag_kind))
+
+
+def get_tag(repo, tag_name, models_ref, tag_kind="release"):
+ Tag = models_ref.Tag
+ return tag_is_alive(Tag.select()
+ .where(Tag.repository == repo,
+ Tag.name == tag_name,
+ Tag.tag_kind == Tag.tag_kind.get_id(tag_kind)), Tag).get()
+
+
+def delete_tag(repo, tag_name, models_ref, tag_kind="release"):
+ Tag = models_ref.Tag
+ tag_kind_id = Tag.tag_kind.get_id(tag_kind)
+ tag = tag_is_alive(Tag.select()
+ .where(Tag.repository == repo,
+ Tag.name == tag_name, Tag.tag_kind == tag_kind_id), Tag).get()
+ tag.lifetime_end = get_epoch_timestamp_ms()
+ tag.save()
+ return tag
+
+
+def tag_exists(repo, tag_name, models_ref, tag_kind="release"):
+ Tag = models_ref.Tag
+ try:
+ get_tag(repo, tag_name, models_ref, tag_kind)
+ return True
+ except Tag.DoesNotExist:
+ return False
+
+
+def filter_tags_by_media_type(tag_query, media_type, models_ref):
+  """ Return only the tags available for the given media_type. """
+ ManifestListManifest = models_ref.ManifestListManifest
+ Tag = models_ref.Tag
+ media_type = manifest_media_type(media_type)
+ t = (tag_query
+ .join(ManifestListManifest, on=(ManifestListManifest.manifest_list == Tag.manifest_list))
+ .where(ManifestListManifest.media_type == ManifestListManifest.media_type.get_id(media_type)))
+ return t
diff --git a/data/archivedlogs.py b/data/archivedlogs.py
new file mode 100644
index 000000000..0172c74c8
--- /dev/null
+++ b/data/archivedlogs.py
@@ -0,0 +1,37 @@
+import logging
+
+from util.registry.gzipinputstream import GzipInputStream
+from flask import send_file, abort
+
+from data.userfiles import DelegateUserfiles, UserfilesHandlers
+
+
+JSON_MIMETYPE = 'application/json'
+
+
+logger = logging.getLogger(__name__)
+
+
+class LogArchive(object):
+ def __init__(self, app=None, distributed_storage=None):
+ self.app = app
+ if app is not None:
+ self.state = self.init_app(app, distributed_storage)
+ else:
+ self.state = None
+
+ def init_app(self, app, distributed_storage):
+ location = app.config.get('LOG_ARCHIVE_LOCATION')
+ path = app.config.get('LOG_ARCHIVE_PATH', None)
+
+ handler_name = 'web.logarchive'
+
+ log_archive = DelegateUserfiles(app, distributed_storage, location, path,
+ handler_name=handler_name)
+ # register extension with app
+ app.extensions = getattr(app, 'extensions', {})
+ app.extensions['log_archive'] = log_archive
+ return log_archive
+
+ def __getattr__(self, name):
+ return getattr(self.state, name, None)
diff --git a/data/billing.py b/data/billing.py
new file mode 100644
index 000000000..aa2420c01
--- /dev/null
+++ b/data/billing.py
@@ -0,0 +1,453 @@
+import stripe
+
+from datetime import datetime, timedelta
+from calendar import timegm
+
+from util.morecollections import AttrDict
+
+PLANS = [
+ # Deprecated Plans (2013-2014)
+ {
+ 'title': 'Micro',
+ 'price': 700,
+ 'privateRepos': 5,
+ 'stripeId': 'micro',
+ 'audience': 'For smaller teams',
+ 'bus_features': False,
+ 'deprecated': True,
+ 'free_trial_days': 14,
+ 'superseded_by': 'personal-30',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Basic',
+ 'price': 1200,
+ 'privateRepos': 10,
+ 'stripeId': 'small',
+ 'audience': 'For your basic team',
+ 'bus_features': False,
+ 'deprecated': True,
+ 'free_trial_days': 14,
+ 'superseded_by': 'bus-micro-30',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Yacht',
+ 'price': 5000,
+ 'privateRepos': 20,
+ 'stripeId': 'bus-coreos-trial',
+ 'audience': 'For small businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 180,
+ 'superseded_by': 'bus-small-30',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Personal',
+ 'price': 1200,
+ 'privateRepos': 5,
+ 'stripeId': 'personal',
+ 'audience': 'Individuals',
+ 'bus_features': False,
+ 'deprecated': True,
+ 'free_trial_days': 14,
+ 'superseded_by': 'personal-30',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Skiff',
+ 'price': 2500,
+ 'privateRepos': 10,
+ 'stripeId': 'bus-micro',
+ 'audience': 'For startups',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 14,
+ 'superseded_by': 'bus-micro-30',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Yacht',
+ 'price': 5000,
+ 'privateRepos': 20,
+ 'stripeId': 'bus-small',
+ 'audience': 'For small businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 14,
+ 'superseded_by': 'bus-small-30',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Freighter',
+ 'price': 10000,
+ 'privateRepos': 50,
+ 'stripeId': 'bus-medium',
+ 'audience': 'For normal businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 14,
+ 'superseded_by': 'bus-medium-30',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Tanker',
+ 'price': 20000,
+ 'privateRepos': 125,
+ 'stripeId': 'bus-large',
+ 'audience': 'For large businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 14,
+ 'superseded_by': 'bus-large-30',
+ 'plans_page_hidden': False,
+ },
+
+ # Deprecated plans (2014-2017)
+ {
+ 'title': 'Personal',
+ 'price': 1200,
+ 'privateRepos': 5,
+ 'stripeId': 'personal-30',
+ 'audience': 'Individuals',
+ 'bus_features': False,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'personal-2018',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Skiff',
+ 'price': 2500,
+ 'privateRepos': 10,
+ 'stripeId': 'bus-micro-30',
+ 'audience': 'For startups',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'bus-micro-2018',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Yacht',
+ 'price': 5000,
+ 'privateRepos': 20,
+ 'stripeId': 'bus-small-30',
+ 'audience': 'For small businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'bus-small-2018',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Freighter',
+ 'price': 10000,
+ 'privateRepos': 50,
+ 'stripeId': 'bus-medium-30',
+ 'audience': 'For normal businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'bus-medium-2018',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Tanker',
+ 'price': 20000,
+ 'privateRepos': 125,
+ 'stripeId': 'bus-large-30',
+ 'audience': 'For large businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'bus-large-2018',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Carrier',
+ 'price': 35000,
+ 'privateRepos': 250,
+ 'stripeId': 'bus-xlarge-30',
+ 'audience': 'For extra large businesses',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'bus-xlarge-2018',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Huge',
+ 'price': 65000,
+ 'privateRepos': 500,
+ 'stripeId': 'bus-500-30',
+ 'audience': 'For huge business',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'bus-500-2018',
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Huuge',
+ 'price': 120000,
+ 'privateRepos': 1000,
+ 'stripeId': 'bus-1000-30',
+ 'audience': 'For the SaaS savvy enterprise',
+ 'bus_features': True,
+ 'deprecated': True,
+ 'free_trial_days': 30,
+ 'superseded_by': 'bus-1000-2018',
+ 'plans_page_hidden': False,
+ },
+
+ # Active plans (as of Dec 2017)
+ {
+ 'title': 'Open Source',
+ 'price': 0,
+ 'privateRepos': 0,
+ 'stripeId': 'free',
+ 'audience': 'Commitment to FOSS',
+ 'bus_features': False,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Developer',
+ 'price': 1500,
+ 'privateRepos': 5,
+ 'stripeId': 'personal-2018',
+ 'audience': 'Individuals',
+ 'bus_features': False,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Micro',
+ 'price': 3000,
+ 'privateRepos': 10,
+ 'stripeId': 'bus-micro-2018',
+ 'audience': 'For startups',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Small',
+ 'price': 6000,
+ 'privateRepos': 20,
+ 'stripeId': 'bus-small-2018',
+ 'audience': 'For small businesses',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Medium',
+ 'price': 12500,
+ 'privateRepos': 50,
+ 'stripeId': 'bus-medium-2018',
+ 'audience': 'For normal businesses',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Large',
+ 'price': 25000,
+ 'privateRepos': 125,
+ 'stripeId': 'bus-large-2018',
+ 'audience': 'For large businesses',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'Extra Large',
+ 'price': 45000,
+ 'privateRepos': 250,
+ 'stripeId': 'bus-xlarge-2018',
+ 'audience': 'For extra large businesses',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'XXL',
+ 'price': 85000,
+ 'privateRepos': 500,
+ 'stripeId': 'bus-500-2018',
+ 'audience': 'For huge business',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'XXXL',
+ 'price': 160000,
+ 'privateRepos': 1000,
+ 'stripeId': 'bus-1000-2018',
+ 'audience': 'For the SaaS savvy enterprise',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+ {
+ 'title': 'XXXXL',
+ 'price': 310000,
+ 'privateRepos': 2000,
+ 'stripeId': 'bus-2000-2018',
+ 'audience': 'For the SaaS savvy big enterprise',
+ 'bus_features': True,
+ 'deprecated': False,
+ 'free_trial_days': 30,
+ 'superseded_by': None,
+ 'plans_page_hidden': False,
+ },
+]
+
+
+def get_plan(plan_id):
+ """ Returns the plan with the given ID or None if none. """
+ for plan in PLANS:
+ if plan['stripeId'] == plan_id:
+ return plan
+
+ return None
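For illustration, a couple of lookups against the PLANS table above (the IDs are the stripeId values defined there):

    assert get_plan('bus-small-2018')['title'] == 'Small'
    assert get_plan('bus-small-2018')['privateRepos'] == 20
    assert get_plan('not-a-plan') is None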
+
+
+class FakeSubscription(AttrDict):
+ @classmethod
+ def build(cls, data, customer):
+ data = AttrDict.deep_copy(data)
+ data['customer'] = customer
+ return cls(data)
+
+ def delete(self):
+ self.customer.subscription = None
+
+
+class FakeStripe(object):
+ class Customer(AttrDict):
+ FAKE_PLAN = AttrDict({
+ 'id': 'bus-small',
+ })
+
+ FAKE_SUBSCRIPTION = AttrDict({
+ 'plan': FAKE_PLAN,
+ 'current_period_start': timegm(datetime.utcnow().utctimetuple()),
+ 'current_period_end': timegm((datetime.utcnow() + timedelta(days=30)).utctimetuple()),
+ 'trial_start': timegm(datetime.utcnow().utctimetuple()),
+ 'trial_end': timegm((datetime.utcnow() + timedelta(days=30)).utctimetuple()),
+ })
+
+ FAKE_CARD = AttrDict({
+ 'id': 'card123',
+ 'name': 'Joe User',
+ 'type': 'Visa',
+ 'last4': '4242',
+ 'exp_month': 5,
+ 'exp_year': 2016,
+ })
+
+ FAKE_CARD_LIST = AttrDict({
+ 'data': [FAKE_CARD],
+ })
+
+ ACTIVE_CUSTOMERS = {}
+
+ @property
+ def card(self):
+ return self.get('new_card', None)
+
+ @card.setter
+ def card(self, card_token):
+ self['new_card'] = card_token
+
+ @property
+ def plan(self):
+ return self.get('new_plan', None)
+
+ @plan.setter
+ def plan(self, plan_name):
+ self['new_plan'] = plan_name
+
+ def save(self):
+ if self.get('new_card', None) is not None:
+ raise stripe.error.CardError('Test raising exception on set card.', self.get('new_card'), 402)
+ if self.get('new_plan', None) is not None:
+ if self.subscription is None:
+ self.subscription = FakeSubscription.build(self.FAKE_SUBSCRIPTION, self)
+ self.subscription.plan.id = self.get('new_plan')
+
+ @classmethod
+ def retrieve(cls, stripe_customer_id):
+ if stripe_customer_id in cls.ACTIVE_CUSTOMERS:
+ cls.ACTIVE_CUSTOMERS[stripe_customer_id].pop('new_card', None)
+ cls.ACTIVE_CUSTOMERS[stripe_customer_id].pop('new_plan', None)
+ return cls.ACTIVE_CUSTOMERS[stripe_customer_id]
+ else:
+ new_customer = cls({
+ 'default_card': 'card123',
+ 'cards': AttrDict.deep_copy(cls.FAKE_CARD_LIST),
+ 'id': stripe_customer_id,
+ })
+ new_customer.subscription = FakeSubscription.build(cls.FAKE_SUBSCRIPTION, new_customer)
+ cls.ACTIVE_CUSTOMERS[stripe_customer_id] = new_customer
+ return new_customer
+
+ class Invoice(AttrDict):
+ @staticmethod
+ def list(customer, count):
+ return AttrDict({
+ 'data': [],
+ })
+
+
+class Billing(object):
+ def __init__(self, app=None):
+ self.app = app
+ if app is not None:
+ self.state = self.init_app(app)
+ else:
+ self.state = None
+
+ def init_app(self, app):
+ billing_type = app.config.get('BILLING_TYPE', 'FakeStripe')
+
+ if billing_type == 'Stripe':
+ billing = stripe
+ stripe.api_key = app.config.get('STRIPE_SECRET_KEY', None)
+
+ elif billing_type == 'FakeStripe':
+ billing = FakeStripe
+
+ else:
+ raise RuntimeError('Unknown billing type: %s' % billing_type)
+
+ # register extension with app
+ app.extensions = getattr(app, 'extensions', {})
+ app.extensions['billing'] = billing
+ return billing
+
+ def __getattr__(self, name):
+ return getattr(self.state, name, None)
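A short sketch of how the BILLING_TYPE switch above is exercised. The app and customer id are illustrative; with the default FakeStripe backend, retrieval returns a canned customer carrying the FAKE_SUBSCRIPTION defined earlier:

    from flask import Flask

    app = Flask(__name__)
    app.config['BILLING_TYPE'] = 'FakeStripe'   # use 'Stripe' plus STRIPE_SECRET_KEY in production

    billing = Billing(app)

    # Attribute access is proxied to the selected backend (FakeStripe here),
    # so billing.Customer resolves to FakeStripe.Customer.
    customer = billing.Customer.retrieve('cus_example_123')
    # With FakeStripe this should carry the canned 'bus-small' plan:
    # customer.subscription.plan.id == 'bus-small'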
diff --git a/data/buildlogs.py b/data/buildlogs.py
new file mode 100644
index 000000000..b6b4d2652
--- /dev/null
+++ b/data/buildlogs.py
@@ -0,0 +1,179 @@
+import redis
+import json
+import time
+
+from contextlib import closing
+
+from util.dynamic import import_class
+from datetime import timedelta
+
+
+ONE_DAY = timedelta(days=1)
+SEVEN_DAYS = timedelta(days=7)
+
+
+class BuildStatusRetrievalError(Exception):
+ pass
+
+class RedisBuildLogs(object):
+ ERROR = 'error'
+ COMMAND = 'command'
+ PHASE = 'phase'
+
+ def __init__(self, redis_config):
+ self._redis_client = None
+ self._redis_config = redis_config
+
+ @property
+ def _redis(self):
+ if self._redis_client is not None:
+ return self._redis_client
+
+ args = dict(self._redis_config)
+ args.update({'socket_connect_timeout': 1,
+ 'socket_timeout': 2,
+ 'single_connection_client': True})
+
+ self._redis_client = redis.StrictRedis(**args)
+ return self._redis_client
+
+ @staticmethod
+ def _logs_key(build_id):
+ return 'builds/%s/logs' % build_id
+
+ def append_log_entry(self, build_id, log_obj):
+ """
+ Appends the serialized form of log_obj to the end of the log entry list
+ and returns the new length of the list.
+ """
+ pipeline = self._redis.pipeline(transaction=False)
+ pipeline.expire(self._logs_key(build_id), SEVEN_DAYS)
+ pipeline.rpush(self._logs_key(build_id), json.dumps(log_obj))
+ result = pipeline.execute()
+ return result[1]
+
+ def append_log_message(self, build_id, log_message, log_type=None, log_data=None):
+ """
+ Wraps the message in an envelope, pushes it to the end of the log entry
+ list, and returns the index at which it was inserted.
+ """
+ log_obj = {
+ 'message': log_message
+ }
+
+ if log_type:
+ log_obj['type'] = log_type
+
+ if log_data:
+ log_obj['data'] = log_data
+
+ return self.append_log_entry(build_id, log_obj) - 1
+
+ def get_log_entries(self, build_id, start_index):
+ """
+ Returns a tuple of the current length of the list and an iterable of the
+ requested log entries.
+ """
+ try:
+ llen = self._redis.llen(self._logs_key(build_id))
+ log_entries = self._redis.lrange(self._logs_key(build_id), start_index, -1)
+ return (llen, (json.loads(entry) for entry in log_entries))
+ except redis.RedisError as re:
+ raise BuildStatusRetrievalError('Cannot retrieve build logs: %s' % re)
+
+ def expire_status(self, build_id):
+ """
+ Sets the status entry to expire in 1 day.
+ """
+ self._redis.expire(self._status_key(build_id), ONE_DAY)
+
+ def expire_log_entries(self, build_id):
+ """
+ Sets the log entries to expire in 1 day.
+ """
+ self._redis.expire(self._logs_key(build_id), ONE_DAY)
+
+ def delete_log_entries(self, build_id):
+ """
+ Deletes the log entries for the given build.
+ """
+ self._redis.delete(self._logs_key(build_id))
+
+ @staticmethod
+ def _status_key(build_id):
+ return 'builds/%s/status' % build_id
+
+ def set_status(self, build_id, status_obj):
+ """
+ Sets the status key for this build to the JSON-serialized form of the supplied
+ obj.
+ """
+ self._redis.set(self._status_key(build_id), json.dumps(status_obj), ex=SEVEN_DAYS)
+
+ def get_status(self, build_id):
+ """
+ Loads the status information for the specified build id.
+ """
+ try:
+ fetched = self._redis.get(self._status_key(build_id))
+ except redis.RedisError as re:
+ raise BuildStatusRetrievalError('Cannot retrieve build status: %s' % re)
+
+ return json.loads(fetched) if fetched else None
+
+ @staticmethod
+ def _health_key():
+ return '_health'
+
+ def check_health(self):
+ try:
+ args = dict(self._redis_config)
+ args.update({'socket_connect_timeout': 1,
+ 'socket_timeout': 1,
+ 'single_connection_client': True})
+
+ with closing(redis.StrictRedis(**args)) as connection:
+ if not connection.ping():
+ return (False, 'Could not ping redis')
+
+ # Ensure we can write and read a key.
+ connection.set(self._health_key(), time.time())
+ connection.get(self._health_key())
+ return (True, None)
+ except redis.RedisError as re:
+ return (False, 'Could not connect to redis: %s' % re.message)
+
+
+class BuildLogs(object):
+ def __init__(self, app=None):
+ self.app = app
+ if app is not None:
+ self.state = self.init_app(app)
+ else:
+ self.state = None
+
+ def init_app(self, app):
+ buildlogs_config = app.config.get('BUILDLOGS_REDIS')
+ if not buildlogs_config:
+ # This is the old key name.
+ buildlogs_config = {
+ 'host': app.config.get('BUILDLOGS_REDIS_HOSTNAME')
+ }
+
+ buildlogs_options = app.config.get('BUILDLOGS_OPTIONS', [])
+ buildlogs_import = app.config.get('BUILDLOGS_MODULE_AND_CLASS', None)
+
+ if buildlogs_import is None:
+ klass = RedisBuildLogs
+ else:
+ klass = import_class(buildlogs_import[0], buildlogs_import[1])
+
+ buildlogs = klass(buildlogs_config, *buildlogs_options)
+
+ # register extension with app
+ app.extensions = getattr(app, 'extensions', {})
+ app.extensions['buildlogs'] = buildlogs
+ return buildlogs
+
+ def __getattr__(self, name):
+ return getattr(self.state, name, None)
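A rough usage sketch for the Redis-backed implementation above; the Redis host/port, build id, and log text are all illustrative, and a reachable Redis instance is assumed:

    logs = RedisBuildLogs({'host': 'localhost', 'port': 6379})

    build_id = 'example-build-uuid'
    logs.append_log_message(build_id, 'Step 1/5 : FROM ubuntu', log_type=RedisBuildLogs.COMMAND)
    logs.set_status(build_id, {'phase': 'building'})

    # get_log_entries returns (total_length, iterable_of_entries) starting at the given index.
    total, entries = logs.get_log_entries(build_id, 0)
    for entry in entries:
        print(entry['message'])

    # Once the build is archived, the entries can be expired early or deleted outright.
    logs.expire_log_entries(build_id)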
diff --git a/data/cache/__init__.py b/data/cache/__init__.py
new file mode 100644
index 000000000..a7c44dadd
--- /dev/null
+++ b/data/cache/__init__.py
@@ -0,0 +1,23 @@
+from data.cache.impl import NoopDataModelCache, InMemoryDataModelCache, MemcachedModelCache
+
+def get_model_cache(config):
+ """ Returns a data model cache matching the given configuration. """
+ cache_config = config.get('DATA_MODEL_CACHE_CONFIG', {})
+ engine = cache_config.get('engine', 'noop')
+
+ if engine == 'noop':
+ return NoopDataModelCache()
+
+ if engine == 'inmemory':
+ return InMemoryDataModelCache()
+
+ if engine == 'memcached':
+ endpoint = cache_config.get('endpoint', None)
+ if endpoint is None:
+ raise Exception('Missing `endpoint` for memcached model cache configuration')
+
+ timeout = cache_config.get('timeout')
+ connect_timeout = cache_config.get('connect_timeout')
+ return MemcachedModelCache(endpoint, timeout=timeout, connect_timeout=connect_timeout)
+
+ raise Exception('Unknown model cache engine `%s`' % engine)
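Illustrative configurations for the three engines handled above (the memcached endpoint and timeouts are example values only):

    cache = get_model_cache({})                                                   # noop (default)
    cache = get_model_cache({'DATA_MODEL_CACHE_CONFIG': {'engine': 'inmemory'}})  # in-process dict
    cache = get_model_cache({
        'DATA_MODEL_CACHE_CONFIG': {
            'engine': 'memcached',
            'endpoint': ('127.0.0.1', 11211),   # (host, port) pair passed to the memcached client
            'timeout': 1,
            'connect_timeout': 1,
        },
    })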
diff --git a/data/cache/cache_key.py b/data/cache/cache_key.py
new file mode 100644
index 000000000..93aad65be
--- /dev/null
+++ b/data/cache/cache_key.py
@@ -0,0 +1,27 @@
+from collections import namedtuple
+
+class CacheKey(namedtuple('CacheKey', ['key', 'expiration'])):
+ """ Defines a key into the data model cache. """
+ pass
+
+
+def for_repository_blob(namespace_name, repo_name, digest, version):
+ """ Returns a cache key for a blob in a repository. """
+ return CacheKey('repo_blob__%s_%s_%s_%s' % (namespace_name, repo_name, digest, version), '60s')
+
+
+def for_catalog_page(auth_context_key, start_id, limit):
+ """ Returns a cache key for a single page of a catalog lookup for an authed context. """
+ params = (auth_context_key or '(anon)', start_id or 0, limit or 0)
+ return CacheKey('catalog_page__%s_%s_%s' % params, '60s')
+
+
+def for_namespace_geo_restrictions(namespace_name):
+ """ Returns a cache key for the geo restrictions for a namespace. """
+ return CacheKey('geo_restrictions__%s' % (namespace_name), '240s')
+
+
+def for_active_repo_tags(repository_id, start_pagination_id, limit):
+ """ Returns a cache key for the active tags in a repository. """
+ return CacheKey('repo_active_tags__%s_%s_%s' % (repository_id, start_pagination_id, limit),
+ '120s')
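These helpers only build namespaced string keys with a textual expiration; for example (the namespace, repo name, and digest are illustrative):

    key = for_repository_blob('acme', 'web', 'sha256:abcd', 2)
    assert key.key == 'repo_blob__acme_web_sha256:abcd_2'
    assert key.expiration == '60s'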
diff --git a/data/cache/impl.py b/data/cache/impl.py
new file mode 100644
index 000000000..982e950e9
--- /dev/null
+++ b/data/cache/impl.py
@@ -0,0 +1,146 @@
+import logging
+import json
+
+from datetime import datetime
+
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+from pymemcache.client.base import Client
+
+from util.expiresdict import ExpiresDict
+from util.timedeltastring import convert_to_timedelta
+
+logger = logging.getLogger(__name__)
+
+
+def is_not_none(value):
+ return value is not None
+
+
+@add_metaclass(ABCMeta)
+class DataModelCache(object):
+ """ Defines an interface for cache storing and returning tuple data model objects. """
+
+ @abstractmethod
+ def retrieve(self, cache_key, loader, should_cache=is_not_none):
+ """ Checks the cache for the specified cache key and returns the value found (if any). If none
+ found, the loader is called to get a result and populate the cache.
+ """
+ pass
+
+
+class NoopDataModelCache(DataModelCache):
+ """ Implementation of the data model cache which does nothing. """
+
+ def retrieve(self, cache_key, loader, should_cache=is_not_none):
+ return loader()
+
+
+class InMemoryDataModelCache(DataModelCache):
+ """ Implementation of the data model cache backed by an in-memory dictionary. """
+ def __init__(self):
+ self.cache = ExpiresDict()
+
+ def retrieve(self, cache_key, loader, should_cache=is_not_none):
+ not_found = [None]
+ logger.debug('Checking cache for key %s', cache_key.key)
+ result = self.cache.get(cache_key.key, default_value=not_found)
+ if result != not_found:
+ logger.debug('Found result in cache for key %s: %s', cache_key.key, result)
+ return json.loads(result)
+
+ logger.debug('Found no result in cache for key %s; calling loader', cache_key.key)
+ result = loader()
+ logger.debug('Got loaded result for key %s: %s', cache_key.key, result)
+ if should_cache(result):
+ logger.debug('Caching loaded result for key %s with expiration %s: %s', cache_key.key,
+ cache_key.expiration, result)
+ expires = convert_to_timedelta(cache_key.expiration) + datetime.now()
+ self.cache.set(cache_key.key, json.dumps(result), expires=expires)
+ logger.debug('Cached loaded result for key %s with expiration %s: %s', cache_key.key,
+ cache_key.expiration, result)
+ else:
+ logger.debug('Not caching loaded result for key %s: %s', cache_key.key, result)
+
+ return result
+
+
+_DEFAULT_MEMCACHE_TIMEOUT = 1 # second
+_DEFAULT_MEMCACHE_CONNECT_TIMEOUT = 1 # second
+
+_STRING_TYPE = 1
+_JSON_TYPE = 2
+
+class MemcachedModelCache(DataModelCache):
+ """ Implementation of the data model cache backed by a memcached. """
+ def __init__(self, endpoint, timeout=_DEFAULT_MEMCACHE_TIMEOUT,
+ connect_timeout=_DEFAULT_MEMCACHE_CONNECT_TIMEOUT):
+ self.endpoint = endpoint
+ self.timeout = timeout
+ self.connect_timeout = connect_timeout
+ self.client = None
+
+ def _get_client(self):
+ client = self.client
+ if client is not None:
+ return client
+
+ try:
+ # Copied from the doc comment for Client.
+ def serialize_json(key, value):
+ if type(value) == str:
+ return value, _STRING_TYPE
+
+ return json.dumps(value), _JSON_TYPE
+
+ def deserialize_json(key, value, flags):
+ if flags == _STRING_TYPE:
+ return value
+
+ if flags == _JSON_TYPE:
+ return json.loads(value)
+
+ raise Exception("Unknown flags for value: {1}".format(flags))
+
+ self.client = Client(self.endpoint, no_delay=True, timeout=self.timeout,
+ connect_timeout=self.connect_timeout,
+ key_prefix='data_model_cache__',
+ serializer=serialize_json,
+ deserializer=deserialize_json,
+ ignore_exc=True)
+ return self.client
+ except:
+ logger.exception('Got exception when creating memcached client to %s', self.endpoint)
+ return None
+
+ def retrieve(self, cache_key, loader, should_cache=is_not_none):
+ not_found = [None]
+ client = self._get_client()
+ if client is not None:
+ logger.debug('Checking cache for key %s', cache_key.key)
+ try:
+ result = client.get(cache_key.key, default=not_found)
+ if result != not_found:
+ logger.debug('Found result in cache for key %s: %s', cache_key.key, result)
+ return result
+ except:
+ logger.exception('Got exception when trying to retrieve key %s', cache_key.key)
+
+ logger.debug('Found no result in cache for key %s; calling loader', cache_key.key)
+ result = loader()
+ logger.debug('Got loaded result for key %s: %s', cache_key.key, result)
+ if client is not None and should_cache(result):
+ try:
+ logger.debug('Caching loaded result for key %s with expiration %s: %s', cache_key.key,
+ cache_key.expiration, result)
+ expires = convert_to_timedelta(cache_key.expiration) if cache_key.expiration else None
+ client.set(cache_key.key, result, expire=int(expires.total_seconds()) if expires else None)
+ logger.debug('Cached loaded result for key %s with expiration %s: %s', cache_key.key,
+ cache_key.expiration, result)
+ except:
+ logger.exception('Got exception when trying to set key %s to %s', cache_key.key, result)
+ else:
+ logger.debug('Not caching loaded result for key %s: %s', cache_key.key, result)
+
+ return result
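A minimal sketch of the retrieve() contract above, assuming a reachable memcached at an illustrative endpoint; if the client cannot be created or the lookup fails, retrieve falls back to calling the loader directly:

    from data.cache.cache_key import CacheKey

    cache = MemcachedModelCache(('127.0.0.1', 11211))

    def load_blob():
        # Hypothetical loader; in real use this would query the database.
        return {'digest': 'sha256:abcd', 'size': 1234}

    key = CacheKey('repo_blob__example', '60s')
    blob = cache.retrieve(key, load_blob, should_cache=is_not_none)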
diff --git a/data/cache/test/test_cache.py b/data/cache/test/test_cache.py
new file mode 100644
index 000000000..bf0c4cccd
--- /dev/null
+++ b/data/cache/test/test_cache.py
@@ -0,0 +1,56 @@
+import pytest
+
+from mock import patch
+
+from data.cache import InMemoryDataModelCache, NoopDataModelCache, MemcachedModelCache
+from data.cache.cache_key import CacheKey
+
+class MockClient(object):
+ def __init__(self, server, **kwargs):
+ self.data = {}
+
+ def get(self, key, default=None):
+ return self.data.get(key, default)
+
+ def set(self, key, value, expire=None):
+ self.data[key] = value
+
+
+@pytest.mark.parametrize('cache_type', [
+ (NoopDataModelCache),
+ (InMemoryDataModelCache),
+])
+def test_caching(cache_type):
+ key = CacheKey('foo', '60m')
+ cache = cache_type()
+
+ # Perform two retrievals, and make sure both return.
+ assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
+ assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
+
+
+def test_memcache():
+ key = CacheKey('foo', '60m')
+ with patch('data.cache.impl.Client', MockClient):
+ cache = MemcachedModelCache(('127.0.0.1', '-1'))
+ assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
+ assert cache.retrieve(key, lambda: {'a': 1234}) == {'a': 1234}
+
+
+def test_memcache_should_cache():
+ key = CacheKey('foo', None)
+
+ def sc(value):
+ return value['a'] != 1234
+
+ with patch('data.cache.impl.Client', MockClient):
+ cache = MemcachedModelCache(('127.0.0.1', '-1'))
+ assert cache.retrieve(key, lambda: {'a': 1234}, should_cache=sc) == {'a': 1234}
+
+ # Ensure not cached since it was `1234`.
+ assert cache._get_client().get(key.key) is None
+
+ # Ensure cached.
+ assert cache.retrieve(key, lambda: {'a': 2345}, should_cache=sc) == {'a': 2345}
+ assert cache._get_client().get(key.key) is not None
+ assert cache.retrieve(key, lambda: {'a': 2345}, should_cache=sc) == {'a': 2345}
diff --git a/data/database.py b/data/database.py
new file mode 100644
index 000000000..a3d038e4c
--- /dev/null
+++ b/data/database.py
@@ -0,0 +1,1793 @@
+# pylint: disable=old-style-class,no-init
+
+import inspect
+import logging
+import string
+import sys
+import time
+import uuid
+import os
+
+from contextlib import contextmanager
+from collections import defaultdict, namedtuple
+from datetime import datetime
+from random import SystemRandom
+
+import toposort
+
+from enum import IntEnum, Enum, unique
+from peewee import *
+from peewee import __exception_wrapper__, Function
+from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase, PooledSqliteDatabase
+
+from sqlalchemy.engine.url import make_url
+
+import resumablehashlib
+from cachetools.func import lru_cache
+
+from active_migration import ERTMigrationFlags, ActiveDataMigration
+from data.fields import (ResumableSHA256Field, ResumableSHA1Field, JSONField, Base64BinaryField,
+ FullIndexedTextField, FullIndexedCharField, EnumField as ClientEnumField,
+ EncryptedTextField, EncryptedCharField, CredentialField)
+from data.text import match_mysql, match_like
+from data.encryption import FieldEncrypter
+from data.readreplica import ReadReplicaSupportedModel, ReadOnlyConfig
+from util.names import urn_generator
+from util.validation import validate_postgres_precondition
+
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_DB_CONNECT_TIMEOUT = 10 # seconds
+
+
+# IMAGE_NOT_SCANNED_ENGINE_VERSION is the version found in security_indexed_engine when the
+# image has not yet been scanned.
+IMAGE_NOT_SCANNED_ENGINE_VERSION = -1
+
+schemedriver = namedtuple('schemedriver', ['driver', 'pooled_driver'])
+
+_SCHEME_DRIVERS = {
+ 'mysql': schemedriver(MySQLDatabase, PooledMySQLDatabase),
+ 'mysql+pymysql': schemedriver(MySQLDatabase, PooledMySQLDatabase),
+ 'sqlite': schemedriver(SqliteDatabase, PooledSqliteDatabase),
+ 'postgresql': schemedriver(PostgresqlDatabase, PooledPostgresqlDatabase),
+ 'postgresql+psycopg2': schemedriver(PostgresqlDatabase, PooledPostgresqlDatabase),
+}
+
+
+SCHEME_MATCH_FUNCTION = {
+ 'mysql': match_mysql,
+ 'mysql+pymysql': match_mysql,
+ 'sqlite': match_like,
+ 'postgresql': match_like,
+ 'postgresql+psycopg2': match_like,
+}
+
+
+SCHEME_RANDOM_FUNCTION = {
+ 'mysql': fn.Rand,
+ 'mysql+pymysql': fn.Rand,
+ 'sqlite': fn.Random,
+ 'postgresql': fn.Random,
+ 'postgresql+psycopg2': fn.Random,
+}
+
+
+PRECONDITION_VALIDATION = {
+ 'postgresql': validate_postgres_precondition,
+ 'postgresql+psycopg2': validate_postgres_precondition,
+}
+
+
+_EXTRA_ARGS = {
+ 'mysql': dict(charset='utf8mb4'),
+ 'mysql+pymysql': dict(charset='utf8mb4'),
+}
+
+
+def pipes_concat(arg1, arg2, *extra_args):
+ """ Concat function for sqlite, since it doesn't support fn.Concat.
+ Concatenates clauses with || characters.
+ """
+ reduced = arg1.concat(arg2)
+ for arg in extra_args:
+ reduced = reduced.concat(arg)
+ return reduced
+
+
+def function_concat(arg1, arg2, *extra_args):
+ """ Default implementation of concat which uses fn.Concat(). Used by all
+ database engines except sqlite.
+ """
+ return fn.Concat(arg1, arg2, *extra_args)
+
+
+SCHEME_SPECIALIZED_CONCAT = {
+ 'sqlite': pipes_concat,
+}
+
+
+def real_for_update(query):
+ return query.for_update()
+
+
+def null_for_update(query):
+ return query
+
+
+def delete_instance_filtered(instance, model_class, delete_nullable, skip_transitive_deletes):
+ """ Deletes the DB instance recursively, skipping any models in the skip_transitive_deletes set.
+
+ Callers *must* ensure that any models listed in skip_transitive_deletes are capable
+ of being directly deleted when the instance is deleted (with automatic sorting handling
+ dependency order).
+
+ For example, the RepositoryTag and Image tables for Repository will always refer to the
+ *same* repository when RepositoryTag references Image, so we can safely skip
+ transitive deletion for the RepositoryTag table.
+ """
+ # We need to sort the ops so that models get cleaned in order of their dependencies
+ ops = reversed(list(instance.dependencies(delete_nullable)))
+ filtered_ops = []
+
+ dependencies = defaultdict(set)
+
+ for query, fk in ops:
+ # We only want to skip transitive deletes, which are done using subqueries in the form of
+ # DELETE FROM <table> WHERE <fk> IN <subquery>. If an op is not using a subquery, we allow it to be
+ # applied directly.
+ if fk.model not in skip_transitive_deletes or query.op.lower() != 'in':
+ filtered_ops.append((query, fk))
+
+ if query.op.lower() == 'in':
+ dependencies[fk.model.__name__].add(query.rhs.model.__name__)
+ elif query.op == '=':
+ dependencies[fk.model.__name__].add(model_class.__name__)
+ else:
+ raise RuntimeError('Unknown operator in recursive repository delete query')
+
+ sorted_models = list(reversed(toposort.toposort_flatten(dependencies)))
+ def sorted_model_key(query_fk_tuple):
+ cmp_query, cmp_fk = query_fk_tuple
+ if cmp_query.op.lower() == 'in':
+ return -1
+ return sorted_models.index(cmp_fk.model.__name__)
+ filtered_ops.sort(key=sorted_model_key)
+
+ with db_transaction():
+ for query, fk in filtered_ops:
+ _model = fk.model
+ if fk.null and not delete_nullable:
+ _model.update(**{fk.name: None}).where(query).execute()
+ else:
+ _model.delete().where(query).execute()
+
+ return instance.delete().where(instance._pk_expr()).execute()
+
+
+SCHEME_SPECIALIZED_FOR_UPDATE = {
+ 'sqlite': null_for_update,
+}
+
+
+class CallableProxy(Proxy):
+ def __call__(self, *args, **kwargs):
+ if self.obj is None:
+ raise AttributeError('Cannot use uninitialized Proxy.')
+ return self.obj(*args, **kwargs)
+
+
+class RetryOperationalError(object):
+
+ def execute_sql(self, sql, params=None, commit=True):
+ try:
+ cursor = super(RetryOperationalError, self).execute_sql(sql, params, commit)
+ except OperationalError:
+ if not self.is_closed():
+ self.close()
+
+ with __exception_wrapper__:
+ cursor = self.cursor()
+ cursor.execute(sql, params or ())
+ if commit and not self.in_transaction():
+ self.commit()
+
+ return cursor
+
+
+class CloseForLongOperation(object):
+ """ Helper object which disconnects the database then reconnects after the nested operation
+ completes.
+ """
+
+ def __init__(self, config_object):
+ self.config_object = config_object
+
+ def __enter__(self):
+ if self.config_object.get('TESTING') is True:
+ return
+
+ close_db_filter(None)
+
+ def __exit__(self, typ, value, traceback):
+ # Note: Nothing to do. The next SQL call will reconnect automatically.
+ pass
+
+
+class UseThenDisconnect(object):
+ """ Helper object for conducting work with a database and then tearing it down. """
+
+ def __init__(self, config_object):
+ self.config_object = config_object
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, typ, value, traceback):
+ if self.config_object.get('TESTING') is True:
+ return
+
+ close_db_filter(None)
+
+
+class TupleSelector(object):
+ """ Helper class for selecting tuples from a peewee query and easily accessing
+ them as if they were objects.
+ """
+ class _TupleWrapper(object):
+ def __init__(self, data, fields):
+ self._data = data
+ self._fields = fields
+
+ def get(self, field):
+ return self._data[self._fields.index(TupleSelector.tuple_reference_key(field))]
+
+ @classmethod
+ def tuple_reference_key(cls, field):
+ """ Returns a string key for referencing a field in a TupleSelector. """
+ if isinstance(field, Function):
+ return field.name + ','.join([cls.tuple_reference_key(arg) for arg in field.arguments])
+
+ if isinstance(field, Field):
+ return field.name + ':' + field.model.__name__
+
+ raise Exception('Unknown field type %s in TupleSelector' % field._node_type)
+
+ def __init__(self, query, fields):
+ self._query = query.select(*fields).tuples()
+ self._fields = [TupleSelector.tuple_reference_key(field) for field in fields]
+
+ def __iter__(self):
+ return self._build_iterator()
+
+ def _build_iterator(self):
+ for tuple_data in self._query:
+ yield TupleSelector._TupleWrapper(tuple_data, self._fields)
+
+
+db = Proxy()
+read_only_config = Proxy()
+db_random_func = CallableProxy()
+db_match_func = CallableProxy()
+db_for_update = CallableProxy()
+db_transaction = CallableProxy()
+db_concat_func = CallableProxy()
+db_encrypter = Proxy()
+ensure_under_transaction = CallableProxy()
+
+
+def validate_database_url(url, db_kwargs, connect_timeout=5):
+ """ Validates that we can connect to the given database URL, with the given kwargs. Raises
+ an exception if the validation fails. """
+ db_kwargs = db_kwargs.copy()
+
+ try:
+ driver = _db_from_url(url, db_kwargs, connect_timeout=connect_timeout, allow_retry=False,
+ allow_pooling=False)
+ driver.connect()
+ finally:
+ try:
+ driver.close()
+ except:
+ pass
+
+
+def validate_database_precondition(url, db_kwargs, connect_timeout=5):
+ """ Validates that we can connect to the given database URL and the database meets our
+ precondition. Raises an exception if the validation fails. """
+ db_kwargs = db_kwargs.copy()
+ try:
+ driver = _db_from_url(url, db_kwargs, connect_timeout=connect_timeout, allow_retry=False,
+ allow_pooling=False)
+ driver.connect()
+ pre_condition_check = PRECONDITION_VALIDATION.get(make_url(url).drivername)
+ if pre_condition_check:
+ pre_condition_check(driver)
+
+ finally:
+ try:
+ driver.close()
+ except:
+ pass
+
+
+def _wrap_for_retry(driver):
+ return type('Retrying' + driver.__name__, (RetryOperationalError, driver), {})
+
+
+def _db_from_url(url, db_kwargs, connect_timeout=DEFAULT_DB_CONNECT_TIMEOUT,
+ allow_pooling=True, allow_retry=True):
+ parsed_url = make_url(url)
+
+ if parsed_url.host:
+ db_kwargs['host'] = parsed_url.host
+ if parsed_url.port:
+ db_kwargs['port'] = parsed_url.port
+ if parsed_url.username:
+ db_kwargs['user'] = parsed_url.username
+ if parsed_url.password:
+ db_kwargs['password'] = parsed_url.password
+
+ # Remove threadlocals. It used to be required.
+ db_kwargs.pop('threadlocals', None)
+
+ # Note: sqlite does not support connect_timeout.
+ if parsed_url.drivername != 'sqlite':
+ db_kwargs['connect_timeout'] = db_kwargs.get('connect_timeout', connect_timeout)
+
+ drivers = _SCHEME_DRIVERS[parsed_url.drivername]
+ driver = drivers.driver
+ if allow_pooling and os.getenv('DB_CONNECTION_POOLING', 'false').lower() == 'true':
+ driver = drivers.pooled_driver
+ db_kwargs['stale_timeout'] = db_kwargs.get('stale_timeout', None)
+ db_kwargs['max_connections'] = db_kwargs.get('max_connections', None)
+ logger.info('Connection pooling enabled for %s; stale timeout: %s; max connection count: %s',
+ parsed_url.drivername, db_kwargs['stale_timeout'], db_kwargs['max_connections'])
+ else:
+ logger.info('Connection pooling disabled for %s', parsed_url.drivername)
+ db_kwargs.pop('stale_timeout', None)
+ db_kwargs.pop('max_connections', None)
+
+ for key, value in _EXTRA_ARGS.get(parsed_url.drivername, {}).iteritems():
+ if key not in db_kwargs:
+ db_kwargs[key] = value
+
+ if allow_retry:
+ driver = _wrap_for_retry(driver)
+
+ created = driver(parsed_url.database, **db_kwargs)
+
+ # Revert the behavior "fixed" in:
+ # https://github.com/coleifer/peewee/commit/36bd887ac07647c60dfebe610b34efabec675706
+ if parsed_url.drivername.find("mysql") >= 0:
+ created.compound_select_parentheses = 0
+ return created
+
+
+def configure(config_object, testing=False):
+ logger.debug('Configuring database')
+ db_kwargs = dict(config_object['DB_CONNECTION_ARGS'])
+ write_db_uri = config_object['DB_URI']
+ db.initialize(_db_from_url(write_db_uri, db_kwargs))
+
+ parsed_write_uri = make_url(write_db_uri)
+ db_random_func.initialize(SCHEME_RANDOM_FUNCTION[parsed_write_uri.drivername])
+ db_match_func.initialize(SCHEME_MATCH_FUNCTION[parsed_write_uri.drivername])
+ db_for_update.initialize(SCHEME_SPECIALIZED_FOR_UPDATE.get(parsed_write_uri.drivername,
+ real_for_update))
+ db_concat_func.initialize(SCHEME_SPECIALIZED_CONCAT.get(parsed_write_uri.drivername,
+ function_concat))
+ db_encrypter.initialize(FieldEncrypter(config_object['DATABASE_SECRET_KEY']))
+
+ read_replicas = config_object.get('DB_READ_REPLICAS', None)
+ is_read_only = config_object.get('REGISTRY_STATE', 'normal') == 'readonly'
+
+ read_replica_dbs = []
+ if read_replicas:
+ read_replica_dbs = [_db_from_url(config['DB_URI'], db_kwargs) for config in read_replicas]
+
+ read_only_config.initialize(ReadOnlyConfig(is_read_only, read_replica_dbs))
+
+ def _db_transaction():
+ return config_object['DB_TRANSACTION_FACTORY'](db)
+
+ @contextmanager
+ def _ensure_under_transaction():
+ if not testing and not config_object['TESTING']:
+ if db.transaction_depth() == 0:
+ raise Exception('Expected to be under a transaction')
+
+ yield
+
+ db_transaction.initialize(_db_transaction)
+ ensure_under_transaction.initialize(_ensure_under_transaction)
+
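A minimal sketch of the configuration dictionary configure() expects; the keys below are the ones read above, and the values (URI, secret key) are illustrative only:

    config = {
        'DB_URI': 'sqlite:///quay_dev.db',                     # illustrative
        'DB_CONNECTION_ARGS': {},
        'DATABASE_SECRET_KEY': 'illustrative-dev-secret',
        'DB_TRANSACTION_FACTORY': lambda db: db.transaction(),
        'TESTING': True,
    }
    configure(config, testing=True)
    # After this, the module-level proxies (db, db_transaction, db_random_func, ...) are bound.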
+def random_string_generator(length=16):
+ def random_string():
+ random = SystemRandom()
+ return ''.join([random.choice(string.ascii_uppercase + string.digits)
+ for _ in range(length)])
+ return random_string
+
+
+def uuid_generator():
+ return str(uuid.uuid4())
+
+
+get_epoch_timestamp = lambda: int(time.time())
+get_epoch_timestamp_ms = lambda: int(time.time() * 1000)
+
+
+def close_db_filter(_):
+ if db.obj is not None and not db.is_closed():
+ logger.debug('Disconnecting from database.')
+ db.close()
+
+ if read_only_config.obj is not None:
+ for read_replica in read_only_config.obj.read_replicas:
+ if not read_replica.is_closed():
+ logger.debug('Disconnecting from read replica.')
+ read_replica.close()
+
+
+class QuayUserField(ForeignKeyField):
+ def __init__(self, allows_robots=False, robot_null_delete=False, *args, **kwargs):
+ self.allows_robots = allows_robots
+ self.robot_null_delete = robot_null_delete
+ if 'model' not in kwargs:
+ kwargs['model'] = User
+ super(QuayUserField, self).__init__(*args, **kwargs)
+
+
+@lru_cache(maxsize=16)
+def _get_enum_field_values(enum_field):
+ values = []
+ for row in enum_field.rel_model.select():
+ key = getattr(row, enum_field.enum_key_field)
+ value = getattr(row, 'id')
+ values.append((key, value))
+ return Enum(enum_field.rel_model.__name__, values)
+
+
+class EnumField(ForeignKeyField):
+ """ Create a cached python Enum from an EnumTable """
+ def __init__(self, model, enum_key_field='name', *args, **kwargs):
+ """
+ model is the EnumTable model-class (see ForeignKeyField)
+ enum_key_field is the field from the EnumTable to use as the enum name
+ """
+ self.enum_key_field = enum_key_field
+ super(EnumField, self).__init__(model, *args, **kwargs)
+
+ @property
+ def enum(self):
+ """ Returns a python enun.Enum generated from the associated EnumTable """
+ return _get_enum_field_values(self)
+
+ def get_id(self, name):
+ """ Returns the ForeignKeyId from the name field
+ Example:
+ >>> Repository.repo_kind.get_id("application")
+ 2
+ """
+ try:
+ return self.enum[name].value
+ except KeyError:
+ raise self.rel_model.DoesNotExist
+
+ def get_name(self, value):
+ """ Returns the name value from the ForeignKeyId
+ Example:
+ >>> Repository.repo_kind.get_name(2)
+ "application"
+ """
+ try:
+ return self.enum(value).name
+ except ValueError:
+ raise self.rel_model.DoesNotExist
+
+
+def deprecated_field(field, flag):
+ """ Marks a field as deprecated and removes it from the peewee model if the
+ flag is not set. A flag is defined in the active_migration module and will
+ be associated with one or more migration phases.
+ """
+ if ActiveDataMigration.has_flag(flag):
+ return field
+
+ return None
+
+
+class BaseModel(ReadReplicaSupportedModel):
+ class Meta:
+ database = db
+ encrypter = db_encrypter
+ read_only_config = read_only_config
+
+ def __getattribute__(self, name):
+ """ Adds _id accessors so that foreign key field IDs can be looked up without making
+ a database roundtrip.
+ """
+ if name.endswith('_id'):
+ field_name = name[0:len(name) - 3]
+ if field_name in self._meta.fields:
+ return self.__data__.get(field_name)
+
+ return super(BaseModel, self).__getattribute__(name)
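Concretely, for the models defined below (the repository name here is illustrative), the synthesized _id accessor reads the raw foreign-key value out of __data__ instead of loading the related row:

    repo = Repository.get(Repository.name == 'web')   # 'web' is an illustrative name
    namespace_id = repo.namespace_user_id             # plain id, no extra SELECT
    namespace = repo.namespace_user                   # lazily fetches the related User row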
+
+
+class User(BaseModel):
+ uuid = CharField(default=uuid_generator, max_length=36, null=True, index=True)
+ username = CharField(unique=True, index=True)
+ password_hash = CharField(null=True)
+ email = CharField(unique=True, index=True,
+ default=random_string_generator(length=64))
+ verified = BooleanField(default=False)
+ stripe_id = CharField(index=True, null=True)
+ organization = BooleanField(default=False, index=True)
+ robot = BooleanField(default=False, index=True)
+ invoice_email = BooleanField(default=False)
+ invalid_login_attempts = IntegerField(default=0)
+ last_invalid_login = DateTimeField(default=datetime.utcnow)
+ removed_tag_expiration_s = IntegerField(default=1209600) # Two weeks
+ enabled = BooleanField(default=True)
+ invoice_email_address = CharField(null=True, index=True)
+
+ given_name = CharField(null=True)
+ family_name = CharField(null=True)
+ company = CharField(null=True)
+ location = CharField(null=True)
+
+ maximum_queued_builds_count = IntegerField(null=True)
+ creation_date = DateTimeField(default=datetime.utcnow, null=True)
+ last_accessed = DateTimeField(null=True, index=True)
+
+ def delete_instance(self, recursive=False, delete_nullable=False):
+ # If we are deleting a robot account, only execute the subset of queries necessary.
+ if self.robot:
+ # For all the model dependencies, only delete those that allow robots.
+ for query, fk in reversed(list(self.dependencies(search_nullable=True))):
+ if isinstance(fk, QuayUserField) and fk.allows_robots:
+ _model = fk.model
+
+ if fk.robot_null_delete:
+ _model.update(**{fk.name: None}).where(query).execute()
+ else:
+ _model.delete().where(query).execute()
+
+ # Delete the instance itself.
+ super(User, self).delete_instance(recursive=False, delete_nullable=False)
+ else:
+ if not recursive:
+ raise RuntimeError('Non-recursive delete on user.')
+
+ # These models don't need to use transitive deletes, because the referenced objects
+ # are cleaned up directly in the model.
+ skip_transitive_deletes = {Image, Repository, Team, RepositoryBuild, ServiceKeyApproval,
+ RepositoryBuildTrigger, ServiceKey, RepositoryPermission,
+ TeamMemberInvite, Star, RepositoryAuthorizedEmail, TeamMember,
+ RepositoryTag, PermissionPrototype, DerivedStorageForImage,
+ TagManifest, AccessToken, OAuthAccessToken, BlobUpload,
+ RepositoryNotification, OAuthAuthorizationCode,
+ RepositoryActionCount, TagManifestLabel,
+ TeamSync, RepositorySearchScore,
+ DeletedNamespace, RepoMirrorRule,
+ NamespaceGeoRestriction} | appr_classes | v22_classes | transition_classes
+ delete_instance_filtered(self, User, delete_nullable, skip_transitive_deletes)
+
+
+Namespace = User.alias()
+
+
+class RobotAccountMetadata(BaseModel):
+ robot_account = QuayUserField(index=True, allows_robots=True, unique=True)
+ description = CharField()
+ unstructured_json = JSONField()
+
+
+class RobotAccountToken(BaseModel):
+ robot_account = QuayUserField(index=True, allows_robots=True, unique=True)
+ token = EncryptedCharField(default_token_length=64)
+ fully_migrated = BooleanField(default=False)
+
+
+class DeletedNamespace(BaseModel):
+ namespace = QuayUserField(index=True, allows_robots=False, unique=True)
+ marked = DateTimeField(default=datetime.now)
+ original_username = CharField(index=True)
+ original_email = CharField(index=True)
+ queue_id = CharField(null=True, index=True)
+
+
+class NamespaceGeoRestriction(BaseModel):
+ namespace = QuayUserField(index=True, allows_robots=False)
+ added = DateTimeField(default=datetime.utcnow)
+ description = CharField()
+ unstructured_json = JSONField()
+ restricted_region_iso_code = CharField(index=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('namespace', 'restricted_region_iso_code'), True),
+ )
+
+
+class UserPromptTypes(object):
+ CONFIRM_USERNAME = 'confirm_username'
+ ENTER_NAME = 'enter_name'
+ ENTER_COMPANY = 'enter_company'
+
+
+class UserPromptKind(BaseModel):
+ name = CharField(index=True)
+
+
+class UserPrompt(BaseModel):
+ user = QuayUserField(allows_robots=False, index=True)
+ kind = ForeignKeyField(UserPromptKind)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('user', 'kind'), True),
+ )
+
+
+class TeamRole(BaseModel):
+ name = CharField(index=True)
+
+
+class Team(BaseModel):
+ name = CharField(index=True)
+ organization = QuayUserField(index=True)
+ role = EnumField(TeamRole)
+ description = TextField(default='')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # A team name must be unique within an organization
+ (('name', 'organization'), True),
+ )
+
+
+class TeamMember(BaseModel):
+ user = QuayUserField(allows_robots=True, index=True)
+ team = ForeignKeyField(Team)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # A user may belong to a team only once
+ (('user', 'team'), True),
+ )
+
+
+class TeamMemberInvite(BaseModel):
+ # Note: Either user OR email will be filled in, but not both.
+ user = QuayUserField(index=True, null=True)
+ email = CharField(null=True)
+ team = ForeignKeyField(Team)
+ inviter = ForeignKeyField(User, backref='inviter')
+ invite_token = CharField(default=urn_generator(['teaminvite']))
+
+
+class LoginService(BaseModel):
+ name = CharField(unique=True, index=True)
+
+
+class TeamSync(BaseModel):
+ team = ForeignKeyField(Team, unique=True)
+
+ transaction_id = CharField()
+ last_updated = DateTimeField(null=True, index=True)
+ service = ForeignKeyField(LoginService)
+ config = JSONField()
+
+
+class FederatedLogin(BaseModel):
+ user = QuayUserField(allows_robots=True, index=True)
+ service = ForeignKeyField(LoginService)
+ service_ident = CharField()
+ metadata_json = TextField(default='{}')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # create a unique index on service and the local service id
+ (('service', 'service_ident'), True),
+
+ # a user may only have one federated login per service
+ (('service', 'user'), True),
+ )
+
+
+class Visibility(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class RepositoryKind(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+@unique
+class RepositoryState(IntEnum):
+ """
+ Possible states of a repository.
+ NORMAL: Regular repo where all actions are possible
+ READ_ONLY: Only read actions, such as pull, are allowed regardless of specific user permissions
+ MIRROR: Equivalent to READ_ONLY except that mirror robot has write permission
+ """
+ NORMAL = 0
+ READ_ONLY = 1
+ MIRROR = 2
+
+
+class Repository(BaseModel):
+ namespace_user = QuayUserField(null=True)
+ name = FullIndexedCharField(match_function=db_match_func)
+ visibility = EnumField(Visibility)
+ description = FullIndexedTextField(match_function=db_match_func, null=True)
+ badge_token = CharField(default=uuid_generator)
+ kind = EnumField(RepositoryKind)
+ trust_enabled = BooleanField(default=False)
+ state = ClientEnumField(RepositoryState, default=RepositoryState.NORMAL)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # create a unique index on namespace and name
+ (('namespace_user', 'name'), True),
+ )
+
+ def delete_instance(self, recursive=False, delete_nullable=False):
+ if not recursive:
+ raise RuntimeError('Non-recursive delete on repository.')
+
+ # These models don't need to use transitive deletes, because the referenced objects
+ # are cleaned up directly
+ skip_transitive_deletes = ({RepositoryTag, RepositoryBuild, RepositoryBuildTrigger, BlobUpload,
+ Image, TagManifest, TagManifestLabel, Label, DerivedStorageForImage,
+ RepositorySearchScore, RepoMirrorConfig, RepoMirrorRule}
+ | appr_classes | v22_classes | transition_classes)
+
+ delete_instance_filtered(self, Repository, delete_nullable, skip_transitive_deletes)
+
+
+class RepositorySearchScore(BaseModel):
+ repository = ForeignKeyField(Repository, unique=True)
+ score = BigIntegerField(index=True, default=0)
+ last_updated = DateTimeField(null=True)
+
+
+class Star(BaseModel):
+ user = ForeignKeyField(User)
+ repository = ForeignKeyField(Repository)
+ created = DateTimeField(default=datetime.now)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # create a unique index on user and repository
+ (('user', 'repository'), True),
+ )
+
+
+class Role(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class RepositoryPermission(BaseModel):
+ team = ForeignKeyField(Team, null=True)
+ user = QuayUserField(allows_robots=True, null=True)
+ repository = ForeignKeyField(Repository)
+ role = ForeignKeyField(Role)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('team', 'repository'), True),
+ (('user', 'repository'), True),
+ )
+
+
+class PermissionPrototype(BaseModel):
+ org = QuayUserField(index=True, backref='orgpermissionproto')
+ uuid = CharField(default=uuid_generator, index=True)
+ activating_user = QuayUserField(allows_robots=True, index=True, null=True,
+ backref='userpermissionproto')
+ delegate_user = QuayUserField(allows_robots=True, backref='receivingpermission',
+ null=True)
+ delegate_team = ForeignKeyField(Team, backref='receivingpermission',
+ null=True)
+ role = ForeignKeyField(Role)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('org', 'activating_user'), False),
+ )
+
+
+class AccessTokenKind(BaseModel):
+ name = CharField(unique=True, index=True)
+
+
+class AccessToken(BaseModel):
+ friendly_name = CharField(null=True)
+
+ # TODO(remove-unenc): This field is deprecated and should be removed soon.
+ code = deprecated_field(
+ CharField(default=random_string_generator(length=64), unique=True, index=True, null=True),
+ ERTMigrationFlags.WRITE_OLD_FIELDS)
+
+ token_name = CharField(default=random_string_generator(length=32), unique=True, index=True)
+ token_code = EncryptedCharField(default_token_length=32)
+
+ repository = ForeignKeyField(Repository)
+ created = DateTimeField(default=datetime.now)
+ role = ForeignKeyField(Role)
+ temporary = BooleanField(default=True)
+ kind = ForeignKeyField(AccessTokenKind, null=True)
+
+ def get_code(self):
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ return self.code
+ else:
+ return self.token_name + self.token_code.decrypt()
+
+
+class BuildTriggerService(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class DisableReason(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class RepositoryBuildTrigger(BaseModel):
+ uuid = CharField(default=uuid_generator, index=True)
+ service = ForeignKeyField(BuildTriggerService)
+ repository = ForeignKeyField(Repository)
+ connected_user = QuayUserField()
+
+ # TODO(remove-unenc): These fields are deprecated and should be removed soon.
+ auth_token = deprecated_field(CharField(null=True), ERTMigrationFlags.WRITE_OLD_FIELDS)
+ private_key = deprecated_field(TextField(null=True), ERTMigrationFlags.WRITE_OLD_FIELDS)
+
+ secure_auth_token = EncryptedCharField(null=True)
+ secure_private_key = EncryptedTextField(null=True)
+ fully_migrated = BooleanField(default=False)
+
+ config = TextField(default='{}')
+ write_token = ForeignKeyField(AccessToken, null=True)
+ pull_robot = QuayUserField(allows_robots=True, null=True, backref='triggerpullrobot',
+ robot_null_delete=True)
+
+ enabled = BooleanField(default=True)
+ disabled_reason = EnumField(DisableReason, null=True)
+ disabled_datetime = DateTimeField(default=datetime.utcnow, null=True, index=True)
+ successive_failure_count = IntegerField(default=0)
+ successive_internal_error_count = IntegerField(default=0)
+
+
+class EmailConfirmation(BaseModel):
+ code = CharField(default=random_string_generator(), unique=True, index=True)
+ verification_code = CredentialField(null=True)
+ user = QuayUserField()
+ pw_reset = BooleanField(default=False)
+ new_email = CharField(null=True)
+ email_confirm = BooleanField(default=False)
+ created = DateTimeField(default=datetime.now)
+
+
+class ImageStorage(BaseModel):
+ uuid = CharField(default=uuid_generator, index=True, unique=True)
+ image_size = BigIntegerField(null=True)
+ uncompressed_size = BigIntegerField(null=True)
+ uploading = BooleanField(default=True, null=True)
+ cas_path = BooleanField(default=True)
+ content_checksum = CharField(null=True, index=True)
+
+
+class ImageStorageTransformation(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class ImageStorageSignatureKind(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class ImageStorageSignature(BaseModel):
+ storage = ForeignKeyField(ImageStorage)
+ kind = ForeignKeyField(ImageStorageSignatureKind)
+ signature = TextField(null=True)
+ uploading = BooleanField(default=True, null=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('kind', 'storage'), True),
+ )
+
+
+class ImageStorageLocation(BaseModel):
+ name = CharField(unique=True, index=True)
+
+
+class ImageStoragePlacement(BaseModel):
+ storage = ForeignKeyField(ImageStorage)
+ location = ForeignKeyField(ImageStorageLocation)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # An image can only be placed in the same place once
+ (('storage', 'location'), True),
+ )
+
+
+class UserRegion(BaseModel):
+ user = QuayUserField(index=True, allows_robots=False)
+ location = ForeignKeyField(ImageStorageLocation)
+
+ indexes = (
+ (('user', 'location'), True),
+ )
+
+
+class Image(BaseModel):
+ # This class is intentionally denormalized. Even though images are supposed
+ # to be globally unique we can't treat them as such for permissions and
+ # security reasons. So rather than Repository <-> Image being many to many
+ # each image now belongs to exactly one repository.
+ docker_image_id = CharField(index=True)
+ repository = ForeignKeyField(Repository)
+
+ # '/' separated list of ancestor ids, e.g. /1/2/6/7/10/
+ ancestors = CharField(index=True, default='/', max_length=64535, null=True)
+
+ storage = ForeignKeyField(ImageStorage, null=True)
+
+ created = DateTimeField(null=True)
+ comment = TextField(null=True)
+ command = TextField(null=True)
+ aggregate_size = BigIntegerField(null=True)
+ v1_json_metadata = TextField(null=True)
+ v1_checksum = CharField(null=True)
+
+ security_indexed = BooleanField(default=False, index=True)
+ security_indexed_engine = IntegerField(default=IMAGE_NOT_SCANNED_ENGINE_VERSION, index=True)
+
+ # We use a proxy here instead of 'self' in order to disable the foreign key constraint
+ parent = DeferredForeignKey('Image', null=True, backref='children')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # we don't really want duplicates
+ (('repository', 'docker_image_id'), True),
+
+ (('security_indexed_engine', 'security_indexed'), False),
+ )
+
+ def ancestor_id_list(self):
+ """ Returns an integer list of ancestor ids, ordered chronologically from
+ root to direct parent.
+ """
+ return map(int, self.ancestors.split('/')[1:-1])
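A worked example of the encoding described in the ancestors field comment above:

    image = Image(ancestors='/1/2/6/7/10/')
    assert list(image.ancestor_id_list()) == [1, 2, 6, 7, 10]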
+
+
+class DerivedStorageForImage(BaseModel):
+ source_image = ForeignKeyField(Image)
+ derivative = ForeignKeyField(ImageStorage)
+ transformation = ForeignKeyField(ImageStorageTransformation)
+ uniqueness_hash = CharField(null=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('source_image', 'transformation', 'uniqueness_hash'), True),
+ )
+
+
+class RepositoryTag(BaseModel):
+ name = CharField()
+ image = ForeignKeyField(Image)
+ repository = ForeignKeyField(Repository)
+ lifetime_start_ts = IntegerField(default=get_epoch_timestamp)
+ lifetime_end_ts = IntegerField(null=True, index=True)
+ hidden = BooleanField(default=False)
+ reversion = BooleanField(default=False)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('repository', 'name'), False),
+ (('repository', 'lifetime_start_ts'), False),
+ (('repository', 'lifetime_end_ts'), False),
+
+ # This unique index prevents deadlocks when concurrently moving and deleting tags
+ (('repository', 'name', 'lifetime_end_ts'), True),
+ )
+
+
+class BUILD_PHASE(object):
+ """ Build phases enum """
+ ERROR = 'error'
+ INTERNAL_ERROR = 'internalerror'
+ BUILD_SCHEDULED = 'build-scheduled'
+ UNPACKING = 'unpacking'
+ PULLING = 'pulling'
+ BUILDING = 'building'
+ PUSHING = 'pushing'
+ WAITING = 'waiting'
+ COMPLETE = 'complete'
+ CANCELLED = 'cancelled'
+
+ @classmethod
+ def is_terminal_phase(cls, phase):
+ return (phase == cls.COMPLETE or
+ phase == cls.ERROR or
+ phase == cls.INTERNAL_ERROR or
+ phase == cls.CANCELLED)
+
+
+class TRIGGER_DISABLE_REASON(object):
+ """ Build trigger disable reason enum """
+ BUILD_FALURES = 'successive_build_failures'
+ INTERNAL_ERRORS = 'successive_build_internal_errors'
+ USER_TOGGLED = 'user_toggled'
+
+
+class QueueItem(BaseModel):
+ queue_name = CharField(index=True, max_length=1024)
+ body = TextField()
+ available_after = DateTimeField(default=datetime.utcnow)
+ available = BooleanField(default=True)
+ processing_expires = DateTimeField(null=True)
+ retries_remaining = IntegerField(default=5)
+ state_id = CharField(default=uuid_generator, index=True, unique=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ only_save_dirty = True
+ indexes = (
+ (('processing_expires', 'available'), False),
+ (('processing_expires', 'queue_name', 'available'), False),
+ (('processing_expires', 'available_after', 'retries_remaining', 'available'), False),
+ (('processing_expires', 'available_after', 'queue_name', 'retries_remaining', 'available'), False),
+ )
+
+ def save(self, *args, **kwargs):
+ # Always change the queue item's state ID when we update it.
+ self.state_id = str(uuid.uuid4())
+ super(QueueItem, self).save(*args, **kwargs)
+
+
+class RepositoryBuild(BaseModel):
+ uuid = CharField(default=uuid_generator, index=True)
+ repository = ForeignKeyField(Repository)
+ access_token = ForeignKeyField(AccessToken)
+ resource_key = CharField(index=True, null=True)
+ job_config = TextField()
+ phase = CharField(default=BUILD_PHASE.WAITING)
+ started = DateTimeField(default=datetime.now, index=True)
+ display_name = CharField()
+ trigger = ForeignKeyField(RepositoryBuildTrigger, null=True)
+ pull_robot = QuayUserField(null=True, backref='buildpullrobot', allows_robots=True,
+ robot_null_delete=True)
+ logs_archived = BooleanField(default=False, index=True)
+ queue_id = CharField(null=True, index=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('repository', 'started', 'phase'), False),
+ (('started', 'logs_archived', 'phase'), False),
+ )
+
+
+class LogEntryKind(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class LogEntry(BaseModel):
+ id = BigAutoField()
+ kind = ForeignKeyField(LogEntryKind)
+ account = IntegerField(index=True, column_name='account_id')
+ performer = IntegerField(index=True, null=True, column_name='performer_id')
+ repository = IntegerField(index=True, null=True, column_name='repository_id')
+ datetime = DateTimeField(default=datetime.now, index=True)
+ ip = CharField(null=True)
+ metadata_json = TextField(default='{}')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('account', 'datetime'), False),
+ (('performer', 'datetime'), False),
+ (('repository', 'datetime'), False),
+ (('repository', 'datetime', 'kind'), False),
+ )
+
+
+class LogEntry2(BaseModel):
+ """ TEMP FOR QUAY.IO ONLY. DO NOT RELEASE INTO QUAY ENTERPRISE. """
+ kind = ForeignKeyField(LogEntryKind)
+ account = IntegerField(index=True, db_column='account_id')
+ performer = IntegerField(index=True, null=True, db_column='performer_id')
+ repository = IntegerField(index=True, null=True, db_column='repository_id')
+ datetime = DateTimeField(default=datetime.now, index=True)
+ ip = CharField(null=True)
+ metadata_json = TextField(default='{}')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('account', 'datetime'), False),
+ (('performer', 'datetime'), False),
+ (('repository', 'datetime'), False),
+ (('repository', 'datetime', 'kind'), False),
+ )
+
+
+class LogEntry3(BaseModel):
+ id = BigAutoField()
+ kind = IntegerField(db_column='kind_id')
+ account = IntegerField(db_column='account_id')
+ performer = IntegerField(null=True, db_column='performer_id')
+ repository = IntegerField(null=True, db_column='repository_id')
+ datetime = DateTimeField(default=datetime.now, index=True)
+ ip = CharField(null=True)
+ metadata_json = TextField(default='{}')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('account', 'datetime'), False),
+ (('performer', 'datetime'), False),
+ (('repository', 'datetime', 'kind'), False),
+ )
+
+
+class RepositoryActionCount(BaseModel):
+ repository = ForeignKeyField(Repository)
+ count = IntegerField()
+ date = DateField(index=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # create a unique index on repository and date
+ (('repository', 'date'), True),
+ )
+
+
+class OAuthApplication(BaseModel):
+ client_id = CharField(index=True, default=random_string_generator(length=20))
+ secure_client_secret = EncryptedCharField(default_token_length=40, null=True)
+ fully_migrated = BooleanField(default=False)
+
+ # TODO(remove-unenc): This field is deprecated and should be removed soon.
+ client_secret = deprecated_field(
+ CharField(default=random_string_generator(length=40), null=True),
+ ERTMigrationFlags.WRITE_OLD_FIELDS)
+
+ redirect_uri = CharField()
+ application_uri = CharField()
+ organization = QuayUserField()
+
+ name = CharField()
+ description = TextField(default='')
+ avatar_email = CharField(null=True, column_name='gravatar_email')
+
+
+class OAuthAuthorizationCode(BaseModel):
+ application = ForeignKeyField(OAuthApplication)
+
+ # TODO(remove-unenc): This field is deprecated and should be removed soon.
+ code = deprecated_field(
+ CharField(index=True, unique=True, null=True),
+ ERTMigrationFlags.WRITE_OLD_FIELDS)
+
+ code_name = CharField(index=True, unique=True)
+ code_credential = CredentialField()
+
+ scope = CharField()
+ data = TextField() # Context for the code, such as the user
+
+
+class OAuthAccessToken(BaseModel):
+ uuid = CharField(default=uuid_generator, index=True)
+ application = ForeignKeyField(OAuthApplication)
+ authorized_user = QuayUserField()
+ scope = CharField()
+ token_name = CharField(index=True, unique=True)
+ token_code = CredentialField()
+
+ # TODO(remove-unenc): This field is deprecated and should be removed soon.
+ access_token = deprecated_field(
+ CharField(index=True, null=True),
+ ERTMigrationFlags.WRITE_OLD_FIELDS)
+
+ token_type = CharField(default='Bearer')
+ expires_at = DateTimeField()
+ data = TextField() # This is context for which this token was generated, such as the user
+
+
+class NotificationKind(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class Notification(BaseModel):
+ uuid = CharField(default=uuid_generator, index=True)
+ kind = ForeignKeyField(NotificationKind)
+ target = QuayUserField(index=True, allows_robots=True)
+ metadata_json = TextField(default='{}')
+ created = DateTimeField(default=datetime.now, index=True)
+ dismissed = BooleanField(default=False)
+ lookup_path = CharField(null=True, index=True)
+
+
+class ExternalNotificationEvent(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class ExternalNotificationMethod(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class RepositoryNotification(BaseModel):
+ uuid = CharField(default=uuid_generator, index=True)
+ repository = ForeignKeyField(Repository)
+ event = ForeignKeyField(ExternalNotificationEvent)
+ method = ForeignKeyField(ExternalNotificationMethod)
+ title = CharField(null=True)
+ config_json = TextField()
+ event_config_json = TextField(default='{}')
+ number_of_failures = IntegerField(default=0)
+
+
+class RepositoryAuthorizedEmail(BaseModel):
+ repository = ForeignKeyField(Repository)
+ email = CharField()
+ code = CharField(default=random_string_generator(), unique=True, index=True)
+ confirmed = BooleanField(default=False)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # create a unique index on email and repository
+ (('email', 'repository'), True),
+ )
+
+
+class BlobUpload(BaseModel):
+ repository = ForeignKeyField(Repository)
+ uuid = CharField(index=True, unique=True)
+ byte_count = BigIntegerField(default=0)
+ sha_state = ResumableSHA256Field(null=True, default=resumablehashlib.sha256)
+ location = ForeignKeyField(ImageStorageLocation)
+ storage_metadata = JSONField(null=True, default={})
+ chunk_count = IntegerField(default=0)
+ uncompressed_byte_count = BigIntegerField(null=True)
+ created = DateTimeField(default=datetime.now, index=True)
+ piece_sha_state = ResumableSHA1Field(null=True)
+ piece_hashes = Base64BinaryField(null=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+      # create a unique index on repository and uuid
+ (('repository', 'uuid'), True),
+ )
+
+
+class QuayService(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class QuayRegion(BaseModel):
+ name = CharField(index=True, unique=True)
+
+
+class QuayRelease(BaseModel):
+ service = ForeignKeyField(QuayService)
+ version = CharField()
+ region = ForeignKeyField(QuayRegion)
+ reverted = BooleanField(default=False)
+ created = DateTimeField(default=datetime.now, index=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # unique release per region
+ (('service', 'version', 'region'), True),
+
+ # get recent releases
+ (('service', 'region', 'created'), False),
+ )
+
+
+class TorrentInfo(BaseModel):
+ storage = ForeignKeyField(ImageStorage)
+ piece_length = IntegerField()
+ pieces = Base64BinaryField()
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ # we may want to compute the piece hashes multiple times with different piece lengths
+ (('storage', 'piece_length'), True),
+ )
+
+
+class ServiceKeyApprovalType(Enum):
+ SUPERUSER = 'Super User API'
+ KEY_ROTATION = 'Key Rotation'
+ AUTOMATIC = 'Automatic'
+
+
+class ServiceKeyApproval(BaseModel):
+ approver = QuayUserField(null=True)
+ approval_type = CharField(index=True)
+ approved_date = DateTimeField(default=datetime.utcnow)
+ notes = TextField(default='')
+
+
+class ServiceKey(BaseModel):
+ name = CharField()
+ kid = CharField(unique=True, index=True)
+ service = CharField(index=True)
+ jwk = JSONField()
+ metadata = JSONField()
+ created_date = DateTimeField(default=datetime.utcnow)
+ expiration_date = DateTimeField(null=True)
+ rotation_duration = IntegerField(null=True)
+ approval = ForeignKeyField(ServiceKeyApproval, null=True)
+
+
+class MediaType(BaseModel):
+ """ MediaType is an enumeration of the possible formats of various objects in the data model.
+ """
+ name = CharField(index=True, unique=True)
+
+
+class Messages(BaseModel):
+ content = TextField()
+ uuid = CharField(default=uuid_generator, max_length=36, index=True)
+ severity = CharField(default='info', index=True)
+ media_type = ForeignKeyField(MediaType)
+
+
+class LabelSourceType(BaseModel):
+ """ LabelSourceType is an enumeration of the possible sources for a label.
+ """
+ name = CharField(index=True, unique=True)
+ mutable = BooleanField(default=False)
+
+
+class Label(BaseModel):
+ """ Label represents user-facing metadata associated with another entry in the database (e.g. a
+ Manifest).
+ """
+ uuid = CharField(default=uuid_generator, index=True, unique=True)
+ key = CharField(index=True)
+ value = TextField()
+ media_type = EnumField(MediaType)
+ source_type = EnumField(LabelSourceType)
+
+
+class ApprBlob(BaseModel):
+ """ ApprBlob represents a content-addressable object stored outside of the database.
+ """
+ digest = CharField(index=True, unique=True)
+ media_type = EnumField(MediaType)
+ size = BigIntegerField()
+ uncompressed_size = BigIntegerField(null=True)
+
+
+class ApprBlobPlacementLocation(BaseModel):
+ """ ApprBlobPlacementLocation is an enumeration of the possible storage locations for ApprBlobs.
+ """
+ name = CharField(index=True, unique=True)
+
+
+class ApprBlobPlacement(BaseModel):
+ """ ApprBlobPlacement represents the location of a Blob.
+ """
+ blob = ForeignKeyField(ApprBlob)
+ location = EnumField(ApprBlobPlacementLocation)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('blob', 'location'), True),
+ )
+
+
+class ApprManifest(BaseModel):
+ """ ApprManifest represents the metadata and collection of blobs that comprise an Appr image.
+ """
+ digest = CharField(index=True, unique=True)
+ media_type = EnumField(MediaType)
+ manifest_json = JSONField()
+
+
+class ApprManifestBlob(BaseModel):
+ """ ApprManifestBlob is a many-to-many relation table linking ApprManifests and ApprBlobs.
+ """
+ manifest = ForeignKeyField(ApprManifest, index=True)
+ blob = ForeignKeyField(ApprBlob, index=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('manifest', 'blob'), True),
+ )
+
+
+class ApprManifestList(BaseModel):
+ """ ApprManifestList represents all of the various Appr manifests that compose an ApprTag.
+ """
+ digest = CharField(index=True, unique=True)
+ manifest_list_json = JSONField()
+ schema_version = CharField()
+ media_type = EnumField(MediaType)
+
+
+class ApprTagKind(BaseModel):
+ """ ApprTagKind is a enumtable to reference tag kinds.
+ """
+ name = CharField(index=True, unique=True)
+
+
+class ApprTag(BaseModel):
+ """ ApprTag represents a user-facing alias for referencing an ApprManifestList.
+ """
+ name = CharField()
+ repository = ForeignKeyField(Repository)
+ manifest_list = ForeignKeyField(ApprManifestList, null=True)
+ lifetime_start = BigIntegerField(default=get_epoch_timestamp_ms)
+ lifetime_end = BigIntegerField(null=True, index=True)
+ hidden = BooleanField(default=False)
+ reverted = BooleanField(default=False)
+ protected = BooleanField(default=False)
+ tag_kind = EnumField(ApprTagKind)
+ linked_tag = ForeignKeyField('self', null=True, backref='tag_parents')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('repository', 'name'), False),
+ (('repository', 'name', 'hidden'), False),
+ # This unique index prevents deadlocks when concurrently moving and deleting tags
+ (('repository', 'name', 'lifetime_end'), True),
+ )
+
+ApprChannel = ApprTag.alias()
+
+
+class ApprManifestListManifest(BaseModel):
+ """ ApprManifestListManifest is a many-to-many relation table linking ApprManifestLists and
+ ApprManifests.
+ """
+ manifest_list = ForeignKeyField(ApprManifestList, index=True)
+ manifest = ForeignKeyField(ApprManifest, index=True)
+ operating_system = CharField(null=True)
+ architecture = CharField(null=True)
+ platform_json = JSONField(null=True)
+ media_type = EnumField(MediaType)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('manifest_list', 'media_type'), False),
+ )
+
+
+class AppSpecificAuthToken(BaseModel):
+ """ AppSpecificAuthToken represents a token generated by a user for use with an external
+ application where putting the user's credentials, even encrypted, is deemed too risky.
+ """
+ user = QuayUserField()
+ uuid = CharField(default=uuid_generator, max_length=36, index=True)
+ title = CharField()
+ token_name = CharField(index=True, unique=True, default=random_string_generator(60))
+ token_secret = EncryptedCharField(default_token_length=60)
+
+ # TODO(remove-unenc): This field is deprecated and should be removed soon.
+ token_code = deprecated_field(
+ CharField(default=random_string_generator(length=120), unique=True, index=True, null=True),
+ ERTMigrationFlags.WRITE_OLD_FIELDS)
+
+ created = DateTimeField(default=datetime.now)
+ expiration = DateTimeField(null=True)
+ last_accessed = DateTimeField(null=True)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('user', 'expiration'), False),
+ )
+
+
+class Manifest(BaseModel):
+ """ Manifest represents a single manifest under a repository. Within a repository,
+ there can only be one manifest with the same digest.
+ """
+ repository = ForeignKeyField(Repository)
+ digest = CharField(index=True)
+ media_type = EnumField(MediaType)
+ manifest_bytes = TextField()
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('repository', 'digest'), True),
+ (('repository', 'media_type'), False),
+ )
+
+
+class TagKind(BaseModel):
+ """ TagKind describes the various kinds of tags that can be found in the registry.
+ """
+ name = CharField(index=True, unique=True)
+
+
+class Tag(BaseModel):
+ """ Tag represents a user-facing alias for referencing a Manifest or as an alias to another tag.
+ """
+ name = CharField()
+ repository = ForeignKeyField(Repository)
+ manifest = ForeignKeyField(Manifest, null=True)
+ lifetime_start_ms = BigIntegerField(default=get_epoch_timestamp_ms)
+ lifetime_end_ms = BigIntegerField(null=True, index=True)
+ hidden = BooleanField(default=False)
+ reversion = BooleanField(default=False)
+ tag_kind = EnumField(TagKind)
+ linked_tag = ForeignKeyField('self', null=True, backref='tag_parents')
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('repository', 'name'), False),
+ (('repository', 'name', 'hidden'), False),
+ (('repository', 'name', 'tag_kind'), False),
+
+ (('repository', 'lifetime_start_ms'), False),
+ (('repository', 'lifetime_end_ms'), False),
+
+ # This unique index prevents deadlocks when concurrently moving and deleting tags
+ (('repository', 'name', 'lifetime_end_ms'), True),
+ )
+
+
+class ManifestChild(BaseModel):
+ """ ManifestChild represents a relationship between a manifest and its child manifest(s).
+ Multiple manifests can share the same children. Note that since Manifests are stored
+ per-repository, the repository here is a bit redundant, but we do so to make cleanup easier.
+ """
+ repository = ForeignKeyField(Repository)
+ manifest = ForeignKeyField(Manifest)
+ child_manifest = ForeignKeyField(Manifest)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('repository', 'manifest'), False),
+ (('repository', 'child_manifest'), False),
+ (('repository', 'manifest', 'child_manifest'), False),
+ (('manifest', 'child_manifest'), True),
+ )
+
+
+class ManifestLabel(BaseModel):
+ """ ManifestLabel represents a label applied to a Manifest, within a repository.
+ Note that since Manifests are stored per-repository, the repository here is
+ a bit redundant, but we do so to make cleanup easier.
+ """
+ repository = ForeignKeyField(Repository, index=True)
+ manifest = ForeignKeyField(Manifest)
+ label = ForeignKeyField(Label)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('manifest', 'label'), True),
+ )
+
+
+class ManifestBlob(BaseModel):
+ """ ManifestBlob represents a blob that is used by a manifest. """
+ repository = ForeignKeyField(Repository, index=True)
+ manifest = ForeignKeyField(Manifest)
+ blob = ForeignKeyField(ImageStorage)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('manifest', 'blob'), True),
+ )
+
+
+class ManifestLegacyImage(BaseModel):
+ """ For V1-compatible manifests only, this table maps from the manifest to its associated
+ Docker image.
+ """
+ repository = ForeignKeyField(Repository, index=True)
+ manifest = ForeignKeyField(Manifest, unique=True)
+ image = ForeignKeyField(Image)
+
+
+class TagManifest(BaseModel):
+ """ TO BE DEPRECATED: The manifest for a tag. """
+ tag = ForeignKeyField(RepositoryTag, unique=True)
+ digest = CharField(index=True)
+ json_data = TextField()
+
+
+class TagManifestToManifest(BaseModel):
+ """ NOTE: Only used for the duration of the migrations. """
+ tag_manifest = ForeignKeyField(TagManifest, index=True, unique=True)
+ manifest = ForeignKeyField(Manifest, index=True)
+ broken = BooleanField(index=True, default=False)
+
+
+class TagManifestLabel(BaseModel):
+ """ TO BE DEPRECATED: Mapping from a tag manifest to a label.
+ """
+ repository = ForeignKeyField(Repository, index=True)
+ annotated = ForeignKeyField(TagManifest, index=True)
+ label = ForeignKeyField(Label)
+
+ class Meta:
+ database = db
+ read_only_config = read_only_config
+ indexes = (
+ (('annotated', 'label'), True),
+ )
+
+
+class TagManifestLabelMap(BaseModel):
+ """ NOTE: Only used for the duration of the migrations. """
+ tag_manifest = ForeignKeyField(TagManifest, index=True)
+ manifest = ForeignKeyField(Manifest, null=True, index=True)
+
+ label = ForeignKeyField(Label, index=True)
+
+ tag_manifest_label = ForeignKeyField(TagManifestLabel, index=True)
+ manifest_label = ForeignKeyField(ManifestLabel, null=True, index=True)
+
+ broken_manifest = BooleanField(index=True, default=False)
+
+
+class TagToRepositoryTag(BaseModel):
+ """ NOTE: Only used for the duration of the migrations. """
+ repository = ForeignKeyField(Repository, index=True)
+ tag = ForeignKeyField(Tag, index=True, unique=True)
+ repository_tag = ForeignKeyField(RepositoryTag, index=True, unique=True)
+
+
+@unique
+class RepoMirrorRuleType(IntEnum):
+ """
+ Types of mirroring rules.
+ TAG_GLOB_CSV: Comma separated glob values (eg. "7.6,7.6-1.*")
+ """
+ TAG_GLOB_CSV = 1
+
+
+class RepoMirrorRule(BaseModel):
+ """
+ Determines how a given Repository should be mirrored.
+ """
+ uuid = CharField(default=uuid_generator, max_length=36, index=True)
+ repository = ForeignKeyField(Repository, index=True)
+ creation_date = DateTimeField(default=datetime.utcnow)
+
+ rule_type = ClientEnumField(RepoMirrorRuleType, default=RepoMirrorRuleType.TAG_GLOB_CSV)
+ rule_value = JSONField()
+
+ # Optional associations to allow the generation of a ruleset tree
+ left_child = ForeignKeyField('self', null=True, backref='left_child')
+ right_child = ForeignKeyField('self', null=True, backref='right_child')
+
+
+@unique
+class RepoMirrorType(IntEnum):
+ """
+ Types of repository mirrors.
+ """
+ PULL = 1 # Pull images from the external repo
+
+
+@unique
+class RepoMirrorStatus(IntEnum):
+ """
+ Possible statuses of repository mirroring.
+ """
+ FAIL = -1
+ NEVER_RUN = 0
+ SUCCESS = 1
+ SYNCING = 2
+ SYNC_NOW = 3
+
+
+class RepoMirrorConfig(BaseModel):
+ """
+ Represents a repository to be mirrored and any additional configuration
+ required to perform the mirroring.
+ """
+ repository = ForeignKeyField(Repository, index=True, unique=True, backref='mirror')
+ creation_date = DateTimeField(default=datetime.utcnow)
+ is_enabled = BooleanField(default=True)
+
+ # Mirror Configuration
+ mirror_type = ClientEnumField(RepoMirrorType, default=RepoMirrorType.PULL)
+ internal_robot = QuayUserField(allows_robots=True, null=True, backref='mirrorpullrobot',
+ robot_null_delete=True)
+ external_reference = CharField()
+ external_registry_username = EncryptedCharField(max_length=2048, null=True)
+ external_registry_password = EncryptedCharField(max_length=2048, null=True)
+ external_registry_config = JSONField(default={})
+
+ # Worker Queuing
+ sync_interval = IntegerField() # seconds between syncs
+ sync_start_date = DateTimeField(null=True) # next start time
+ sync_expiration_date = DateTimeField(null=True) # max duration
+ sync_retries_remaining = IntegerField(default=3)
+ sync_status = ClientEnumField(RepoMirrorStatus, default=RepoMirrorStatus.NEVER_RUN)
+ sync_transaction_id = CharField(default=uuid_generator, max_length=36)
+
+ # Tag-Matching Rules
+ root_rule = ForeignKeyField(RepoMirrorRule)
+
+
+appr_classes = set([ApprTag, ApprTagKind, ApprBlobPlacementLocation, ApprManifestList,
+ ApprManifestBlob, ApprBlob, ApprManifestListManifest, ApprManifest,
+ ApprBlobPlacement])
+v22_classes = set([Manifest, ManifestLabel, ManifestBlob, ManifestLegacyImage, TagKind,
+ ManifestChild, Tag])
+transition_classes = set([TagManifestToManifest, TagManifestLabelMap, TagToRepositoryTag])
+
+is_model = lambda x: inspect.isclass(x) and issubclass(x, BaseModel) and x is not BaseModel
+all_models = [model[1] for model in inspect.getmembers(sys.modules[__name__], is_model)]
diff --git a/data/encryption.py b/data/encryption.py
new file mode 100644
index 000000000..83a90860a
--- /dev/null
+++ b/data/encryption.py
@@ -0,0 +1,82 @@
+import os
+import logging
+import base64
+
+from collections import namedtuple
+from cryptography.hazmat.primitives.ciphers.aead import AESCCM
+
+from util.security.secret import convert_secret_key
+
+class DecryptionFailureException(Exception):
+ """ Exception raised if a field could not be decrypted. """
+
+
+EncryptionVersion = namedtuple('EncryptionVersion', ['prefix', 'encrypt', 'decrypt'])
+
+logger = logging.getLogger(__name__)
+
+
+_SEPARATOR = '$$'
+AES_CCM_NONCE_LENGTH = 13
+
+
+def _encrypt_ccm(secret_key, value, field_max_length=None):
+ aesccm = AESCCM(secret_key)
+ nonce = os.urandom(AES_CCM_NONCE_LENGTH)
+ ct = aesccm.encrypt(nonce, value.encode('utf-8'), None)
+ encrypted = base64.b64encode(nonce + ct)
+ if field_max_length:
+ msg = 'Tried to encode a value too large for this field'
+ assert (len(encrypted) + _RESERVED_FIELD_SPACE) <= field_max_length, msg
+
+ return encrypted
+
+
+def _decrypt_ccm(secret_key, value):
+ aesccm = AESCCM(secret_key)
+ try:
+ decoded = base64.b64decode(value)
+ nonce = decoded[:AES_CCM_NONCE_LENGTH]
+ ct = decoded[AES_CCM_NONCE_LENGTH:]
+ decrypted = aesccm.decrypt(nonce, ct, None)
+ return decrypted.decode('utf-8')
+ except Exception:
+ logger.exception('Got exception when trying to decrypt value `%s`', value)
+ raise DecryptionFailureException()
+
+
+# Defines the versions of encryptions we support. This will allow us to upgrade to newer encryption
+# protocols (fairly seamlessly) if need be in the future.
+_VERSIONS = {
+ 'v0': EncryptionVersion('v0', _encrypt_ccm, _decrypt_ccm),
+}
+
+_RESERVED_FIELD_SPACE = len(_SEPARATOR) + max([len(k) for k in _VERSIONS.keys()])
+
+
+class FieldEncrypter(object):
+ """ Helper object for defining how fields are encrypted and decrypted between the database
+ and the application.
+ """
+ def __init__(self, secret_key, version='v0'):
+ self._secret_key = convert_secret_key(secret_key)
+ self._encryption_version = _VERSIONS[version]
+
+ def encrypt_value(self, value, field_max_length=None):
+ """ Encrypts the value using the current version of encryption. """
+ encrypted_value = self._encryption_version.encrypt(self._secret_key, value, field_max_length)
+ return '%s%s%s' % (self._encryption_version.prefix, _SEPARATOR, encrypted_value)
+
+ def decrypt_value(self, value):
+ """ Decrypts the value, returning it. If the value cannot be decrypted
+ raises a DecryptionFailureException.
+ """
+ if _SEPARATOR not in value:
+ raise DecryptionFailureException('Invalid encrypted value')
+
+ version_prefix, data = value.split(_SEPARATOR, 1)
+ if version_prefix not in _VERSIONS:
+ raise DecryptionFailureException('Unknown version prefix %s' % version_prefix)
+
+ return _VERSIONS[version_prefix].decrypt(self._secret_key, data)
+
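As a sanity check on the scheme above, a minimal round-trip sketch using the module-level helpers. The random 32-byte key is illustrative; application code goes through FieldEncrypter, which derives the key from the configured secret and prepends the version prefix (e.g. v0$$<payload>).

import os

from data.encryption import _encrypt_ccm, _decrypt_ccm

key = os.urandom(32)                       # AES-CCM accepts 128/192/256-bit keys
token = _encrypt_ccm(key, 'super-secret')  # base64(nonce || ciphertext)
assert _decrypt_ccm(key, token) == 'super-secret'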
diff --git a/data/fields.py b/data/fields.py
new file mode 100644
index 000000000..c79a7e6bd
--- /dev/null
+++ b/data/fields.py
@@ -0,0 +1,297 @@
+import base64
+import string
+import json
+
+from random import SystemRandom
+
+import bcrypt
+import resumablehashlib
+
+from peewee import TextField, CharField, SmallIntegerField
+from data.text import prefix_search
+
+
+def random_string(length=16):
+ random = SystemRandom()
+ return ''.join([random.choice(string.ascii_uppercase + string.digits)
+ for _ in range(length)])
+
+
+class _ResumableSHAField(TextField):
+ def _create_sha(self):
+ raise NotImplementedError
+
+ def db_value(self, value):
+ if value is None:
+ return None
+
+ sha_state = value.state()
+
+    # One of the fields is a byte string; base64-encode it so it can be stored and
+    # fetched regardless of the database's default collation.
+ sha_state[3] = base64.b64encode(sha_state[3])
+
+ return json.dumps(sha_state)
+
+ def python_value(self, value):
+ if value is None:
+ return None
+
+ sha_state = json.loads(value)
+
+ # We need to base64 decode the data bytestring.
+ sha_state[3] = base64.b64decode(sha_state[3])
+ to_resume = self._create_sha()
+ to_resume.set_state(sha_state)
+ return to_resume
+
+
+class ResumableSHA256Field(_ResumableSHAField):
+ def _create_sha(self):
+ return resumablehashlib.sha256()
+
+
+class ResumableSHA1Field(_ResumableSHAField):
+ def _create_sha(self):
+ return resumablehashlib.sha1()
+
+
+class JSONField(TextField):
+ def db_value(self, value):
+ return json.dumps(value)
+
+ def python_value(self, value):
+ if value is None or value == "":
+ return {}
+ return json.loads(value)
+
+
+class Base64BinaryField(TextField):
+ def db_value(self, value):
+ if value is None:
+ return None
+ return base64.b64encode(value)
+
+ def python_value(self, value):
+ if value is None:
+ return None
+ return base64.b64decode(value)
+
+
+class DecryptedValue(object):
+ """ Wrapper around an already decrypted value to be placed into an encrypted field. """
+ def __init__(self, decrypted_value):
+ assert decrypted_value is not None
+ self.value = decrypted_value
+
+ def decrypt(self):
+ return self.value
+
+ def matches(self, unencrypted_value):
+ """ Returns whether the value of this field matches the unencrypted_value. """
+ return self.decrypt() == unencrypted_value
+
+
+class LazyEncryptedValue(object):
+ """ Wrapper around an encrypted value in an encrypted field. Will decrypt lazily. """
+ def __init__(self, encrypted_value, field):
+ self.encrypted_value = encrypted_value
+ self._field = field
+
+ def decrypt(self):
+ """ Decrypts the value. """
+ return self._field.model._meta.encrypter.decrypt_value(self.encrypted_value)
+
+ def matches(self, unencrypted_value):
+ """ Returns whether the value of this field matches the unencrypted_value. """
+ return self.decrypt() == unencrypted_value
+
+ def __eq__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def __mod__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def __pow__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def __contains__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def contains(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def startswith(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def endswith(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+
+def _add_encryption(field_class, requires_length_check=True):
+ """ Adds support for encryption and decryption to the given field class. """
+ class indexed_class(field_class):
+ def __init__(self, default_token_length=None, *args, **kwargs):
+ def _generate_default():
+ return DecryptedValue(random_string(default_token_length))
+
+ if default_token_length is not None:
+ kwargs['default'] = _generate_default
+
+ field_class.__init__(self, *args, **kwargs)
+ assert not self.index
+
+ def db_value(self, value):
+ if value is None:
+ return None
+
+ if isinstance(value, LazyEncryptedValue):
+ return value.encrypted_value
+
+ if isinstance(value, DecryptedValue):
+ value = value.value
+
+ meta = self.model._meta
+ return meta.encrypter.encrypt_value(value, self.max_length if requires_length_check else None)
+
+ def python_value(self, value):
+ if value is None:
+ return None
+
+ return LazyEncryptedValue(value, self)
+
+ def __eq__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def __mod__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def __pow__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def __contains__(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def contains(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def startswith(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ def endswith(self, _):
+ raise Exception('Disallowed operation; use `matches`')
+
+ return indexed_class
+
+
+EncryptedCharField = _add_encryption(CharField)
+EncryptedTextField = _add_encryption(TextField, requires_length_check=False)
+
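A hedged sketch of how these encrypted fields behave from application code. It assumes a model such as OAuthApplication (declared in data/database.py above) whose Meta exposes the FieldEncrypter these wrappers delegate to; the lookup and plaintext below are purely illustrative.

from data.database import OAuthApplication
from data.fields import DecryptedValue

# Writing: wrap the plaintext so db_value() knows it still needs encrypting.
app = OAuthApplication.get_by_id(1)            # illustrative lookup
app.secure_client_secret = DecryptedValue('hunter2')
app.save()

# Reading: the field comes back as a LazyEncryptedValue. Direct comparison is
# deliberately disallowed; callers use matches(), which decrypts on demand.
app = OAuthApplication.get_by_id(1)
assert app.secure_client_secret.matches('hunter2')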
+
+class EnumField(SmallIntegerField):
+ def __init__(self, enum_type, *args, **kwargs):
+ kwargs.pop('index', None)
+
+ super(EnumField, self).__init__(index=True, *args, **kwargs)
+ self.enum_type = enum_type
+
+ def db_value(self, value):
+ """Convert the python value for storage in the database."""
+ return int(value.value)
+
+ def python_value(self, value):
+ """Convert the database value to a pythonic value."""
+ return self.enum_type(value) if value is not None else None
+
+ def clone_base(self, **kwargs):
+ return super(EnumField, self).clone_base(
+ enum_type=self.enum_type,
+ **kwargs)
+
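A small, hedged illustration of the conversions EnumField performs. The Color enum is hypothetical; the real enums in this diff (e.g. RepoMirrorStatus) work the same way.

from enum import IntEnum, unique

from data.fields import EnumField

@unique
class Color(IntEnum):
  RED = 1
  BLUE = 2

field = EnumField(Color)
assert field.db_value(Color.BLUE) == 2      # stored as a small integer
assert field.python_value(1) is Color.RED   # loaded back as the enum member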
+
+def _add_fulltext(field_class):
+ """ Adds support for full text indexing and lookup to the given field class. """
+ class indexed_class(field_class):
+ # Marker used by SQLAlchemy translation layer to add the proper index for full text searching.
+ __fulltext__ = True
+
+ def __init__(self, match_function, *args, **kwargs):
+ field_class.__init__(self, *args, **kwargs)
+ self.match_function = match_function
+
+ def match(self, query):
+ return self.match_function(self, query)
+
+ def match_prefix(self, query):
+ return prefix_search(self, query)
+
+ def __mod__(self, _):
+ raise Exception('Unsafe operation: Use `match` or `match_prefix`')
+
+ def __pow__(self, _):
+ raise Exception('Unsafe operation: Use `match` or `match_prefix`')
+
+ def __contains__(self, _):
+ raise Exception('Unsafe operation: Use `match` or `match_prefix`')
+
+ def contains(self, _):
+ raise Exception('Unsafe operation: Use `match` or `match_prefix`')
+
+ def startswith(self, _):
+ raise Exception('Unsafe operation: Use `match` or `match_prefix`')
+
+ def endswith(self, _):
+ raise Exception('Unsafe operation: Use `match` or `match_prefix`')
+
+ return indexed_class
+
+
+FullIndexedCharField = _add_fulltext(CharField)
+FullIndexedTextField = _add_fulltext(TextField)
+
+
+class Credential(object):
+ """ Credential represents a hashed credential. """
+ def __init__(self, hashed):
+ self.hashed = hashed
+
+ def matches(self, value):
+ """ Returns true if this credential matches the unhashed value given. """
+ return bcrypt.hashpw(value.encode('utf-8'), self.hashed) == self.hashed
+
+ @classmethod
+ def from_string(cls, string_value):
+ """ Returns a Credential object from an unhashed string value. """
+ return Credential(bcrypt.hashpw(string_value.encode('utf-8'), bcrypt.gensalt()))
+
+ @classmethod
+ def generate(cls, length=20):
+ """ Generates a new credential and returns it, along with its unhashed form. """
+ token = random_string(length)
+ return Credential.from_string(token), token
+
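A hedged usage sketch for the Credential helper above; the token values are generated at runtime and nothing here touches the database.

from data.fields import Credential

cred, token = Credential.generate(length=24)
assert cred.matches(token)                  # bcrypt check against the plaintext we handed out
assert not cred.matches('wrong-password')

# From an existing plaintext, e.g. when verifying a presented secret:
assert Credential.from_string('my-cli-token').matches('my-cli-token')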
+
+class CredentialField(CharField):
+ """ A character field that stores crytographically hashed credentials that should never be
+ available to the user in plaintext after initial creation. This field automatically
+ provides verification.
+ """
+ def __init__(self, *args, **kwargs):
+ CharField.__init__(self, *args, **kwargs)
+ assert 'default' not in kwargs
+ assert not self.index
+
+ def db_value(self, value):
+ if value is None:
+ return None
+
+ if isinstance(value, basestring):
+ raise Exception('A string cannot be given to a CredentialField; please wrap in a Credential')
+
+ return value.hashed
+
+ def python_value(self, value):
+ if value is None:
+ return None
+
+ return Credential(value)
diff --git a/data/logs_model/__init__.py b/data/logs_model/__init__.py
new file mode 100644
index 000000000..be8cc9402
--- /dev/null
+++ b/data/logs_model/__init__.py
@@ -0,0 +1,64 @@
+import logging
+
+from data.logs_model.table_logs_model import TableLogsModel
+from data.logs_model.document_logs_model import DocumentLogsModel
+from data.logs_model.combined_model import CombinedLogsModel
+
+logger = logging.getLogger(__name__)
+
+
+def _transition_model(*args, **kwargs):
+ return CombinedLogsModel(
+ DocumentLogsModel(*args, **kwargs),
+ TableLogsModel(*args, **kwargs),
+ )
+
+
+_LOG_MODELS = {
+ 'database': TableLogsModel,
+ 'transition_reads_both_writes_es': _transition_model,
+ 'elasticsearch': DocumentLogsModel,
+}
+
+_PULL_LOG_KINDS = {'pull_repo', 'repo_verb'}
+
+class LogsModelProxy(object):
+ def __init__(self):
+ self._model = None
+
+ def initialize(self, model):
+ self._model = model
+ logger.info('===============================')
+ logger.info('Using logs model `%s`', self._model)
+ logger.info('===============================')
+
+ def __getattr__(self, attr):
+ if not self._model:
+ raise AttributeError("LogsModelProxy is not initialized")
+ return getattr(self._model, attr)
+
+
+logs_model = LogsModelProxy()
+
+
+def configure(app_config):
+  logger.debug('Configuring log model')
+ model_name = app_config.get('LOGS_MODEL', 'database')
+ model_config = app_config.get('LOGS_MODEL_CONFIG', {})
+
+ def should_skip_logging(kind_name, namespace_name, is_free_namespace):
+ if namespace_name and namespace_name in app_config.get('DISABLED_FOR_AUDIT_LOGS', {}):
+ return True
+
+ if kind_name in _PULL_LOG_KINDS:
+ if namespace_name and namespace_name in app_config.get('DISABLED_FOR_PULL_LOGS', {}):
+ return True
+
+ if app_config.get('FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES'):
+ if is_free_namespace:
+ return True
+
+ return False
+
+ model_config['should_skip_logging'] = should_skip_logging
+ logs_model.initialize(_LOG_MODELS[model_name](**model_config))
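A hedged sketch of the wiring at startup. The config dict and the kind/namespace names below are illustrative; in the real application configure() receives the app's config object, and the chosen model needs its backing store available.

from data.logs_model import configure, logs_model

# configure() initializes the proxy; until then any attribute access raises AttributeError.
configure({'LOGS_MODEL': 'database', 'LOGS_MODEL_CONFIG': {}})

logs_model.log_action('push_repo',
                      namespace_name='acme',
                      repository_name='web',
                      ip='127.0.0.1',
                      metadata={'tag': 'latest'})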
diff --git a/data/logs_model/combined_model.py b/data/logs_model/combined_model.py
new file mode 100644
index 000000000..735101601
--- /dev/null
+++ b/data/logs_model/combined_model.py
@@ -0,0 +1,132 @@
+import logging
+import itertools
+
+from data.logs_model.datatypes import AggregatedLogCount, LogEntriesPage
+from data.logs_model.interface import ActionLogsDataInterface
+from data.logs_model.shared import SharedModel
+
+logger = logging.getLogger(__name__)
+
+
+def _merge_aggregated_log_counts(*args):
+ """ Merge two lists of AggregatedLogCount based on the value of their kind_id and datetime.
+ """
+ matching_keys = {}
+ aggregated_log_counts_list = itertools.chain.from_iterable(args)
+
+ def canonical_key_from_kind_date_tuple(kind_id, dt):
+ """ Return a comma separated key from an AggregatedLogCount's kind_id and datetime. """
+ return str(kind_id) + ',' + str(dt)
+
+ for kind_id, count, dt in aggregated_log_counts_list:
+ kind_date_key = canonical_key_from_kind_date_tuple(kind_id, dt)
+ if kind_date_key in matching_keys:
+ existing_count = matching_keys[kind_date_key][2]
+ matching_keys[kind_date_key] = (kind_id, dt, existing_count + count)
+ else:
+ matching_keys[kind_date_key] = (kind_id, dt, count)
+
+ return [AggregatedLogCount(kind_id, count, dt) for (kind_id, dt, count) in matching_keys.values()]
+
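A worked example of the merge above (kind ids and dates are arbitrary): counts that share a (kind_id, datetime) pair are summed, everything else passes through unchanged.

from datetime import date

from data.logs_model.combined_model import _merge_aggregated_log_counts
from data.logs_model.datatypes import AggregatedLogCount

rw = [AggregatedLogCount(kind_id=1, count=3, datetime=date(2019, 6, 1))]
ro = [AggregatedLogCount(kind_id=1, count=2, datetime=date(2019, 6, 1)),
      AggregatedLogCount(kind_id=2, count=5, datetime=date(2019, 6, 1))]

merged = _merge_aggregated_log_counts(rw, ro)
# -> kind 1 on 2019-06-01 is summed to 5; kind 2 on 2019-06-01 keeps its count of 5.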
+
+class CombinedLogsModel(SharedModel, ActionLogsDataInterface):
+ """
+ CombinedLogsModel implements the data model that logs to the first logs model and reads from
+ both.
+ """
+
+ def __init__(self, read_write_logs_model, read_only_logs_model):
+ self.read_write_logs_model = read_write_logs_model
+ self.read_only_logs_model = read_only_logs_model
+
+ def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
+ repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
+ return self.read_write_logs_model.log_action(kind_name, namespace_name, performer, ip, metadata,
+ repository, repository_name, timestamp,
+ is_free_namespace)
+
+ def count_repository_actions(self, repository, day):
+ rw_count = self.read_write_logs_model.count_repository_actions(repository, day)
+ ro_count = self.read_only_logs_model.count_repository_actions(repository, day)
+ return rw_count + ro_count
+
+ def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
+ repository_name=None, namespace_name=None, filter_kinds=None):
+ rw_model = self.read_write_logs_model
+ ro_model = self.read_only_logs_model
+ rw_count = rw_model.get_aggregated_log_counts(start_datetime, end_datetime,
+ performer_name=performer_name,
+ repository_name=repository_name,
+ namespace_name=namespace_name,
+ filter_kinds=filter_kinds)
+ ro_count = ro_model.get_aggregated_log_counts(start_datetime, end_datetime,
+ performer_name=performer_name,
+ repository_name=repository_name,
+ namespace_name=namespace_name,
+ filter_kinds=filter_kinds)
+ return _merge_aggregated_log_counts(rw_count, ro_count)
+
+ def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
+ namespace_id=None, max_query_time=None):
+ rw_model = self.read_write_logs_model
+ ro_model = self.read_only_logs_model
+ rw_logs = rw_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
+ namespace_id, max_query_time)
+ ro_logs = ro_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
+ namespace_id, max_query_time)
+ for batch in itertools.chain(rw_logs, ro_logs):
+ yield batch
+
+ def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
+ namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
+ rw_model = self.read_write_logs_model
+ ro_model = self.read_only_logs_model
+
+ page_token = page_token or {}
+
+ new_page_token = {}
+ if page_token is None or not page_token.get('under_readonly_model', False):
+ rw_page_token = page_token.get('readwrite_page_token')
+ rw_logs = rw_model.lookup_logs(start_datetime, end_datetime, performer_name,
+ repository_name, namespace_name, filter_kinds,
+ rw_page_token, max_page_count)
+ logs, next_page_token = rw_logs
+ new_page_token['under_readonly_model'] = next_page_token is None
+ new_page_token['readwrite_page_token'] = next_page_token
+ return LogEntriesPage(logs, new_page_token)
+ else:
+ readonly_page_token = page_token.get('readonly_page_token')
+ ro_logs = ro_model.lookup_logs(start_datetime, end_datetime, performer_name,
+ repository_name, namespace_name, filter_kinds,
+ readonly_page_token, max_page_count)
+ logs, next_page_token = ro_logs
+ if next_page_token is None:
+ return LogEntriesPage(logs, None)
+
+ new_page_token['under_readonly_model'] = True
+ new_page_token['readonly_page_token'] = next_page_token
+ return LogEntriesPage(logs, new_page_token)
+
+ def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
+ filter_kinds=None, size=20):
+ latest_logs = []
+ rw_model = self.read_write_logs_model
+ ro_model = self.read_only_logs_model
+
+ rw_logs = rw_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
+ filter_kinds, size)
+ latest_logs.extend(rw_logs)
+ if len(latest_logs) < size:
+ ro_logs = ro_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
+ filter_kinds, size - len(latest_logs))
+ latest_logs.extend(ro_logs)
+
+ return latest_logs
+
+ def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
+ ro_model = self.read_only_logs_model
+ rw_model = self.read_write_logs_model
+ ro_ctx = ro_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
+ rw_ctx = rw_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
+ for ctx in itertools.chain(ro_ctx, rw_ctx):
+ yield ctx
diff --git a/data/logs_model/datatypes.py b/data/logs_model/datatypes.py
new file mode 100644
index 000000000..03db6756f
--- /dev/null
+++ b/data/logs_model/datatypes.py
@@ -0,0 +1,155 @@
+import json
+
+from calendar import timegm
+from collections import namedtuple
+from email.utils import formatdate
+
+from cachetools.func import lru_cache
+
+from data import model
+from util.morecollections import AttrDict
+
+
+def _format_date(date):
+ """ Output an RFC822 date format. """
+ if date is None:
+ return None
+
+ return formatdate(timegm(date.utctimetuple()))
+
+
+@lru_cache(maxsize=1)
+def _kinds():
+ return model.log.get_log_entry_kinds()
+
+
+class LogEntriesPage(namedtuple('LogEntriesPage', ['logs', 'next_page_token'])):
+ """ Represents a page returned by the lookup_logs call. The `logs` contains the logs
+ found for the page and `next_page_token`, if not None, contains the token to be
+ encoded and returned for the followup call.
+ """
+
+
+class Log(namedtuple('Log', [
+ 'metadata_json', 'ip', 'datetime', 'performer_email', 'performer_username', 'performer_robot',
+ 'account_organization', 'account_username', 'account_email', 'account_robot', 'kind_id'])):
+ """ Represents a single log entry returned by the logs model. """
+
+ @classmethod
+ def for_logentry(cls, log):
+ account_organization = None
+ account_username = None
+ account_email = None
+ account_robot = None
+
+ try:
+ account_organization = log.account.organization
+ account_username = log.account.username
+ account_email = log.account.email
+ account_robot = log.account.robot
+ except AttributeError:
+ pass
+
+ performer_robot = None
+ performer_username = None
+ performer_email = None
+
+ try:
+ performer_robot = log.performer.robot
+ performer_username = log.performer.username
+ performer_email = log.performer.email
+ except AttributeError:
+ pass
+
+ return Log(log.metadata_json, log.ip, log.datetime, performer_email, performer_username,
+ performer_robot, account_organization, account_username, account_email,
+ account_robot, log.kind_id)
+
+ @classmethod
+ def for_elasticsearch_log(cls, log, id_user_map):
+ account_organization = None
+ account_username = None
+ account_email = None
+ account_robot = None
+
+ try:
+ if log.account_id:
+ account = id_user_map[log.account_id]
+ account_organization = account.organization
+ account_username = account.username
+ account_email = account.email
+ account_robot = account.robot
+ except AttributeError:
+ pass
+
+ performer_robot = None
+ performer_username = None
+ performer_email = None
+
+ try:
+ if log.performer_id:
+ performer = id_user_map[log.performer_id]
+ performer_robot = performer.robot
+ performer_username = performer.username
+ performer_email = performer.email
+ except AttributeError:
+ pass
+
+ return Log(log.metadata_json, str(log.ip), log.datetime, performer_email, performer_username,
+ performer_robot, account_organization, account_username, account_email,
+ account_robot, log.kind_id)
+
+ def to_dict(self, avatar, include_namespace=False):
+ view = {
+ 'kind': _kinds()[self.kind_id],
+ 'metadata': json.loads(self.metadata_json),
+ 'ip': self.ip,
+ 'datetime': _format_date(self.datetime),
+ }
+
+ if self.performer_username:
+ performer = AttrDict({'username': self.performer_username, 'email': self.performer_email})
+ performer.robot = None
+ if self.performer_robot:
+ performer.robot = self.performer_robot
+
+ view['performer'] = {
+ 'kind': 'user',
+ 'name': self.performer_username,
+ 'is_robot': self.performer_robot,
+ 'avatar': avatar.get_data_for_user(performer),
+ }
+
+ if include_namespace:
+ if self.account_username:
+ account = AttrDict({'username': self.account_username, 'email': self.account_email})
+ if self.account_organization:
+ view['namespace'] = {
+ 'kind': 'org',
+ 'name': self.account_username,
+ 'avatar': avatar.get_data_for_org(account),
+ }
+ else:
+ account.robot = None
+ if self.account_robot:
+ account.robot = self.account_robot
+ view['namespace'] = {
+ 'kind': 'user',
+ 'name': self.account_username,
+ 'avatar': avatar.get_data_for_user(account),
+ }
+
+ return view
+
+
+class AggregatedLogCount(namedtuple('AggregatedLogCount', ['kind_id', 'count', 'datetime'])):
+ """ Represents the aggregated count of the number of logs, of a particular kind, on a day. """
+ def to_dict(self):
+ view = {
+ 'kind': _kinds()[self.kind_id],
+ 'count': self.count,
+ 'datetime': _format_date(self.datetime),
+ }
+
+ return view
diff --git a/data/logs_model/document_logs_model.py b/data/logs_model/document_logs_model.py
new file mode 100644
index 000000000..e93cd2062
--- /dev/null
+++ b/data/logs_model/document_logs_model.py
@@ -0,0 +1,532 @@
+# pylint: disable=protected-access
+
+import json
+import logging
+import uuid
+
+from time import time
+from datetime import timedelta, datetime, date
+from dateutil.parser import parse as parse_datetime
+
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+from elasticsearch.exceptions import ConnectionTimeout, NotFoundError
+
+from data import model
+from data.database import CloseForLongOperation
+from data.model import config
+from data.model.log import (_json_serialize, ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING,
+ DataModelException)
+from data.logs_model.elastic_logs import LogEntry, configure_es
+from data.logs_model.datatypes import Log, AggregatedLogCount, LogEntriesPage
+from data.logs_model.interface import (ActionLogsDataInterface, LogRotationContextInterface,
+ LogsIterationTimeout)
+from data.logs_model.shared import SharedModel, epoch_ms
+
+from data.logs_model.logs_producer import LogProducerProxy, LogSendException
+from data.logs_model.logs_producer.kafka_logs_producer import KafkaLogsProducer
+from data.logs_model.logs_producer.elasticsearch_logs_producer import ElasticsearchLogsProducer
+from data.logs_model.logs_producer.kinesis_stream_logs_producer import KinesisStreamLogsProducer
+
+
+logger = logging.getLogger(__name__)
+
+PAGE_SIZE = 20
+DEFAULT_RESULT_WINDOW = 5000
+MAX_RESULT_WINDOW = 10000
+
+# DATE_RANGE_LIMIT is to limit the query date time range to at most 1 month.
+DATE_RANGE_LIMIT = 32
+
+# Timeout for count_repository_actions
+COUNT_REPOSITORY_ACTION_TIMEOUT = 30
+
+
+def _date_range_descending(start_datetime, end_datetime, includes_end_datetime=False):
+ """ Generate the dates between `end_datetime` and `start_datetime`.
+
+ If `includes_end_datetime` is set, the generator starts at `end_datetime`,
+ otherwise, starts the generator at `end_datetime` minus 1 second.
+ """
+ assert end_datetime >= start_datetime
+ start_date = start_datetime.date()
+
+ if includes_end_datetime:
+ current_date = end_datetime.date()
+ else:
+ current_date = (end_datetime - timedelta(seconds=1)).date()
+
+ while current_date >= start_date:
+ yield current_date
+ current_date = current_date - timedelta(days=1)
+
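A worked example of the generator above (dates chosen arbitrarily). With the default exclusive end, an end_datetime that falls exactly on midnight does not yield its own day.

from datetime import date, datetime

from data.logs_model.document_logs_model import _date_range_descending

days = list(_date_range_descending(datetime(2019, 6, 1), datetime(2019, 6, 4)))
assert days == [date(2019, 6, 3), date(2019, 6, 2), date(2019, 6, 1)]

days = list(_date_range_descending(datetime(2019, 6, 1), datetime(2019, 6, 4),
                                    includes_end_datetime=True))
assert days == [date(2019, 6, 4), date(2019, 6, 3), date(2019, 6, 2), date(2019, 6, 1)]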
+
+def _date_range_in_single_index(dt1, dt2):
+ """ Determine whether a single index can be searched given a range
+ of dates or datetimes. If date instances are given, difference should be 1 day.
+
+ NOTE: dt2 is exclusive to the search result set.
+ i.e. The date range is larger or equal to dt1 and strictly smaller than dt2
+ """
+ assert isinstance(dt1, date) and isinstance(dt2, date)
+
+ dt = dt2 - dt1
+
+ # Check if date or datetime
+ if not isinstance(dt1, datetime) and not isinstance(dt2, datetime):
+ return dt == timedelta(days=1)
+
+ if dt < timedelta(days=1) and dt >= timedelta(days=0):
+ return dt2.day == dt1.day
+
+ # Check if datetime can be interpreted as a date: hour, minutes, seconds or microseconds set to 0
+ if dt == timedelta(days=1):
+ return dt1.hour == 0 and dt1.minute == 0 and dt1.second == 0 and dt1.microsecond == 0
+
+ return False
+
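Worked examples for the check above (values arbitrary): a one-day span of plain dates fits a single index, as does a same-day datetime range, while a 24-hour range that does not start at midnight does not.

from datetime import date, datetime

from data.logs_model.document_logs_model import _date_range_in_single_index

assert _date_range_in_single_index(date(2019, 6, 1), date(2019, 6, 2))
assert _date_range_in_single_index(datetime(2019, 6, 1, 8), datetime(2019, 6, 1, 20))
assert not _date_range_in_single_index(datetime(2019, 6, 1, 12), datetime(2019, 6, 2, 12))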
+
+def _for_elasticsearch_logs(logs, repository_id=None, namespace_id=None):
+ namespace_ids = set()
+ for log in logs:
+ namespace_ids.add(log.account_id)
+ namespace_ids.add(log.performer_id)
+ assert namespace_id is None or log.account_id == namespace_id
+ assert repository_id is None or log.repository_id == repository_id
+
+ id_user_map = model.user.get_user_map_by_ids(namespace_ids)
+ return [Log.for_elasticsearch_log(log, id_user_map) for log in logs]
+
+
+def _random_id():
+ """ Generates a unique uuid4 string for the random_id field in LogEntry.
+ It is used as tie-breaker for sorting logs based on datetime:
+ https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-after.html
+ """
+ return str(uuid.uuid4())
+
+
+@add_metaclass(ABCMeta)
+class ElasticsearchLogsModelInterface(object):
+ """
+ Interface for Elasticsearch specific operations with the logs model.
+ These operations are usually index based.
+ """
+
+ @abstractmethod
+ def can_delete_index(self, index, cutoff_date):
+ """ Return whether the given index is older than the given cutoff date. """
+
+ @abstractmethod
+ def list_indices(self):
+ """ List the logs model's indices. """
+
+
+class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsModelInterface):
+ """
+ DocumentLogsModel implements the data model for the logs API backed by an
+ elasticsearch service.
+ """
+ def __init__(self, should_skip_logging=None, elasticsearch_config=None, producer=None, **kwargs):
+ self._should_skip_logging = should_skip_logging
+ self._logs_producer = LogProducerProxy()
+ self._es_client = configure_es(**elasticsearch_config)
+
+ if producer == 'kafka':
+ kafka_config = kwargs['kafka_config']
+ self._logs_producer.initialize(KafkaLogsProducer(**kafka_config))
+ elif producer == 'elasticsearch':
+ self._logs_producer.initialize(ElasticsearchLogsProducer())
+ elif producer == 'kinesis_stream':
+ kinesis_stream_config = kwargs['kinesis_stream_config']
+ self._logs_producer.initialize(KinesisStreamLogsProducer(**kinesis_stream_config))
+ else:
+ raise Exception('Invalid log producer: %s' % producer)
+
+ @staticmethod
+ def _get_ids_by_names(repository_name, namespace_name, performer_name):
+ """ Retrieve repository/namespace/performer ids based on their names.
+ throws DataModelException when the namespace_name does not match any
+ user in the database.
+ returns database ID or None if not exists.
+ """
+ repository_id = None
+ account_id = None
+ performer_id = None
+
+ if repository_name and namespace_name:
+ repository = model.repository.get_repository(namespace_name, repository_name)
+ if repository:
+ repository_id = repository.id
+ account_id = repository.namespace_user.id
+
+ if namespace_name and account_id is None:
+ account = model.user.get_user_or_org(namespace_name)
+ if account is None:
+ raise DataModelException('Invalid namespace requested')
+
+ account_id = account.id
+
+ if performer_name:
+ performer = model.user.get_user(performer_name)
+ if performer:
+ performer_id = performer.id
+
+ return repository_id, account_id, performer_id
+
+ def _base_query(self, performer_id=None, repository_id=None, account_id=None, filter_kinds=None,
+ index=None):
+ if filter_kinds is not None:
+ assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
+
+ if index is not None:
+ search = LogEntry.search(index=index)
+ else:
+ search = LogEntry.search()
+
+ if performer_id is not None:
+ assert isinstance(performer_id, int)
+ search = search.filter('term', performer_id=performer_id)
+
+ if repository_id is not None:
+ assert isinstance(repository_id, int)
+ search = search.filter('term', repository_id=repository_id)
+
+ if account_id is not None and repository_id is None:
+ assert isinstance(account_id, int)
+ search = search.filter('term', account_id=account_id)
+
+ if filter_kinds is not None:
+ kind_map = model.log.get_log_entry_kinds()
+ ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
+ search = search.exclude('terms', kind_id=ignore_ids)
+
+ return search
+
+ def _base_query_date_range(self, start_datetime, end_datetime, performer_id, repository_id,
+ account_id, filter_kinds, index=None):
+ skip_datetime_check = False
+ if _date_range_in_single_index(start_datetime, end_datetime):
+ index = self._es_client.index_name(start_datetime)
+ skip_datetime_check = self._es_client.index_exists(index)
+
+ if index and (skip_datetime_check or self._es_client.index_exists(index)):
+ search = self._base_query(performer_id, repository_id, account_id, filter_kinds,
+ index=index)
+ else:
+ search = self._base_query(performer_id, repository_id, account_id, filter_kinds)
+
+ if not skip_datetime_check:
+ search = search.query('range', datetime={'gte': start_datetime, 'lt': end_datetime})
+
+ return search
+
+ def _load_logs_for_day(self, logs_date, performer_id, repository_id, account_id, filter_kinds,
+ after_datetime=None, after_random_id=None, size=PAGE_SIZE):
+ index = self._es_client.index_name(logs_date)
+ if not self._es_client.index_exists(index):
+ return []
+
+ search = self._base_query(performer_id, repository_id, account_id, filter_kinds,
+ index=index)
+ search = search.sort({'datetime': 'desc'}, {'random_id.keyword': 'desc'})
+ search = search.extra(size=size)
+
+ if after_datetime is not None and after_random_id is not None:
+ after_datetime_epoch_ms = epoch_ms(after_datetime)
+ search = search.extra(search_after=[after_datetime_epoch_ms, after_random_id])
+
+ return search.execute()
+
+ def _load_latest_logs(self, performer_id, repository_id, account_id, filter_kinds, size):
+ """ Return the latest logs from Elasticsearch.
+
+    Look back through indices up to the configured logrotateworker threshold, or up to 30 days if not defined.
+ """
+ # Set the last index to check to be the logrotateworker threshold, or 30 days
+ end_datetime = datetime.now()
+ start_datetime = end_datetime - timedelta(days=DATE_RANGE_LIMIT)
+
+ latest_logs = []
+ for day in _date_range_descending(start_datetime, end_datetime, includes_end_datetime=True):
+ try:
+ logs = self._load_logs_for_day(day, performer_id, repository_id, account_id, filter_kinds,
+ size=size)
+ latest_logs.extend(logs)
+ except NotFoundError:
+ continue
+
+ if len(latest_logs) >= size:
+ break
+
+ return _for_elasticsearch_logs(latest_logs[:size], repository_id, account_id)
+
+ def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
+ namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
+ assert start_datetime is not None and end_datetime is not None
+
+ # Check for a valid combined model token when migrating online from a combined model
+ if page_token is not None and page_token.get('readwrite_page_token') is not None:
+ page_token = page_token.get('readwrite_page_token')
+
+ if page_token is not None and max_page_count is not None:
+ page_number = page_token.get('page_number')
+ if page_number is not None and page_number + 1 > max_page_count:
+ return LogEntriesPage([], None)
+
+ repository_id, account_id, performer_id = DocumentLogsModel._get_ids_by_names(
+ repository_name, namespace_name, performer_name)
+
+ after_datetime = None
+ after_random_id = None
+ if page_token is not None:
+ after_datetime = parse_datetime(page_token['datetime'])
+ after_random_id = page_token['random_id']
+
+ if after_datetime is not None:
+ end_datetime = min(end_datetime, after_datetime)
+
+ all_logs = []
+
+ with CloseForLongOperation(config.app_config):
+ for current_date in _date_range_descending(start_datetime, end_datetime):
+ try:
+ logs = self._load_logs_for_day(current_date, performer_id, repository_id, account_id,
+ filter_kinds, after_datetime, after_random_id,
+ size=PAGE_SIZE+1)
+
+ all_logs.extend(logs)
+ except NotFoundError:
+ continue
+
+ if len(all_logs) > PAGE_SIZE:
+ break
+
+ next_page_token = None
+ all_logs = all_logs[0:PAGE_SIZE+1]
+
+ if len(all_logs) == PAGE_SIZE + 1:
+      # The last element in the response is only used to check whether there are more elements.
+      # The second-to-last element (the last log actually returned in this page) is used as the
+      # pagination token, because search_after does not include the exact match, and so the next
+      # page will start with that last element. This keeps the behavior exactly the same as
+      # table_logs_model, so that the caller can expect that when a pagination token is non-empty,
+      # there is at least 1 log to be retrieved.
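+      # For illustration only, a resulting page token might look like (hypothetical values):
+      #   {'datetime': '2019-06-27T15:35:09', 'random_id': '123-45', 'page_number': 2}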
+ next_page_token = {
+ 'datetime': all_logs[-2].datetime.isoformat(),
+ 'random_id': all_logs[-2].random_id,
+ 'page_number': page_token['page_number'] + 1 if page_token else 1,
+ }
+
+ return LogEntriesPage(_for_elasticsearch_logs(all_logs[:PAGE_SIZE], repository_id, account_id),
+ next_page_token)
+
+ def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
+ filter_kinds=None, size=20):
+ repository_id, account_id, performer_id = DocumentLogsModel._get_ids_by_names(
+ repository_name, namespace_name, performer_name)
+
+ with CloseForLongOperation(config.app_config):
+ latest_logs = self._load_latest_logs(performer_id, repository_id, account_id, filter_kinds,
+ size)
+
+ return latest_logs
+
+
+ def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
+ repository_name=None, namespace_name=None, filter_kinds=None):
+ if end_datetime - start_datetime >= timedelta(days=DATE_RANGE_LIMIT):
+ raise Exception('Cannot lookup aggregated logs over a period longer than a month')
+
+ repository_id, account_id, performer_id = DocumentLogsModel._get_ids_by_names(
+ repository_name, namespace_name, performer_name)
+
+ with CloseForLongOperation(config.app_config):
+ search = self._base_query_date_range(start_datetime, end_datetime, performer_id,
+ repository_id, account_id, filter_kinds)
+ search.aggs.bucket('by_id', 'terms', field='kind_id').bucket('by_date', 'date_histogram',
+ field='datetime', interval='day')
+ # es returns all buckets when size=0
+ search = search.extra(size=0)
+ resp = search.execute()
+
+ if not resp.aggregations:
+ return []
+
+ counts = []
+ by_id = resp.aggregations['by_id']
+
+ for id_bucket in by_id.buckets:
+ for date_bucket in id_bucket.by_date.buckets:
+ if date_bucket.doc_count > 0:
+ counts.append(AggregatedLogCount(id_bucket.key, date_bucket.doc_count, date_bucket.key))
+
+ return counts
+
+ def count_repository_actions(self, repository, day):
+ index = self._es_client.index_name(day)
+ search = self._base_query_date_range(day, day + timedelta(days=1),
+ None,
+ repository.id,
+ None,
+ None,
+ index=index)
+ search = search.params(request_timeout=COUNT_REPOSITORY_ACTION_TIMEOUT)
+
+ try:
+ return search.count()
+ except NotFoundError:
+ return 0
+
+ def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
+ repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
+ if self._should_skip_logging and self._should_skip_logging(kind_name, namespace_name,
+ is_free_namespace):
+ return
+
+ if repository_name is not None:
+ assert repository is None
+ assert namespace_name is not None
+ repository = model.repository.get_repository(namespace_name, repository_name)
+
+ if timestamp is None:
+ timestamp = datetime.today()
+
+ account_id = None
+ performer_id = None
+ repository_id = None
+
+ if namespace_name is not None:
+ account_id = model.user.get_namespace_user(namespace_name).id
+
+ if performer is not None:
+ performer_id = performer.id
+
+ if repository is not None:
+ repository_id = repository.id
+
+ metadata_json = json.dumps(metadata or {}, default=_json_serialize)
+ kind_id = model.log._get_log_entry_kind(kind_name)
+ log = LogEntry(random_id=_random_id(), kind_id=kind_id, account_id=account_id,
+ performer_id=performer_id, ip=ip, metadata_json=metadata_json,
+ repository_id=repository_id, datetime=timestamp)
+
+ try:
+ self._logs_producer.send(log)
+ except LogSendException as lse:
+ strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
+      log_extra = {'exception': lse}
+      log_extra.update(log.to_dict())
+      logger.exception('log_action failed', extra=log_extra)
+ if not (strict_logging_disabled and kind_name in ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING):
+ raise
+
+ def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
+ namespace_id=None, max_query_time=None):
+ max_query_time = max_query_time.total_seconds() if max_query_time is not None else 300
+ search = self._base_query_date_range(start_datetime, end_datetime, None, repository_id,
+ namespace_id, None)
+
+ def raise_on_timeout(batch_generator):
+ start = time()
+ for batch in batch_generator:
+ elapsed = time() - start
+ if elapsed > max_query_time:
+ logger.error('Retrieval of logs `%s/%s` timed out with time of `%s`', namespace_id,
+ repository_id, elapsed)
+ raise LogsIterationTimeout()
+
+ yield batch
+ start = time()
+
+ def read_batch(scroll):
+ batch = []
+ for log in scroll:
+ batch.append(log)
+ if len(batch) == DEFAULT_RESULT_WINDOW:
+ yield _for_elasticsearch_logs(batch, repository_id=repository_id,
+ namespace_id=namespace_id)
+ batch = []
+
+ if batch:
+ yield _for_elasticsearch_logs(batch, repository_id=repository_id, namespace_id=namespace_id)
+
+ search = search.params(size=DEFAULT_RESULT_WINDOW, request_timeout=max_query_time)
+
+ try:
+ with CloseForLongOperation(config.app_config):
+ for batch in raise_on_timeout(read_batch(search.scan())):
+ yield batch
+ except ConnectionTimeout:
+ raise LogsIterationTimeout()
+
+ def can_delete_index(self, index, cutoff_date):
+ return self._es_client.can_delete_index(index, cutoff_date)
+
+ def list_indices(self):
+ return self._es_client.list_indices()
+
+ def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
+ """ Yield a context manager for a group of outdated logs. """
+ all_indices = self.list_indices()
+ for index in all_indices:
+ if not self.can_delete_index(index, cutoff_date):
+ continue
+
+ context = ElasticsearchLogRotationContext(index, min_logs_per_rotation, self._es_client)
+ yield context
+
+
+class ElasticsearchLogRotationContext(LogRotationContextInterface):
+ """
+  ElasticsearchLogRotationContext yields batches of logs from an index.
+
+ When completed without exceptions, this context will delete its associated
+ Elasticsearch index.
+ """
+ def __init__(self, index, min_logs_per_rotation, es_client):
+ self._es_client = es_client
+ self.min_logs_per_rotation = min_logs_per_rotation
+ self.index = index
+
+ self.start_pos = 0
+ self.end_pos = 0
+
+ self.scroll = None
+
+ def __enter__(self):
+ search = self._base_query()
+ self.scroll = search.scan()
+ return self
+
+ def __exit__(self, ex_type, ex_value, ex_traceback):
+ if ex_type is None and ex_value is None and ex_traceback is None:
+ logger.debug('Deleting index %s', self.index)
+ self._es_client.delete_index(self.index)
+
+ def yield_logs_batch(self):
+ def batched_logs(gen, size):
+ batch = []
+ for log in gen:
+ batch.append(log)
+ if len(batch) == size:
+ yield batch
+ batch = []
+
+ if batch:
+ yield batch
+
+ for batch in batched_logs(self.scroll, self.min_logs_per_rotation):
+ self.end_pos = self.start_pos + len(batch) - 1
+ yield batch, self._generate_filename()
+ self.start_pos = self.end_pos + 1
+
+ def _base_query(self):
+ search = LogEntry.search(index=self.index)
+ return search
+
+ def _generate_filename(self):
+ """ Generate the filenames used to archive the action logs. """
+ filename = '%s_%d-%d' % (self.index, self.start_pos, self.end_pos)
+ filename = '.'.join((filename, 'txt.gz'))
+ return filename
diff --git a/data/logs_model/elastic_logs.py b/data/logs_model/elastic_logs.py
new file mode 100644
index 000000000..cd3ff675d
--- /dev/null
+++ b/data/logs_model/elastic_logs.py
@@ -0,0 +1,255 @@
+import os
+import logging
+import re
+from datetime import datetime, timedelta
+
+from requests_aws4auth import AWS4Auth
+
+from elasticsearch import RequestsHttpConnection
+from elasticsearch.exceptions import NotFoundError, AuthorizationException
+from elasticsearch_dsl import Index, Document, Integer, Date, Text, Ip, Keyword
+from elasticsearch_dsl.connections import connections
+
+
+logger = logging.getLogger(__name__)
+
+# Name of the connection used for Elasticsearch's template API
+ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS = 'logentry_template'
+
+# Prefix of autogenerated indices
+INDEX_NAME_PREFIX = 'logentry_'
+
+# Time-based index date format
+INDEX_DATE_FORMAT = '%Y-%m-%d'
+
+# Timeout for default connection
+ELASTICSEARCH_DEFAULT_CONNECTION_TIMEOUT = 15
+
+# Timeout for template api Connection
+ELASTICSEARCH_TEMPLATE_CONNECTION_TIMEOUT = 60
+
+# Force an index template update
+ELASTICSEARCH_FORCE_INDEX_TEMPLATE_UPDATE = os.environ.get('FORCE_INDEX_TEMPLATE_UPDATE', '')
+
+# Valid index prefix pattern
+VALID_INDEX_PATTERN = r'^((?!\.$|\.\.$|[-_+])([^A-Z:\/*?\"<>|,# ]){1,255})$'
+
+
+class LogEntry(Document):
+ # random_id is the tie-breaker for sorting in pagination.
+  # random_id is also used for deduplication of records when using an "at-least-once" delivery stream.
+ # Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-search-after.html
+ #
+  # We don't use the _id of a document since `doc_values` is not built for that field:
+ # An on-disk data structure that stores the same data in a columnar format
+ # for optimized sorting and aggregations.
+ # Reference: https://github.com/elastic/elasticsearch/issues/35369
+ random_id = Text(fields={'keyword': Keyword()})
+ kind_id = Integer()
+ account_id = Integer()
+ performer_id = Integer()
+ repository_id = Integer()
+ ip = Ip()
+ metadata_json = Text()
+ datetime = Date()
+
+ _initialized = False
+
+ @classmethod
+ def init(cls, index_prefix, index_settings=None, skip_template_init=False):
+ """
+ Create the index template, and populate LogEntry's mapping and index settings.
+ """
+ wildcard_index = Index(name=index_prefix + '*')
+ wildcard_index.settings(**(index_settings or {}))
+ wildcard_index.document(cls)
+ cls._index = wildcard_index
+ cls._index_prefix = index_prefix
+
+ if not skip_template_init:
+ cls.create_or_update_template()
+
+    # Since the elasticsearch-dsl API requires the document's index to be defined as an inner
+    # class at the class level, this method must be called before `save` can be used.
+ cls._initialized = True
+
+ @classmethod
+ def create_or_update_template(cls):
+ assert cls._index and cls._index_prefix
+ index_template = cls._index.as_template(cls._index_prefix)
+ index_template.save(using=ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS)
+
+ def save(self, **kwargs):
+ # We group the logs based on year, month and day as different indexes, so that
+ # dropping those indexes based on retention range is easy.
+ #
+ # NOTE: This is only used if logging directly to Elasticsearch
+ # When using Kinesis or Kafka, the consumer of these streams
+ # will be responsible for the management of the indices' lifecycle.
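+    # e.g. a log with datetime 2019-06-27 and index prefix 'logentry_' is written to the
+    # index 'logentry_2019-06-27' (illustrative).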
+ assert LogEntry._initialized
+ kwargs['index'] = self.datetime.strftime(self._index_prefix + INDEX_DATE_FORMAT)
+ return super(LogEntry, self).save(**kwargs)
+
+
+class ElasticsearchLogs(object):
+ """
+ Model for logs operations stored in an Elasticsearch cluster.
+ """
+
+ def __init__(self, host=None, port=None, access_key=None, secret_key=None, aws_region=None,
+ index_settings=None, use_ssl=True, index_prefix=INDEX_NAME_PREFIX):
+ # For options in index_settings, refer to:
+ # https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html
+    # Some index settings are set at index creation time and therefore should NOT
+    # be changed once the index has been created.
+ self._host = host
+ self._port = port
+ self._access_key = access_key
+ self._secret_key = secret_key
+ self._aws_region = aws_region
+ self._index_prefix = index_prefix
+ self._index_settings = index_settings
+ self._use_ssl = use_ssl
+
+ self._client = None
+ self._initialized = False
+
+ def _initialize(self):
+ """
+    Initialize a connection to an ES cluster and
+    create an index template if it does not exist.
+ """
+ if not self._initialized:
+ http_auth = None
+ if self._access_key and self._secret_key and self._aws_region:
+ http_auth = AWS4Auth(self._access_key, self._secret_key, self._aws_region, 'es')
+ elif self._access_key and self._secret_key:
+ http_auth = (self._access_key, self._secret_key)
+ else:
+ logger.warn("Connecting to Elasticsearch without HTTP auth")
+
+ self._client = connections.create_connection(
+ hosts=[{
+ 'host': self._host,
+ 'port': self._port
+ }],
+ http_auth=http_auth,
+ use_ssl=self._use_ssl,
+ verify_certs=True,
+ connection_class=RequestsHttpConnection,
+ timeout=ELASTICSEARCH_DEFAULT_CONNECTION_TIMEOUT,
+ )
+
+      # Create a second connection with a longer timeout (60s vs the 15s default).
+ # For some reason the PUT template API can take anywhere between
+ # 10s and 30s on the test cluster.
+ # This only needs to be done once to initialize the index template
+ connections.create_connection(
+ alias=ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS,
+ hosts=[{
+ 'host': self._host,
+ 'port': self._port
+ }],
+ http_auth=http_auth,
+ use_ssl=self._use_ssl,
+ verify_certs=True,
+ connection_class=RequestsHttpConnection,
+ timeout=ELASTICSEARCH_TEMPLATE_CONNECTION_TIMEOUT,
+ )
+
+ try:
+ force_template_update = ELASTICSEARCH_FORCE_INDEX_TEMPLATE_UPDATE.lower() == 'true'
+ self._client.indices.get_template(self._index_prefix)
+ LogEntry.init(self._index_prefix, self._index_settings,
+ skip_template_init=not force_template_update)
+ except NotFoundError:
+ LogEntry.init(self._index_prefix, self._index_settings, skip_template_init=False)
+ finally:
+ try:
+ connections.remove_connection(ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS)
+ except KeyError as ke:
+ logger.exception('Elasticsearch connection not found to remove %s: %s',
+ ELASTICSEARCH_TEMPLATE_CONNECTION_ALIAS, ke)
+
+ self._initialized = True
+
+ def index_name(self, day):
+ """ Return an index name for the given day. """
+ return self._index_prefix + day.strftime(INDEX_DATE_FORMAT)
+
+ def index_exists(self, index):
+ try:
+ return index in self._client.indices.get(index)
+ except NotFoundError:
+ return False
+
+ @staticmethod
+ def _valid_index_prefix(prefix):
+ """ Check that the given index prefix is valid with the set of
+ indices used by this class.
+ """
+ return re.match(VALID_INDEX_PATTERN, prefix) is not None
+
+ def _valid_index_name(self, index):
+ """ Check that the given index name is valid and follows the format:
+        <index_prefix>YYYY-MM-DD
+ """
+ if not ElasticsearchLogs._valid_index_prefix(index):
+ return False
+
+ if not index.startswith(self._index_prefix) or len(index) > 255:
+ return False
+
+ index_dt_str = index.split(self._index_prefix, 1)[-1]
+ try:
+ datetime.strptime(index_dt_str, INDEX_DATE_FORMAT)
+ return True
+ except ValueError:
+ logger.exception('Invalid date format (YYYY-MM-DD) for index: %s', index)
+ return False
+
+ def can_delete_index(self, index, cutoff_date):
+ """ Check if the given index can be deleted based on the given index's date and cutoff date. """
+ assert self._valid_index_name(index)
+ index_dt = datetime.strptime(index[len(self._index_prefix):], INDEX_DATE_FORMAT)
+ return index_dt < cutoff_date and cutoff_date - index_dt >= timedelta(days=1)
+
+ def list_indices(self):
+ self._initialize()
+ try:
+ return self._client.indices.get(self._index_prefix + '*').keys()
+ except NotFoundError as nfe:
+ logger.exception('`%s` indices not found: %s', self._index_prefix, nfe.info)
+ return []
+ except AuthorizationException as ae:
+ logger.exception('Unauthorized for indices `%s`: %s', self._index_prefix, ae.info)
+ return None
+
+ def delete_index(self, index):
+ self._initialize()
+ assert self._valid_index_name(index)
+
+ try:
+ self._client.indices.delete(index)
+ return index
+ except NotFoundError as nfe:
+ logger.exception('`%s` indices not found: %s', index, nfe.info)
+ return None
+ except AuthorizationException as ae:
+ logger.exception('Unauthorized to delete index `%s`: %s', index, ae.info)
+ return None
+
+
+def configure_es(host, port, access_key=None, secret_key=None, aws_region=None,
+ index_prefix=None, use_ssl=True, index_settings=None):
+ """
+ For options in index_settings, refer to:
+ https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html
+  Some index settings are set at index creation time and therefore should NOT
+  be changed once the index has been created.
+ """
+ es_client = ElasticsearchLogs(host=host, port=port, access_key=access_key, secret_key=secret_key,
+ aws_region=aws_region, index_prefix=index_prefix or INDEX_NAME_PREFIX,
+ use_ssl=use_ssl, index_settings=index_settings)
+ es_client._initialize()
+ return es_client
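+
+# Example usage (hypothetical host; illustrative only):
+#   es_client = configure_es('es.example.com', 9200, index_prefix='logentry_')
+#   es_client.index_name(datetime(2019, 6, 27))  # -> 'logentry_2019-06-27'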
diff --git a/data/logs_model/inmemory_model.py b/data/logs_model/inmemory_model.py
new file mode 100644
index 000000000..f9a219f51
--- /dev/null
+++ b/data/logs_model/inmemory_model.py
@@ -0,0 +1,244 @@
+import logging
+import json
+
+from collections import namedtuple
+from datetime import datetime
+from tzlocal import get_localzone
+from dateutil.relativedelta import relativedelta
+
+from data import model
+from data.logs_model.datatypes import AggregatedLogCount, LogEntriesPage, Log
+from data.logs_model.interface import (ActionLogsDataInterface, LogRotationContextInterface,
+ LogsIterationTimeout)
+
+logger = logging.getLogger(__name__)
+
+LogAndRepository = namedtuple('LogAndRepository', ['log', 'stored_log', 'repository'])
+
+StoredLog = namedtuple('StoredLog', ['kind_id',
+ 'account_id',
+ 'performer_id',
+ 'ip',
+ 'metadata_json',
+ 'repository_id',
+ 'datetime'])
+
+class InMemoryModel(ActionLogsDataInterface):
+ """
+ InMemoryModel implements the data model for logs in-memory. FOR TESTING ONLY.
+ """
+ def __init__(self):
+ self.logs = []
+
+ def _filter_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
+ namespace_name=None, filter_kinds=None):
+ if filter_kinds is not None:
+ assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
+
+ for log_and_repo in self.logs:
+ if log_and_repo.log.datetime < start_datetime or log_and_repo.log.datetime > end_datetime:
+ continue
+
+ if performer_name and log_and_repo.log.performer_username != performer_name:
+ continue
+
+ if (repository_name and
+ (not log_and_repo.repository or log_and_repo.repository.name != repository_name)):
+ continue
+
+ if namespace_name and log_and_repo.log.account_username != namespace_name:
+ continue
+
+ if filter_kinds:
+ kind_map = model.log.get_log_entry_kinds()
+ ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
+ if log_and_repo.log.kind_id in ignore_ids:
+ continue
+
+ yield log_and_repo
+
+ def _filter_latest_logs(self, performer_name=None, repository_name=None,
+ namespace_name=None, filter_kinds=None):
+ if filter_kinds is not None:
+ assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
+
+ for log_and_repo in sorted(self.logs, key=lambda t: t.log.datetime, reverse=True):
+ if performer_name and log_and_repo.log.performer_username != performer_name:
+ continue
+
+ if (repository_name and
+ (not log_and_repo.repository or log_and_repo.repository.name != repository_name)):
+ continue
+
+ if namespace_name and log_and_repo.log.account_username != namespace_name:
+ continue
+
+ if filter_kinds:
+ kind_map = model.log.get_log_entry_kinds()
+ ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds]
+ if log_and_repo.log.kind_id in ignore_ids:
+ continue
+
+ yield log_and_repo
+
+ def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
+ namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
+ logs = []
+ for log_and_repo in self._filter_logs(start_datetime, end_datetime, performer_name,
+ repository_name, namespace_name, filter_kinds):
+ logs.append(log_and_repo.log)
+ return LogEntriesPage(logs, None)
+
+ def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
+ filter_kinds=None, size=20):
+ latest_logs = []
+ for log_and_repo in self._filter_latest_logs(performer_name, repository_name, namespace_name,
+ filter_kinds):
+ if size is not None and len(latest_logs) == size:
+ break
+
+ latest_logs.append(log_and_repo.log)
+
+ return latest_logs
+
+ def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
+ repository_name=None, namespace_name=None, filter_kinds=None):
+ entries = {}
+ for log_and_repo in self._filter_logs(start_datetime, end_datetime, performer_name,
+ repository_name, namespace_name, filter_kinds):
+ entry = log_and_repo.log
+ synthetic_date = datetime(start_datetime.year, start_datetime.month, int(entry.datetime.day),
+ tzinfo=get_localzone())
+ if synthetic_date.day < start_datetime.day:
+ synthetic_date = synthetic_date + relativedelta(months=1)
+
+ key = '%s-%s' % (entry.kind_id, entry.datetime.day)
+
+ if key in entries:
+ entries[key] = AggregatedLogCount(entry.kind_id, entries[key].count + 1,
+ synthetic_date)
+ else:
+ entries[key] = AggregatedLogCount(entry.kind_id, 1, synthetic_date)
+
+ return entries.values()
+
+ def count_repository_actions(self, repository, day):
+ count = 0
+ for log_and_repo in self.logs:
+ if log_and_repo.repository != repository:
+ continue
+
+ if log_and_repo.log.datetime.day != day.day:
+ continue
+
+ count += 1
+
+ return count
+
+ def queue_logs_export(self, start_datetime, end_datetime, export_action_logs_queue,
+ namespace_name=None, repository_name=None, callback_url=None,
+ callback_email=None, filter_kinds=None):
+ raise NotImplementedError
+
+ def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
+ repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
+ timestamp = timestamp or datetime.today()
+
+ if not repository and repository_name and namespace_name:
+ repository = model.repository.get_repository(namespace_name, repository_name)
+
+ account = None
+ account_id = None
+ performer_id = None
+ repository_id = None
+
+ if namespace_name is not None:
+ account = model.user.get_namespace_user(namespace_name)
+ account_id = account.id
+
+ if performer is not None:
+ performer_id = performer.id
+
+ if repository is not None:
+ repository_id = repository.id
+
+ metadata_json = json.dumps(metadata or {})
+ kind_id = model.log.get_log_entry_kinds()[kind_name]
+
+ stored_log = StoredLog(
+ kind_id,
+ account_id,
+ performer_id,
+ ip,
+ metadata_json,
+ repository_id,
+ timestamp
+ )
+
+ log = Log(metadata_json=metadata,
+ ip=ip,
+ datetime=timestamp,
+ performer_email=performer.email if performer else None,
+ performer_username=performer.username if performer else None,
+ performer_robot=performer.robot if performer else None,
+ account_organization=account.organization if account else None,
+ account_username=account.username if account else None,
+ account_email=account.email if account else None,
+ account_robot=account.robot if account else None,
+ kind_id=kind_id)
+
+ self.logs.append(LogAndRepository(log, stored_log, repository))
+
+ def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
+ namespace_id=None, max_query_time=None):
+ # Just for testing.
+ if max_query_time is not None:
+ raise LogsIterationTimeout()
+
+ logs = []
+ for log_and_repo in self._filter_logs(start_datetime, end_datetime):
+ if (repository_id and
+ (not log_and_repo.repository or log_and_repo.repository.id != repository_id)):
+ continue
+
+ if namespace_id:
+ if log_and_repo.log.account_username is None:
+ continue
+
+ namespace = model.user.get_namespace_user(log_and_repo.log.account_username)
+ if namespace.id != namespace_id:
+ continue
+
+ logs.append(log_and_repo.log)
+
+ yield logs
+
+ def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
+ expired_logs = [log_and_repo for log_and_repo in self.logs
+ if log_and_repo.log.datetime <= cutoff_date]
+ while True:
+ if not expired_logs:
+ break
+ context = InMemoryLogRotationContext(expired_logs[:min_logs_per_rotation], self.logs)
+ expired_logs = expired_logs[min_logs_per_rotation:]
+ yield context
+
+
+class InMemoryLogRotationContext(LogRotationContextInterface):
+ def __init__(self, expired_logs, all_logs):
+ self.expired_logs = expired_logs
+ self.all_logs = all_logs
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, ex_type, ex_value, ex_traceback):
+ if ex_type is None and ex_value is None and ex_traceback is None:
+ for log in self.expired_logs:
+ self.all_logs.remove(log)
+
+ def yield_logs_batch(self):
+ """ Yield a batch of logs and a filename for that batch. """
+ filename = 'inmemory_model_filename_placeholder'
+ filename = '.'.join((filename, 'txt.gz'))
+ yield [log_and_repo.stored_log for log_and_repo in self.expired_logs], filename
diff --git a/data/logs_model/interface.py b/data/logs_model/interface.py
new file mode 100644
index 000000000..705d46cc0
--- /dev/null
+++ b/data/logs_model/interface.py
@@ -0,0 +1,95 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+class LogsIterationTimeout(Exception):
+ """ Exception raised if logs iteration times out. """
+
+
+@add_metaclass(ABCMeta)
+class ActionLogsDataInterface(object):
+ """ Interface for code to work with the logs data model. The logs data model consists
+ of all access for reading and writing action logs.
+ """
+ @abstractmethod
+ def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
+ namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
+ """ Looks up all logs between the start_datetime and end_datetime, filtered
+ by performer (a user), repository or namespace. Note that one (and only one) of the three
+ can be specified. Returns a LogEntriesPage. `filter_kinds`, if specified, is a set/list
+ of the kinds of logs to filter out.
+ """
+
+ @abstractmethod
+ def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
+ filter_kinds=None, size=20):
+ """ Looks up latest logs of a specific kind, filtered by performer (a user),
+ repository or namespace. Note that one (and only one) of the three can be specified.
+ Returns a list of `Log`.
+ """
+
+ @abstractmethod
+ def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
+ repository_name=None, namespace_name=None, filter_kinds=None):
+ """ Returns the aggregated count of logs, by kind, between the start_datetime and end_datetime,
+ filtered by performer (a user), repository or namespace. Note that one (and only one) of
+ the three can be specified. Returns a list of AggregatedLogCount.
+ """
+
+ @abstractmethod
+ def count_repository_actions(self, repository, day):
+ """ Returns the total number of repository actions over the given day, in the given repository
+ or None on error.
+ """
+
+ @abstractmethod
+ def queue_logs_export(self, start_datetime, end_datetime, export_action_logs_queue,
+ namespace_name=None, repository_name=None, callback_url=None,
+ callback_email=None, filter_kinds=None):
+ """ Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
+ for export to the specified URL and/or email address. Returns the ID of the export job
+ queued or None if error.
+ """
+
+ @abstractmethod
+ def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
+ repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
+ """ Logs a single action as having taken place. """
+
+ @abstractmethod
+ def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
+ namespace_id=None, max_query_time=None):
+ """ Returns an iterator that yields bundles of all logs found between the start_datetime and
+ end_datetime, optionally filtered by the repository or namespace. This function should be
+        used for any bulk lookup operations, and implementations should put minimal strain on the
+        backing storage for large operations. If there was an error in setting up, returns None.
+
+ If max_query_time is specified, each iteration that yields a log bundle will have its
+ queries run with a maximum timeout of that specified, and, if any exceed that threshold,
+ LogsIterationTimeout will be raised instead of returning the logs bundle.
+ """
+
+ @abstractmethod
+ def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
+ """
+ A generator that yields contexts implementing the LogRotationContextInterface.
+ Each context represents a set of logs to be archived and deleted once
+ the context completes without exceptions.
+
+ For database logs, the LogRotationContext abstracts over a set of rows. When the context
+ finishes, its associated rows get deleted.
+
+ For Elasticsearch logs, the LogRotationContext abstracts over indices. When the context
+ finishes, its associated index gets deleted.
+ """
+
+
+@add_metaclass(ABCMeta)
+class LogRotationContextInterface(object):
+ """ Interface for iterating over a set of logs to be archived. """
+ @abstractmethod
+ def yield_logs_batch(self):
+ """
+ Generator yielding batch of logs and a filename for that batch.
+ A batch is a subset of the logs part of the context.
+ """
diff --git a/data/logs_model/logs_producer/__init__.py b/data/logs_model/logs_producer/__init__.py
new file mode 100644
index 000000000..17bd605ad
--- /dev/null
+++ b/data/logs_model/logs_producer/__init__.py
@@ -0,0 +1,27 @@
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class LogSendException(Exception):
+ """ A generic error when sending the logs to its destination.
+ e.g. Kinesis, Kafka, Elasticsearch, ...
+ """
+ pass
+
+
+class LogProducerProxy(object):
+ def __init__(self):
+ self._model = None
+
+ def initialize(self, model):
+ self._model = model
+ logger.info('===============================')
+ logger.info('Using producer `%s`', self._model)
+ logger.info('===============================')
+
+ def __getattr__(self, attr):
+ if not self._model:
+ raise AttributeError("LogsModelProxy is not initialized")
+ return getattr(self._model, attr)
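+
+
+# Example wiring (hypothetical; ElasticsearchLogsProducer comes from .elasticsearch_logs_producer):
+#   logs_producer = LogProducerProxy()
+#   logs_producer.initialize(ElasticsearchLogsProducer())
+#   logs_producer.send(log_entry)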
diff --git a/data/logs_model/logs_producer/elasticsearch_logs_producer.py b/data/logs_model/logs_producer/elasticsearch_logs_producer.py
new file mode 100644
index 000000000..175fb4ac6
--- /dev/null
+++ b/data/logs_model/logs_producer/elasticsearch_logs_producer.py
@@ -0,0 +1,25 @@
+import logging
+
+from elasticsearch.exceptions import ElasticsearchException
+
+from data.logs_model.logs_producer.interface import LogProducerInterface
+from data.logs_model.logs_producer import LogSendException
+
+
+logger = logging.getLogger(__name__)
+
+
+class ElasticsearchLogsProducer(LogProducerInterface):
+ """ Log producer writing log entries to Elasticsearch.
+
+ This implementation writes directly to Elasticsearch without a streaming/queueing service.
+ """
+ def send(self, logentry):
+ try:
+ logentry.save()
+ except ElasticsearchException as ex:
+ logger.exception('ElasticsearchLogsProducer error sending log to Elasticsearch: %s', ex)
+ raise LogSendException('ElasticsearchLogsProducer error sending log to Elasticsearch: %s' % ex)
+ except Exception as e:
+ logger.exception('ElasticsearchLogsProducer exception sending log to Elasticsearch: %s', e)
+ raise LogSendException('ElasticsearchLogsProducer exception sending log to Elasticsearch: %s' % e)
diff --git a/data/logs_model/logs_producer/interface.py b/data/logs_model/logs_producer/interface.py
new file mode 100644
index 000000000..d0d9b71d4
--- /dev/null
+++ b/data/logs_model/logs_producer/interface.py
@@ -0,0 +1,8 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+@add_metaclass(ABCMeta)
+class LogProducerInterface(object):
+ @abstractmethod
+ def send(self, logentry):
+ """ Send a log entry to the configured log infrastructure. """
diff --git a/data/logs_model/logs_producer/kafka_logs_producer.py b/data/logs_model/logs_producer/kafka_logs_producer.py
new file mode 100644
index 000000000..9c13a441d
--- /dev/null
+++ b/data/logs_model/logs_producer/kafka_logs_producer.py
@@ -0,0 +1,45 @@
+import logging
+
+from kafka.errors import KafkaError, KafkaTimeoutError
+from kafka import KafkaProducer
+
+from data.logs_model.shared import epoch_ms
+from data.logs_model.logs_producer.interface import LogProducerInterface
+from data.logs_model.logs_producer.util import logs_json_serializer
+from data.logs_model.logs_producer import LogSendException
+
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_MAX_BLOCK_SECONDS = 5
+
+
+class KafkaLogsProducer(LogProducerInterface):
+ """ Log producer writing log entries to a Kafka stream. """
+ def __init__(self, bootstrap_servers=None, topic=None, client_id=None, max_block_seconds=None):
+ self.bootstrap_servers = bootstrap_servers
+ self.topic = topic
+ self.client_id = client_id
+ self.max_block_ms = (max_block_seconds or DEFAULT_MAX_BLOCK_SECONDS) * 1000
+
+ self._producer = KafkaProducer(bootstrap_servers=self.bootstrap_servers,
+ client_id=self.client_id,
+ max_block_ms=self.max_block_ms,
+ value_serializer=logs_json_serializer)
+
+ def send(self, logentry):
+ try:
+ # send() has a (max_block_ms) timeout and get() has a (max_block_ms) timeout
+ # for an upper bound of 2x(max_block_ms) before guaranteed delivery
+ future = self._producer.send(self.topic, logentry.to_dict(), timestamp_ms=epoch_ms(logentry.datetime))
+ record_metadata = future.get(timeout=self.max_block_ms)
+      assert future.succeeded()
+ except KafkaTimeoutError as kte:
+ logger.exception('KafkaLogsProducer timeout sending log to Kafka: %s', kte)
+ raise LogSendException('KafkaLogsProducer timeout sending log to Kafka: %s' % kte)
+ except KafkaError as ke:
+ logger.exception('KafkaLogsProducer error sending log to Kafka: %s', ke)
+ raise LogSendException('KafkaLogsProducer error sending log to Kafka: %s' % ke)
+ except Exception as e:
+ logger.exception('KafkaLogsProducer exception sending log to Kafka: %s', e)
+ raise LogSendException('KafkaLogsProducer exception sending log to Kafka: %s' % e)
diff --git a/data/logs_model/logs_producer/kinesis_stream_logs_producer.py b/data/logs_model/logs_producer/kinesis_stream_logs_producer.py
new file mode 100644
index 000000000..d4c03f711
--- /dev/null
+++ b/data/logs_model/logs_producer/kinesis_stream_logs_producer.py
@@ -0,0 +1,75 @@
+import logging
+import hashlib
+import random
+
+import boto3
+from botocore.exceptions import ClientError
+from botocore.client import Config
+
+from data.logs_model.logs_producer.interface import LogProducerInterface
+from data.logs_model.logs_producer.util import logs_json_serializer
+from data.logs_model.logs_producer import LogSendException
+
+
+logger = logging.getLogger(__name__)
+
+KINESIS_PARTITION_KEY_PREFIX = 'logentry_partition_key_'
+DEFAULT_CONNECT_TIMEOUT = 5
+DEFAULT_READ_TIMEOUT = 5
+MAX_RETRY_ATTEMPTS = 5
+DEFAULT_MAX_POOL_CONNECTIONS = 10
+
+
+def _partition_key(number_of_shards=None):
+ """ Generate a partition key for AWS Kinesis stream.
+ If the number of shards is specified, generate keys where the size of the key space is
+ the number of shards.
+ """
+ key = None
+ if number_of_shards is not None:
+ shard_number = random.randrange(0, number_of_shards)
+ key = hashlib.sha1(KINESIS_PARTITION_KEY_PREFIX + str(shard_number)).hexdigest()
+ else:
+ key = hashlib.sha1(KINESIS_PARTITION_KEY_PREFIX + str(random.getrandbits(256))).hexdigest()
+
+ return key
+
+
+class KinesisStreamLogsProducer(LogProducerInterface):
+ """ Log producer writing log entries to an Amazon Kinesis Data Stream. """
+ def __init__(self, stream_name, aws_region, aws_access_key=None, aws_secret_key=None,
+ connect_timeout=None, read_timeout=None, max_retries=None,
+ max_pool_connections=None):
+ self._stream_name = stream_name
+ self._aws_region = aws_region
+ self._aws_access_key = aws_access_key
+ self._aws_secret_key = aws_secret_key
+ self._connect_timeout = connect_timeout or DEFAULT_CONNECT_TIMEOUT
+ self._read_timeout = read_timeout or DEFAULT_READ_TIMEOUT
+ self._max_retries = max_retries or MAX_RETRY_ATTEMPTS
+    self._max_pool_connections = max_pool_connections or DEFAULT_MAX_POOL_CONNECTIONS
+
+ client_config = Config(connect_timeout=self._connect_timeout,
+                           read_timeout=self._read_timeout,
+ retries={'max_attempts': self._max_retries},
+ max_pool_connections=self._max_pool_connections)
+ self._producer = boto3.client('kinesis', use_ssl=True,
+ region_name=self._aws_region,
+ aws_access_key_id=self._aws_access_key,
+ aws_secret_access_key=self._aws_secret_key,
+ config=client_config)
+
+ def send(self, logentry):
+ try:
+ data = logs_json_serializer(logentry)
+ self._producer.put_record(
+ StreamName=self._stream_name,
+ Data=data,
+ PartitionKey=_partition_key()
+ )
+ except ClientError as ce:
+ logger.exception('KinesisStreamLogsProducer client error sending log to Kinesis: %s', ce)
+ raise LogSendException('KinesisStreamLogsProducer client error sending log to Kinesis: %s' % ce)
+ except Exception as e:
+ logger.exception('KinesisStreamLogsProducer exception sending log to Kinesis: %s', e)
+ raise LogSendException('KinesisStreamLogsProducer exception sending log to Kinesis: %s' % e)
diff --git a/data/logs_model/logs_producer/test/test_json_logs_serializer.py b/data/logs_model/logs_producer/test/test_json_logs_serializer.py
new file mode 100644
index 000000000..a45b0c5bb
--- /dev/null
+++ b/data/logs_model/logs_producer/test/test_json_logs_serializer.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+import logging
+import json
+from datetime import datetime
+import pytest
+
+from data.logs_model.logs_producer.util import logs_json_serializer
+from data.logs_model.elastic_logs import LogEntry
+
+
+logger = logging.getLogger(__name__)
+
+
+TEST_DATETIME = datetime.utcnow()
+
+TEST_JSON_STRING = '{"a": "b", "c": "d"}'
+TEST_JSON_STRING_WITH_UNICODE = u'{"éëê": "îôû"}'
+
+VALID_LOGENTRY = LogEntry(random_id='123-45', ip='0.0.0.0', metadata_json=TEST_JSON_STRING, datetime=TEST_DATETIME)
+VALID_LOGENTRY_WITH_UNICODE = LogEntry(random_id='123-45', ip='0.0.0.0', metadata_json=TEST_JSON_STRING_WITH_UNICODE, datetime=TEST_DATETIME)
+
+VALID_LOGENTRY_EXPECTED_OUTPUT = '{"datetime": "%s", "ip": "0.0.0.0", "metadata_json": "{\\"a\\": \\"b\\", \\"c\\": \\"d\\"}", "random_id": "123-45"}' % TEST_DATETIME.isoformat()
+VALID_LOGENTRY_WITH_UNICODE_EXPECTED_OUTPUT = '{"datetime": "%s", "ip": "0.0.0.0", "metadata_json": "{\\"\\u00e9\\u00eb\\u00ea\\": \\"\\u00ee\\u00f4\\u00fb\\"}", "random_id": "123-45"}' % TEST_DATETIME.isoformat()
+
+
+@pytest.mark.parametrize(
+ 'is_valid, given_input, expected_output',
+ [
+ # Valid inputs
+ pytest.param(True, VALID_LOGENTRY, VALID_LOGENTRY_EXPECTED_OUTPUT),
+ # With unicode
+ pytest.param(True, VALID_LOGENTRY_WITH_UNICODE, VALID_LOGENTRY_WITH_UNICODE_EXPECTED_OUTPUT),
+ ])
+def test_logs_json_serializer(is_valid, given_input, expected_output):
+ if not is_valid:
+ with pytest.raises(ValueError) as ve:
+ data = logs_json_serializer(given_input)
+ else:
+ data = logs_json_serializer(given_input, sort_keys=True)
+ assert data == expected_output
+
+ # Make sure the datetime was serialized in the correct ISO8601
+ datetime_str = json.loads(data)['datetime']
+ assert datetime_str == TEST_DATETIME.isoformat()
diff --git a/data/logs_model/logs_producer/util.py b/data/logs_model/logs_producer/util.py
new file mode 100644
index 000000000..d6c3e2d8d
--- /dev/null
+++ b/data/logs_model/logs_producer/util.py
@@ -0,0 +1,15 @@
+import json
+from datetime import datetime
+
+class LogEntryJSONEncoder(json.JSONEncoder):
+ """ JSON encoder to encode datetimes to ISO8601 format. """
+ def default(self, obj):
+ if isinstance(obj, datetime):
+ return obj.isoformat()
+
+ return super(LogEntryJSONEncoder, self).default(obj)
+
+def logs_json_serializer(logentry, sort_keys=False):
+ """ Serializes a LogEntry to json bytes. """
+ return json.dumps(logentry.to_dict(), cls=LogEntryJSONEncoder,
+ ensure_ascii=True, sort_keys=sort_keys).encode('ascii')
diff --git a/data/logs_model/shared.py b/data/logs_model/shared.py
new file mode 100644
index 000000000..550cac95e
--- /dev/null
+++ b/data/logs_model/shared.py
@@ -0,0 +1,53 @@
+import uuid
+import json
+
+from calendar import timegm
+
+from data import model
+
+
+class SharedModel:
+ def queue_logs_export(self, start_datetime, end_datetime, export_action_logs_queue,
+ namespace_name=None, repository_name=None, callback_url=None,
+ callback_email=None, filter_kinds=None):
+ """ Queues logs between the start_datetime and end_time, filtered by a repository or namespace,
+ for export to the specified URL and/or email address. Returns the ID of the export job
+ queued or None if error.
+ """
+ export_id = str(uuid.uuid4())
+ namespace = model.user.get_namespace_user(namespace_name)
+ if namespace is None:
+ return None
+
+ repository = None
+ if repository_name is not None:
+ repository = model.repository.get_repository(namespace_name, repository_name)
+ if repository is None:
+ return None
+
+ export_action_logs_queue.put([namespace_name],
+ json.dumps({
+ 'export_id': export_id,
+ 'repository_id': repository.id if repository else None,
+ 'namespace_id': namespace.id,
+ 'namespace_name': namespace.username,
+ 'repository_name': repository.name if repository else None,
+ 'start_time': start_datetime.strftime('%m/%d/%Y'),
+ 'end_time': end_datetime.strftime('%m/%d/%Y'),
+ 'callback_url': callback_url,
+ 'callback_email': callback_email,
+ }), retries_remaining=3)
+
+ return export_id
+
+
+def epoch_ms(dt):
+ return (timegm(dt.timetuple()) * 1000) + (dt.microsecond / 1000)
+
+
+def get_kinds_filter(kinds):
+ """ Given a list of kinds, return the set of kinds not that are not part of that list.
+ i.e Returns the list of kinds to be filtered out. """
+ kind_map = model.log.get_log_entry_kinds()
+ kind_map = {key: kind_map[key] for key in kind_map if not isinstance(key, int)}
+ return [kind_name for kind_name in kind_map if kind_name not in kinds]
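+
+
+# Example (assuming a log kind named 'push_repo' is registered): get_kinds_filter(['push_repo'])
+# returns every other kind name, which callers pass as `filter_kinds` to see only 'push_repo' logs.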
diff --git a/data/logs_model/table_logs_model.py b/data/logs_model/table_logs_model.py
new file mode 100644
index 000000000..697bf2dc6
--- /dev/null
+++ b/data/logs_model/table_logs_model.py
@@ -0,0 +1,291 @@
+# pylint: disable=protected-access
+
+import logging
+
+from datetime import datetime, timedelta
+
+from tzlocal import get_localzone
+from dateutil.relativedelta import relativedelta
+
+from data import model
+from data.model import config
+from data.database import LogEntry, LogEntry2, LogEntry3, UseThenDisconnect
+from data.logs_model.interface import ActionLogsDataInterface, LogsIterationTimeout, \
+ LogRotationContextInterface
+from data.logs_model.datatypes import Log, AggregatedLogCount, LogEntriesPage
+from data.logs_model.shared import SharedModel
+from data.model.log import get_stale_logs, get_stale_logs_start_id, delete_stale_logs
+
+logger = logging.getLogger(__name__)
+
+MINIMUM_RANGE_SIZE = 1 # second
+MAXIMUM_RANGE_SIZE = 60 * 60 * 24 * 30 # seconds ~= 1 month
+EXPECTED_ITERATION_LOG_COUNT = 1000
+
+
+LOG_MODELS = [LogEntry3, LogEntry2, LogEntry]
+
+
+class TableLogsModel(SharedModel, ActionLogsDataInterface):
+ """
+ TableLogsModel implements the data model for the logs API backed by a single table
+ in the database.
+ """
+ def __init__(self, should_skip_logging=None, **kwargs):
+ self._should_skip_logging = should_skip_logging
+
+ def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
+ namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
+ if filter_kinds is not None:
+ assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
+
+ assert start_datetime is not None
+ assert end_datetime is not None
+
+ repository = None
+ if repository_name and namespace_name:
+ repository = model.repository.get_repository(namespace_name, repository_name)
+ assert repository
+
+ performer = None
+ if performer_name:
+ performer = model.user.get_user(performer_name)
+ assert performer
+
+ def get_logs(m, page_token):
+ logs_query = model.log.get_logs_query(start_datetime, end_datetime, performer=performer,
+ repository=repository, namespace=namespace_name,
+ ignore=filter_kinds, model=m)
+
+ logs, next_page_token = model.modelutil.paginate(logs_query, m,
+ descending=True,
+ page_token=page_token,
+ limit=20,
+ max_page=max_page_count,
+ sort_field_name='datetime')
+
+ return logs, next_page_token
+
+ TOKEN_TABLE_ID = 'tti'
+ table_index = 0
+ logs = []
+ next_page_token = page_token or None
+
+ # Skip empty pages (empty table)
+ while len(logs) == 0 and table_index < len(LOG_MODELS) - 1:
+ table_specified = next_page_token is not None and next_page_token.get(TOKEN_TABLE_ID) is not None
+ if table_specified:
+ table_index = next_page_token.get(TOKEN_TABLE_ID)
+
+ logs_result, next_page_token = get_logs(LOG_MODELS[table_index], next_page_token)
+ logs.extend(logs_result)
+
+ if next_page_token is None and table_index < len(LOG_MODELS) - 1:
+ next_page_token = {TOKEN_TABLE_ID: table_index + 1}
+
+ return LogEntriesPage([Log.for_logentry(log) for log in logs], next_page_token)
+
+ def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
+ filter_kinds=None, size=20):
+ if filter_kinds is not None:
+ assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
+
+ repository = None
+ if repository_name and namespace_name:
+ repository = model.repository.get_repository(namespace_name, repository_name)
+ assert repository
+
+ performer = None
+ if performer_name:
+ performer = model.user.get_user(performer_name)
+ assert performer
+
+ def get_latest_logs(m):
+ logs_query = model.log.get_latest_logs_query(performer=performer, repository=repository,
+ namespace=namespace_name, ignore=filter_kinds,
+ model=m, size=size)
+
+ logs = list(logs_query)
+ return [Log.for_logentry(log) for log in logs]
+
+ return get_latest_logs(LOG_MODELS[0])
+
+ def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
+ repository_name=None, namespace_name=None, filter_kinds=None):
+ if filter_kinds is not None:
+ assert all(isinstance(kind_name, str) for kind_name in filter_kinds)
+
+ if end_datetime - start_datetime >= timedelta(weeks=4):
+ raise Exception('Cannot lookup aggregated logs over a period longer than a month')
+
+ repository = None
+ if repository_name and namespace_name:
+ repository = model.repository.get_repository(namespace_name, repository_name)
+
+ performer = None
+ if performer_name:
+ performer = model.user.get_user(performer_name)
+
+ entries = {}
+ for log_model in LOG_MODELS:
+ aggregated = model.log.get_aggregated_logs(start_datetime, end_datetime,
+ performer=performer,
+ repository=repository,
+ namespace=namespace_name,
+ ignore=filter_kinds,
+ model=log_model)
+
+ for entry in aggregated:
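+        # The DB aggregation only reports the day of month, so reconstruct a date within the
+        # queried range; a day earlier than start_datetime's falls into the following month.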
+ synthetic_date = datetime(start_datetime.year, start_datetime.month, int(entry.day),
+ tzinfo=get_localzone())
+ if synthetic_date.day < start_datetime.day:
+ synthetic_date = synthetic_date + relativedelta(months=1)
+
+ key = '%s-%s' % (entry.kind_id, entry.day)
+
+ if key in entries:
+ entries[key] = AggregatedLogCount(entry.kind_id, entry.count + entries[key].count,
+ synthetic_date)
+ else:
+ entries[key] = AggregatedLogCount(entry.kind_id, entry.count, synthetic_date)
+
+ return entries.values()
+
+ def count_repository_actions(self, repository, day):
+ return model.repositoryactioncount.count_repository_actions(repository, day)
+
+ def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
+ repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
+ if self._should_skip_logging and self._should_skip_logging(kind_name, namespace_name,
+ is_free_namespace):
+ return
+
+ if repository_name is not None:
+ assert repository is None
+ assert namespace_name is not None
+ repository = model.repository.get_repository(namespace_name, repository_name)
+
+ model.log.log_action(kind_name, namespace_name, performer=performer, repository=repository,
+ ip=ip, metadata=metadata or {}, timestamp=timestamp)
+
+ def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
+ namespace_id=None, max_query_time=None):
+ # Using an adjusting scale, start downloading log rows in batches, starting at
+ # MINIMUM_RANGE_SIZE and doubling until we've reached EXPECTED_ITERATION_LOG_COUNT or
+    # the lookup range has reached MAXIMUM_RANGE_SIZE. If at any point this operation takes
+    # longer than max_query_time, terminate the batch operation as timed out.
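+    # e.g. the lookup window grows 1s -> 2s -> 4s -> ... and is capped at roughly 30 days,
+    # so sparse date ranges require far fewer queries (illustrative).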
+ batch_start_time = datetime.utcnow()
+
+ current_start_datetime = start_datetime
+ current_batch_size = timedelta(seconds=MINIMUM_RANGE_SIZE)
+
+ while current_start_datetime < end_datetime:
+ # Verify we haven't been working for too long.
+ work_elapsed = datetime.utcnow() - batch_start_time
+ if max_query_time is not None and work_elapsed > max_query_time:
+ logger.error('Retrieval of logs `%s/%s` timed out with time of `%s`',
+ namespace_id, repository_id, work_elapsed)
+ raise LogsIterationTimeout()
+
+ current_end_datetime = current_start_datetime + current_batch_size
+ current_end_datetime = min(current_end_datetime, end_datetime)
+
+ # Load the next set of logs.
+ def load_logs():
+ logger.debug('Retrieving logs over range %s -> %s with namespace %s and repository %s',
+ current_start_datetime, current_end_datetime, namespace_id, repository_id)
+
+ logs_query = model.log.get_logs_query(namespace=namespace_id,
+ repository=repository_id,
+ start_time=current_start_datetime,
+ end_time=current_end_datetime)
+ logs = list(logs_query)
+ for log in logs:
+ if namespace_id is not None:
+ assert log.account_id == namespace_id
+
+ if repository_id is not None:
+ assert log.repository_id == repository_id
+
+ logs = [Log.for_logentry(log) for log in logs]
+ return logs
+
+ logs, elapsed = _run_and_time(load_logs)
+ if max_query_time is not None and elapsed > max_query_time:
+ logger.error('Retrieval of logs for export `%s/%s` with range `%s-%s` timed out at `%s`',
+ namespace_id, repository_id, current_start_datetime, current_end_datetime,
+ elapsed)
+ raise LogsIterationTimeout()
+
+ yield logs
+
+ # Move forward.
+ current_start_datetime = current_end_datetime
+
+ # Increase the batch size if necessary.
+ if len(logs) < EXPECTED_ITERATION_LOG_COUNT:
+ seconds = min(MAXIMUM_RANGE_SIZE, current_batch_size.total_seconds() * 2)
+ current_batch_size = timedelta(seconds=seconds)
+
+ def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
+ """ Yield a context manager for a group of outdated logs. """
+ for log_model in LOG_MODELS:
+ while True:
+ with UseThenDisconnect(config.app_config):
+ start_id = get_stale_logs_start_id(log_model)
+
+ if start_id is None:
+ logger.warning('Failed to find start id')
+ break
+
+ logger.debug('Found starting ID %s', start_id)
+ lookup_end_id = start_id + min_logs_per_rotation
+ logs = [log for log in get_stale_logs(start_id, lookup_end_id,
+ log_model, cutoff_date)]
+
+ if not logs:
+ logger.debug('No further logs found')
+ break
+
+ end_id = max([log.id for log in logs])
+ context = DatabaseLogRotationContext(logs, log_model, start_id, end_id)
+ yield context
+
+
+def _run_and_time(fn):
+ start_time = datetime.utcnow()
+ result = fn()
+ return result, datetime.utcnow() - start_time
+
+
+table_logs_model = TableLogsModel()
+
+
+class DatabaseLogRotationContext(LogRotationContextInterface):
+ """
+ DatabaseLogRotationContext represents a batch of logs to be archived together.
+  i.e. a set of logs to be archived in the same file (based on the number of logs per rotation).
+
+ When completed without exceptions, this context will delete the stale logs
+ from rows `start_id` to `end_id`.
+ """
+ def __init__(self, logs, log_model, start_id, end_id):
+ self.logs = logs
+ self.log_model = log_model
+ self.start_id = start_id
+ self.end_id = end_id
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, ex_type, ex_value, ex_traceback):
+ if ex_type is None and ex_value is None and ex_traceback is None:
+ with UseThenDisconnect(config.app_config):
+ logger.debug('Deleting logs from IDs %s to %s', self.start_id, self.end_id)
+ delete_stale_logs(self.start_id, self.end_id, self.log_model)
+
+ def yield_logs_batch(self):
+ """ Yield a batch of logs and a filename for that batch. """
+ filename = '%d-%d-%s.txt.gz' % (self.start_id, self.end_id,
+ self.log_model.__name__.lower())
+ yield self.logs, filename
diff --git a/data/logs_model/test/__init__.py b/data/logs_model/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/data/logs_model/test/fake_elasticsearch.py b/data/logs_model/test/fake_elasticsearch.py
new file mode 100644
index 000000000..439dfd806
--- /dev/null
+++ b/data/logs_model/test/fake_elasticsearch.py
@@ -0,0 +1,390 @@
+import json
+import uuid
+import fnmatch
+
+from collections import defaultdict
+from contextlib import contextmanager
+from datetime import datetime
+
+import dateutil.parser
+
+from httmock import urlmatch, HTTMock
+
+FAKE_ES_HOST = 'fakees'
+
+EMPTY_RESULT = {
+ 'hits': {'hits': [], 'total': 0},
+ '_shards': {'successful': 1, 'total': 1},
+}
+
+def parse_query(query):
+ if not query:
+ return {}
+
+ return {s.split('=')[0]: s.split('=')[1] for s in query.split("&")}
+
+
+@contextmanager
+def fake_elasticsearch(allow_wildcard=True):
+ templates = {}
+ docs = defaultdict(list)
+ scrolls = {}
+ id_counter = [1]
+
+ def transform(value, field_name):
+ # TODO: implement this using a real index template if we ever need more than a few
+ # fields here.
+ if field_name == 'datetime':
+ if isinstance(value, int):
+ return datetime.utcfromtimestamp(value / 1000)
+
+ parsed = dateutil.parser.parse(value)
+ return parsed
+
+ return value
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/_template/(.+)', method='GET')
+ def get_template(url, request):
+ template_name = url[len('/_template/'):]
+ if template_name in templates:
+ return {'status_code': 200}
+
+ return {'status_code': 404}
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/_template/(.+)', method='PUT')
+ def put_template(url, request):
+ template_name = url[len('/_template/'):]
+ templates[template_name] = True
+ return {'status_code': 201}
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)/_doc', method='POST')
+ def post_doc(url, request):
+ index_name, _ = url.path[1:].split('/')
+ item = json.loads(request.body)
+ item['_id'] = item['random_id']
+ id_counter[0] += 1
+ docs[index_name].append(item)
+ return {
+ 'status_code': 204,
+ 'headers': {
+ 'Content-Type': 'application/json',
+ },
+ 'content': json.dumps({
+ "result": "created",
+ }),
+ }
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)$', method='DELETE')
+ def index_delete(url, request):
+ index_name_or_pattern = url.path[1:]
+ to_delete = []
+ for index_name in docs.keys():
+ if not fnmatch.fnmatch(index_name, index_name_or_pattern):
+ continue
+
+ to_delete.append(index_name)
+
+ for index in to_delete:
+ docs.pop(index)
+
+ return {
+ 'status_code': 200,
+ 'headers': {
+ 'Content-Type': 'application/json',
+ },
+ 'content': {'acknowledged': True}
+ }
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)$', method='GET')
+ def index_lookup(url, request):
+ index_name_or_pattern = url.path[1:]
+ found = {}
+ for index_name in docs.keys():
+ if not fnmatch.fnmatch(index_name, index_name_or_pattern):
+ continue
+
+ found[index_name] = {}
+
+ if not found:
+ return {
+ 'status_code': 404,
+ }
+
+ return {
+ 'status_code': 200,
+ 'headers': {
+ 'Content-Type': 'application/json',
+ },
+ 'content': json.dumps(found),
+ }
+
+ def _match_query(index_name_or_pattern, query):
+ found = []
+ found_index = False
+
+ for index_name in docs.keys():
+ if not allow_wildcard and index_name_or_pattern.find('*') >= 0:
+ break
+
+ if not fnmatch.fnmatch(index_name, index_name_or_pattern):
+ continue
+
+ found_index = True
+
+ def _is_match(doc, current_query):
+ if current_query is None:
+ return True
+
+ for filter_type, filter_params in current_query.iteritems():
+ for field_name, filter_props in filter_params.iteritems():
+ if filter_type == 'range':
+ lt = transform(filter_props['lt'], field_name)
+ gte = transform(filter_props['gte'], field_name)
+ doc_value = transform(doc[field_name], field_name)
+ if not (doc_value < lt and doc_value >= gte):
+ return False
+ elif filter_type == 'term':
+ doc_value = transform(doc[field_name], field_name)
+ return doc_value == filter_props
+ elif filter_type == 'terms':
+ doc_value = transform(doc[field_name], field_name)
+ return doc_value in filter_props
+ elif filter_type == 'bool':
+ assert not 'should' in filter_params, 'should is unsupported'
+
+ must = filter_params.get('must')
+ must_not = filter_params.get('must_not')
+ filter_bool = filter_params.get('filter')
+
+ if must:
+ for check in must:
+ if not _is_match(doc, check):
+ return False
+
+ if must_not:
+ for check in must_not:
+ if _is_match(doc, check):
+ return False
+
+ if filter_bool:
+ for check in filter_bool:
+ if not _is_match(doc, check):
+ return False
+ else:
+ raise Exception('Unimplemented query %s: %s' % (filter_type, query))
+
+ return True
+
+ for doc in docs[index_name]:
+ if not _is_match(doc, query):
+ continue
+
+ found.append({'_source': doc, '_index': index_name})
+
+ return found, found_index or (index_name_or_pattern.find('*') >= 0)
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)/_count$', method='GET')
+ def count_docs(url, request):
+ request = json.loads(request.body)
+ index_name_or_pattern, _ = url.path[1:].split('/')
+
+ found, found_index = _match_query(index_name_or_pattern, request['query'])
+ if not found_index:
+ return {
+ 'status_code': 404,
+ }
+
+ return {
+ 'status_code': 200,
+ 'headers': {
+ 'Content-Type': 'application/json',
+ },
+ 'content': json.dumps({'count': len(found)}),
+ }
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/_search/scroll$', method='GET')
+ def lookup_scroll(url, request):
+ request_obj = json.loads(request.body)
+ scroll_id = request_obj['scroll_id']
+ if scroll_id in scrolls:
+ return {
+ 'status_code': 200,
+ 'headers': {
+ 'Content-Type': 'application/json',
+ },
+ 'content': json.dumps(scrolls[scroll_id]),
+ }
+
+ return {
+ 'status_code': 404,
+ }
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/_search/scroll$', method='DELETE')
+ def delete_scroll(url, request):
+ request = json.loads(request.body)
+ for scroll_id in request['scroll_id']:
+ scrolls.pop(scroll_id, None)
+
+ return {
+ 'status_code': 404,
+ }
+
+ @urlmatch(netloc=FAKE_ES_HOST, path=r'/([^/]+)/_search$', method='GET')
+ def lookup_docs(url, request):
+ query_params = parse_query(url.query)
+
+ request = json.loads(request.body)
+ index_name_or_pattern, _ = url.path[1:].split('/')
+
+ # Find matching docs.
+ query = request.get('query')
+ found, found_index = _match_query(index_name_or_pattern, query)
+ if not found_index:
+ return {
+ 'status_code': 404,
+ }
+
+ # Sort.
+ sort = request.get('sort')
+ if sort:
+ if sort == ['_doc'] or sort == '_doc':
+ found.sort(key=lambda x: x['_source']['_id'])
+ else:
+ def get_sort_key(item):
+ source = item['_source']
+ key = ''
+ for sort_config in sort:
+ for sort_key, direction in sort_config.iteritems():
+ assert direction == 'desc'
+ sort_key = sort_key.replace('.keyword', '')
+ key += str(transform(source[sort_key], sort_key))
+ key += '|'
+ return key
+
+ found.sort(key=get_sort_key, reverse=True)
+
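+    # search_after emulates Elasticsearch cursor-style pagination: the request carries the
+    # sort values of the last hit of the previous page (e.g. [1520479800000, 233] for a
+    # [datetime, random_id] sort), and the mock drops every hit at or before that cursor
+    # in the descending sort order.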
+ # Search after.
+ search_after = request.get('search_after')
+ if search_after:
+ sort_fields = []
+ for sort_config in sort:
+ if isinstance(sort_config, unicode):
+ sort_fields.append(sort_config)
+ continue
+
+ for sort_key, _ in sort_config.iteritems():
+ sort_key = sort_key.replace('.keyword', '')
+ sort_fields.append(sort_key)
+
+ for index, search_after_value in enumerate(search_after):
+ field_name = sort_fields[index]
+ value = transform(search_after_value, field_name)
+ if field_name == '_doc':
+ found = [f for f in found if transform(f['_source']['_id'], field_name) > value]
+ else:
+ found = [f for f in found if transform(f['_source'][field_name], field_name) < value]
+ if len(found) < 2:
+ break
+
+        if field_name == '_doc':
+          if found[0]['_source']['_id'] != found[1]['_source']['_id']:
+            break
+        else:
+          if found[0]['_source'][field_name] != found[1]['_source'][field_name]:
+            break
+
+ # Size.
+ size = request.get('size')
+ if size:
+ found = found[0:size]
+
+ # Aggregation.
+ # {u'query':
+ # {u'range':
+ # {u'datetime': {u'lt': u'2019-06-27T15:45:09.768085',
+ # u'gte': u'2019-06-27T15:35:09.768085'}}},
+ # u'aggs': {
+ # u'by_id': {
+ # u'terms': {u'field': u'kind_id'},
+ # u'aggs': {
+ # u'by_date': {u'date_histogram': {u'field': u'datetime', u'interval': u'day'}}}}},
+ # u'size': 0}
+ def _by_field(agg_field_params, results):
+ aggregated_by_field = defaultdict(list)
+
+ for agg_means, agg_means_params in agg_field_params.iteritems():
+ if agg_means == 'terms':
+ field_name = agg_means_params['field']
+ for result in results:
+ value = result['_source'][field_name]
+ aggregated_by_field[value].append(result)
+ elif agg_means == 'date_histogram':
+ field_name = agg_means_params['field']
+ interval = agg_means_params['interval']
+ for result in results:
+ value = transform(result['_source'][field_name], field_name)
+ aggregated_by_field[getattr(value, interval)].append(result)
+ elif agg_means == 'aggs':
+ # Skip. Handled below.
+ continue
+ else:
+ raise Exception('Unsupported aggregation method: %s' % agg_means)
+
+ # Invoke the aggregation recursively.
+ buckets = []
+ for field_value, field_results in aggregated_by_field.iteritems():
+ aggregated = _aggregate(agg_field_params, field_results)
+ if isinstance(aggregated, list):
+ aggregated = {'doc_count': len(aggregated)}
+
+ aggregated['key'] = field_value
+ buckets.append(aggregated)
+
+ return {'buckets': buckets}
+
+ def _aggregate(query_config, results):
+ agg_params = query_config.get(u'aggs')
+ if not agg_params:
+ return results
+
+ by_field_name = {}
+ for agg_field_name, agg_field_params in agg_params.iteritems():
+ by_field_name[agg_field_name] = _by_field(agg_field_params, results)
+
+ return by_field_name
+
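+    # For reference, _aggregate turns the example request above into a response shaped like
+    #   {'by_id': {'buckets': [{'key': <kind_id>,
+    #                           'by_date': {'buckets': [{'key': <day>, 'doc_count': N}]}}]}}
+    # i.e. a simplified form of the aggregations section of a real Elasticsearch reply.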
+ final_result = {
+ 'hits': {
+ 'hits': found,
+ 'total': len(found),
+ },
+ '_shards': {
+ 'successful': 1,
+ 'total': 1,
+ },
+ 'aggregations': _aggregate(request, found),
+ }
+
+ if query_params.get('scroll'):
+ scroll_id = str(uuid.uuid4())
+ scrolls[scroll_id] = EMPTY_RESULT
+ final_result['_scroll_id'] = scroll_id
+
+ return {
+ 'status_code': 200,
+ 'headers': {
+ 'Content-Type': 'application/json',
+ },
+ 'content': json.dumps(final_result),
+ }
+
+ @urlmatch(netloc=FAKE_ES_HOST)
+ def catchall_handler(url, request):
+ print "Unsupported URL: %s %s" % (request.method, url, )
+ return {'status_code': 501}
+
+ handlers = [get_template, put_template, index_delete, index_lookup, post_doc, count_docs,
+ lookup_docs, lookup_scroll, delete_scroll, catchall_handler]
+
+ with HTTMock(*handlers):
+ yield
diff --git a/data/logs_model/test/mock_elasticsearch.py b/data/logs_model/test/mock_elasticsearch.py
new file mode 100644
index 000000000..bd26a10c7
--- /dev/null
+++ b/data/logs_model/test/mock_elasticsearch.py
@@ -0,0 +1,400 @@
+# -*- coding: utf-8 -*-
+import json
+
+from datetime import datetime
+from dateutil.parser import parse
+
+from data.logs_model.datatypes import LogEntriesPage, Log, AggregatedLogCount
+
+
+def _status(d, code=200):
+ return {"status_code": code, "content": json.dumps(d)}
+
+
+def _shards(d, total=5, failed=0, successful=5):
+ d.update({"_shards": {"total": total, "failed": failed, "successful": successful}})
+ return d
+
+
+def _hits(hits):
+ return {"hits": {"total": len(hits), "max_score": None, "hits": hits}}
+
+
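+# The helpers above compose httmock-style response dicts, e.g.
+#   _status(_shards(_hits([some_hit]))) ==
+#     {"status_code": 200, "content": json.dumps({"hits": {...}, "_shards": {...}})}
+# The constants below are the canned request/response pairs consumed by test_elasticsearch.py
+# and test_logs_producer.py.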
+INDEX_LIST_RESPONSE_HIT1_HIT2 = _status({
+ "logentry_2018-03-08": {},
+ "logentry_2018-04-02": {}
+})
+
+
+INDEX_LIST_RESPONSE_HIT2 = _status({
+ "logentry_2018-04-02": {}
+})
+
+
+INDEX_LIST_RESPONSE = _status({
+ "logentry_2019-01-01": {},
+ "logentry_2017-03-08": {},
+ "logentry_2018-03-08": {},
+ "logentry_2018-04-02": {}
+})
+
+
+DEFAULT_TEMPLATE_RESPONSE = _status({"acknowledged": True})
+INDEX_RESPONSE_2019_01_01 = _status(
+ _shards({
+ "_index": "logentry_2019-01-01",
+ "_type": "_doc",
+ "_id": "1",
+ "_version": 1,
+ "_seq_no": 0,
+ "_primary_term": 1,
+ "result": "created"
+ }))
+
+INDEX_RESPONSE_2017_03_08 = _status(
+ _shards({
+ "_index": "logentry_2017-03-08",
+ "_type": "_doc",
+ "_id": "1",
+ "_version": 1,
+ "_seq_no": 0,
+ "_primary_term": 1,
+ "result": "created"
+ }))
+
+FAILURE_400 = _status({}, 400)
+
+INDEX_REQUEST_2019_01_01 = [
+  "logentry_2019-01-01", {
+    "account_id": 1,
+    "repository_id": 1,
+    "ip": "192.168.1.1",
+    "random_id": 233,
+    "datetime": "2019-01-01T03:30:00",
+    "metadata_json": json.loads("{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}"),
+    "performer_id": 1,
+    "kind_id": 1
+  }
+]
+
+INDEX_REQUEST_2017_03_08 = [
+  "logentry_2017-03-08", {
+    "repository_id": 1,
+    "account_id": 1,
+    "ip": "192.168.1.1",
+    "random_id": 233,
+    "datetime": "2017-03-08T03:30:00",
+    "metadata_json": json.loads("{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}"),
+    "performer_id": 1,
+    "kind_id": 2
+  }
+]
+
+_hit1 = {
+  "_index": "logentry_2018-03-08",
+  "_type": "doc",
+  "_id": "1",
+  "_score": None,
+  "_source": {
+    "random_id": 233,
+    "kind_id": 1,
+    "account_id": 1,
+    "performer_id": 1,
+    "repository_id": 1,
+    "ip": "192.168.1.1",
+    "metadata_json": "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}",
+    "datetime": "2018-03-08T03:30",
+  },
+  "sort": [1520479800000, 233]
+}
+
+_hit2 = {
+  "_index": "logentry_2018-04-02",
+  "_type": "doc",
+  "_id": "2",
+  "_score": None,
+  "_source": {
+    "random_id": 233,
+    "kind_id": 2,
+    "account_id": 1,
+    "performer_id": 1,
+    "repository_id": 1,
+    "ip": "192.168.1.2",
+    "metadata_json": "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1522639800}",
+    "datetime": "2018-04-02T03:30",
+  },
+  "sort": [1522639800000, 233]
+}
+
+_log1 = Log(
+ "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1520479800}",
+ "192.168.1.1", parse("2018-03-08T03:30"), "user1.email", "user1.username", "user1.robot",
+ "user1.organization", "user1.username", "user1.email", "user1.robot", 1)
+_log2 = Log(
+ "{\"\\ud83d\\ude02\": \"\\ud83d\\ude02\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\\ud83d\\udc4c\", \"key\": \"value\", \"time\": 1522639800}",
+ "192.168.1.2", parse("2018-04-02T03:30"), "user1.email", "user1.username", "user1.robot",
+ "user1.organization", "user1.username", "user1.email", "user1.robot", 2)
+
+SEARCH_RESPONSE_START = _status(_shards(_hits([_hit1, _hit2])))
+SEARCH_RESPONSE_END = _status(_shards(_hits([_hit2])))
+SEARCH_REQUEST_START = {
+ "sort": [{
+ "datetime": "desc"
+ }, {
+ "random_id.keyword": "desc"
+ }],
+ "query": {
+ "bool": {
+ "filter": [{
+ "term": {
+ "performer_id": 1
+ }
+ }, {
+ "term": {
+ "repository_id": 1
+ }
+ }]
+ }
+ },
+ "size": 2
+}
+SEARCH_REQUEST_END = {
+ "sort": [{
+ "datetime": "desc"
+ }, {
+ "random_id.keyword": "desc"
+ }],
+ "query": {
+ "bool": {
+ "filter": [{
+ "term": {
+ "performer_id": 1
+ }
+ }, {
+ "term": {
+ "repository_id": 1
+ }
+ }]
+ }
+ },
+ "search_after": [1520479800000, 233],
+ "size": 2
+}
+SEARCH_REQUEST_FILTER = {
+ "sort": [{
+ "datetime": "desc"
+ }, {
+ "random_id.keyword": "desc"
+ }],
+ "query": {
+ "bool": {
+ "filter": [{
+ "term": {
+ "performer_id": 1
+ }
+ }, {
+ "term": {
+ "repository_id": 1
+ }
+ }, {
+ "bool": {
+ "must_not": [{
+ "terms": {
+ "kind_id": [1]
+ }
+ }]
+ }
+ }]
+ }
+ },
+ "size": 2
+}
+SEARCH_PAGE_TOKEN = {
+ "datetime": datetime(2018, 3, 8, 3, 30).isoformat(),
+ "random_id": 233,
+ "page_number": 1
+}
+SEARCH_PAGE_START = LogEntriesPage(logs=[_log1], next_page_token=SEARCH_PAGE_TOKEN)
+SEARCH_PAGE_END = LogEntriesPage(logs=[_log2], next_page_token=None)
+SEARCH_PAGE_EMPTY = LogEntriesPage([], None)
+
+AGGS_RESPONSE = _status(
+ _shards({
+ "hits": {
+ "total": 4,
+ "max_score": None,
+ "hits": []
+ },
+ "aggregations": {
+ "by_id": {
+      "doc_count_error_upper_bound": 0,
+      "sum_other_doc_count": 0,
+ "buckets": [{
+ "key": 2,
+ "doc_count": 3,
+ "by_date": {
+ "buckets": [{
+ "key_as_string": "2009-11-12T00:00:00.000Z",
+ "key": 1257984000000,
+ "doc_count": 1
+ }, {
+ "key_as_string": "2009-11-13T00:00:00.000Z",
+ "key": 1258070400000,
+ "doc_count": 0
+ }, {
+ "key_as_string": "2009-11-14T00:00:00.000Z",
+ "key": 1258156800000,
+ "doc_count": 2
+ }]
+ }
+ }, {
+ "key": 1,
+ "doc_count": 1,
+ "by_date": {
+ "buckets": [{
+ "key_as_string": "2009-11-15T00:00:00.000Z",
+ "key": 1258243200000,
+ "doc_count": 1
+ }]
+ }
+ }]
+ }
+ }
+ }))
+
+AGGS_REQUEST = {
+ "query": {
+ "bool": {
+ "filter": [{
+ "term": {
+ "performer_id": 1
+ }
+ }, {
+ "term": {
+ "repository_id": 1
+ }
+ }, {
+ "bool": {
+ "must_not": [{
+ "terms": {
+ "kind_id": [2]
+ }
+ }]
+ }
+ }],
+ "must": [{
+ "range": {
+ "datetime": {
+ "lt": "2018-04-08T03:30:00",
+ "gte": "2018-03-08T03:30:00"
+ }
+ }
+ }]
+ }
+ },
+ "aggs": {
+ "by_id": {
+ "terms": {
+ "field": "kind_id"
+ },
+ "aggs": {
+ "by_date": {
+ "date_histogram": {
+ "field": "datetime",
+ "interval": "day"
+ }
+ }
+ }
+ }
+ },
+ "size": 0
+}
+
+AGGS_COUNT = [
+ AggregatedLogCount(1, 1, parse("2009-11-15T00:00:00.000")),
+ AggregatedLogCount(2, 1, parse("2009-11-12T00:00:00.000")),
+ AggregatedLogCount(2, 2, parse("2009-11-14T00:00:00.000"))
+]
+
+COUNT_REQUEST = {
+ "query": {
+ "bool": {
+ "filter": [{
+ "term": {
+ "repository_id": 1
+ }
+ }]
+ }
+ }
+}
+COUNT_RESPONSE = _status(_shards({
+ "count": 1,
+}))
+
+# assume there are 2 pages
+_scroll_id = "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAACEmFkk1aGlTRzdSUWllejZmYTlEYTN3SVEAAAAAAAAhJRZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lRAAAAAAAAHtAWLWZpaFZXVzVSTy1OTXA5V3MwcHZrZwAAAAAAAB7RFi1maWhWV1c1Uk8tTk1wOVdzMHB2a2cAAAAAAAAhJxZJNWhpU0c3UlFpZXo2ZmE5RGEzd0lR"
+
+
+def _scroll(d):
+ d["_scroll_id"] = _scroll_id
+ return d
+
+
+SCROLL_CREATE = _status(_shards(_scroll(_hits([_hit1]))))
+SCROLL_GET = _status(_shards(_scroll(_hits([_hit2]))))
+SCROLL_GET_2 = _status(_shards(_scroll(_hits([]))))
+SCROLL_DELETE = _status({"succeeded": True, "num_freed": 5})
+SCROLL_LOGS = [[_log1], [_log2]]
+
+SCROLL_REQUESTS = [
+ [
+ "5m", 1, {
+ "sort": "_doc",
+ "query": {
+ "range": {
+ "datetime": {
+ "lt": "2018-04-02T00:00:00",
+ "gte": "2018-03-08T00:00:00"
+ }
+ }
+ }
+ }
+ ],
+ [{"scroll": "5m", "scroll_id": _scroll_id}],
+ [{"scroll":"5m", "scroll_id": _scroll_id}],
+ [{"scroll_id": [_scroll_id]}],
+]
+
+SCROLL_RESPONSES = [SCROLL_CREATE, SCROLL_GET, SCROLL_GET_2, SCROLL_DELETE]
diff --git a/data/logs_model/test/test_combined_model.py b/data/logs_model/test/test_combined_model.py
new file mode 100644
index 000000000..7b288e72f
--- /dev/null
+++ b/data/logs_model/test/test_combined_model.py
@@ -0,0 +1,130 @@
+from datetime import date, datetime, timedelta
+
+from freezegun import freeze_time
+
+from data.logs_model.inmemory_model import InMemoryModel
+from data.logs_model.combined_model import CombinedLogsModel
+
+from test.fixtures import *
+
+
+@pytest.fixture()
+def first_model():
+ return InMemoryModel()
+
+
+@pytest.fixture()
+def second_model():
+ return InMemoryModel()
+
+
+@pytest.fixture()
+def combined_model(first_model, second_model, initialized_db):
+ return CombinedLogsModel(first_model, second_model)
+
+
+def test_log_action(first_model, second_model, combined_model, initialized_db):
+ day = date(2019, 1, 1)
+
+ # Write to the combined model.
+ with freeze_time(day):
+ combined_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ simple_repo = model.repository.get_repository('devtable', 'simple')
+
+ # Make sure it is found in the first model but not the second.
+ assert combined_model.count_repository_actions(simple_repo, day) == 1
+ assert first_model.count_repository_actions(simple_repo, day) == 1
+ assert second_model.count_repository_actions(simple_repo, day) == 0
+
+
+def test_count_repository_actions(first_model, second_model, combined_model, initialized_db):
+ # Write to each model.
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ # Ensure the counts match as expected.
+ day = datetime.today() - timedelta(minutes=60)
+ simple_repo = model.repository.get_repository('devtable', 'simple')
+
+ assert first_model.count_repository_actions(simple_repo, day) == 3
+ assert second_model.count_repository_actions(simple_repo, day) == 2
+ assert combined_model.count_repository_actions(simple_repo, day) == 5
+
+
+def test_yield_logs_for_export(first_model, second_model, combined_model, initialized_db):
+ now = datetime.now()
+
+ # Write to each model.
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ later = datetime.now()
+
+ # Ensure the full set of logs is yielded.
+ first_logs = list(first_model.yield_logs_for_export(now, later))[0]
+ second_logs = list(second_model.yield_logs_for_export(now, later))[0]
+
+ combined = list(combined_model.yield_logs_for_export(now, later))
+ full_combined = []
+ for subset in combined:
+ full_combined.extend(subset)
+
+ assert len(full_combined) == len(first_logs) + len(second_logs)
+ assert full_combined == (first_logs + second_logs)
+
+
+def test_lookup_logs(first_model, second_model, combined_model, initialized_db):
+ now = datetime.now()
+
+ # Write to each model.
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ first_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ second_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ later = datetime.now()
+
+ def _collect_logs(model):
+ page_token = None
+ all_logs = []
+ while True:
+ paginated_logs = model.lookup_logs(now, later, page_token=page_token)
+ page_token = paginated_logs.next_page_token
+ all_logs.extend(paginated_logs.logs)
+ if page_token is None:
+ break
+ return all_logs
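+  # Page through lookup_logs until there is no next_page_token, so the comparisons below
+  # cover every log each backend returned.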
+
+ first_logs = _collect_logs(first_model)
+ second_logs = _collect_logs(second_model)
+ combined = _collect_logs(combined_model)
+
+ assert len(combined) == len(first_logs) + len(second_logs)
+ assert combined == (first_logs + second_logs)
diff --git a/data/logs_model/test/test_elasticsearch.py b/data/logs_model/test/test_elasticsearch.py
new file mode 100644
index 000000000..a305010f4
--- /dev/null
+++ b/data/logs_model/test/test_elasticsearch.py
@@ -0,0 +1,529 @@
+# -*- coding: utf-8 -*-
+
+# pylint: disable=redefined-outer-name, wildcard-import
+
+import json
+from datetime import datetime, timedelta
+
+import pytest
+from mock import patch, Mock
+from dateutil.parser import parse
+
+from httmock import urlmatch, HTTMock
+
+from data.model.log import _json_serialize
+from data.logs_model.elastic_logs import ElasticsearchLogs, INDEX_NAME_PREFIX, INDEX_DATE_FORMAT
+from data.logs_model import configure, LogsModelProxy
+from mock_elasticsearch import *
+
+FAKE_ES_HOST = 'fakees'
+FAKE_ES_HOST_PATTERN = r'fakees.*'
+FAKE_ES_PORT = 443
+FAKE_AWS_ACCESS_KEY = None
+FAKE_AWS_SECRET_KEY = None
+FAKE_AWS_REGION = None
+
+@pytest.fixture()
+def logs_model_config():
+ conf = {
+ 'LOGS_MODEL': 'elasticsearch',
+ 'LOGS_MODEL_CONFIG': {
+ 'producer': 'elasticsearch',
+ 'elasticsearch_config': {
+ 'host': FAKE_ES_HOST,
+ 'port': FAKE_ES_PORT,
+ 'access_key': FAKE_AWS_ACCESS_KEY,
+ 'secret_key': FAKE_AWS_SECRET_KEY,
+ 'aws_region': FAKE_AWS_REGION
+ }
+ }
+ }
+ return conf
+
+
+FAKE_LOG_ENTRY_KINDS = {'push_repo': 1, 'pull_repo': 2}
+FAKE_NAMESPACES = {
+ 'user1':
+ Mock(id=1, organization="user1.organization", username="user1.username", email="user1.email",
+ robot="user1.robot"),
+ 'user2':
+ Mock(id=2, organization="user2.organization", username="user2.username", email="user2.email",
+ robot="user2.robot")
+}
+FAKE_REPOSITORIES = {
+ 'user1/repo1': Mock(id=1, namespace_user=FAKE_NAMESPACES['user1']),
+ 'user2/repo2': Mock(id=2, namespace_user=FAKE_NAMESPACES['user2']),
+}
+
+
+@pytest.fixture()
+def logs_model():
+ # prevent logs model from changing
+ logs_model = LogsModelProxy()
+ with patch('data.logs_model.logs_model', logs_model):
+ yield logs_model
+
+
+@pytest.fixture(scope='function')
+def app_config(logs_model_config):
+ fake_config = {}
+ fake_config.update(logs_model_config)
+ with patch("data.logs_model.document_logs_model.config.app_config", fake_config):
+ yield fake_config
+
+
+@pytest.fixture()
+def mock_page_size():
+ with patch('data.logs_model.document_logs_model.PAGE_SIZE', 1):
+ yield
+
+
+@pytest.fixture()
+def mock_max_result_window():
+ with patch('data.logs_model.document_logs_model.DEFAULT_RESULT_WINDOW', 1):
+ yield
+
+
+@pytest.fixture
+def mock_random_id():
+ mock_random = Mock(return_value=233)
+ with patch('data.logs_model.document_logs_model._random_id', mock_random):
+ yield
+
+
+@pytest.fixture()
+def mock_db_model():
+ def get_user_map_by_ids(namespace_ids):
+ mapping = {}
+ for i in namespace_ids:
+ for name in FAKE_NAMESPACES:
+ if FAKE_NAMESPACES[name].id == i:
+ mapping[i] = FAKE_NAMESPACES[name]
+ return mapping
+
+ model = Mock(
+ user=Mock(
+ get_namespace_user=FAKE_NAMESPACES.get,
+ get_user_or_org=FAKE_NAMESPACES.get,
+ get_user=FAKE_NAMESPACES.get,
+ get_user_map_by_ids=get_user_map_by_ids,
+ ),
+ repository=Mock(get_repository=lambda user_name, repo_name: FAKE_REPOSITORIES.get(
+ user_name + '/' + repo_name),
+ ),
+ log=Mock(
+ _get_log_entry_kind=lambda name: FAKE_LOG_ENTRY_KINDS[name],
+ _json_serialize=_json_serialize,
+ get_log_entry_kinds=Mock(return_value=FAKE_LOG_ENTRY_KINDS),
+ ),
+ )
+
+ with patch('data.logs_model.document_logs_model.model', model), patch(
+ 'data.logs_model.datatypes.model', model):
+ yield
+
+
+def parse_query(query):
+ return {s.split('=')[0]: s.split('=')[1] for s in query.split("&") if s != ""}
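+# e.g. parse_query('scroll=5m&size=1') == {'scroll': '5m', 'size': '1'}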
+
+
+@pytest.fixture()
+def mock_elasticsearch():
+ mock = Mock()
+ mock.template.side_effect = NotImplementedError
+ mock.index.side_effect = NotImplementedError
+ mock.count.side_effect = NotImplementedError
+ mock.scroll_get.side_effect = NotImplementedError
+ mock.scroll_delete.side_effect = NotImplementedError
+ mock.search_scroll_create.side_effect = NotImplementedError
+ mock.search_aggs.side_effect = NotImplementedError
+ mock.search_after.side_effect = NotImplementedError
+ mock.list_indices.side_effect = NotImplementedError
+
+ @urlmatch(netloc=r'.*', path=r'.*')
+ def default(url, req):
+ raise Exception('\nurl={}\nmethod={}\nreq.url={}\nheaders={}\nbody={}'.format(
+ url, req.method, req.url, req.headers, req.body))
+
+ @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/_template/.*')
+ def template(url, req):
+ return mock.template(url.query.split('/')[-1], req.body)
+
+ @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_(\*|[0-9\-]+)')
+ def list_indices(url, req):
+ return mock.list_indices()
+
+ @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_[0-9\-]*/_doc')
+ def index(url, req):
+ index = url.path.split('/')[1]
+ body = json.loads(req.body)
+ body['metadata_json'] = json.loads(body['metadata_json'])
+ return mock.index(index, body)
+
+ @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_([0-9\-]*|\*)/_count')
+ def count(_, req):
+ return mock.count(json.loads(req.body))
+
+ @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/_search/scroll')
+ def scroll(url, req):
+ if req.method == 'DELETE':
+ return mock.scroll_delete(json.loads(req.body))
+ elif req.method == 'GET':
+ request_obj = json.loads(req.body)
+ return mock.scroll_get(request_obj)
+ raise NotImplementedError()
+
+ @urlmatch(netloc=FAKE_ES_HOST_PATTERN, path=r'/logentry_(\*|[0-9\-]*)/_search')
+ def search(url, req):
+ if "scroll" in url.query:
+ query = parse_query(url.query)
+ window_size = query['scroll']
+ maximum_result_size = int(query['size'])
+ return mock.search_scroll_create(window_size, maximum_result_size, json.loads(req.body))
+ elif "aggs" in req.body:
+ return mock.search_aggs(json.loads(req.body))
+ else:
+ return mock.search_after(json.loads(req.body))
+
+ with HTTMock(scroll, count, search, index, template, list_indices, default):
+ yield mock
+
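+# Each URL handler above delegates to the matching attribute on `mock`, which defaults to
+# raising NotImplementedError; individual tests override these (e.g.
+# mock_elasticsearch.count = Mock(return_value=COUNT_RESPONSE)) and then assert on the
+# request bodies the handlers captured.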
+
+@pytest.mark.parametrize(
+ """
+ unlogged_pulls_ok, kind_name, namespace_name, repository, repository_name,
+ timestamp,
+ index_response, expected_request, throws
+ """,
+ [
+ # Invalid inputs
+ pytest.param(
+ False, 'non-existing', None, None, None,
+ None,
+ None, None, True,
+ id="Invalid Kind"
+ ),
+ pytest.param(
+ False, 'pull_repo', 'user1', Mock(id=1), 'repo1',
+ None,
+ None, None, True,
+ id="Invalid Parameters"
+ ),
+
+ # Remote exceptions
+ pytest.param(
+ False, 'pull_repo', 'user1', Mock(id=1), None,
+ None,
+ FAILURE_400, None, True,
+ id="Throw on pull log failure"
+ ),
+ pytest.param(
+ True, 'pull_repo', 'user1', Mock(id=1), None,
+ parse("2017-03-08T03:30"),
+ FAILURE_400, INDEX_REQUEST_2017_03_08, False,
+ id="Ok on pull log failure"
+ ),
+
+ # Success executions
+ pytest.param(
+ False, 'pull_repo', 'user1', Mock(id=1), None,
+ parse("2017-03-08T03:30"),
+ INDEX_RESPONSE_2017_03_08, INDEX_REQUEST_2017_03_08, False,
+ id="Log with namespace name and repository"
+ ),
+ pytest.param(
+ False, 'push_repo', 'user1', None, 'repo1',
+ parse("2019-01-01T03:30"),
+ INDEX_RESPONSE_2019_01_01, INDEX_REQUEST_2019_01_01, False,
+ id="Log with namespace name and repository name"
+ ),
+ ])
+def test_log_action(unlogged_pulls_ok, kind_name, namespace_name, repository, repository_name,
+ timestamp,
+ index_response, expected_request, throws,
+ app_config, logs_model, mock_elasticsearch, mock_db_model, mock_random_id):
+ mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
+ mock_elasticsearch.index = Mock(return_value=index_response)
+ app_config['ALLOW_PULLS_WITHOUT_STRICT_LOGGING'] = unlogged_pulls_ok
+ configure(app_config)
+
+ performer = Mock(id=1)
+ ip = "192.168.1.1"
+  metadata = {'key': 'value', 'time': parse("2018-03-08T03:30"), '😂': '😂👌👌👌👌'}
+ if throws:
+ with pytest.raises(Exception):
+ logs_model.log_action(kind_name, namespace_name, performer, ip, metadata, repository,
+ repository_name, timestamp)
+ else:
+ logs_model.log_action(kind_name, namespace_name, performer, ip, metadata, repository,
+ repository_name, timestamp)
+ mock_elasticsearch.index.assert_called_with(*expected_request)
+
+
+@pytest.mark.parametrize(
+ """
+ start_datetime, end_datetime,
+ performer_name, repository_name, namespace_name,
+ filter_kinds,
+ page_token,
+ max_page_count,
+ search_response,
+ list_indices_response,
+ expected_request,
+ expected_page,
+ throws
+ """,
+ [
+ # 1st page
+ pytest.param(
+ parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
+ 'user1', 'repo1', 'user1',
+ None,
+ None,
+ None,
+ SEARCH_RESPONSE_START,
+ INDEX_LIST_RESPONSE_HIT1_HIT2,
+ SEARCH_REQUEST_START,
+ SEARCH_PAGE_START,
+ False,
+ id="1st page"
+ ),
+
+ # Last page
+ pytest.param(
+ parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
+ 'user1', 'repo1', 'user1',
+ None,
+ SEARCH_PAGE_TOKEN,
+ None,
+ SEARCH_RESPONSE_END,
+ INDEX_LIST_RESPONSE_HIT1_HIT2,
+ SEARCH_REQUEST_END,
+ SEARCH_PAGE_END,
+ False,
+ id="Search using pagination token"
+ ),
+
+ # Filter
+ pytest.param(
+ parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
+ 'user1', 'repo1', 'user1',
+ ['push_repo'],
+ None,
+ None,
+ SEARCH_RESPONSE_END,
+ INDEX_LIST_RESPONSE_HIT2,
+ SEARCH_REQUEST_FILTER,
+ SEARCH_PAGE_END,
+ False,
+ id="Filtered search"
+ ),
+
+ # Max page count
+ pytest.param(
+ parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
+ 'user1', 'repo1', 'user1',
+ None,
+ SEARCH_PAGE_TOKEN,
+ 1,
+ AssertionError, # Assert that it should not reach the ES server
+ None,
+ None,
+ SEARCH_PAGE_EMPTY,
+ False,
+ id="Page token reaches maximum page count",
+ ),
+ ])
+def test_lookup_logs(start_datetime, end_datetime,
+ performer_name, repository_name, namespace_name,
+ filter_kinds,
+ page_token,
+ max_page_count,
+ search_response,
+ list_indices_response,
+ expected_request,
+ expected_page,
+ throws,
+ logs_model, mock_elasticsearch, mock_db_model, mock_page_size, app_config):
+ mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
+ mock_elasticsearch.search_after = Mock(return_value=search_response)
+ mock_elasticsearch.list_indices = Mock(return_value=list_indices_response)
+
+ configure(app_config)
+ if throws:
+ with pytest.raises(Exception):
+ logs_model.lookup_logs(start_datetime, end_datetime, performer_name, repository_name,
+ namespace_name, filter_kinds, page_token, max_page_count)
+ else:
+ page = logs_model.lookup_logs(start_datetime, end_datetime, performer_name, repository_name,
+ namespace_name, filter_kinds, page_token, max_page_count)
+ assert page == expected_page
+ if expected_request:
+ mock_elasticsearch.search_after.assert_called_with(expected_request)
+
+
+@pytest.mark.parametrize(
+ """
+ start_datetime, end_datetime,
+ performer_name, repository_name, namespace_name,
+ filter_kinds, search_response, expected_request, expected_counts, throws
+ """,
+ [
+ # Valid
+ pytest.param(
+ parse('2018-03-08T03:30'), parse('2018-04-08T03:30'),
+ 'user1', 'repo1', 'user1',
+ ['pull_repo'], AGGS_RESPONSE, AGGS_REQUEST, AGGS_COUNT, False,
+ id="Valid Counts"
+ ),
+
+ # Invalid case: date range too big
+ pytest.param(
+ parse('2018-03-08T03:30'), parse('2018-04-09T03:30'),
+ 'user1', 'repo1', 'user1',
+ [], None, None, None, True,
+ id="Throw on date range too big"
+ )
+ ])
+def test_get_aggregated_log_counts(start_datetime, end_datetime,
+ performer_name, repository_name, namespace_name,
+ filter_kinds, search_response, expected_request, expected_counts, throws,
+ logs_model, mock_elasticsearch, mock_db_model, app_config):
+ mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
+ mock_elasticsearch.search_aggs = Mock(return_value=search_response)
+
+ configure(app_config)
+ if throws:
+ with pytest.raises(Exception):
+ logs_model.get_aggregated_log_counts(start_datetime, end_datetime, performer_name,
+ repository_name, namespace_name, filter_kinds)
+ else:
+ counts = logs_model.get_aggregated_log_counts(start_datetime, end_datetime, performer_name,
+ repository_name, namespace_name, filter_kinds)
+ assert set(counts) == set(expected_counts)
+ if expected_request:
+ mock_elasticsearch.search_aggs.assert_called_with(expected_request)
+
+
+@pytest.mark.parametrize(
+ """
+ repository,
+ day,
+ count_response, expected_request, expected_count, throws
+ """,
+ [
+ pytest.param(
+ FAKE_REPOSITORIES['user1/repo1'],
+ parse("2018-03-08").date(),
+ COUNT_RESPONSE, COUNT_REQUEST, 1, False,
+ id="Valid Count with 1 as result"),
+ ])
+def test_count_repository_actions(repository,
+ day,
+ count_response, expected_request, expected_count, throws,
+ logs_model, mock_elasticsearch, mock_db_model, app_config):
+ mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
+ mock_elasticsearch.count = Mock(return_value=count_response)
+ mock_elasticsearch.list_indices = Mock(return_value=INDEX_LIST_RESPONSE)
+
+ configure(app_config)
+ if throws:
+ with pytest.raises(Exception):
+ logs_model.count_repository_actions(repository, day)
+ else:
+ count = logs_model.count_repository_actions(repository, day)
+ assert count == expected_count
+ if expected_request:
+ mock_elasticsearch.count.assert_called_with(expected_request)
+
+
+@pytest.mark.parametrize(
+ """
+ start_datetime, end_datetime,
+ repository_id, namespace_id,
+ max_query_time, scroll_responses, expected_requests, expected_logs, throws
+ """,
+ [
+ pytest.param(
+ parse("2018-03-08"), parse("2018-04-02"),
+ 1, 1,
+ timedelta(seconds=10), SCROLL_RESPONSES, SCROLL_REQUESTS, SCROLL_LOGS, False,
+ id="Scroll 3 pages with page size = 1"
+ ),
+ ])
+def test_yield_logs_for_export(start_datetime, end_datetime,
+ repository_id, namespace_id,
+ max_query_time, scroll_responses, expected_requests, expected_logs, throws,
+ logs_model, mock_elasticsearch, mock_db_model, mock_max_result_window, app_config):
+ mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
+ mock_elasticsearch.search_scroll_create = Mock(return_value=scroll_responses[0])
+ mock_elasticsearch.scroll_get = Mock(side_effect=scroll_responses[1:-1])
+ mock_elasticsearch.scroll_delete = Mock(return_value=scroll_responses[-1])
+
+ configure(app_config)
+ if throws:
+ with pytest.raises(Exception):
+ logs_model.yield_logs_for_export(start_datetime, end_datetime, max_query_time=max_query_time)
+ else:
+ log_generator = logs_model.yield_logs_for_export(start_datetime, end_datetime,
+ max_query_time=max_query_time)
+ counter = 0
+ for logs in log_generator:
+ if counter == 0:
+ mock_elasticsearch.search_scroll_create.assert_called_with(*expected_requests[counter])
+ else:
+ mock_elasticsearch.scroll_get.assert_called_with(*expected_requests[counter])
+ assert expected_logs[counter] == logs
+ counter += 1
+    # The last two requests must be:
+    # 1. a scroll GET whose response contains 0 hits, signalling the termination condition
+    # 2. the scroll DELETE request
+ mock_elasticsearch.scroll_get.assert_called_with(*expected_requests[-2])
+ mock_elasticsearch.scroll_delete.assert_called_with(*expected_requests[-1])
+
+
+@pytest.mark.parametrize('prefix, is_valid', [
+ pytest.param('..', False, id='Invalid `..`'),
+ pytest.param('.', False, id='Invalid `.`'),
+ pytest.param('-prefix', False, id='Invalid prefix start -'),
+ pytest.param('_prefix', False, id='Invalid prefix start _'),
+ pytest.param('+prefix', False, id='Invalid prefix start +'),
+ pytest.param('prefix_with_UPPERCASES', False, id='Invalid uppercase'),
+ pytest.param('valid_index', True, id='Valid prefix'),
+ pytest.param('valid_index_with_numbers1234', True, id='Valid prefix with numbers'),
+ pytest.param('a'*256, False, id='Prefix too long')
+])
+def test_valid_index_prefix(prefix, is_valid):
+ assert ElasticsearchLogs._valid_index_prefix(prefix) == is_valid
+
+
+@pytest.mark.parametrize('index, cutoff_date, expected_result', [
+ pytest.param(
+ INDEX_NAME_PREFIX+'2019-06-06',
+ datetime(2019, 6, 8),
+ True,
+ id="Index older than cutoff"
+ ),
+ pytest.param(
+ INDEX_NAME_PREFIX+'2019-06-06',
+ datetime(2019, 6, 4),
+ False,
+ id="Index younger than cutoff"
+ ),
+ pytest.param(
+ INDEX_NAME_PREFIX+'2019-06-06',
+ datetime(2019, 6, 6, 23),
+ False,
+ id="Index older than cutoff but timedelta less than 1 day"
+ ),
+ pytest.param(
+ INDEX_NAME_PREFIX+'2019-06-06',
+ datetime(2019, 6, 7),
+ True,
+ id="Index older than cutoff by exactly one day"
+ ),
+])
+def test_can_delete_index(index, cutoff_date, expected_result):
+ es = ElasticsearchLogs(index_prefix=INDEX_NAME_PREFIX)
+ assert datetime.strptime(index.split(es._index_prefix, 1)[-1], INDEX_DATE_FORMAT)
+ assert es.can_delete_index(index, cutoff_date) == expected_result
diff --git a/data/logs_model/test/test_logs_interface.py b/data/logs_model/test/test_logs_interface.py
new file mode 100644
index 000000000..8f4f143c0
--- /dev/null
+++ b/data/logs_model/test/test_logs_interface.py
@@ -0,0 +1,473 @@
+from datetime import datetime, timedelta, date
+from data.logs_model.datatypes import AggregatedLogCount
+from data.logs_model.table_logs_model import TableLogsModel
+from data.logs_model.combined_model import CombinedLogsModel
+from data.logs_model.inmemory_model import InMemoryModel
+from data.logs_model.combined_model import _merge_aggregated_log_counts
+from data.logs_model.document_logs_model import _date_range_in_single_index, DocumentLogsModel
+from data.logs_model.interface import LogsIterationTimeout
+from data.logs_model.test.fake_elasticsearch import FAKE_ES_HOST, fake_elasticsearch
+
+from data.database import LogEntry, LogEntry2, LogEntry3, LogEntryKind
+from data import model
+
+from test.fixtures import *
+
+
+@pytest.fixture()
+def mock_page_size():
+ page_size = 2
+ with patch('data.logs_model.document_logs_model.PAGE_SIZE', page_size):
+ yield page_size
+
+
+@pytest.fixture()
+def clear_db_logs(initialized_db):
+ LogEntry.delete().execute()
+ LogEntry2.delete().execute()
+ LogEntry3.delete().execute()
+
+
+def combined_model():
+ return CombinedLogsModel(TableLogsModel(), InMemoryModel())
+
+
+def es_model():
+ return DocumentLogsModel(producer='elasticsearch', elasticsearch_config={
+ 'host': FAKE_ES_HOST,
+ 'port': 12345,
+ })
+
+@pytest.fixture()
+def fake_es():
+ with fake_elasticsearch():
+ yield
+
+
+@pytest.fixture(params=[TableLogsModel, InMemoryModel, es_model, combined_model])
+def logs_model(request, clear_db_logs, fake_es):
+ return request.param()
+
+
+def _lookup_logs(logs_model, start_time, end_time, **kwargs):
+ logs_found = []
+ page_token = None
+ while True:
+ found = logs_model.lookup_logs(start_time, end_time, page_token=page_token, **kwargs)
+ logs_found.extend(found.logs)
+ page_token = found.next_page_token
+ if not found.logs or not page_token:
+ break
+
+ assert len(logs_found) == len(set(logs_found))
+ return logs_found
+
+
+@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
+ reason='Flaky on MySQL')
+@pytest.mark.parametrize('namespace_name, repo_name, performer_name, check_args, expect_results', [
+ pytest.param('devtable', 'simple', 'devtable', {}, True, id='no filters'),
+ pytest.param('devtable', 'simple', 'devtable', {
+ 'performer_name': 'devtable',
+ }, True, id='matching performer'),
+
+ pytest.param('devtable', 'simple', 'devtable', {
+ 'namespace_name': 'devtable',
+ }, True, id='matching namespace'),
+
+ pytest.param('devtable', 'simple', 'devtable', {
+ 'namespace_name': 'devtable',
+ 'repository_name': 'simple',
+ }, True, id='matching repository'),
+
+ pytest.param('devtable', 'simple', 'devtable', {
+ 'performer_name': 'public',
+ }, False, id='different performer'),
+
+ pytest.param('devtable', 'simple', 'devtable', {
+ 'namespace_name': 'public',
+ }, False, id='different namespace'),
+
+ pytest.param('devtable', 'simple', 'devtable', {
+ 'namespace_name': 'devtable',
+ 'repository_name': 'complex',
+ }, False, id='different repository'),
+])
+def test_logs(namespace_name, repo_name, performer_name, check_args, expect_results, logs_model):
+ # Add some logs.
+ kinds = list(LogEntryKind.select())
+ user = model.user.get_user(performer_name)
+
+ start_timestamp = datetime.utcnow()
+ timestamp = start_timestamp
+
+ for kind in kinds:
+ for index in range(0, 3):
+ logs_model.log_action(kind.name, namespace_name=namespace_name, repository_name=repo_name,
+ performer=user, ip='1.2.3.4', timestamp=timestamp)
+ timestamp = timestamp + timedelta(seconds=1)
+
+ found = _lookup_logs(logs_model, start_timestamp, start_timestamp + timedelta(minutes=10),
+ **check_args)
+ if expect_results:
+ assert len(found) == len(kinds) * 3
+ else:
+ assert not found
+
+ aggregated_counts = logs_model.get_aggregated_log_counts(start_timestamp,
+ start_timestamp + timedelta(minutes=10),
+ **check_args)
+ if expect_results:
+ assert len(aggregated_counts) == len(kinds)
+ for ac in aggregated_counts:
+ assert ac.count == 3
+ else:
+ assert not aggregated_counts
+
+
+@pytest.mark.parametrize('filter_kinds, expect_results', [
+ pytest.param(None, True),
+ pytest.param(['push_repo'], True, id='push_repo filter'),
+ pytest.param(['pull_repo'], True, id='pull_repo filter'),
+ pytest.param(['push_repo', 'pull_repo'], False, id='push and pull filters')
+])
+def test_lookup_latest_logs(filter_kinds, expect_results, logs_model):
+ kind_map = model.log.get_log_entry_kinds()
+ if filter_kinds:
+ ignore_ids = [kind_map[kind_name] for kind_name in filter_kinds if filter_kinds]
+ else:
+ ignore_ids = []
+
+ now = datetime.now()
+ namespace_name = 'devtable'
+ repo_name = 'simple'
+ performer_name = 'devtable'
+
+ user = model.user.get_user(performer_name)
+ size = 3
+
+ # Log some push actions
+ logs_model.log_action('push_repo', namespace_name=namespace_name, repository_name=repo_name,
+ performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=1, seconds=11))
+ logs_model.log_action('push_repo', namespace_name=namespace_name, repository_name=repo_name,
+ performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=7, seconds=33))
+
+ # Log some pull actions
+ logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
+ performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=0, seconds=3))
+ logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
+ performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=3, seconds=55))
+ logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
+ performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=5, seconds=3))
+ logs_model.log_action('pull_repo', namespace_name=namespace_name, repository_name=repo_name,
+ performer=user, ip='0.0.0.0', timestamp=now-timedelta(days=11, seconds=11))
+
+ # Get the latest logs
+ latest_logs = logs_model.lookup_latest_logs(performer_name, repo_name, namespace_name,
+ filter_kinds=filter_kinds, size=size)
+
+ # Test max lookup size
+ assert len(latest_logs) <= size
+
+ # Make sure that the latest logs returned are in decreasing order
+ assert all(x >= y for x, y in zip(latest_logs, latest_logs[1:]))
+
+ if expect_results:
+ assert latest_logs
+
+ # Lookup all logs filtered by kinds and sort them in reverse chronological order
+ all_logs = _lookup_logs(logs_model, now - timedelta(days=30), now + timedelta(days=30),
+ filter_kinds=filter_kinds, namespace_name=namespace_name,
+ repository_name=repo_name)
+ all_logs = sorted(all_logs, key=lambda l: l.datetime, reverse=True)
+
+ # Check that querying all logs does not return the filtered kinds
+ assert all([log.kind_id not in ignore_ids for log in all_logs])
+
+    # Check that the latest logs contain only the most recent ones
+ assert latest_logs == all_logs[:len(latest_logs)]
+
+
+def test_count_repository_actions(logs_model):
+ # Log some actions.
+ logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ # Log some actions to a different repo.
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
+ ip='1.2.3.4')
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
+ ip='1.2.3.4')
+
+ # Count the actions.
+ day = date.today()
+ simple_repo = model.repository.get_repository('devtable', 'simple')
+
+ count = logs_model.count_repository_actions(simple_repo, day)
+ assert count == 3
+
+ complex_repo = model.repository.get_repository('devtable', 'complex')
+ count = logs_model.count_repository_actions(complex_repo, day)
+ assert count == 2
+
+ # Try counting actions for a few days in the future to ensure it doesn't raise an error.
+ count = logs_model.count_repository_actions(simple_repo, day + timedelta(days=5))
+ assert count == 0
+
+
+def test_yield_log_rotation_context(logs_model):
+ cutoff_date = datetime.now()
+ min_logs_per_rotation = 3
+
+ # Log some actions to be archived
+ # One day
+ logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple1',
+ ip='1.2.3.4', timestamp=cutoff_date-timedelta(days=1, seconds=1))
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
+ ip='5.6.7.8', timestamp=cutoff_date-timedelta(days=1, seconds=2))
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple3',
+ ip='9.10.11.12', timestamp=cutoff_date-timedelta(days=1, seconds=3))
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple4',
+ ip='0.0.0.0', timestamp=cutoff_date-timedelta(days=1, seconds=4))
+ # Another day
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple5',
+ ip='1.1.1.1', timestamp=cutoff_date-timedelta(days=2, seconds=1))
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple5',
+ ip='1.1.1.1', timestamp=cutoff_date-timedelta(days=2, seconds=2))
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple5',
+ ip='1.1.1.1', timestamp=cutoff_date-timedelta(days=2, seconds=3))
+
+ found = _lookup_logs(logs_model, cutoff_date - timedelta(days=3), cutoff_date + timedelta(days=1))
+ assert found is not None and len(found) == 7
+
+ # Iterate the logs using the log rotation contexts
+ all_logs = []
+ for log_rotation_context in logs_model.yield_log_rotation_context(cutoff_date,
+ min_logs_per_rotation):
+ with log_rotation_context as context:
+ for logs, _ in context.yield_logs_batch():
+ all_logs.extend(logs)
+
+ assert len(all_logs) == 7
+ found = _lookup_logs(logs_model, cutoff_date - timedelta(days=3), cutoff_date + timedelta(days=1))
+ assert not found
+
+  # Sort the logs by datetime and check that the timestamps are strictly increasing, which
+  # also verifies that no duplicate entries were returned
+ all_logs.sort(key=lambda d: d.datetime)
+ assert all(x.datetime < y.datetime for x, y in zip(all_logs, all_logs[1:]))
+
+
+def test_count_repository_actions_with_wildcard_disabled(initialized_db):
+ with fake_elasticsearch(allow_wildcard=False):
+ logs_model = es_model()
+
+ # Log some actions.
+ logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ # Log some actions to a different repo.
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
+ ip='1.2.3.4')
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='complex',
+ ip='1.2.3.4')
+
+ # Count the actions.
+ day = date.today()
+ simple_repo = model.repository.get_repository('devtable', 'simple')
+
+ count = logs_model.count_repository_actions(simple_repo, day)
+ assert count == 3
+
+ complex_repo = model.repository.get_repository('devtable', 'complex')
+ count = logs_model.count_repository_actions(complex_repo, day)
+ assert count == 2
+
+ # Try counting actions for a few days in the future to ensure it doesn't raise an error.
+ count = logs_model.count_repository_actions(simple_repo, day + timedelta(days=5))
+ assert count == 0
+
+
+@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
+ reason='Flaky on MySQL')
+def test_yield_logs_for_export(logs_model):
+ # Add some logs.
+ kinds = list(LogEntryKind.select())
+ user = model.user.get_user('devtable')
+
+ start_timestamp = datetime.utcnow()
+ timestamp = start_timestamp
+
+ for kind in kinds:
+ for index in range(0, 10):
+ logs_model.log_action(kind.name, namespace_name='devtable', repository_name='simple',
+ performer=user, ip='1.2.3.4', timestamp=timestamp)
+ timestamp = timestamp + timedelta(seconds=1)
+
+ # Yield the logs.
+ simple_repo = model.repository.get_repository('devtable', 'simple')
+ logs_found = []
+ for logs in logs_model.yield_logs_for_export(start_timestamp, timestamp + timedelta(minutes=10),
+ repository_id=simple_repo.id):
+ logs_found.extend(logs)
+
+ # Ensure we found all added logs.
+ assert len(logs_found) == len(kinds) * 10
+
+
+def test_yield_logs_for_export_timeout(logs_model):
+ # Add some logs.
+ kinds = list(LogEntryKind.select())
+ user = model.user.get_user('devtable')
+
+ start_timestamp = datetime.utcnow()
+ timestamp = start_timestamp
+
+ for kind in kinds:
+ for _ in range(0, 2):
+ logs_model.log_action(kind.name, namespace_name='devtable', repository_name='simple',
+ performer=user, ip='1.2.3.4', timestamp=timestamp)
+ timestamp = timestamp + timedelta(seconds=1)
+
+ # Yield the logs. Since we set the timeout to nothing, it should immediately fail.
+ simple_repo = model.repository.get_repository('devtable', 'simple')
+ with pytest.raises(LogsIterationTimeout):
+ list(logs_model.yield_logs_for_export(start_timestamp, timestamp + timedelta(minutes=1),
+ repository_id=simple_repo.id,
+ max_query_time=timedelta(seconds=0)))
+
+
+def test_disabled_namespace(clear_db_logs):
+ logs_model = TableLogsModel(lambda kind, namespace, is_free: namespace == 'devtable')
+
+ # Log some actions.
+ logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple',
+ ip='1.2.3.4')
+
+ # Log some actions to a different namespace.
+ logs_model.log_action('push_repo', namespace_name='buynlarge', repository_name='orgrepo',
+ ip='1.2.3.4')
+
+ logs_model.log_action('pull_repo', namespace_name='buynlarge', repository_name='orgrepo',
+ ip='1.2.3.4')
+ logs_model.log_action('pull_repo', namespace_name='buynlarge', repository_name='orgrepo',
+ ip='1.2.3.4')
+
+ # Count the actions.
+ day = datetime.today() - timedelta(minutes=60)
+ simple_repo = model.repository.get_repository('devtable', 'simple')
+ count = logs_model.count_repository_actions(simple_repo, day)
+ assert count == 0
+
+ org_repo = model.repository.get_repository('buynlarge', 'orgrepo')
+ count = logs_model.count_repository_actions(org_repo, day)
+ assert count == 3
+
+
+@pytest.mark.parametrize('aggregated_log_counts1, aggregated_log_counts2, expected_result', [
+ pytest.param(
+ [
+ AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1
+ AggregatedLogCount(1, 3, datetime(2019, 6, 7, 0, 0)), # 2
+ ],
+ [
+ AggregatedLogCount(1, 5, datetime(2019, 6, 6, 0, 0)), # 1
+ AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2
+ AggregatedLogCount(3, 3, datetime(2019, 6, 1, 0, 0)), # 3
+ ],
+ [
+ AggregatedLogCount(1, 8, datetime(2019, 6, 6, 0, 0)), # 1
+ AggregatedLogCount(1, 10, datetime(2019, 6, 7, 0, 0)), # 2
+ AggregatedLogCount(3, 3, datetime(2019, 6, 1, 0, 0)) # 3
+ ]
+ ),
+ pytest.param(
+ [
+ AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1
+ ],
+ [
+ AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2
+ ],
+ [
+ AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0)), # 1
+ AggregatedLogCount(1, 7, datetime(2019, 6, 7, 0, 0)), # 2
+ ]
+ ),
+ pytest.param(
+ [],
+ [AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0))],
+ [AggregatedLogCount(1, 3, datetime(2019, 6, 6, 0, 0))]
+ ),
+])
+def test_merge_aggregated_log_counts(aggregated_log_counts1, aggregated_log_counts2, expected_result):
+ assert (sorted(_merge_aggregated_log_counts(aggregated_log_counts1, aggregated_log_counts2)) ==
+ sorted(expected_result))
+
+
+@pytest.mark.parametrize('dt1, dt2, expected_result', [
+ # Valid dates
+ pytest.param(date(2019, 6, 17), date(2019, 6, 18), True),
+
+ # Invalid dates
+ pytest.param(date(2019, 6, 17), date(2019, 6, 17), False),
+ pytest.param(date(2019, 6, 17), date(2019, 6, 19), False),
+ pytest.param(date(2019, 6, 18), date(2019, 6, 17), False),
+
+ # Valid datetimes
+ pytest.param(datetime(2019, 6, 17, 0, 1), datetime(2019, 6, 17, 0, 2), True),
+
+ # Invalid datetimes
+ pytest.param(datetime(2019, 6, 17, 0, 2), datetime(2019, 6, 17, 0, 1), False),
+ pytest.param(datetime(2019, 6, 17, 11), datetime(2019, 6, 17, 11) + timedelta(hours=14), False),
+])
+def test_date_range_in_single_index(dt1, dt2, expected_result):
+ assert _date_range_in_single_index(dt1, dt2) == expected_result
+
+
+def test_pagination(logs_model, mock_page_size):
+ """
+  """
+  Make sure that pagination does not stop early when searching across multiple per-day indices
+  and the number of logs found so far happens to match the page size while there are still
+  indices left to search.
+  """
+ day1 = datetime.now()
+ day2 = day1 + timedelta(days=1)
+ day3 = day2 + timedelta(days=1)
+
+ # Log some actions in day indices
+ # One day
+ logs_model.log_action('push_repo', namespace_name='devtable', repository_name='simple1',
+ ip='1.2.3.4', timestamp=day1)
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple1',
+ ip='5.6.7.8', timestamp=day1)
+
+ found = _lookup_logs(logs_model, day1-timedelta(seconds=1), day3+timedelta(seconds=1))
+ assert len(found) == mock_page_size
+
+ # Another day
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
+ ip='1.1.1.1', timestamp=day2)
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
+ ip='0.0.0.0', timestamp=day2)
+
+ # Yet another day
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
+ ip='1.1.1.1', timestamp=day3)
+ logs_model.log_action('pull_repo', namespace_name='devtable', repository_name='simple2',
+ ip='0.0.0.0', timestamp=day3)
+
+ found = _lookup_logs(logs_model, day1-timedelta(seconds=1), day3+timedelta(seconds=1))
+ assert len(found) == 6
diff --git a/data/logs_model/test/test_logs_producer.py b/data/logs_model/test/test_logs_producer.py
new file mode 100644
index 000000000..382684244
--- /dev/null
+++ b/data/logs_model/test/test_logs_producer.py
@@ -0,0 +1,77 @@
+import logging
+import pytest
+from dateutil.parser import parse
+from mock import patch, Mock
+
+import botocore
+
+from data.logs_model import configure
+
+from test_elasticsearch import app_config, logs_model_config, logs_model, mock_elasticsearch, mock_db_model
+from mock_elasticsearch import *
+
+
+logger = logging.getLogger(__name__)
+
+FAKE_KAFKA_BROKERS = ['fake_server1', 'fake_server2']
+FAKE_KAFKA_TOPIC = 'sometopic'
+FAKE_MAX_BLOCK_SECONDS = 1
+
+@pytest.fixture()
+def kafka_logs_producer_config(app_config):
+ producer_config = {}
+ producer_config.update(app_config)
+
+ kafka_config = {
+ 'bootstrap_servers': FAKE_KAFKA_BROKERS,
+ 'topic': FAKE_KAFKA_TOPIC,
+ 'max_block_seconds': FAKE_MAX_BLOCK_SECONDS
+ }
+
+ producer_config['LOGS_MODEL_CONFIG']['producer'] = 'kafka'
+ producer_config['LOGS_MODEL_CONFIG']['kafka_config'] = kafka_config
+ return producer_config
+
+
+@pytest.fixture()
+def kinesis_logs_producer_config(app_config):
+ producer_config = {}
+ producer_config.update(app_config)
+
+ kinesis_stream_config = {
+ 'stream_name': 'test-stream',
+ 'aws_region': 'fake_region',
+ 'aws_access_key': 'some_key',
+ 'aws_secret_key': 'some_secret'
+ }
+
+ producer_config['LOGS_MODEL_CONFIG']['producer'] = 'kinesis_stream'
+ producer_config['LOGS_MODEL_CONFIG']['kinesis_stream_config'] = kinesis_stream_config
+ return producer_config
+
+
+def test_kafka_logs_producers(logs_model, mock_elasticsearch, mock_db_model, kafka_logs_producer_config):
+ mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
+
+ producer_config = kafka_logs_producer_config
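+  # check_version is patched so that constructing the KafkaProducer does not try to reach the
+  # fake brokers; send is patched to capture the produced log message.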
+ with patch('kafka.client_async.KafkaClient.check_version'), patch('kafka.KafkaProducer.send') as mock_send:
+ configure(producer_config)
+ logs_model.log_action('pull_repo', 'user1', Mock(id=1), '192.168.1.1', {'key': 'value'},
+ None, 'repo1', parse("2019-01-01T03:30"))
+
+ mock_send.assert_called_once()
+
+
+def test_kinesis_logs_producers(logs_model, mock_elasticsearch, mock_db_model, kinesis_logs_producer_config):
+ mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
+
+ producer_config = kinesis_logs_producer_config
+ with patch('botocore.endpoint.EndpointCreator.create_endpoint'), \
+ patch('botocore.client.BaseClient._make_api_call') as mock_send:
+ configure(producer_config)
+ logs_model.log_action('pull_repo', 'user1', Mock(id=1), '192.168.1.1', {'key': 'value'},
+ None, 'repo1', parse("2019-01-01T03:30"))
+
+ # Check that a PutRecord api call is made.
+ # NOTE: The second arg of _make_api_call uses a randomized PartitionKey
+ mock_send.assert_called_once_with(u'PutRecord', mock_send.call_args_list[0][0][1])
diff --git a/data/migrations/__init__.py b/data/migrations/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/data/migrations/env.py b/data/migrations/env.py
new file mode 100644
index 000000000..fdc870672
--- /dev/null
+++ b/data/migrations/env.py
@@ -0,0 +1,154 @@
+import logging
+import os
+
+from logging.config import fileConfig
+from urllib import unquote
+
+from alembic import context
+from alembic.script.revision import ResolutionError
+from alembic.util import CommandError
+from sqlalchemy import engine_from_config, pool
+from peewee import SqliteDatabase
+
+from data.database import all_models, db
+from data.migrations.tester import NoopTester, PopulateTestDataTester
+from data.model.sqlalchemybridge import gen_sqlalchemy_metadata
+from release import GIT_HEAD, REGION, SERVICE
+from util.morecollections import AttrDict
+from data.migrations.progress import PrometheusReporter, NullReporter
+
+
+config = context.config
+DB_URI = config.get_main_option('db_uri', 'sqlite:///test/data/test.db')
+PROM_LABEL_PREFIX = 'DBA_OP_LABEL_'
+
+
+# This option exists because Alembic needs the db proxy to be configured in order
+# to perform migrations. Importing the app initializes that proxy, but we don't
+# want that in the case of the config app, as we are explicitly connecting to a
+# db that the user has passed in, and we can't have an import dependency on app.
+if config.get_main_option('alembic_setup_app', 'True') == 'True':
+ from app import app
+ DB_URI = app.config['DB_URI']
+
+config.set_main_option('sqlalchemy.url', unquote(DB_URI))
+# Interpret the config file for Python logging.
+# This line sets up the loggers defined in that file.
+if config.config_file_name:
+ fileConfig(config.config_file_name)
+
+logger = logging.getLogger(__name__)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = gen_sqlalchemy_metadata(all_models)
+tables = AttrDict(target_metadata.tables)
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+def get_tester():
+ """ Returns the tester to use. We only return the tester that populates data
+ if the TEST_MIGRATE env var is set to `true` AND we make sure we're not
+ connecting to a production database.
+ """
+ if os.environ.get('TEST_MIGRATE', '') == 'true':
+ url = unquote(DB_URI)
+ if url.find('amazonaws.com') < 0:
+ return PopulateTestDataTester()
+
+ return NoopTester()
+
+def get_progress_reporter():
+ prom_addr = os.environ.get('DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR', None)
+
+ if prom_addr is not None:
+ prom_job = os.environ.get('DBA_OP_JOB_ID')
+
+ def _process_label_key(label_key):
+ return label_key[len(PROM_LABEL_PREFIX):].lower()
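+ # Collect DBA_OP_LABEL_*-prefixed environment variables as Prometheus grouping labels.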
+ labels = {_process_label_key(k): v for k, v in os.environ.items()
+ if k.startswith(PROM_LABEL_PREFIX)}
+
+ return PrometheusReporter(prom_addr, prom_job, labels)
+ else:
+ return NullReporter()
+
+def report_success(ctx=None, step=None, heads=None, run_args=None):
+ progress_reporter = run_args['progress_reporter']
+ progress_reporter.report_version_complete(success=True)
+
+def run_migrations_offline():
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the
+ script output.
+
+ """
+ url = unquote(DB_URI)
+ context.configure(url=url, target_metadata=target_metadata, transactional_ddl=True)
+
+ with context.begin_transaction():
+ context.run_migrations(tables=tables, tester=get_tester(), progress_reporter=NullReporter())
+
+def run_migrations_online():
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
+
+ if (isinstance(db.obj, SqliteDatabase) and
+ not 'GENMIGRATE' in os.environ and
+ not 'DB_URI' in os.environ):
+ print 'Skipping Sqlite migration!'
+ return
+
+ progress_reporter = get_progress_reporter()
+ engine = engine_from_config(config.get_section(config.config_ini_section),
+ prefix='sqlalchemy.',
+ poolclass=pool.NullPool)
+
+ connection = engine.connect()
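+ # on_version_apply is invoked after each migration version is applied and reports it as complete.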
+ context.configure(connection=connection,
+ target_metadata=target_metadata,
+ transactional_ddl=False,
+ on_version_apply=report_success)
+
+ try:
+ with context.begin_transaction():
+ try:
+ context.run_migrations(tables=tables, tester=get_tester(),
+ progress_reporter=progress_reporter)
+ except (CommandError, ResolutionError) as ex:
+ if 'No such revision' not in str(ex):
+ raise
+
+ if not REGION or not GIT_HEAD:
+ raise
+
+ from data.model.release import get_recent_releases
+
+ # ignore revision error if we're running the previous release
+ releases = list(get_recent_releases(SERVICE, REGION).offset(1).limit(1))
+ if releases and releases[0].version == GIT_HEAD:
+ logger.warn('Skipping database migration because revision not found')
+ else:
+ raise
+ finally:
+ connection.close()
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/data/migrations/migration.sh b/data/migrations/migration.sh
new file mode 100755
index 000000000..bf8d234b6
--- /dev/null
+++ b/data/migrations/migration.sh
@@ -0,0 +1,147 @@
+set -e
+
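+# Derive the Docker host IP from DOCKER_HOST (e.g. tcp://host:port), defaulting to 127.0.0.1.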
+PARSED_DOCKER_HOST=`echo $DOCKER_HOST | sed 's/tcp:\/\///' | sed 's/:.*//'`
+DOCKER_IP="${PARSED_DOCKER_HOST:-127.0.0.1}"
+MYSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root:password@$DOCKER_IP/genschema\"}"
+PERCONA_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root:password@$DOCKER_IP/genschema\"}"
+PGSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"postgresql://postgres@$DOCKER_IP/genschema\"}"
+
+up_mysql() {
+ # Run a SQL database on port 3306 inside of Docker.
+ docker run --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mysql:5.7
+
+ echo 'Sleeping for 25...'
+ sleep 25
+
+ # Add the database to mysql.
+ docker run --rm --link mysql:mysql mysql:5.7 sh -c 'echo "create database genschema" | mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -ppassword'
+}
+
+down_mysql() {
+ docker kill mysql || true
+ docker rm -v mysql || true
+}
+
+up_mariadb() {
+ # Run a SQL database on port 3306 inside of Docker.
+ docker run --name mariadb -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d mariadb
+
+ echo 'Sleeping for 25...'
+ sleep 25
+
+ # Add the database to mysql.
+ docker run --rm --link mariadb:mariadb mariadb sh -c 'echo "create database genschema" | mysql -h"$MARIADB_PORT_3306_TCP_ADDR" -P"$MARIADB_PORT_3306_TCP_PORT" -uroot -ppassword'
+}
+
+down_mariadb() {
+ docker kill mariadb || true
+ docker rm -v mariadb || true
+}
+
+up_percona() {
+ # Run a SQL database on port 3306 inside of Docker.
+ docker run --name percona -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -d percona
+
+ echo 'Sleeping for 25...'
+ sleep 25
+
+ # Add the database to percona.
+ docker run --rm --link percona:percona percona sh -c 'echo "create database genschema" | mysql -h $PERCONA_PORT_3306_TCP_ADDR -uroot -ppassword'
+}
+
+down_percona() {
+ docker kill percona || true
+ docker rm -v percona || true
+}
+
+up_postgres() {
+ # Run a SQL database on port 5432 inside of Docker.
+ docker run --name postgres -p 5432:5432 -d postgres
+
+ # Sleep for 5s to let the database start.
+ echo 'Sleeping for 5...'
+ sleep 5
+
+ # Add the database to postgres.
+ docker run --rm --link postgres:postgres postgres sh -c 'echo "create database genschema" | psql -h "$POSTGRES_PORT_5432_TCP_ADDR" -p "$POSTGRES_PORT_5432_TCP_PORT" -U postgres'
+ docker run --rm --link postgres:postgres postgres sh -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | psql -h "$POSTGRES_PORT_5432_TCP_ADDR" -p "$POSTGRES_PORT_5432_TCP_PORT" -U postgres -d genschema'
+
+}
+
+down_postgres() {
+ docker kill postgres || true
+ docker rm -v postgres || true
+}
+
+gen_migrate() {
+ # Generate a database with the schema as defined by the existing alembic model.
+ QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head
+
+
+ # Generate the migration to the current model.
+ QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic revision --autogenerate -m "$2"
+}
+
+test_migrate() {
+ # Generate a database with the schema as defined by the existing alembic model.
+ echo '> Running upgrade'
+ TEST_MIGRATE=true QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head
+
+ # Downgrade to verify it works in both directions.
+ echo '> Running downgrade'
+ COUNT=`ls data/migrations/versions/*.py | wc -l | tr -d ' '`
+ TEST_MIGRATE=true QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic downgrade "-$COUNT"
+}
+
+down_mysql
+down_postgres
+down_mariadb
+down_percona
+
+# Test (and generate, if requested) via MySQL.
+echo '> Starting MySQL'
+up_mysql
+
+if [ ! -z "$@" ]
+ then
+ set +e
+ echo '> Generating Migration'
+ gen_migrate $MYSQL_CONFIG_OVERRIDE "$@"
+ set -e
+ fi
+
+echo '> Testing Migration (mysql)'
+set +e
+test_migrate $MYSQL_CONFIG_OVERRIDE
+set -e
+down_mysql
+
+# Test via Postgres.
+echo '> Starting Postgres'
+up_postgres
+
+echo '> Testing Migration (postgres)'
+set +e
+test_migrate $PGSQL_CONFIG_OVERRIDE
+set -e
+down_postgres
+
+# Test via MariaDB.
+echo '> Starting MariaDB'
+up_mariadb
+
+echo '> Testing Migration (mariadb)'
+set +e
+test_migrate $MYSQL_CONFIG_OVERRIDE
+set -e
+down_mariadb
+
+# Test via Percona.
+echo '> Starting Percona'
+up_percona
+
+echo '> Testing Migration (percona)'
+set +e
+test_migrate $PERCONA_CONFIG_OVERRIDE
+set -e
+down_percona
diff --git a/data/migrations/progress.py b/data/migrations/progress.py
new file mode 100644
index 000000000..91278beea
--- /dev/null
+++ b/data/migrations/progress.py
@@ -0,0 +1,101 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+from functools import partial, wraps
+
+from prometheus_client import CollectorRegistry, Gauge, Counter, push_to_gateway
+
+from util.abchelpers import nooper
+
+
+@add_metaclass(ABCMeta)
+class ProgressReporter(object):
+ """ Implements an interface for reporting progress with the migrations.
+ """
+ @abstractmethod
+ def report_version_complete(self, success):
+ """ Called when an entire migration is complete. """
+
+ @abstractmethod
+ def report_step_progress(self):
+ """ Called when a single step in the migration has been completed. """
+
+
+@nooper
+class NullReporter(ProgressReporter):
+ """ No-op version of the progress reporter, designed for use when no progress
+ reporting endpoint is provided. """
+
+
+class PrometheusReporter(ProgressReporter):
+ def __init__(self, prom_pushgateway_addr, prom_job, labels, total_steps_num=None):
+ self._total_steps_num = total_steps_num
+ self._completed_steps = 0.0
+
+ registry = CollectorRegistry()
+
+ self._migration_completion_percent = Gauge(
+ 'migration_completion_percent',
+ 'Estimate of the completion percentage of the job',
+ registry=registry,
+ )
+ self._migration_complete_total = Counter(
+ 'migration_complete_total',
+ 'Binary value of whether or not the job is complete',
+ registry=registry,
+ )
+ self._migration_failed_total = Counter(
+ 'migration_failed_total',
+ 'Binary value of whether or not the job has failed',
+ registry=registry,
+ )
+ self._migration_items_completed_total = Counter(
+ 'migration_items_completed_total',
+ 'Number of items this migration has completed',
+ registry=registry,
+ )
+
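+ # Bind the push gateway address, job name and grouping labels so metrics can be pushed with a single call.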
+ self._push = partial(push_to_gateway,
+ prom_pushgateway_addr,
+ job=prom_job,
+ registry=registry,
+ grouping_key=labels,
+ )
+
+ def report_version_complete(self, success=True):
+ if success:
+ self._migration_complete_total.inc()
+ else:
+ self._migration_failed_total.inc()
+ self._migration_completion_percent.set(1.0)
+
+ self._push()
+
+ def report_step_progress(self):
+ self._migration_items_completed_total.inc()
+
+ if self._total_steps_num is not None:
+ self._completed_steps += 1
+ self._migration_completion_percent.set(self._completed_steps / self._total_steps_num)
+
+ self._push()
+
+
+class ProgressWrapper(object):
+ def __init__(self, delegate_module, progress_monitor):
+ self._delegate_module = delegate_module
+ self._progress_monitor = progress_monitor
+
+ def __getattr__(self, attr_name):
+ # Raises AttributeError if the delegate has no such attribute
+ maybe_callable = getattr(self._delegate_module, attr_name)
+ if callable(maybe_callable):
+ # Wrap the callable so that each invocation also reports
+ # step progress to the progress monitor
+ @wraps(maybe_callable)
+ def wrapped_method(*args, **kwargs):
+ result = maybe_callable(*args, **kwargs)
+ self._progress_monitor.report_step_progress()
+ return result
+
+ return wrapped_method
+ return maybe_callable
diff --git a/data/migrations/script.py.mako b/data/migrations/script.py.mako
new file mode 100644
index 000000000..f17f94d2b
--- /dev/null
+++ b/data/migrations/script.py.mako
@@ -0,0 +1,27 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision}
+Create Date: ${create_date}
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+${imports if imports else ""}
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+
+ ${downgrades if downgrades else "pass"}
diff --git a/data/migrations/test/test_db_config.py b/data/migrations/test/test_db_config.py
new file mode 100644
index 000000000..747c5eb73
--- /dev/null
+++ b/data/migrations/test/test_db_config.py
@@ -0,0 +1,21 @@
+import pytest
+from mock import patch
+
+from data.runmigration import run_alembic_migration
+from alembic.script import ScriptDirectory
+from test.fixtures import *
+
+@pytest.mark.parametrize('db_uri, is_valid', [
+ ('postgresql://devtable:password@quay-postgres/registry_database', True),
+ ('postgresql://devtable:password%25@quay-postgres/registry_database', False),
+ ('postgresql://devtable:password%%25@quay-postgres/registry_database', True),
+ ('postgresql://devtable@db:password@quay-postgres/registry_database', True),
+])
+def test_alembic_db_uri(db_uri, is_valid):
+ """ Test if the given URI is escaped for string interpolation (Python's configparser). """
+ with patch('alembic.script.ScriptDirectory.run_env') as m:
+ if is_valid:
+ run_alembic_migration(db_uri)
+ else:
+ with pytest.raises(ValueError):
+ run_alembic_migration(db_uri)
diff --git a/data/migrations/tester.py b/data/migrations/tester.py
new file mode 100644
index 000000000..01e862909
--- /dev/null
+++ b/data/migrations/tester.py
@@ -0,0 +1,132 @@
+import json
+import logging
+import uuid
+
+from abc import ABCMeta, abstractmethod
+from datetime import datetime
+from six import add_metaclass
+
+from alembic import op
+from sqlalchemy import text
+
+from util.abchelpers import nooper
+
+logger = logging.getLogger(__name__)
+
+def escape_table_name(table_name):
+ if op.get_bind().engine.name == 'postgresql':
+ # Needed for the `user` table.
+ return '"%s"' % table_name
+
+ return table_name
+
+
+class DataTypes(object):
+ @staticmethod
+ def DateTime():
+ return datetime.now()
+
+ @staticmethod
+ def Date():
+ return datetime.now()
+
+ @staticmethod
+ def String():
+ return 'somestringvalue'
+
+ @staticmethod
+ def Token():
+ return '%s%s' % ('a' * 60, 'b' * 60)
+
+ @staticmethod
+ def UTF8Char():
+ return 'some other value'
+
+ @staticmethod
+ def UUID():
+ return str(uuid.uuid4())
+
+ @staticmethod
+ def JSON():
+ return json.dumps(dict(foo='bar', baz='meh'))
+
+ @staticmethod
+ def Boolean():
+ if op.get_bind().engine.name == 'postgresql':
+ return True
+
+ return 1
+
+ @staticmethod
+ def BigInteger():
+ return 21474836470
+
+ @staticmethod
+ def Integer():
+ return 42
+
+ @staticmethod
+ def Constant(value):
+ def get_value():
+ return value
+ return get_value
+
+ @staticmethod
+ def Foreign(table_name):
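+ # Returns a callable that looks up the id of an existing row in the referenced table.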
+ def get_index():
+ result = op.get_bind().execute("SELECT id FROM %s LIMIT 1" % escape_table_name(table_name))
+ try:
+ return list(result)[0][0]
+ except IndexError:
+ raise Exception('Could not find row for table %s' % table_name)
+ finally:
+ result.close()
+
+ return get_index
+
+
+@add_metaclass(ABCMeta)
+class MigrationTester(object):
+ """ Implements an interface for adding testing capabilities to the
+ data model migration system in Alembic.
+ """
+ TestDataType = DataTypes
+
+ @abstractmethod
+ def populate_table(self, table_name, fields):
+ """ Called to populate a table with the given fields filled in with testing data. """
+
+ @abstractmethod
+ def populate_column(self, table_name, col_name, field_type):
+ """ Called to populate a column in a table to be filled in with testing data. """
+
+
+@nooper
+class NoopTester(MigrationTester):
+ """ No-op version of the tester, designed for production workloads. """
+
+
+class PopulateTestDataTester(MigrationTester):
+ def populate_table(self, table_name, fields):
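+ # Build a parameterized INSERT; column names are backtick-quoted except on PostgreSQL.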
+ columns = {field_name: field_type() for field_name, field_type in fields}
+ field_name_vars = [':' + field_name for field_name, _ in fields]
+
+ if op.get_bind().engine.name == 'postgresql':
+ field_names = ["%s" % field_name for field_name, _ in fields]
+ else:
+ field_names = ["`%s`" % field_name for field_name, _ in fields]
+
+ table_name = escape_table_name(table_name)
+ query = text('INSERT INTO %s (%s) VALUES (%s)' % (table_name, ', '.join(field_names),
+ ', '.join(field_name_vars)))
+ logger.info("Executing test query %s with values %s", query, columns.values())
+ op.get_bind().execute(query, **columns)
+
+ def populate_column(self, table_name, col_name, field_type):
+ col_value = field_type()
+ row_id = DataTypes.Foreign(table_name)()
+
+ table_name = escape_table_name(table_name)
+ update_text = text("UPDATE %s SET %s=:col_value where ID=:row_id" % (table_name, col_name))
+ logger.info("Executing test query %s with value %s on row %s", update_text, col_value, row_id)
+ op.get_bind().execute(update_text, col_value=col_value, row_id=row_id)
diff --git a/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py b/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py
new file mode 100644
index 000000000..2a995e58c
--- /dev/null
+++ b/data/migrations/versions/0cf50323c78b_add_creation_date_to_user_table.py
@@ -0,0 +1,33 @@
+"""Add creation date to User table
+
+Revision ID: 0cf50323c78b
+Revises: 87fbbc224f10
+Create Date: 2018-03-09 13:19:41.903196
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '0cf50323c78b'
+down_revision = '87fbbc224f10'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('user', sa.Column('creation_date', sa.DateTime(), nullable=True))
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_column('user', 'creation_date', tester.TestDataType.DateTime)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('user', 'creation_date')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py b/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py
new file mode 100644
index 000000000..e2b4073da
--- /dev/null
+++ b/data/migrations/versions/10f45ee2310b_add_tag_tagkind_and_manifestchild_tables.py
@@ -0,0 +1,100 @@
+"""Add Tag, TagKind and ManifestChild tables
+
+Revision ID: 10f45ee2310b
+Revises: 13411de1c0ff
+Create Date: 2018-10-29 15:22:53.552216
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '10f45ee2310b'
+down_revision = '13411de1c0ff'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from util.migrate import UTF8CharField
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('tagkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagkind'))
+ )
+ op.create_index('tagkind_name', 'tagkind', ['name'], unique=True)
+ op.create_table('manifestchild',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('child_manifest_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['child_manifest_id'], ['manifest.id'], name=op.f('fk_manifestchild_child_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestchild_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestchild_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestchild'))
+ )
+ op.create_index('manifestchild_child_manifest_id', 'manifestchild', ['child_manifest_id'], unique=False)
+ op.create_index('manifestchild_manifest_id', 'manifestchild', ['manifest_id'], unique=False)
+ op.create_index('manifestchild_manifest_id_child_manifest_id', 'manifestchild', ['manifest_id', 'child_manifest_id'], unique=True)
+ op.create_index('manifestchild_repository_id', 'manifestchild', ['repository_id'], unique=False)
+ op.create_index('manifestchild_repository_id_child_manifest_id', 'manifestchild', ['repository_id', 'child_manifest_id'], unique=False)
+ op.create_index('manifestchild_repository_id_manifest_id', 'manifestchild', ['repository_id', 'manifest_id'], unique=False)
+ op.create_index('manifestchild_repository_id_manifest_id_child_manifest_id', 'manifestchild', ['repository_id', 'manifest_id', 'child_manifest_id'], unique=False)
+ op.create_table('tag',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=True),
+ sa.Column('lifetime_start_ms', sa.BigInteger(), nullable=False),
+ sa.Column('lifetime_end_ms', sa.BigInteger(), nullable=True),
+ sa.Column('hidden', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.Column('reversion', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.Column('tag_kind_id', sa.Integer(), nullable=False),
+ sa.Column('linked_tag_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['linked_tag_id'], ['tag.id'], name=op.f('fk_tag_linked_tag_id_tag')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tag_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tag_repository_id_repository')),
+ sa.ForeignKeyConstraint(['tag_kind_id'], ['tagkind.id'], name=op.f('fk_tag_tag_kind_id_tagkind')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tag'))
+ )
+ op.create_index('tag_lifetime_end_ms', 'tag', ['lifetime_end_ms'], unique=False)
+ op.create_index('tag_linked_tag_id', 'tag', ['linked_tag_id'], unique=False)
+ op.create_index('tag_manifest_id', 'tag', ['manifest_id'], unique=False)
+ op.create_index('tag_repository_id', 'tag', ['repository_id'], unique=False)
+ op.create_index('tag_repository_id_name', 'tag', ['repository_id', 'name'], unique=False)
+ op.create_index('tag_repository_id_name_hidden', 'tag', ['repository_id', 'name', 'hidden'], unique=False)
+ op.create_index('tag_repository_id_name_lifetime_end_ms', 'tag', ['repository_id', 'name', 'lifetime_end_ms'], unique=True)
+ op.create_index('tag_repository_id_name_tag_kind_id', 'tag', ['repository_id', 'name', 'tag_kind_id'], unique=False)
+ op.create_index('tag_tag_kind_id', 'tag', ['tag_kind_id'], unique=False)
+ # ### end Alembic commands ###
+
+ op.bulk_insert(tables.tagkind,
+ [
+ {'name': 'tag'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_table('tag', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('tag_kind_id', tester.TestDataType.Foreign('tagkind')),
+ ('name', tester.TestDataType.String),
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('lifetime_start_ms', tester.TestDataType.BigInteger),
+ ])
+
+ tester.populate_table('manifestchild', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('child_manifest_id', tester.TestDataType.Foreign('manifest')),
+ ])
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('tag')
+ op.drop_table('manifestchild')
+ op.drop_table('tagkind')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py b/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py
new file mode 100644
index 000000000..70e0a21d7
--- /dev/null
+++ b/data/migrations/versions/13411de1c0ff_remove_unique_from_tagmanifesttomanifest.py
@@ -0,0 +1,46 @@
+"""Remove unique from TagManifestToManifest
+
+Revision ID: 13411de1c0ff
+Revises: 654e6df88b71
+Create Date: 2018-08-19 23:30:24.969549
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '13411de1c0ff'
+down_revision = '654e6df88b71'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # Note: Because of a restriction in MySQL, we cannot simply remove the index and re-add
+ # it without the unique=False, nor can we simply alter the index. To make it work, we'd have to
+ # remove the primary key on the field, so instead we simply drop the table entirely and
+ # recreate it with the modified index. The backfill will re-fill this in.
+ op.drop_table('tagmanifesttomanifest')
+
+ op.create_table('tagmanifesttomanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('tag_manifest_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('broken', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tagmanifesttomanifest_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['tag_manifest_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifesttomanifest_tag_manifest_id_tagmanifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifesttomanifest'))
+ )
+ op.create_index('tagmanifesttomanifest_broken', 'tagmanifesttomanifest', ['broken'], unique=False)
+ op.create_index('tagmanifesttomanifest_manifest_id', 'tagmanifesttomanifest', ['manifest_id'], unique=False)
+ op.create_index('tagmanifesttomanifest_tag_manifest_id', 'tagmanifesttomanifest', ['tag_manifest_id'], unique=True)
+
+ tester.populate_table('tagmanifesttomanifest', [
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('tag_manifest_id', tester.TestDataType.Foreign('tagmanifest')),
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ pass
diff --git a/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py b/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py
new file mode 100644
index 000000000..489303dde
--- /dev/null
+++ b/data/migrations/versions/152bb29a1bb3_add_maximum_build_queue_count_setting_.py
@@ -0,0 +1,33 @@
+"""Add maximum build queue count setting to user table
+
+Revision ID: 152bb29a1bb3
+Revises: cbc8177760d9
+Create Date: 2018-02-20 13:34:34.902415
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '152bb29a1bb3'
+down_revision = 'cbc8177760d9'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('user', sa.Column('maximum_queued_builds_count', sa.Integer(), nullable=True))
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_column('user', 'maximum_queued_builds_count', tester.TestDataType.Integer)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('user', 'maximum_queued_builds_count')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py b/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py
new file mode 100644
index 000000000..6eca834fa
--- /dev/null
+++ b/data/migrations/versions/152edccba18c_make_blodupload_byte_count_not_nullable.py
@@ -0,0 +1,27 @@
+"""Make BlodUpload byte_count not nullable
+
+Revision ID: 152edccba18c
+Revises: c91c564aad34
+Create Date: 2018-02-23 12:41:25.571835
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '152edccba18c'
+down_revision = 'c91c564aad34'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(),
+ nullable=False)
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(),
+ nullable=True)
diff --git a/data/migrations/versions/1783530bee68_add_logentry2_table_quay_io_only.py b/data/migrations/versions/1783530bee68_add_logentry2_table_quay_io_only.py
new file mode 100644
index 000000000..ffe5d9176
--- /dev/null
+++ b/data/migrations/versions/1783530bee68_add_logentry2_table_quay_io_only.py
@@ -0,0 +1,49 @@
+"""Add LogEntry2 table - QUAY.IO ONLY
+
+Revision ID: 1783530bee68
+Revises: 5b7503aada1b
+Create Date: 2018-05-17 16:32:28.532264
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1783530bee68'
+down_revision = '5b7503aada1b'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('logentry2',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('account_id', sa.Integer(), nullable=False),
+ sa.Column('performer_id', sa.Integer(), nullable=True),
+ sa.Column('repository_id', sa.Integer(), nullable=True),
+ sa.Column('datetime', sa.DateTime(), nullable=False),
+ sa.Column('ip', sa.String(length=255), nullable=True),
+ sa.Column('metadata_json', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['kind_id'], ['logentrykind.id'], name=op.f('fk_logentry2_kind_id_logentrykind')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_logentry2'))
+ )
+ op.create_index('logentry2_account_id', 'logentry2', ['account_id'], unique=False)
+ op.create_index('logentry2_account_id_datetime', 'logentry2', ['account_id', 'datetime'], unique=False)
+ op.create_index('logentry2_datetime', 'logentry2', ['datetime'], unique=False)
+ op.create_index('logentry2_kind_id', 'logentry2', ['kind_id'], unique=False)
+ op.create_index('logentry2_performer_id', 'logentry2', ['performer_id'], unique=False)
+ op.create_index('logentry2_performer_id_datetime', 'logentry2', ['performer_id', 'datetime'], unique=False)
+ op.create_index('logentry2_repository_id', 'logentry2', ['repository_id'], unique=False)
+ op.create_index('logentry2_repository_id_datetime', 'logentry2', ['repository_id', 'datetime'], unique=False)
+ op.create_index('logentry2_repository_id_datetime_kind_id', 'logentry2', ['repository_id', 'datetime', 'kind_id'], unique=False)
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('logentry2')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py b/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py
new file mode 100644
index 000000000..27f1aafa6
--- /dev/null
+++ b/data/migrations/versions/17aff2e1354e_add_automatic_disable_of_build_triggers.py
@@ -0,0 +1,54 @@
+"""Add automatic disable of build triggers
+
+Revision ID: 17aff2e1354e
+Revises: 61cadbacb9fc
+Create Date: 2017-10-18 15:58:03.971526
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '17aff2e1354e'
+down_revision = '61cadbacb9fc'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorybuildtrigger', sa.Column('successive_failure_count', sa.Integer(), server_default='0', nullable=False))
+ op.add_column('repositorybuildtrigger', sa.Column('successive_internal_error_count', sa.Integer(), server_default='0', nullable=False))
+ # ### end Alembic commands ###
+
+ op.bulk_insert(
+ tables.disablereason,
+ [
+ {'id': 2, 'name': 'successive_build_failures'},
+ {'id': 3, 'name': 'successive_build_internal_errors'},
+ ],
+ )
+
+ # ### population of test data ### #
+ tester.populate_column('repositorybuildtrigger', 'successive_failure_count', tester.TestDataType.Integer)
+ tester.populate_column('repositorybuildtrigger', 'successive_internal_error_count', tester.TestDataType.Integer)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('repositorybuildtrigger', 'successive_internal_error_count')
+ op.drop_column('repositorybuildtrigger', 'successive_failure_count')
+ # ### end Alembic commands ###
+
+ op.execute(tables
+ .disablereason
+ .delete()
+ .where(tables.disablereason.c.name == op.inline_literal('successive_build_internal_errors')))
+
+ op.execute(tables
+ .disablereason
+ .delete()
+ .where(tables.disablereason.c.name == op.inline_literal('successive_build_failures')))
diff --git a/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py b/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py
new file mode 100644
index 000000000..9b9bb1978
--- /dev/null
+++ b/data/migrations/versions/224ce4c72c2f_add_last_accessed_field_to_user_table.py
@@ -0,0 +1,35 @@
+"""Add last_accessed field to User table
+
+Revision ID: 224ce4c72c2f
+Revises: b547bc139ad8
+Create Date: 2018-03-12 22:44:07.070490
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '224ce4c72c2f'
+down_revision = 'b547bc139ad8'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('user', sa.Column('last_accessed', sa.DateTime(), nullable=True))
+ op.create_index('user_last_accessed', 'user', ['last_accessed'], unique=False)
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_column('user', 'last_accessed', tester.TestDataType.DateTime)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('user_last_accessed', table_name='user')
+ op.drop_column('user', 'last_accessed')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py b/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py
new file mode 100644
index 000000000..3a48b9b45
--- /dev/null
+++ b/data/migrations/versions/34c8ef052ec9_repo_mirror_columns.py
@@ -0,0 +1,125 @@
+"""repo mirror columns
+
+Revision ID: 34c8ef052ec9
+Revises: cc6778199cdb
+Create Date: 2019-10-07 13:11:20.424715
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '34c8ef052ec9'
+down_revision = 'cc6778199cdb'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+from datetime import datetime
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+from peewee import ForeignKeyField, DateTimeField, BooleanField
+from data.database import (BaseModel, RepoMirrorType, RepoMirrorStatus, RepoMirrorRule, uuid_generator,
+ QuayUserField, Repository, IntegerField, JSONField)
+from data.fields import EnumField as ClientEnumField, CharField, EncryptedCharField
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+BATCH_SIZE = 10
+
+
+# Original model
+class RepoMirrorConfig(BaseModel):
+ """
+ Represents a repository to be mirrored and any additional configuration
+ required to perform the mirroring.
+ """
+ repository = ForeignKeyField(Repository, index=True, unique=True, backref='mirror')
+ creation_date = DateTimeField(default=datetime.utcnow)
+ is_enabled = BooleanField(default=True)
+
+ # Mirror Configuration
+ mirror_type = ClientEnumField(RepoMirrorType, default=RepoMirrorType.PULL)
+ internal_robot = QuayUserField(allows_robots=True, null=True, backref='mirrorpullrobot',
+ robot_null_delete=True)
+ external_reference = CharField()
+ external_registry = CharField()
+ external_namespace = CharField()
+ external_repository = CharField()
+ external_registry_username = EncryptedCharField(max_length=2048, null=True)
+ external_registry_password = EncryptedCharField(max_length=2048, null=True)
+ external_registry_config = JSONField(default={})
+
+ # Worker Queuing
+ sync_interval = IntegerField() # seconds between syncs
+ sync_start_date = DateTimeField(null=True) # next start time
+ sync_expiration_date = DateTimeField(null=True) # max duration
+ sync_retries_remaining = IntegerField(default=3)
+ sync_status = ClientEnumField(RepoMirrorStatus, default=RepoMirrorStatus.NEVER_RUN)
+ sync_transaction_id = CharField(default=uuid_generator, max_length=36)
+
+ # Tag-Matching Rules
+ root_rule = ForeignKeyField(RepoMirrorRule)
+
+
+def _iterate(model_class, clause):
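+ # Repeatedly select small batches of rows still matching the clause until none remain.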
+ while True:
+ has_rows = False
+ for row in list(model_class.select().where(clause).limit(BATCH_SIZE)):
+ has_rows = True
+ yield row
+
+ if not has_rows:
+ break
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+
+ logger.info('Migrating to external_reference from existing columns')
+
+ op.add_column('repomirrorconfig', sa.Column('external_reference', sa.Text(), nullable=True))
+
+ for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_reference >> None)):
+ repo = '%s/%s/%s' % (repo_mirror.external_registry, repo_mirror.external_namespace, repo_mirror.external_repository)
+ logger.info('migrating %s' % repo)
+ repo_mirror.external_reference = repo
+ repo_mirror.save()
+
+ op.drop_column('repomirrorconfig', 'external_registry')
+ op.drop_column('repomirrorconfig', 'external_namespace')
+ op.drop_column('repomirrorconfig', 'external_repository')
+
+ op.alter_column('repomirrorconfig', 'external_reference', nullable=False, existing_type=sa.Text())
+
+
+ tester.populate_column('repomirrorconfig', 'external_reference', tester.TestDataType.String)
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+
+ # This will downgrade existing data, but the result may not exactly match the previous
+ # data structure. If external_reference does not split into three parts (registry,
+ # namespace, repository), a 'DOWNGRADE-FAILED' placeholder is inserted instead.
+
+ op.add_column('repomirrorconfig', sa.Column('external_registry', sa.String(length=255), nullable=True))
+ op.add_column('repomirrorconfig', sa.Column('external_namespace', sa.String(length=255), nullable=True))
+ op.add_column('repomirrorconfig', sa.Column('external_repository', sa.String(length=255), nullable=True))
+
+ logger.info('Restoring columns from external_reference')
+ for repo_mirror in _iterate(RepoMirrorConfig, (RepoMirrorConfig.external_registry >> None)):
+ logger.info('Restoring %s' % repo_mirror.external_reference)
+ parts = repo_mirror.external_reference.split('/', 2)
+ repo_mirror.external_registry = parts[0] if len(parts) >= 1 else 'DOWNGRADE-FAILED'
+ repo_mirror.external_namespace = parts[1] if len(parts) >= 2 else 'DOWNGRADE-FAILED'
+ repo_mirror.external_repository = parts[2] if len(parts) >= 3 else 'DOWNGRADE-FAILED'
+ repo_mirror.save()
+
+ op.drop_column('repomirrorconfig', 'external_reference')
+
+ op.alter_column('repomirrorconfig', 'external_registry', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('repomirrorconfig', 'external_namespace', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('repomirrorconfig', 'external_repository', nullable=False, existing_type=sa.String(length=255))
diff --git a/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py b/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py
new file mode 100644
index 000000000..87e6f8890
--- /dev/null
+++ b/data/migrations/versions/3e8cc74a1e7b_add_severity_and_media_type_to_global_.py
@@ -0,0 +1,63 @@
+"""Add severity and media_type to global messages
+
+Revision ID: 3e8cc74a1e7b
+Revises: fc47c1ec019f
+Create Date: 2017-01-17 16:22:28.584237
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3e8cc74a1e7b'
+down_revision = 'fc47c1ec019f'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('messages', sa.Column('media_type_id', sa.Integer(), nullable=False, server_default='1'))
+ op.add_column('messages', sa.Column('severity', sa.String(length=255), nullable=False, server_default='info'))
+ op.alter_column('messages', 'uuid',
+ existing_type=mysql.VARCHAR(length=36),
+ server_default='',
+ nullable=False)
+ op.create_index('messages_media_type_id', 'messages', ['media_type_id'], unique=False)
+ op.create_index('messages_severity', 'messages', ['severity'], unique=False)
+ op.create_index('messages_uuid', 'messages', ['uuid'], unique=False)
+ op.create_foreign_key(op.f('fk_messages_media_type_id_mediatype'), 'messages', 'mediatype', ['media_type_id'], ['id'])
+ # ### end Alembic commands ###
+
+ op.bulk_insert(tables.mediatype,
+ [
+ {'name': 'text/markdown'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_column('messages', 'media_type_id', tester.TestDataType.Foreign('mediatype'))
+ tester.populate_column('messages', 'severity', lambda: 'info')
+ tester.populate_column('messages', 'uuid', tester.TestDataType.UUID)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(op.f('fk_messages_media_type_id_mediatype'), 'messages', type_='foreignkey')
+ op.drop_index('messages_uuid', table_name='messages')
+ op.drop_index('messages_severity', table_name='messages')
+ op.drop_index('messages_media_type_id', table_name='messages')
+ op.alter_column('messages', 'uuid',
+ existing_type=mysql.VARCHAR(length=36),
+ nullable=True)
+ op.drop_column('messages', 'severity')
+ op.drop_column('messages', 'media_type_id')
+ # ### end Alembic commands ###
+
+ op.execute(tables
+ .mediatype
+ .delete()
+ .where(tables.
+ mediatype.c.name == op.inline_literal('text/markdown')))
diff --git a/data/migrations/versions/45fd8b9869d4_add_notification_type.py b/data/migrations/versions/45fd8b9869d4_add_notification_type.py
new file mode 100644
index 000000000..66f5c0870
--- /dev/null
+++ b/data/migrations/versions/45fd8b9869d4_add_notification_type.py
@@ -0,0 +1,30 @@
+"""add_notification_type
+
+Revision ID: 45fd8b9869d4
+Revises: 94836b099894
+Create Date: 2016-12-01 12:02:19.724528
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '45fd8b9869d4'
+down_revision = '94836b099894'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.bulk_insert(tables.notificationkind,
+ [
+ {'name': 'build_cancelled'},
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.execute(tables
+ .notificationkind
+ .delete()
+ .where(tables.
+ notificationkind.c.name == op.inline_literal('build_cancelled')))
diff --git a/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py b/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py
new file mode 100644
index 000000000..da8476f8a
--- /dev/null
+++ b/data/migrations/versions/481623ba00ba_add_index_on_logs_archived_on_.py
@@ -0,0 +1,27 @@
+"""Add index on logs_archived on repositorybuild
+
+Revision ID: 481623ba00ba
+Revises: b9045731c4de
+Create Date: 2019-02-15 16:09:47.326805
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '481623ba00ba'
+down_revision = 'b9045731c4de'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('repositorybuild_logs_archived', 'repositorybuild', ['logs_archived'], unique=False)
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('repositorybuild_logs_archived', table_name='repositorybuild')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/5248ddf35167_repository_mirror.py b/data/migrations/versions/5248ddf35167_repository_mirror.py
new file mode 100644
index 000000000..8bb806105
--- /dev/null
+++ b/data/migrations/versions/5248ddf35167_repository_mirror.py
@@ -0,0 +1,144 @@
+"""Repository Mirror
+
+Revision ID: 5248ddf35167
+Revises: b918abdbee43
+Create Date: 2019-06-25 16:22:36.310532
+
+"""
+
+revision = '5248ddf35167'
+down_revision = 'b918abdbee43'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.create_table('repomirrorrule',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('creation_date', sa.DateTime(), nullable=False),
+ sa.Column('rule_type', sa.Integer(), nullable=False),
+ sa.Column('rule_value', sa.Text(), nullable=False),
+ sa.Column('left_child_id', sa.Integer(), nullable=True),
+ sa.Column('right_child_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['left_child_id'], ['repomirrorrule.id'], name=op.f('fk_repomirrorrule_left_child_id_repomirrorrule')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repomirrorrule_repository_id_repository')),
+ sa.ForeignKeyConstraint(['right_child_id'], ['repomirrorrule.id'], name=op.f('fk_repomirrorrule_right_child_id_repomirrorrule')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repomirrorrule')))
+ op.create_index('repomirrorrule_left_child_id', 'repomirrorrule', ['left_child_id'], unique=False)
+ op.create_index('repomirrorrule_repository_id', 'repomirrorrule', ['repository_id'], unique=False)
+ op.create_index('repomirrorrule_right_child_id', 'repomirrorrule', ['right_child_id'], unique=False)
+ op.create_index('repomirrorrule_rule_type', 'repomirrorrule', ['rule_type'], unique=False)
+ op.create_index('repomirrorrule_uuid', 'repomirrorrule', ['uuid'], unique=True)
+
+ op.create_table('repomirrorconfig',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('creation_date', sa.DateTime(), nullable=False),
+ sa.Column('is_enabled', sa.Boolean(), nullable=False),
+ sa.Column('mirror_type', sa.Integer(), nullable=False),
+ sa.Column('internal_robot_id', sa.Integer(), nullable=False),
+ sa.Column('external_registry', sa.String(length=255), nullable=False),
+ sa.Column('external_namespace', sa.String(length=255), nullable=False),
+ sa.Column('external_repository', sa.String(length=255), nullable=False),
+ sa.Column('external_registry_username', sa.String(length=2048), nullable=True),
+ sa.Column('external_registry_password', sa.String(length=2048), nullable=True),
+ sa.Column('external_registry_config', sa.Text(), nullable=False),
+ sa.Column('sync_interval', sa.Integer(), nullable=False, server_default='60'),
+ sa.Column('sync_start_date', sa.DateTime(), nullable=True),
+ sa.Column('sync_expiration_date', sa.DateTime(), nullable=True),
+ sa.Column('sync_retries_remaining', sa.Integer(), nullable=False, server_default='3'),
+ sa.Column('sync_status', sa.Integer(), nullable=False),
+ sa.Column('sync_transaction_id', sa.String(length=36), nullable=True),
+ sa.Column('root_rule_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repomirrorconfig_repository_id_repository')),
+ sa.ForeignKeyConstraint(['root_rule_id'], ['repomirrorrule.id'], name=op.f('fk_repomirrorconfig_root_rule_id_repomirrorrule')),
+ sa.ForeignKeyConstraint(['internal_robot_id'], ['user.id'], name=op.f('fk_repomirrorconfig_internal_robot_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repomirrorconfig'))
+ )
+ op.create_index('repomirrorconfig_mirror_type', 'repomirrorconfig', ['mirror_type'], unique=False)
+ op.create_index('repomirrorconfig_repository_id', 'repomirrorconfig', ['repository_id'], unique=True)
+ op.create_index('repomirrorconfig_root_rule_id', 'repomirrorconfig', ['root_rule_id'], unique=False)
+ op.create_index('repomirrorconfig_sync_status', 'repomirrorconfig', ['sync_status'], unique=False)
+ op.create_index('repomirrorconfig_sync_transaction_id', 'repomirrorconfig', ['sync_transaction_id'], unique=False)
+ op.create_index('repomirrorconfig_internal_robot_id', 'repomirrorconfig', ['internal_robot_id'], unique=False)
+
+ op.add_column(u'repository', sa.Column('state', sa.Integer(), nullable=False, server_default='0'))
+ op.create_index('repository_state', 'repository', ['state'], unique=False)
+
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'name': 'repo_mirror_enabled'},
+ {'name': 'repo_mirror_disabled'},
+ {'name': 'repo_mirror_config_changed'},
+ {'name': 'repo_mirror_sync_started'},
+ {'name': 'repo_mirror_sync_failed'},
+ {'name': 'repo_mirror_sync_success'},
+ {'name': 'repo_mirror_sync_now_requested'},
+ {'name': 'repo_mirror_sync_tag_success'},
+ {'name': 'repo_mirror_sync_tag_failed'},
+ {'name': 'repo_mirror_sync_test_success'},
+ {'name': 'repo_mirror_sync_test_failed'},
+ {'name': 'repo_mirror_sync_test_started'},
+ {'name': 'change_repo_state'}
+ ])
+
+
+ tester.populate_table('repomirrorrule', [
+ ('uuid', tester.TestDataType.String),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('creation_date', tester.TestDataType.DateTime),
+ ('rule_type', tester.TestDataType.Integer),
+ ('rule_value', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('repomirrorconfig', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('creation_date', tester.TestDataType.DateTime),
+ ('is_enabled', tester.TestDataType.Boolean),
+ ('mirror_type', tester.TestDataType.Constant(1)),
+ ('internal_robot_id', tester.TestDataType.Foreign('user')),
+ ('external_registry', tester.TestDataType.String),
+ ('external_namespace', tester.TestDataType.String),
+ ('external_repository', tester.TestDataType.String),
+ ('external_registry_username', tester.TestDataType.String),
+ ('external_registry_password', tester.TestDataType.String),
+ ('external_registry_config', tester.TestDataType.JSON),
+ ('sync_start_date', tester.TestDataType.DateTime),
+ ('sync_expiration_date', tester.TestDataType.DateTime),
+ ('sync_retries_remaining', tester.TestDataType.Integer),
+ ('sync_status', tester.TestDataType.Constant(0)),
+ ('sync_transaction_id', tester.TestDataType.String),
+ ('root_rule_id', tester.TestDataType.Foreign('repomirrorrule')),
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.drop_column(u'repository', 'state')
+
+ op.drop_table('repomirrorconfig')
+
+ op.drop_table('repomirrorrule')
+
+ for logentrykind in [
+ 'repo_mirror_enabled',
+ 'repo_mirror_disabled',
+ 'repo_mirror_config_changed',
+ 'repo_mirror_sync_started',
+ 'repo_mirror_sync_failed',
+ 'repo_mirror_sync_success',
+ 'repo_mirror_sync_now_requested',
+ 'repo_mirror_sync_tag_success',
+ 'repo_mirror_sync_tag_failed',
+ 'repo_mirror_sync_test_success',
+ 'repo_mirror_sync_test_failed',
+ 'repo_mirror_sync_test_started',
+ 'change_repo_state'
+ ]:
+ op.execute(tables.logentrykind.delete()
+ .where(tables.logentrykind.c.name == op.inline_literal(logentrykind)))
diff --git a/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py b/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py
new file mode 100644
index 000000000..e0b61814b
--- /dev/null
+++ b/data/migrations/versions/53e2ac668296_remove_reference_to_subdir.py
@@ -0,0 +1,63 @@
+"""Remove reference to subdir
+
+Revision ID: 53e2ac668296
+Revises: ed01e313d3cb
+Create Date: 2017-03-28 15:01:31.073382
+
+"""
+
+# revision identifiers, used by Alembic.
+import json
+
+import logging
+from alembic.script.revision import RevisionError
+from alembic.util import CommandError
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+revision = '53e2ac668296'
+down_revision = 'ed01e313d3cb'
+
+log = logging.getLogger(__name__)
+
+
+def run_migration(migrate_function, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ conn = op.get_bind()
+ triggers = conn.execute("SELECT id, config FROM repositorybuildtrigger")
+ for trigger in triggers:
+ config = json.dumps(migrate_function(json.loads(trigger[1])))
+ try:
+ conn.execute("UPDATE repositorybuildtrigger SET config=%s WHERE id=%s", config, trigger[0])
+ except (RevisionError, CommandError) as e:
+ log.warning("Failed to update build trigger %s with exception: %s", trigger[0], e)
+
+
+def upgrade(tables, tester, progress_reporter):
+ run_migration(delete_subdir, progress_reporter)
+
+
+def downgrade(tables, tester, progress_reporter):
+ run_migration(add_subdir, progress_reporter)
+
+
+def delete_subdir(config):
+ """ Remove subdir from config """
+ if not config:
+ return config
+ if 'subdir' in config:
+ del config['subdir']
+
+ return config
+
+
+def add_subdir(config):
+ """ Add subdir back into config """
+ if not config:
+ return config
+ if 'context' in config:
+ config['subdir'] = config['context']
+
+ return config
diff --git a/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py b/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py
new file mode 100644
index 000000000..efe900ad7
--- /dev/null
+++ b/data/migrations/versions/54492a68a3cf_add_namespacegeorestriction_table.py
@@ -0,0 +1,49 @@
+"""Add NamespaceGeoRestriction table
+
+Revision ID: 54492a68a3cf
+Revises: c00a1f15968b
+Create Date: 2018-12-05 15:12:14.201116
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '54492a68a3cf'
+down_revision = 'c00a1f15968b'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('namespacegeorestriction',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('namespace_id', sa.Integer(), nullable=False),
+ sa.Column('added', sa.DateTime(), nullable=False),
+ sa.Column('description', sa.String(length=255), nullable=False),
+ sa.Column('unstructured_json', sa.Text(), nullable=False),
+ sa.Column('restricted_region_iso_code', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['namespace_id'], ['user.id'], name=op.f('fk_namespacegeorestriction_namespace_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_namespacegeorestriction'))
+ )
+ op.create_index('namespacegeorestriction_namespace_id', 'namespacegeorestriction', ['namespace_id'], unique=False)
+ op.create_index('namespacegeorestriction_namespace_id_restricted_region_iso_code', 'namespacegeorestriction', ['namespace_id', 'restricted_region_iso_code'], unique=True)
+ op.create_index('namespacegeorestriction_restricted_region_iso_code', 'namespacegeorestriction', ['restricted_region_iso_code'], unique=False)
+ # ### end Alembic commands ###
+
+ tester.populate_table('namespacegeorestriction', [
+ ('namespace_id', tester.TestDataType.Foreign('user')),
+ ('added', tester.TestDataType.DateTime),
+ ('description', tester.TestDataType.String),
+ ('unstructured_json', tester.TestDataType.JSON),
+ ('restricted_region_iso_code', tester.TestDataType.String),
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('namespacegeorestriction')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py b/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py
new file mode 100644
index 000000000..89b469d6b
--- /dev/null
+++ b/data/migrations/versions/5b7503aada1b_cleanup_old_robots.py
@@ -0,0 +1,26 @@
+"""Cleanup old robots
+
+Revision ID: 5b7503aada1b
+Revises: 224ce4c72c2f
+Create Date: 2018-05-09 17:18:52.230504
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5b7503aada1b'
+down_revision = '224ce4c72c2f'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+from util.migrate.cleanup_old_robots import cleanup_old_robots
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
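+ # Data-only migration: delegate to the shared cleanup helper. There is no schema
+ # change to revert, so the downgrade below is a no-op.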
+ cleanup_old_robots()
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # Nothing to do.
+ pass
diff --git a/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py b/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py
new file mode 100644
index 000000000..46a2c3cec
--- /dev/null
+++ b/data/migrations/versions/5cbbfc95bac7_remove_oci_tables_not_used_by_cnr_the_.py
@@ -0,0 +1,170 @@
+"""Remove 'oci' tables not used by CNR. The rest will be migrated and renamed.
+
+Revision ID: 5cbbfc95bac7
+Revises: 1783530bee68
+Create Date: 2018-05-23 17:28:40.114433
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5cbbfc95bac7'
+down_revision = '1783530bee68'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+from util.migrate import UTF8LongText, UTF8CharField
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
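+ # Drop the OCI-era tables that CNR never used; downgrade() recreates their schemas
+ # below, but any data they held is not restored.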
+ op.drop_table('derivedimage')
+ op.drop_table('manifestlabel')
+ op.drop_table('blobplacementlocationpreference')
+ op.drop_table('blobuploading')
+ op.drop_table('bittorrentpieces')
+ op.drop_table('manifestlayerdockerv1')
+ op.drop_table('manifestlayerscan')
+ op.drop_table('manifestlayer')
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ 'manifestlayer',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_index', sa.BigInteger(), nullable=False),
+ sa.Column('metadata_json', UTF8LongText, nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestlayer_blob_id_blob')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlayer_manifest_id_manifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayer'))
+ )
+ op.create_index('manifestlayer_manifest_index', 'manifestlayer', ['manifest_index'], unique=False)
+ op.create_index('manifestlayer_manifest_id_manifest_index', 'manifestlayer', ['manifest_id', 'manifest_index'], unique=True)
+ op.create_index('manifestlayer_manifest_id', 'manifestlayer', ['manifest_id'], unique=False)
+ op.create_index('manifestlayer_blob_id', 'manifestlayer', ['blob_id'], unique=False)
+
+ op.create_table(
+ 'manifestlayerscan',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('layer_id', sa.Integer(), nullable=False),
+ sa.Column('scannable', sa.Boolean(), nullable=False),
+ sa.Column('scanned_by', UTF8CharField(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerscan_layer_id_manifestlayer')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerscan'))
+ )
+
+ op.create_index('manifestlayerscan_layer_id', 'manifestlayerscan', ['layer_id'], unique=True)
+
+ op.create_table(
+ 'bittorrentpieces',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('pieces', UTF8LongText, nullable=False),
+ sa.Column('piece_length', sa.BigInteger(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_bittorrentpieces_blob_id_blob')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_bittorrentpieces'))
+ )
+
+ op.create_index('bittorrentpieces_blob_id_piece_length', 'bittorrentpieces', ['blob_id', 'piece_length'], unique=True)
+ op.create_index('bittorrentpieces_blob_id', 'bittorrentpieces', ['blob_id'], unique=False)
+
+ op.create_table(
+ 'blobuploading',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.Column('byte_count', sa.BigInteger(), nullable=False),
+ sa.Column('uncompressed_byte_count', sa.BigInteger(), nullable=True),
+ sa.Column('chunk_count', sa.BigInteger(), nullable=False),
+ sa.Column('storage_metadata', UTF8LongText, nullable=True),
+ sa.Column('sha_state', UTF8LongText, nullable=True),
+ sa.Column('piece_sha_state', UTF8LongText, nullable=True),
+ sa.Column('piece_hashes', UTF8LongText, nullable=True),
+ sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobuploading_location_id_blobplacementlocation')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobuploading_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobuploading'))
+ )
+
+ op.create_index('blobuploading_uuid', 'blobuploading', ['uuid'], unique=True)
+ op.create_index('blobuploading_repository_id_uuid', 'blobuploading', ['repository_id', 'uuid'], unique=True)
+ op.create_index('blobuploading_repository_id', 'blobuploading', ['repository_id'], unique=False)
+ op.create_index('blobuploading_location_id', 'blobuploading', ['location_id'], unique=False)
+ op.create_index('blobuploading_created', 'blobuploading', ['created'], unique=False)
+
+ op.create_table(
+ 'manifestlayerdockerv1',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_layer_id', sa.Integer(), nullable=False),
+ sa.Column('image_id', UTF8CharField(length=255), nullable=False),
+ sa.Column('checksum', UTF8CharField(length=255), nullable=False),
+ sa.Column('compat_json', UTF8LongText, nullable=False),
+ sa.ForeignKeyConstraint(['manifest_layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerdockerv1_manifest_layer_id_manifestlayer')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerdockerv1'))
+ )
+
+ op.create_index('manifestlayerdockerv1_manifest_layer_id', 'manifestlayerdockerv1', ['manifest_layer_id'], unique=False)
+ op.create_index('manifestlayerdockerv1_image_id', 'manifestlayerdockerv1', ['image_id'], unique=False)
+
+ op.create_table(
+ 'manifestlabel',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('annotated_id', sa.Integer(), nullable=False),
+ sa.Column('label_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['annotated_id'], ['manifest.id'], name=op.f('fk_manifestlabel_annotated_id_manifest')),
+ sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_manifestlabel_label_id_label')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlabel_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlabel'))
+ )
+
+ op.create_index('manifestlabel_repository_id_annotated_id_label_id', 'manifestlabel', ['repository_id', 'annotated_id', 'label_id'], unique=True)
+ op.create_index('manifestlabel_repository_id', 'manifestlabel', ['repository_id'], unique=False)
+ op.create_index('manifestlabel_label_id', 'manifestlabel', ['label_id'], unique=False)
+ op.create_index('manifestlabel_annotated_id', 'manifestlabel', ['annotated_id'], unique=False)
+
+ op.create_table(
+ 'blobplacementlocationpreference',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacementlocpref_locid_blobplacementlocation')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_blobplacementlocationpreference_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocationpreference'))
+ )
+ op.create_index('blobplacementlocationpreference_user_id', 'blobplacementlocationpreference', ['user_id'], unique=False)
+ op.create_index('blobplacementlocationpreference_location_id', 'blobplacementlocationpreference', ['location_id'], unique=False)
+
+
+ op.create_table(
+ 'derivedimage',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('source_manifest_id', sa.Integer(), nullable=False),
+ sa.Column('derived_manifest_json', UTF8LongText, nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('uniqueness_hash', sa.String(length=255), nullable=False),
+ sa.Column('signature_blob_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_derivedimage_blob_id_blob')),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_derivedimage_media_type_id_mediatype')),
+ sa.ForeignKeyConstraint(['signature_blob_id'], ['blob.id'], name=op.f('fk_derivedimage_signature_blob_id_blob')),
+ sa.ForeignKeyConstraint(['source_manifest_id'], ['manifest.id'], name=op.f('fk_derivedimage_source_manifest_id_manifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedimage'))
+ )
+ op.create_index('derivedimage_uuid', 'derivedimage', ['uuid'], unique=True)
+ op.create_index('derivedimage_uniqueness_hash', 'derivedimage', ['uniqueness_hash'], unique=True)
+ op.create_index('derivedimage_source_manifest_id_media_type_id_uniqueness_hash', 'derivedimage', ['source_manifest_id', 'media_type_id', 'uniqueness_hash'], unique=True)
+ op.create_index('derivedimage_source_manifest_id_blob_id', 'derivedimage', ['source_manifest_id', 'blob_id'], unique=True)
+ op.create_index('derivedimage_source_manifest_id', 'derivedimage', ['source_manifest_id'], unique=False)
+ op.create_index('derivedimage_signature_blob_id', 'derivedimage', ['signature_blob_id'], unique=False)
+ op.create_index('derivedimage_media_type_id', 'derivedimage', ['media_type_id'], unique=False)
+ op.create_index('derivedimage_blob_id', 'derivedimage', ['blob_id'], unique=False)
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py b/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py
new file mode 100644
index 000000000..a0df295dc
--- /dev/null
+++ b/data/migrations/versions/5d463ea1e8a8_backfill_new_appr_tables.py
@@ -0,0 +1,32 @@
+"""Backfill new appr tables
+
+Revision ID: 5d463ea1e8a8
+Revises: 610320e9dacf
+Create Date: 2018-07-08 10:01:19.756126
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5d463ea1e8a8'
+down_revision = '610320e9dacf'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from util.migrate.table_ops import copy_table_contents
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ conn = op.get_bind()
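+ # Copy existing rows into the appr-prefixed tables introduced by revision 610320e9dacf.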
+
+ copy_table_contents('blob', 'apprblob', conn)
+ copy_table_contents('manifest', 'apprmanifest', conn)
+ copy_table_contents('manifestlist', 'apprmanifestlist', conn)
+ copy_table_contents('blobplacement', 'apprblobplacement', conn)
+ copy_table_contents('manifestblob', 'apprmanifestblob', conn)
+ copy_table_contents('manifestlistmanifest', 'apprmanifestlistmanifest', conn)
+ copy_table_contents('tag', 'apprtag', conn)
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ pass
diff --git a/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py b/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py
new file mode 100644
index 000000000..99c365260
--- /dev/null
+++ b/data/migrations/versions/610320e9dacf_add_new_appr_specific_tables.py
@@ -0,0 +1,206 @@
+"""Add new Appr-specific tables
+
+Revision ID: 610320e9dacf
+Revises: 5cbbfc95bac7
+Create Date: 2018-05-24 16:46:13.514562
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '610320e9dacf'
+down_revision = '5cbbfc95bac7'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+from util.migrate.table_ops import copy_table_contents
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('apprblobplacementlocation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprblobplacementlocation'))
+ )
+ op.create_index('apprblobplacementlocation_name', 'apprblobplacementlocation', ['name'], unique=True)
+ op.create_table('apprtagkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprtagkind'))
+ )
+ op.create_index('apprtagkind_name', 'apprtagkind', ['name'], unique=True)
+ op.create_table('apprblob',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('size', sa.BigInteger(), nullable=False),
+ sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprblob_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprblob'))
+ )
+ op.create_index('apprblob_digest', 'apprblob', ['digest'], unique=True)
+ op.create_index('apprblob_media_type_id', 'apprblob', ['media_type_id'], unique=False)
+ op.create_table('apprmanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_json', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprmanifest_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifest'))
+ )
+ op.create_index('apprmanifest_digest', 'apprmanifest', ['digest'], unique=True)
+ op.create_index('apprmanifest_media_type_id', 'apprmanifest', ['media_type_id'], unique=False)
+ op.create_table('apprmanifestlist',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('manifest_list_json', sa.Text(), nullable=False),
+ sa.Column('schema_version', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprmanifestlist_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifestlist'))
+ )
+ op.create_index('apprmanifestlist_digest', 'apprmanifestlist', ['digest'], unique=True)
+ op.create_index('apprmanifestlist_media_type_id', 'apprmanifestlist', ['media_type_id'], unique=False)
+ op.create_table('apprblobplacement',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['apprblob.id'], name=op.f('fk_apprblobplacement_blob_id_apprblob')),
+ sa.ForeignKeyConstraint(['location_id'], ['apprblobplacementlocation.id'], name=op.f('fk_apprblobplacement_location_id_apprblobplacementlocation')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprblobplacement'))
+ )
+ op.create_index('apprblobplacement_blob_id', 'apprblobplacement', ['blob_id'], unique=False)
+ op.create_index('apprblobplacement_blob_id_location_id', 'apprblobplacement', ['blob_id', 'location_id'], unique=True)
+ op.create_index('apprblobplacement_location_id', 'apprblobplacement', ['location_id'], unique=False)
+ op.create_table('apprmanifestblob',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['apprblob.id'], name=op.f('fk_apprmanifestblob_blob_id_apprblob')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['apprmanifest.id'], name=op.f('fk_apprmanifestblob_manifest_id_apprmanifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifestblob'))
+ )
+ op.create_index('apprmanifestblob_blob_id', 'apprmanifestblob', ['blob_id'], unique=False)
+ op.create_index('apprmanifestblob_manifest_id', 'apprmanifestblob', ['manifest_id'], unique=False)
+ op.create_index('apprmanifestblob_manifest_id_blob_id', 'apprmanifestblob', ['manifest_id', 'blob_id'], unique=True)
+ op.create_table('apprmanifestlistmanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_list_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('operating_system', sa.String(length=255), nullable=True),
+ sa.Column('architecture', sa.String(length=255), nullable=True),
+ sa.Column('platform_json', sa.Text(), nullable=True),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['manifest_id'], ['apprmanifest.id'], name=op.f('fk_apprmanifestlistmanifest_manifest_id_apprmanifest')),
+ sa.ForeignKeyConstraint(['manifest_list_id'], ['apprmanifestlist.id'], name=op.f('fk_apprmanifestlistmanifest_manifest_list_id_apprmanifestlist')),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_apprmanifestlistmanifest_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprmanifestlistmanifest'))
+ )
+ op.create_index('apprmanifestlistmanifest_manifest_id', 'apprmanifestlistmanifest', ['manifest_id'], unique=False)
+ op.create_index('apprmanifestlistmanifest_manifest_list_id', 'apprmanifestlistmanifest', ['manifest_list_id'], unique=False)
+ op.create_index('apprmanifestlistmanifest_manifest_list_id_media_type_id', 'apprmanifestlistmanifest', ['manifest_list_id', 'media_type_id'], unique=False)
+ op.create_index('apprmanifestlistmanifest_manifest_list_id_operating_system_arch', 'apprmanifestlistmanifest', ['manifest_list_id', 'operating_system', 'architecture', 'media_type_id'], unique=False)
+ op.create_index('apprmanifestlistmanifest_media_type_id', 'apprmanifestlistmanifest', ['media_type_id'], unique=False)
+ op.create_table('apprtag',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_list_id', sa.Integer(), nullable=True),
+ sa.Column('lifetime_start', sa.BigInteger(), nullable=False),
+ sa.Column('lifetime_end', sa.BigInteger(), nullable=True),
+ sa.Column('hidden', sa.Boolean(), nullable=False),
+ sa.Column('reverted', sa.Boolean(), nullable=False),
+ sa.Column('protected', sa.Boolean(), nullable=False),
+ sa.Column('tag_kind_id', sa.Integer(), nullable=False),
+ sa.Column('linked_tag_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['linked_tag_id'], ['apprtag.id'], name=op.f('fk_apprtag_linked_tag_id_apprtag')),
+ sa.ForeignKeyConstraint(['manifest_list_id'], ['apprmanifestlist.id'], name=op.f('fk_apprtag_manifest_list_id_apprmanifestlist')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_apprtag_repository_id_repository')),
+ sa.ForeignKeyConstraint(['tag_kind_id'], ['apprtagkind.id'], name=op.f('fk_apprtag_tag_kind_id_apprtagkind')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_apprtag'))
+ )
+ op.create_index('apprtag_lifetime_end', 'apprtag', ['lifetime_end'], unique=False)
+ op.create_index('apprtag_linked_tag_id', 'apprtag', ['linked_tag_id'], unique=False)
+ op.create_index('apprtag_manifest_list_id', 'apprtag', ['manifest_list_id'], unique=False)
+ op.create_index('apprtag_repository_id', 'apprtag', ['repository_id'], unique=False)
+ op.create_index('apprtag_repository_id_name', 'apprtag', ['repository_id', 'name'], unique=False)
+ op.create_index('apprtag_repository_id_name_hidden', 'apprtag', ['repository_id', 'name', 'hidden'], unique=False)
+ op.create_index('apprtag_repository_id_name_lifetime_end', 'apprtag', ['repository_id', 'name', 'lifetime_end'], unique=True)
+ op.create_index('apprtag_tag_kind_id', 'apprtag', ['tag_kind_id'], unique=False)
+ # ### end Alembic commands ###
+
+ conn = op.get_bind()
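+ # Seed the new lookup tables from their existing counterparts so rows copied by the
+ # follow-up backfill keep valid kind/location references.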
+ copy_table_contents('blobplacementlocation', 'apprblobplacementlocation', conn)
+ copy_table_contents('tagkind', 'apprtagkind', conn)
+
+ # ### population of test data ### #
+
+ tester.populate_table('apprmanifest', [
+ ('digest', tester.TestDataType.String),
+ ('media_type_id', tester.TestDataType.Foreign('mediatype')),
+ ('manifest_json', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('apprmanifestlist', [
+ ('digest', tester.TestDataType.String),
+ ('manifest_list_json', tester.TestDataType.JSON),
+ ('schema_version', tester.TestDataType.String),
+ ('media_type_id', tester.TestDataType.Foreign('mediatype')),
+ ])
+
+ tester.populate_table('apprmanifestlistmanifest', [
+ ('manifest_list_id', tester.TestDataType.Foreign('apprmanifestlist')),
+ ('manifest_id', tester.TestDataType.Foreign('apprmanifest')),
+ ('operating_system', tester.TestDataType.String),
+ ('architecture', tester.TestDataType.String),
+ ('platform_json', tester.TestDataType.JSON),
+ ('media_type_id', tester.TestDataType.Foreign('mediatype')),
+ ])
+
+ tester.populate_table('apprblob', [
+ ('digest', tester.TestDataType.String),
+ ('media_type_id', tester.TestDataType.Foreign('mediatype')),
+ ('size', tester.TestDataType.BigInteger),
+ ('uncompressed_size', tester.TestDataType.BigInteger),
+ ])
+
+ tester.populate_table('apprmanifestblob', [
+ ('manifest_id', tester.TestDataType.Foreign('apprmanifest')),
+ ('blob_id', tester.TestDataType.Foreign('apprblob')),
+ ])
+
+ tester.populate_table('apprtag', [
+ ('name', tester.TestDataType.String),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('manifest_list_id', tester.TestDataType.Foreign('apprmanifestlist')),
+ ('lifetime_start', tester.TestDataType.Integer),
+ ('hidden', tester.TestDataType.Boolean),
+ ('reverted', tester.TestDataType.Boolean),
+ ('protected', tester.TestDataType.Boolean),
+ ('tag_kind_id', tester.TestDataType.Foreign('apprtagkind')),
+ ])
+
+ tester.populate_table('apprblobplacement', [
+ ('blob_id', tester.TestDataType.Foreign('apprblob')),
+ ('location_id', tester.TestDataType.Foreign('apprblobplacementlocation')),
+ ])
+
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('apprtag')
+ op.drop_table('apprmanifestlistmanifest')
+ op.drop_table('apprmanifestblob')
+ op.drop_table('apprblobplacement')
+ op.drop_table('apprmanifestlist')
+ op.drop_table('apprmanifest')
+ op.drop_table('apprblob')
+ op.drop_table('apprtagkind')
+ op.drop_table('apprblobplacementlocation')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py b/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py
new file mode 100644
index 000000000..1dbb1e7a4
--- /dev/null
+++ b/data/migrations/versions/61cadbacb9fc_add_ability_for_build_triggers_to_be_.py
@@ -0,0 +1,64 @@
+"""Add ability for build triggers to be disabled
+
+Revision ID: 61cadbacb9fc
+Revises: b4c2d45bc132
+Create Date: 2017-10-18 12:07:26.190901
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '61cadbacb9fc'
+down_revision = 'b4c2d45bc132'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('disablereason',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_disablereason'))
+ )
+ op.create_index('disablereason_name', 'disablereason', ['name'], unique=True)
+
+ op.bulk_insert(
+ tables.disablereason,
+ [
+ {'id': 1, 'name': 'user_toggled'},
+ ],
+ )
+
+ op.bulk_insert(tables.logentrykind, [
+ {'name': 'toggle_repo_trigger'},
+ ])
+
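+ # 'enabled' defaults to true server-side, so existing triggers remain active without
+ # a backfill; 'disabled_reason_id' stays NULL until a trigger is disabled.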
+ op.add_column(u'repositorybuildtrigger', sa.Column('disabled_reason_id', sa.Integer(), nullable=True))
+ op.add_column(u'repositorybuildtrigger', sa.Column('enabled', sa.Boolean(), nullable=False, server_default=sa.sql.expression.true()))
+ op.create_index('repositorybuildtrigger_disabled_reason_id', 'repositorybuildtrigger', ['disabled_reason_id'], unique=False)
+ op.create_foreign_key(op.f('fk_repositorybuildtrigger_disabled_reason_id_disablereason'), 'repositorybuildtrigger', 'disablereason', ['disabled_reason_id'], ['id'])
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_column('repositorybuildtrigger', 'disabled_reason_id', tester.TestDataType.Foreign('disablereason'))
+ tester.populate_column('repositorybuildtrigger', 'enabled', tester.TestDataType.Boolean)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(op.f('fk_repositorybuildtrigger_disabled_reason_id_disablereason'), 'repositorybuildtrigger', type_='foreignkey')
+ op.drop_index('repositorybuildtrigger_disabled_reason_id', table_name='repositorybuildtrigger')
+ op.drop_column(u'repositorybuildtrigger', 'enabled')
+ op.drop_column(u'repositorybuildtrigger', 'disabled_reason_id')
+ op.drop_table('disablereason')
+ # ### end Alembic commands ###
+
+ op.execute(tables
+ .logentrykind
+ .delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('toggle_repo_trigger')))
diff --git a/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py b/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py
new file mode 100644
index 000000000..b7d17207f
--- /dev/null
+++ b/data/migrations/versions/654e6df88b71_change_manifest_bytes_to_a_utf8_text_.py
@@ -0,0 +1,26 @@
+"""Change manifest_bytes to a UTF8 text field
+
+Revision ID: 654e6df88b71
+Revises: eafdeadcebc7
+Create Date: 2018-08-15 09:58:46.109277
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '654e6df88b71'
+down_revision = 'eafdeadcebc7'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+from util.migrate import UTF8LongText
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.alter_column('manifest', 'manifest_bytes', existing_type=sa.Text(), type_=UTF8LongText())
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.alter_column('manifest', 'manifest_bytes', existing_type=UTF8LongText(), type_=sa.Text())
diff --git a/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py b/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py
new file mode 100644
index 000000000..aae5325b9
--- /dev/null
+++ b/data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py
@@ -0,0 +1,47 @@
+"""Add TagToRepositoryTag table
+
+Revision ID: 67f0abd172ae
+Revises: 10f45ee2310b
+Create Date: 2018-10-30 11:31:06.615488
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '67f0abd172ae'
+down_revision = '10f45ee2310b'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
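+ # Bridge table mapping new-style 'tag' rows to legacy 'repositorytag' rows; the unique
+ # indexes on tag_id and repository_tag_id keep the mapping one-to-one.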
+ op.create_table('tagtorepositorytag',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('tag_id', sa.Integer(), nullable=False),
+ sa.Column('repository_tag_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tagtorepositorytag_repository_id_repository')),
+ sa.ForeignKeyConstraint(['repository_tag_id'], ['repositorytag.id'], name=op.f('fk_tagtorepositorytag_repository_tag_id_repositorytag')),
+ sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name=op.f('fk_tagtorepositorytag_tag_id_tag')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagtorepositorytag'))
+ )
+ op.create_index('tagtorepositorytag_repository_id', 'tagtorepositorytag', ['repository_id'], unique=False)
+ op.create_index('tagtorepositorytag_repository_tag_id', 'tagtorepositorytag', ['repository_tag_id'], unique=True)
+ op.create_index('tagtorepositorytag_tag_id', 'tagtorepositorytag', ['tag_id'], unique=True)
+ # ### end Alembic commands ###
+
+ tester.populate_table('tagtorepositorytag', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('tag_id', tester.TestDataType.Foreign('tag')),
+ ('repository_tag_id', tester.TestDataType.Foreign('repositorytag')),
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('tagtorepositorytag')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py b/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py
new file mode 100644
index 000000000..789ba4fa4
--- /dev/null
+++ b/data/migrations/versions/6c21e2cfb8b6_change_logentry_to_use_a_biginteger_as_.py
@@ -0,0 +1,36 @@
+"""Change LogEntry to use a BigInteger as its primary key
+
+Revision ID: 6c21e2cfb8b6
+Revises: d17c695859ea
+Create Date: 2018-07-27 16:30:02.877346
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '6c21e2cfb8b6'
+down_revision = 'd17c695859ea'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
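+ # Widen the primary key to 64 bits so high-volume deployments cannot exhaust the
+ # 32-bit id space; the downgrade narrows it back to a plain Integer.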
+ op.alter_column(
+ table_name='logentry',
+ column_name='id',
+ nullable=False,
+ autoincrement=True,
+ type_=sa.BigInteger(),
+ )
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.alter_column(
+ table_name='logentry',
+ column_name='id',
+ nullable=False,
+ autoincrement=True,
+ type_=sa.Integer(),
+ )
diff --git a/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py b/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py
new file mode 100644
index 000000000..99ee1e77c
--- /dev/null
+++ b/data/migrations/versions/6c7014e84a5e_add_user_prompt_support.py
@@ -0,0 +1,56 @@
+"""Add user prompt support
+
+Revision ID: 6c7014e84a5e
+Revises: c156deb8845d
+Create Date: 2016-10-31 16:26:31.447705
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '6c7014e84a5e'
+down_revision = 'c156deb8845d'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('userpromptkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_userpromptkind'))
+ )
+ op.create_index('userpromptkind_name', 'userpromptkind', ['name'], unique=False)
+ op.create_table('userprompt',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['kind_id'], ['userpromptkind.id'], name=op.f('fk_userprompt_kind_id_userpromptkind')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_userprompt_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_userprompt'))
+ )
+ op.create_index('userprompt_kind_id', 'userprompt', ['kind_id'], unique=False)
+ op.create_index('userprompt_user_id', 'userprompt', ['user_id'], unique=False)
+ op.create_index('userprompt_user_id_kind_id', 'userprompt', ['user_id', 'kind_id'], unique=True)
+ ### end Alembic commands ###
+
+ op.bulk_insert(tables.userpromptkind,
+ [
+ {'name':'confirm_username'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_table('userprompt', [
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('kind_id', tester.TestDataType.Foreign('userpromptkind')),
+ ])
+ # ### end population of test data ### #
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('userprompt')
+ op.drop_table('userpromptkind')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py b/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py
new file mode 100644
index 000000000..47ecf1cb1
--- /dev/null
+++ b/data/migrations/versions/6ec8726c0ace_add_logentry3_table.py
@@ -0,0 +1,43 @@
+"""Add LogEntry3 table
+
+Revision ID: 6ec8726c0ace
+Revises: 54492a68a3cf
+Create Date: 2019-01-03 13:41:02.897957
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '6ec8726c0ace'
+down_revision = '54492a68a3cf'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
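+ # New log table with a BigInteger primary key; note that account_id, performer_id
+ # and repository_id carry no foreign key constraints here.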
+ op.create_table('logentry3',
+ sa.Column('id', sa.BigInteger(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('account_id', sa.Integer(), nullable=False),
+ sa.Column('performer_id', sa.Integer(), nullable=True),
+ sa.Column('repository_id', sa.Integer(), nullable=True),
+ sa.Column('datetime', sa.DateTime(), nullable=False),
+ sa.Column('ip', sa.String(length=255), nullable=True),
+ sa.Column('metadata_json', sa.Text(), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_logentry3'))
+ )
+ op.create_index('logentry3_account_id_datetime', 'logentry3', ['account_id', 'datetime'], unique=False)
+ op.create_index('logentry3_datetime', 'logentry3', ['datetime'], unique=False)
+ op.create_index('logentry3_performer_id_datetime', 'logentry3', ['performer_id', 'datetime'], unique=False)
+ op.create_index('logentry3_repository_id_datetime_kind_id', 'logentry3', ['repository_id', 'datetime', 'kind_id'], unique=False)
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('logentry3')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py b/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py
new file mode 100644
index 000000000..4a1416eaa
--- /dev/null
+++ b/data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py
@@ -0,0 +1,289 @@
+"""Backfill new encrypted fields
+
+Revision ID: 703298a825c2
+Revises: c13c8052f7a6
+Create Date: 2019-08-19 16:07:48.109889
+
+"""
+# revision identifiers, used by Alembic.
+revision = '703298a825c2'
+down_revision = 'c13c8052f7a6'
+
+import logging
+import uuid
+
+from datetime import datetime
+
+from peewee import (JOIN, IntegrityError, DateTimeField, CharField, ForeignKeyField,
+ BooleanField, TextField, IntegerField)
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+import sqlalchemy as sa
+
+from data.database import (BaseModel, User, Repository, AccessTokenKind, Role,
+ random_string_generator, QuayUserField, BuildTriggerService,
+ uuid_generator, DisableReason)
+from data.fields import Credential, DecryptedValue, EncryptedCharField, EncryptedTextField, EnumField, CredentialField
+from data.model.token import ACCESS_TOKEN_NAME_PREFIX_LENGTH
+from data.model.appspecifictoken import TOKEN_NAME_PREFIX_LENGTH as AST_TOKEN_NAME_PREFIX_LENGTH
+from data.model.oauth import ACCESS_TOKEN_PREFIX_LENGTH as OAUTH_ACCESS_TOKEN_PREFIX_LENGTH
+from data.model.oauth import AUTHORIZATION_CODE_PREFIX_LENGTH
+
+BATCH_SIZE = 10
+
+logger = logging.getLogger(__name__)
+
+def _iterate(model_class, clause):
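+ # Fetch up to BATCH_SIZE rows still matching `clause` on each pass; callers must
+ # update each yielded row so it stops matching, otherwise this loops forever.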
+ while True:
+ has_rows = False
+ for row in list(model_class.select().where(clause).limit(BATCH_SIZE)):
+ has_rows = True
+ yield row
+
+ if not has_rows:
+ break
+
+
+def _decrypted(value):
+ if value is None:
+ return None
+
+ assert isinstance(value, basestring)
+ return DecryptedValue(value)
+
+
+# NOTE: As is standard for migrations that touch Peewee models, the models are copied here,
+# since the live definitions will change after this revision.
+class AccessToken(BaseModel):
+ code = CharField(default=random_string_generator(length=64), unique=True, index=True)
+ token_name = CharField(default=random_string_generator(length=32), unique=True, index=True)
+ token_code = EncryptedCharField(default_token_length=32)
+
+class RobotAccountToken(BaseModel):
+ robot_account = QuayUserField(index=True, allows_robots=True, unique=True)
+ token = EncryptedCharField(default_token_length=64)
+ fully_migrated = BooleanField(default=False)
+
+class RepositoryBuildTrigger(BaseModel):
+ uuid = CharField(default=uuid_generator, index=True)
+ auth_token = CharField(null=True)
+ private_key = TextField(null=True)
+
+ secure_auth_token = EncryptedCharField(null=True)
+ secure_private_key = EncryptedTextField(null=True)
+ fully_migrated = BooleanField(default=False)
+
+class AppSpecificAuthToken(BaseModel):
+ token_name = CharField(index=True, unique=True, default=random_string_generator(60))
+ token_secret = EncryptedCharField(default_token_length=60)
+ token_code = CharField(default=random_string_generator(length=120), unique=True, index=True)
+
+class OAuthAccessToken(BaseModel):
+ token_name = CharField(index=True, unique=True)
+ token_code = CredentialField()
+ access_token = CharField(index=True)
+
+class OAuthAuthorizationCode(BaseModel):
+ code = CharField(index=True, unique=True, null=True)
+ code_name = CharField(index=True, unique=True)
+ code_credential = CredentialField()
+
+class OAuthApplication(BaseModel):
+ secure_client_secret = EncryptedCharField(default_token_length=40, null=True)
+ fully_migrated = BooleanField(default=False)
+ client_secret = CharField(default=random_string_generator(length=40))
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+
+ # Empty all access token names to fix the bug where we put the wrong name and code
+ # in for some tokens.
+ AccessToken.update(token_name=None).where(AccessToken.token_name >> None).execute()
+
+ # AccessToken.
+ logger.info('Backfilling encrypted credentials for access tokens')
+ for access_token in _iterate(AccessToken, ((AccessToken.token_name >> None) |
+ (AccessToken.token_name == ''))):
+ logger.info('Backfilling encrypted credentials for access token %s', access_token.id)
+ assert access_token.code is not None
+ assert access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
+ assert access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]
+
+ token_name = access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
+ token_code = _decrypted(access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:])
+
+ (AccessToken
+ .update(token_name=token_name, token_code=token_code)
+ .where(AccessToken.id == access_token.id, AccessToken.code == access_token.code)
+ .execute())
+
+ assert AccessToken.select().where(AccessToken.token_name >> None).count() == 0
+
+ # Robots.
+ logger.info('Backfilling encrypted credentials for robots')
+ while True:
+ has_row = False
+ query = (User
+ .select()
+ .join(RobotAccountToken, JOIN.LEFT_OUTER)
+ .where(User.robot == True, RobotAccountToken.id >> None)
+ .limit(BATCH_SIZE))
+
+ for robot_user in query:
+ logger.info('Backfilling encrypted credentials for robot %s', robot_user.id)
+ has_row = True
+ try:
+ RobotAccountToken.create(robot_account=robot_user,
+ token=_decrypted(robot_user.email),
+ fully_migrated=False)
+ except IntegrityError:
+ break
+
+ if not has_row:
+ break
+
+ # RepositoryBuildTrigger
+ logger.info('Backfilling encrypted credentials for repo build triggers')
+ for repo_build_trigger in _iterate(RepositoryBuildTrigger,
+ (RepositoryBuildTrigger.fully_migrated == False)):
+ logger.info('Backfilling encrypted credentials for repo build trigger %s',
+ repo_build_trigger.id)
+
+ (RepositoryBuildTrigger
+ .update(secure_auth_token=_decrypted(repo_build_trigger.auth_token),
+ secure_private_key=_decrypted(repo_build_trigger.private_key),
+ fully_migrated=True)
+ .where(RepositoryBuildTrigger.id == repo_build_trigger.id,
+ RepositoryBuildTrigger.uuid == repo_build_trigger.uuid)
+ .execute())
+
+ assert (RepositoryBuildTrigger
+ .select()
+ .where(RepositoryBuildTrigger.fully_migrated == False)
+ .count()) == 0
+
+ # AppSpecificAuthToken
+ logger.info('Backfilling encrypted credentials for app specific auth tokens')
+ for token in _iterate(AppSpecificAuthToken, ((AppSpecificAuthToken.token_name >> None) |
+ (AppSpecificAuthToken.token_name == '') |
+ (AppSpecificAuthToken.token_secret >> None))):
+ logger.info('Backfilling encrypted credentials for app specific auth %s',
+ token.id)
+ assert token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:]
+
+ token_name = token.token_code[:AST_TOKEN_NAME_PREFIX_LENGTH]
+ token_secret = _decrypted(token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:])
+ assert token_name
+ assert token_secret
+
+ (AppSpecificAuthToken
+ .update(token_name=token_name,
+ token_secret=token_secret)
+ .where(AppSpecificAuthToken.id == token.id,
+ AppSpecificAuthToken.token_code == token.token_code)
+ .execute())
+
+ assert (AppSpecificAuthToken
+ .select()
+ .where(AppSpecificAuthToken.token_name >> None)
+ .count()) == 0
+
+ # OAuthAccessToken
+ logger.info('Backfilling credentials for OAuth access tokens')
+ for token in _iterate(OAuthAccessToken, ((OAuthAccessToken.token_name >> None) |
+ (OAuthAccessToken.token_name == ''))):
+ logger.info('Backfilling credentials for OAuth access token %s', token.id)
+ token_name = token.access_token[:OAUTH_ACCESS_TOKEN_PREFIX_LENGTH]
+ token_code = Credential.from_string(token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:])
+ assert token_name
+ assert token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:]
+
+ (OAuthAccessToken
+ .update(token_name=token_name,
+ token_code=token_code)
+ .where(OAuthAccessToken.id == token.id,
+ OAuthAccessToken.access_token == token.access_token)
+ .execute())
+
+ assert (OAuthAccessToken
+ .select()
+ .where(OAuthAccessToken.token_name >> None)
+ .count()) == 0
+
+ # OAuthAuthorizationCode
+ logger.info('Backfilling credentials for OAuth auth code')
+ for code in _iterate(OAuthAuthorizationCode, ((OAuthAuthorizationCode.code_name >> None) |
+ (OAuthAuthorizationCode.code_name == ''))):
+ logger.info('Backfilling credentials for OAuth auth code %s', code.id)
+ user_code = code.code or random_string_generator(AUTHORIZATION_CODE_PREFIX_LENGTH * 2)()
+ code_name = user_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
+ code_credential = Credential.from_string(user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:])
+ assert code_name
+ assert user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]
+
+ (OAuthAuthorizationCode
+ .update(code_name=code_name, code_credential=code_credential)
+ .where(OAuthAuthorizationCode.id == code.id)
+ .execute())
+
+ assert (OAuthAuthorizationCode
+ .select()
+ .where(OAuthAuthorizationCode.code_name >> None)
+ .count()) == 0
+
+ # OAuthApplication
+ logger.info('Backfilling secret for OAuth applications')
+ for app in _iterate(OAuthApplication, OAuthApplication.fully_migrated == False):
+ logger.info('Backfilling secret for OAuth application %s', app.id)
+ client_secret = app.client_secret or str(uuid.uuid4())
+ secure_client_secret = _decrypted(client_secret)
+
+ (OAuthApplication
+ .update(secure_client_secret=secure_client_secret, fully_migrated=True)
+ .where(OAuthApplication.id == app.id, OAuthApplication.fully_migrated == False)
+ .execute())
+
+ assert (OAuthApplication
+ .select()
+ .where(OAuthApplication.fully_migrated == False)
+ .count()) == 0
+
+ # Adjust existing fields to be nullable.
+ op.alter_column('accesstoken', 'code', nullable=True, existing_type=sa.String(length=255))
+ op.alter_column('oauthaccesstoken', 'access_token', nullable=True, existing_type=sa.String(length=255))
+ op.alter_column('oauthauthorizationcode', 'code', nullable=True, existing_type=sa.String(length=255))
+ op.alter_column('appspecificauthtoken', 'token_code', nullable=True, existing_type=sa.String(length=255))
+
+ # Adjust new fields to be non-nullable.
+ op.alter_column('accesstoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('accesstoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
+
+ op.alter_column('appspecificauthtoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('appspecificauthtoken', 'token_secret', nullable=False, existing_type=sa.String(length=255))
+
+ op.alter_column('oauthaccesstoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('oauthaccesstoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
+
+ op.alter_column('oauthauthorizationcode', 'code_name', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('oauthauthorizationcode', 'code_credential', nullable=False, existing_type=sa.String(length=255))
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.alter_column('accesstoken', 'code', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('oauthaccesstoken', 'access_token', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('oauthauthorizationcode', 'code', nullable=False, existing_type=sa.String(length=255))
+ op.alter_column('appspecificauthtoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
+
+ op.alter_column('accesstoken', 'token_name', nullable=True, existing_type=sa.String(length=255))
+ op.alter_column('accesstoken', 'token_code', nullable=True, existing_type=sa.String(length=255))
+
+ op.alter_column('appspecificauthtoken', 'token_name', nullable=True, existing_type=sa.String(length=255))
+ op.alter_column('appspecificauthtoken', 'token_secret', nullable=True, existing_type=sa.String(length=255))
+
+ op.alter_column('oauthaccesstoken', 'token_name', nullable=True, existing_type=sa.String(length=255))
+ op.alter_column('oauthaccesstoken', 'token_code', nullable=True, existing_type=sa.String(length=255))
+
+ op.alter_column('oauthauthorizationcode', 'code_name', nullable=True, existing_type=sa.String(length=255))
+ op.alter_column('oauthauthorizationcode', 'code_credential', nullable=True, existing_type=sa.String(length=255))
diff --git a/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py b/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py
new file mode 100644
index 000000000..b5fb97d63
--- /dev/null
+++ b/data/migrations/versions/7367229b38d9_add_support_for_app_specific_tokens.py
@@ -0,0 +1,74 @@
+"""Add support for app specific tokens
+
+Revision ID: 7367229b38d9
+Revises: d8989249f8f6
+Create Date: 2017-12-12 13:15:42.419764
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '7367229b38d9'
+down_revision = 'd8989249f8f6'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+from util.migrate import UTF8CharField
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('appspecificauthtoken',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=False),
+ sa.Column('title', UTF8CharField(length=255), nullable=False),
+ sa.Column('token_code', sa.String(length=255), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.Column('expiration', sa.DateTime(), nullable=True),
+ sa.Column('last_accessed', sa.DateTime(), nullable=True),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_appspecificauthtoken_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_appspecificauthtoken'))
+ )
+ op.create_index('appspecificauthtoken_token_code', 'appspecificauthtoken', ['token_code'], unique=True)
+ op.create_index('appspecificauthtoken_user_id', 'appspecificauthtoken', ['user_id'], unique=False)
+ op.create_index('appspecificauthtoken_user_id_expiration', 'appspecificauthtoken', ['user_id', 'expiration'], unique=False)
+ op.create_index('appspecificauthtoken_uuid', 'appspecificauthtoken', ['uuid'], unique=False)
+ # ### end Alembic commands ###
+
+ op.bulk_insert(tables.logentrykind, [
+ {'name': 'create_app_specific_token'},
+ {'name': 'revoke_app_specific_token'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_table('appspecificauthtoken', [
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('uuid', tester.TestDataType.UUID),
+ ('title', tester.TestDataType.UTF8Char),
+ ('token_code', tester.TestDataType.String),
+ ('created', tester.TestDataType.DateTime),
+ ('expiration', tester.TestDataType.DateTime),
+ ('last_accessed', tester.TestDataType.DateTime),
+ ])
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('appspecificauthtoken')
+ # ### end Alembic commands ###
+
+ op.execute(tables
+ .logentrykind
+ .delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('create_app_specific_token')))
+
+ op.execute(tables
+ .logentrykind
+ .delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('revoke_app_specific_token')))
diff --git a/data/migrations/versions/7a525c68eb13_add_oci_app_models.py b/data/migrations/versions/7a525c68eb13_add_oci_app_models.py
new file mode 100644
index 000000000..7cade6854
--- /dev/null
+++ b/data/migrations/versions/7a525c68eb13_add_oci_app_models.py
@@ -0,0 +1,340 @@
+"""Add OCI/App models
+
+Revision ID: 7a525c68eb13
+Revises: e2894a3a3c19
+Create Date: 2017-01-24 16:25:52.170277
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '7a525c68eb13'
+down_revision = 'e2894a3a3c19'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+from sqlalchemy.sql import table, column
+from util.migrate import UTF8LongText, UTF8CharField
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.create_table(
+ 'tagkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagkind'))
+ )
+ op.create_index('tagkind_name', 'tagkind', ['name'], unique=True)
+
+ op.create_table(
+ 'blobplacementlocation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocation'))
+ )
+ op.create_index('blobplacementlocation_name', 'blobplacementlocation', ['name'], unique=True)
+
+ op.create_table(
+ 'blob',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('size', sa.BigInteger(), nullable=False),
+ sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_blob_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blob'))
+ )
+ op.create_index('blob_digest', 'blob', ['digest'], unique=True)
+ op.create_index('blob_media_type_id', 'blob', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'blobplacementlocationpreference',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacementlocpref_locid_blobplacementlocation')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_blobplacementlocationpreference_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocationpreference'))
+ )
+ op.create_index('blobplacementlocationpreference_location_id', 'blobplacementlocationpreference', ['location_id'], unique=False)
+ op.create_index('blobplacementlocationpreference_user_id', 'blobplacementlocationpreference', ['user_id'], unique=False)
+
+ op.create_table(
+ 'manifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_json', UTF8LongText, nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifest_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifest'))
+ )
+ op.create_index('manifest_digest', 'manifest', ['digest'], unique=True)
+ op.create_index('manifest_media_type_id', 'manifest', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'manifestlist',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('manifest_list_json', UTF8LongText, nullable=False),
+ sa.Column('schema_version', UTF8CharField(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlist_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlist'))
+ )
+ op.create_index('manifestlist_digest', 'manifestlist', ['digest'], unique=True)
+ op.create_index('manifestlist_media_type_id', 'manifestlist', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'bittorrentpieces',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('pieces', UTF8LongText, nullable=False),
+ sa.Column('piece_length', sa.BigInteger(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_bittorrentpieces_blob_id_blob')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_bittorrentpieces'))
+ )
+ op.create_index('bittorrentpieces_blob_id', 'bittorrentpieces', ['blob_id'], unique=False)
+ op.create_index('bittorrentpieces_blob_id_piece_length', 'bittorrentpieces', ['blob_id', 'piece_length'], unique=True)
+
+ op.create_table(
+ 'blobplacement',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_blobplacement_blob_id_blob')),
+ sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacement_location_id_blobplacementlocation')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacement'))
+ )
+ op.create_index('blobplacement_blob_id', 'blobplacement', ['blob_id'], unique=False)
+ op.create_index('blobplacement_blob_id_location_id', 'blobplacement', ['blob_id', 'location_id'], unique=True)
+ op.create_index('blobplacement_location_id', 'blobplacement', ['location_id'], unique=False)
+
+ op.create_table(
+ 'blobuploading',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.Column('byte_count', sa.BigInteger(), nullable=False),
+ sa.Column('uncompressed_byte_count', sa.BigInteger(), nullable=True),
+ sa.Column('chunk_count', sa.BigInteger(), nullable=False),
+ sa.Column('storage_metadata', UTF8LongText, nullable=True),
+ sa.Column('sha_state', UTF8LongText, nullable=True),
+ sa.Column('piece_sha_state', UTF8LongText, nullable=True),
+ sa.Column('piece_hashes', UTF8LongText, nullable=True),
+ sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobuploading_location_id_blobplacementlocation')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobuploading_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobuploading'))
+ )
+ op.create_index('blobuploading_created', 'blobuploading', ['created'], unique=False)
+ op.create_index('blobuploading_location_id', 'blobuploading', ['location_id'], unique=False)
+ op.create_index('blobuploading_repository_id', 'blobuploading', ['repository_id'], unique=False)
+ op.create_index('blobuploading_repository_id_uuid', 'blobuploading', ['repository_id', 'uuid'], unique=True)
+ op.create_index('blobuploading_uuid', 'blobuploading', ['uuid'], unique=True)
+
+ op.create_table(
+ 'derivedimage',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('source_manifest_id', sa.Integer(), nullable=False),
+ sa.Column('derived_manifest_json', UTF8LongText, nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('uniqueness_hash', sa.String(length=255), nullable=False),
+ sa.Column('signature_blob_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_derivedimage_blob_id_blob')),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_derivedimage_media_type_id_mediatype')),
+ sa.ForeignKeyConstraint(['signature_blob_id'], ['blob.id'], name=op.f('fk_derivedimage_signature_blob_id_blob')),
+ sa.ForeignKeyConstraint(['source_manifest_id'], ['manifest.id'], name=op.f('fk_derivedimage_source_manifest_id_manifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedimage'))
+ )
+ op.create_index('derivedimage_blob_id', 'derivedimage', ['blob_id'], unique=False)
+ op.create_index('derivedimage_media_type_id', 'derivedimage', ['media_type_id'], unique=False)
+ op.create_index('derivedimage_signature_blob_id', 'derivedimage', ['signature_blob_id'], unique=False)
+ op.create_index('derivedimage_source_manifest_id', 'derivedimage', ['source_manifest_id'], unique=False)
+ op.create_index('derivedimage_source_manifest_id_blob_id', 'derivedimage', ['source_manifest_id', 'blob_id'], unique=True)
+ op.create_index('derivedimage_source_manifest_id_media_type_id_uniqueness_hash', 'derivedimage', ['source_manifest_id', 'media_type_id', 'uniqueness_hash'], unique=True)
+ op.create_index('derivedimage_uniqueness_hash', 'derivedimage', ['uniqueness_hash'], unique=True)
+ op.create_index('derivedimage_uuid', 'derivedimage', ['uuid'], unique=True)
+
+ op.create_table(
+ 'manifestblob',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestblob_blob_id_blob')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestblob_manifest_id_manifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestblob'))
+ )
+ op.create_index('manifestblob_blob_id', 'manifestblob', ['blob_id'], unique=False)
+ op.create_index('manifestblob_manifest_id', 'manifestblob', ['manifest_id'], unique=False)
+ op.create_index('manifestblob_manifest_id_blob_id', 'manifestblob', ['manifest_id', 'blob_id'], unique=True)
+
+ op.create_table(
+ 'manifestlabel',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('annotated_id', sa.Integer(), nullable=False),
+ sa.Column('label_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['annotated_id'], ['manifest.id'], name=op.f('fk_manifestlabel_annotated_id_manifest')),
+ sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_manifestlabel_label_id_label')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlabel_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlabel'))
+ )
+ op.create_index('manifestlabel_annotated_id', 'manifestlabel', ['annotated_id'], unique=False)
+ op.create_index('manifestlabel_label_id', 'manifestlabel', ['label_id'], unique=False)
+ op.create_index('manifestlabel_repository_id', 'manifestlabel', ['repository_id'], unique=False)
+ op.create_index('manifestlabel_repository_id_annotated_id_label_id', 'manifestlabel', ['repository_id', 'annotated_id', 'label_id'], unique=True)
+
+ op.create_table(
+ 'manifestlayer',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_index', sa.BigInteger(), nullable=False),
+ sa.Column('metadata_json', UTF8LongText, nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestlayer_blob_id_blob')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlayer_manifest_id_manifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayer'))
+ )
+ op.create_index('manifestlayer_blob_id', 'manifestlayer', ['blob_id'], unique=False)
+ op.create_index('manifestlayer_manifest_id', 'manifestlayer', ['manifest_id'], unique=False)
+ op.create_index('manifestlayer_manifest_id_manifest_index', 'manifestlayer', ['manifest_id', 'manifest_index'], unique=True)
+ op.create_index('manifestlayer_manifest_index', 'manifestlayer', ['manifest_index'], unique=False)
+
+ op.create_table(
+ 'manifestlistmanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_list_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('operating_system', UTF8CharField(length=255), nullable=True),
+ sa.Column('architecture', UTF8CharField(length=255), nullable=True),
+ sa.Column('platform_json', UTF8LongText, nullable=True),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlistmanifest_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_manifestlistmanifest_manifest_list_id_manifestlist')),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlistmanifest_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlistmanifest'))
+ )
+ op.create_index('manifestlistmanifest_manifest_id', 'manifestlistmanifest', ['manifest_id'], unique=False)
+ op.create_index('manifestlistmanifest_manifest_list_id', 'manifestlistmanifest', ['manifest_list_id'], unique=False)
+ op.create_index('manifestlistmanifest_manifest_listid_os_arch_mtid', 'manifestlistmanifest', ['manifest_list_id', 'operating_system', 'architecture', 'media_type_id'], unique=False)
+ op.create_index('manifestlistmanifest_manifest_listid_mtid', 'manifestlistmanifest', ['manifest_list_id', 'media_type_id'], unique=False)
+ op.create_index('manifestlistmanifest_media_type_id', 'manifestlistmanifest', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'tag',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', UTF8CharField(length=190), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_list_id', sa.Integer(), nullable=True),
+ sa.Column('lifetime_start', sa.BigInteger(), nullable=False),
+ sa.Column('lifetime_end', sa.BigInteger(), nullable=True),
+ sa.Column('hidden', sa.Boolean(), nullable=False),
+ sa.Column('reverted', sa.Boolean(), nullable=False),
+ sa.Column('protected', sa.Boolean(), nullable=False),
+ sa.Column('tag_kind_id', sa.Integer(), nullable=False),
+ sa.Column('linked_tag_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['linked_tag_id'], ['tag.id'], name=op.f('fk_tag_linked_tag_id_tag')),
+ sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_tag_manifest_list_id_manifestlist')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tag_repository_id_repository')),
+ sa.ForeignKeyConstraint(['tag_kind_id'], ['tagkind.id'], name=op.f('fk_tag_tag_kind_id_tagkind')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tag'))
+ )
+ op.create_index('tag_lifetime_end', 'tag', ['lifetime_end'], unique=False)
+ op.create_index('tag_linked_tag_id', 'tag', ['linked_tag_id'], unique=False)
+ op.create_index('tag_manifest_list_id', 'tag', ['manifest_list_id'], unique=False)
+ op.create_index('tag_repository_id', 'tag', ['repository_id'], unique=False)
+ op.create_index('tag_repository_id_name_hidden', 'tag', ['repository_id', 'name', 'hidden'], unique=False)
+ op.create_index('tag_repository_id_name_lifetime_end', 'tag', ['repository_id', 'name', 'lifetime_end'], unique=True)
+ op.create_index('tag_repository_id_name', 'tag', ['repository_id', 'name'], unique=False)
+ op.create_index('tag_tag_kind_id', 'tag', ['tag_kind_id'], unique=False)
+
+ op.create_table(
+ 'manifestlayerdockerv1',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_layer_id', sa.Integer(), nullable=False),
+ sa.Column('image_id', UTF8CharField(length=255), nullable=False),
+ sa.Column('checksum', UTF8CharField(length=255), nullable=False),
+ sa.Column('compat_json', UTF8LongText, nullable=False),
+ sa.ForeignKeyConstraint(['manifest_layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerdockerv1_manifest_layer_id_manifestlayer')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerdockerv1'))
+ )
+ op.create_index('manifestlayerdockerv1_image_id', 'manifestlayerdockerv1', ['image_id'], unique=False)
+ op.create_index('manifestlayerdockerv1_manifest_layer_id', 'manifestlayerdockerv1', ['manifest_layer_id'], unique=False)
+
+ op.create_table(
+ 'manifestlayerscan',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('layer_id', sa.Integer(), nullable=False),
+ sa.Column('scannable', sa.Boolean(), nullable=False),
+ sa.Column('scanned_by', UTF8CharField(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['layer_id'], ['manifestlayer.id'], name=op.f('fk_manifestlayerscan_layer_id_manifestlayer')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlayerscan'))
+ )
+ op.create_index('manifestlayerscan_layer_id', 'manifestlayerscan', ['layer_id'], unique=True)
+
+ blobplacementlocation_table = table('blobplacementlocation',
+ column('id', sa.Integer()),
+ column('name', sa.String()),
+ )
+
+ op.bulk_insert(
+ blobplacementlocation_table,
+ [
+ {'name': 'local_eu'},
+ {'name': 'local_us'},
+ ],
+ )
+
+ op.bulk_insert(
+ tables.mediatype,
+ [
+ {'name': 'application/vnd.cnr.blob.v0.tar+gzip'},
+ {'name': 'application/vnd.cnr.package-manifest.helm.v0.json'},
+ {'name': 'application/vnd.cnr.package-manifest.kpm.v0.json'},
+ {'name': 'application/vnd.cnr.package-manifest.docker-compose.v0.json'},
+ {'name': 'application/vnd.cnr.package.kpm.v0.tar+gzip'},
+ {'name': 'application/vnd.cnr.package.helm.v0.tar+gzip'},
+ {'name': 'application/vnd.cnr.package.docker-compose.v0.tar+gzip'},
+ {'name': 'application/vnd.cnr.manifests.v0.json'},
+ {'name': 'application/vnd.cnr.manifest.list.v0.json'},
+ ],
+ )
+
+ tagkind_table = table('tagkind',
+ column('id', sa.Integer()),
+ column('name', sa.String()),
+ )
+
+ op.bulk_insert(
+ tagkind_table,
+ [
+ {'id': 1, 'name': 'tag'},
+ {'id': 2, 'name': 'release'},
+ {'id': 3, 'name': 'channel'},
+ ]
+ )
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.drop_table('manifestlayerscan')
+ op.drop_table('manifestlayerdockerv1')
+ op.drop_table('tag')
+ op.drop_table('manifestlistmanifest')
+ op.drop_table('manifestlayer')
+ op.drop_table('manifestlabel')
+ op.drop_table('manifestblob')
+ op.drop_table('derivedimage')
+ op.drop_table('blobuploading')
+ op.drop_table('blobplacement')
+ op.drop_table('bittorrentpieces')
+ op.drop_table('manifestlist')
+ op.drop_table('manifest')
+ op.drop_table('blobplacementlocationpreference')
+ op.drop_table('blob')
+ op.drop_table('tagkind')
+ op.drop_table('blobplacementlocation')
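Note on the new `tag` table above: the `lifetime_start`/`lifetime_end` columns plus the unique index on (repository_id, name, lifetime_end) suggest an append-only tag history in which, presumably, the currently live tag is the row whose lifetime_end is NULL. Under that assumption only, a lookup could be sketched with SQLAlchemy core as follows (illustrative; `live_tag` is not a function in this patch):

    from sqlalchemy.sql import table, column
    import sqlalchemy as sa

    def live_tag(conn, repository_id, name):
        # Assumption: lifetime_end IS NULL marks the currently active row.
        tag = table('tag',
                    column('id', sa.Integer()),
                    column('name', sa.String()),
                    column('repository_id', sa.Integer()),
                    column('lifetime_end', sa.BigInteger()))
        query = (tag.select()
                    .where(tag.c.repository_id == repository_id)
                    .where(tag.c.name == name)
                    .where(tag.c.lifetime_end.is_(None)))
        return conn.execute(query).first()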
diff --git a/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py b/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py
new file mode 100644
index 000000000..ac177cd9f
--- /dev/null
+++ b/data/migrations/versions/87fbbc224f10_add_disabled_datetime_to_trigger.py
@@ -0,0 +1,35 @@
+"""Add disabled datetime to trigger
+
+Revision ID: 87fbbc224f10
+Revises: 17aff2e1354e
+Create Date: 2017-10-24 14:06:37.658705
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '87fbbc224f10'
+down_revision = '17aff2e1354e'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repositorybuildtrigger', sa.Column('disabled_datetime', sa.DateTime(), nullable=True))
+ op.create_index('repositorybuildtrigger_disabled_datetime', 'repositorybuildtrigger', ['disabled_datetime'], unique=False)
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_column('repositorybuildtrigger', 'disabled_datetime', tester.TestDataType.DateTime)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('repositorybuildtrigger_disabled_datetime', table_name='repositorybuildtrigger')
+ op.drop_column('repositorybuildtrigger', 'disabled_datetime')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py b/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py
new file mode 100644
index 000000000..49797c6ae
--- /dev/null
+++ b/data/migrations/versions/9093adccc784_add_v2_2_data_models_for_manifest_.py
@@ -0,0 +1,180 @@
+"""Add V2_2 data models for Manifest, ManifestBlob and ManifestLegacyImage
+
+Revision ID: 9093adccc784
+Revises: 6c21e2cfb8b6
+Create Date: 2018-08-06 16:07:50.222749
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '9093adccc784'
+down_revision = '6c21e2cfb8b6'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from image.docker.schema1 import DOCKER_SCHEMA1_CONTENT_TYPES
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('manifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_bytes', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifest_media_type_id_mediatype')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifest_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifest'))
+ )
+ op.create_index('manifest_digest', 'manifest', ['digest'], unique=False)
+ op.create_index('manifest_media_type_id', 'manifest', ['media_type_id'], unique=False)
+ op.create_index('manifest_repository_id', 'manifest', ['repository_id'], unique=False)
+ op.create_index('manifest_repository_id_digest', 'manifest', ['repository_id', 'digest'], unique=True)
+ op.create_index('manifest_repository_id_media_type_id', 'manifest', ['repository_id', 'media_type_id'], unique=False)
+ op.create_table('manifestblob',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('blob_index', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['imagestorage.id'], name=op.f('fk_manifestblob_blob_id_imagestorage')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestblob_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestblob_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestblob'))
+ )
+ op.create_index('manifestblob_blob_id', 'manifestblob', ['blob_id'], unique=False)
+ op.create_index('manifestblob_manifest_id', 'manifestblob', ['manifest_id'], unique=False)
+ op.create_index('manifestblob_manifest_id_blob_id', 'manifestblob', ['manifest_id', 'blob_id'], unique=True)
+ op.create_index('manifestblob_manifest_id_blob_index', 'manifestblob', ['manifest_id', 'blob_index'], unique=True)
+ op.create_index('manifestblob_repository_id', 'manifestblob', ['repository_id'], unique=False)
+ op.create_table('manifestlabel',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('label_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_manifestlabel_label_id_label')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlabel_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlabel_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlabel'))
+ )
+ op.create_index('manifestlabel_label_id', 'manifestlabel', ['label_id'], unique=False)
+ op.create_index('manifestlabel_manifest_id', 'manifestlabel', ['manifest_id'], unique=False)
+ op.create_index('manifestlabel_manifest_id_label_id', 'manifestlabel', ['manifest_id', 'label_id'], unique=True)
+ op.create_index('manifestlabel_repository_id', 'manifestlabel', ['repository_id'], unique=False)
+ op.create_table('manifestlegacyimage',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('image_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['image_id'], ['image.id'], name=op.f('fk_manifestlegacyimage_image_id_image')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlegacyimage_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_manifestlegacyimage_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlegacyimage'))
+ )
+ op.create_index('manifestlegacyimage_image_id', 'manifestlegacyimage', ['image_id'], unique=False)
+ op.create_index('manifestlegacyimage_manifest_id', 'manifestlegacyimage', ['manifest_id'], unique=True)
+ op.create_index('manifestlegacyimage_repository_id', 'manifestlegacyimage', ['repository_id'], unique=False)
+ op.create_table('tagmanifesttomanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('tag_manifest_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('broken', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tagmanifesttomanifest_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['tag_manifest_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifesttomanifest_tag_manifest_id_tagmanifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifesttomanifest'))
+ )
+ op.create_index('tagmanifesttomanifest_broken', 'tagmanifesttomanifest', ['broken'], unique=False)
+ op.create_index('tagmanifesttomanifest_manifest_id', 'tagmanifesttomanifest', ['manifest_id'], unique=True)
+ op.create_index('tagmanifesttomanifest_tag_manifest_id', 'tagmanifesttomanifest', ['tag_manifest_id'], unique=True)
+ op.create_table('tagmanifestlabelmap',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('tag_manifest_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=True),
+ sa.Column('label_id', sa.Integer(), nullable=False),
+ sa.Column('tag_manifest_label_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_label_id', sa.Integer(), nullable=True),
+ sa.Column('broken_manifest', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_tagmanifestlabelmap_label_id_label')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_tagmanifestlabelmap_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['manifest_label_id'], ['manifestlabel.id'], name=op.f('fk_tagmanifestlabelmap_manifest_label_id_manifestlabel')),
+ sa.ForeignKeyConstraint(['tag_manifest_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifestlabelmap_tag_manifest_id_tagmanifest')),
+ sa.ForeignKeyConstraint(['tag_manifest_label_id'], ['tagmanifestlabel.id'], name=op.f('fk_tagmanifestlabelmap_tag_manifest_label_id_tagmanifestlabel')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifestlabelmap'))
+ )
+ op.create_index('tagmanifestlabelmap_broken_manifest', 'tagmanifestlabelmap', ['broken_manifest'], unique=False)
+ op.create_index('tagmanifestlabelmap_label_id', 'tagmanifestlabelmap', ['label_id'], unique=False)
+ op.create_index('tagmanifestlabelmap_manifest_id', 'tagmanifestlabelmap', ['manifest_id'], unique=False)
+ op.create_index('tagmanifestlabelmap_manifest_label_id', 'tagmanifestlabelmap', ['manifest_label_id'], unique=False)
+ op.create_index('tagmanifestlabelmap_tag_manifest_id', 'tagmanifestlabelmap', ['tag_manifest_id'], unique=False)
+ op.create_index('tagmanifestlabelmap_tag_manifest_label_id', 'tagmanifestlabelmap', ['tag_manifest_label_id'], unique=False)
+ # ### end Alembic commands ###
+
+ for media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
+ op.bulk_insert(tables.mediatype,
+ [
+ {'name': media_type},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_table('manifest', [
+ ('digest', tester.TestDataType.String),
+ ('manifest_bytes', tester.TestDataType.JSON),
+ ('media_type_id', tester.TestDataType.Foreign('mediatype')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ])
+
+ tester.populate_table('manifestblob', [
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('blob_id', tester.TestDataType.Foreign('imagestorage')),
+ ('blob_index', tester.TestDataType.Integer),
+ ])
+
+ tester.populate_table('manifestlabel', [
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('label_id', tester.TestDataType.Foreign('label')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ])
+
+ tester.populate_table('manifestlegacyimage', [
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('image_id', tester.TestDataType.Foreign('image')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ])
+
+ tester.populate_table('tagmanifesttomanifest', [
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('tag_manifest_id', tester.TestDataType.Foreign('tagmanifest')),
+ ])
+
+ tester.populate_table('tagmanifestlabelmap', [
+ ('manifest_id', tester.TestDataType.Foreign('manifest')),
+ ('tag_manifest_id', tester.TestDataType.Foreign('tagmanifest')),
+ ('tag_manifest_label_id', tester.TestDataType.Foreign('tagmanifestlabel')),
+ ('manifest_label_id', tester.TestDataType.Foreign('manifestlabel')),
+ ('label_id', tester.TestDataType.Foreign('label')),
+ ])
+
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ for media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
+ op.execute(tables
+ .mediatype
+ .delete()
+ .where(tables.
+ mediatype.c.name == op.inline_literal(media_type)))
+
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('tagmanifestlabelmap')
+ op.drop_table('tagmanifesttomanifest')
+ op.drop_table('manifestlegacyimage')
+ op.drop_table('manifestlabel')
+ op.drop_table('manifestblob')
+ op.drop_table('manifest')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/94836b099894_create_new_notification_type.py b/data/migrations/versions/94836b099894_create_new_notification_type.py
new file mode 100644
index 000000000..6bc780d01
--- /dev/null
+++ b/data/migrations/versions/94836b099894_create_new_notification_type.py
@@ -0,0 +1,31 @@
+"""Create new notification type
+
+Revision ID: 94836b099894
+Revises: faf752bd2e0a
+Create Date: 2016-11-30 10:29:51.519278
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '94836b099894'
+down_revision = 'faf752bd2e0a'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.bulk_insert(tables.externalnotificationevent,
+ [
+ {'name': 'build_cancelled'},
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.execute(tables
+ .externalnotificationevent
+ .delete()
+ .where(tables.
+ externalnotificationevent.c.name == op.inline_literal('build_cancelled')))
diff --git a/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py b/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py
new file mode 100644
index 000000000..c4c6b3f33
--- /dev/null
+++ b/data/migrations/versions/a6c463dfb9fe_back_fill_build_expand_config.py
@@ -0,0 +1,101 @@
+"""back fill build expand_config
+
+Revision ID: a6c463dfb9fe
+Revises: b4df55dea4b3
+Create Date: 2017-03-17 10:00:19.739858
+
+"""
+
+# revision identifiers, used by Alembic.
+import json
+import os
+
+from app import app
+from peewee import *
+from data.database import BaseModel
+
+revision = 'a6c463dfb9fe'
+down_revision = 'b4df55dea4b3'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+
+class RepositoryBuildTrigger(BaseModel):
+ config = TextField(default='{}')
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ if not app.config.get('SETUP_COMPLETE', False):
+ return
+
+  repositoryBuildTriggers = RepositoryBuildTrigger.select()
+  for repositoryBuildTrigger in repositoryBuildTriggers:
+ config = json.loads(repositoryBuildTrigger.config)
+ repositoryBuildTrigger.config = json.dumps(get_config_expand(config))
+ repositoryBuildTrigger.save()
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ if not app.config.get('SETUP_COMPLETE', False):
+ return
+
+  repositoryBuildTriggers = RepositoryBuildTrigger.select()
+  for repositoryBuildTrigger in repositoryBuildTriggers:
+ config = json.loads(repositoryBuildTrigger.config)
+    repositoryBuildTrigger.config = json.dumps(get_config_contract(config))
+ repositoryBuildTrigger.save()
+
+
+def create_context(current_subdir):
+ if current_subdir == "":
+ current_subdir = os.path.sep + current_subdir
+
+  if not current_subdir.endswith(os.path.sep):
+ current_subdir += os.path.sep
+
+ context, _ = os.path.split(current_subdir)
+ return context
+
+
+def create_dockerfile_path(current_subdir):
+ if current_subdir == "":
+ current_subdir = os.path.sep + current_subdir
+
+  if not current_subdir.endswith(os.path.sep):
+ current_subdir += os.path.sep
+
+ return current_subdir + "Dockerfile"
+
+
+def get_config_expand(config):
+ """ A function to transform old records into new records """
+ if not config:
+ return config
+
+ # skip records that have been updated
+ if "context" in config or "dockerfile_path" in config:
+ return config
+
+ config_expand = {}
+ if "subdir" in config:
+ config_expand = dict(config)
+ config_expand["context"] = create_context(config["subdir"])
+ config_expand["dockerfile_path"] = create_dockerfile_path(config["subdir"])
+
+ return config_expand
+
+
+def get_config_contract(config):
+ """ A function to delete context and dockerfile_path from config """
+ if not config:
+ return config
+
+ if "context" in config:
+ del config["context"]
+
+ if "dockerfile_path" in config:
+ del config["dockerfile_path"]
+
+ return config
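For orientation: the expand step above derives `context` and `dockerfile_path` from the legacy `subdir` value, and `get_config_contract` is its inverse, dropping those two keys again on downgrade. A condensed, runnable illustration of the derivation (POSIX path separators assumed; the `expand` helper below is a rewording for this note, not a name used in the patch):

    import os

    def expand(subdir):
        # Mirrors create_context()/create_dockerfile_path() above.
        d = subdir if subdir else os.path.sep
        if not d.endswith(os.path.sep):
            d += os.path.sep
        context, _ = os.path.split(d)
        return {'subdir': subdir, 'context': context, 'dockerfile_path': d + 'Dockerfile'}

    print(expand('web'))  # {'subdir': 'web', 'context': 'web', 'dockerfile_path': 'web/Dockerfile'}
    print(expand(''))     # {'subdir': '', 'context': '/', 'dockerfile_path': '/Dockerfile'}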
diff --git a/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py b/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py
new file mode 100644
index 000000000..d9c53f10c
--- /dev/null
+++ b/data/migrations/versions/b4c2d45bc132_add_deleted_namespace_table.py
@@ -0,0 +1,53 @@
+"""Add deleted namespace table
+
+Revision ID: b4c2d45bc132
+Revises: 152edccba18c
+Create Date: 2018-02-27 11:43:02.329941
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b4c2d45bc132'
+down_revision = '152edccba18c'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('deletednamespace',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('namespace_id', sa.Integer(), nullable=False),
+ sa.Column('marked', sa.DateTime(), nullable=False),
+ sa.Column('original_username', sa.String(length=255), nullable=False),
+ sa.Column('original_email', sa.String(length=255), nullable=False),
+ sa.Column('queue_id', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['namespace_id'], ['user.id'], name=op.f('fk_deletednamespace_namespace_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_deletednamespace'))
+ )
+ op.create_index('deletednamespace_namespace_id', 'deletednamespace', ['namespace_id'], unique=True)
+ op.create_index('deletednamespace_original_email', 'deletednamespace', ['original_email'], unique=False)
+ op.create_index('deletednamespace_original_username', 'deletednamespace', ['original_username'], unique=False)
+ op.create_index('deletednamespace_queue_id', 'deletednamespace', ['queue_id'], unique=False)
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_table('deletednamespace', [
+ ('namespace_id', tester.TestDataType.Foreign('user')),
+ ('marked', tester.TestDataType.DateTime),
+ ('original_username', tester.TestDataType.UTF8Char),
+ ('original_email', tester.TestDataType.String),
+ ('queue_id', tester.TestDataType.Foreign('queueitem')),
+ ])
+ # ### end population of test data ### #
+
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('deletednamespace')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/b4df55dea4b3_add_repository_kind.py b/data/migrations/versions/b4df55dea4b3_add_repository_kind.py
new file mode 100644
index 000000000..d96dd8c43
--- /dev/null
+++ b/data/migrations/versions/b4df55dea4b3_add_repository_kind.py
@@ -0,0 +1,51 @@
+"""add repository kind
+
+Revision ID: b4df55dea4b3
+Revises: b8ae68ad3e52
+Create Date: 2017-03-19 12:59:41.484430
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b4df55dea4b3'
+down_revision = 'b8ae68ad3e52'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.create_table(
+ 'repositorykind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorykind'))
+ )
+ op.create_index('repositorykind_name', 'repositorykind', ['name'], unique=True)
+
+ op.bulk_insert(
+ tables.repositorykind,
+ [
+ {'id': 1, 'name': 'image'},
+ {'id': 2, 'name': 'application'},
+ ],
+ )
+
+ op.add_column(u'repository', sa.Column('kind_id', sa.Integer(), nullable=False, server_default='1'))
+ op.create_index('repository_kind_id', 'repository', ['kind_id'], unique=False)
+ op.create_foreign_key(op.f('fk_repository_kind_id_repositorykind'), 'repository', 'repositorykind', ['kind_id'], ['id'])
+
+ # ### population of test data ### #
+ tester.populate_column('repository', 'kind_id', tester.TestDataType.Foreign('repositorykind'))
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.drop_constraint(op.f('fk_repository_kind_id_repositorykind'), 'repository', type_='foreignkey')
+ op.drop_index('repository_kind_id', table_name='repository')
+ op.drop_column(u'repository', 'kind_id')
+ op.drop_table('repositorykind')
diff --git a/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py b/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py
new file mode 100644
index 000000000..1d26fa2d9
--- /dev/null
+++ b/data/migrations/versions/b547bc139ad8_add_robotaccountmetadata_table.py
@@ -0,0 +1,46 @@
+"""Add RobotAccountMetadata table
+
+Revision ID: b547bc139ad8
+Revises: 0cf50323c78b
+Create Date: 2018-03-09 15:50:48.298880
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b547bc139ad8'
+down_revision = '0cf50323c78b'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from util.migrate import UTF8CharField
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('robotaccountmetadata',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('robot_account_id', sa.Integer(), nullable=False),
+ sa.Column('description', UTF8CharField(length=255), nullable=False),
+ sa.Column('unstructured_json', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['robot_account_id'], ['user.id'], name=op.f('fk_robotaccountmetadata_robot_account_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_robotaccountmetadata'))
+ )
+ op.create_index('robotaccountmetadata_robot_account_id', 'robotaccountmetadata', ['robot_account_id'], unique=True)
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_table('robotaccountmetadata', [
+ ('robot_account_id', tester.TestDataType.Foreign('user')),
+ ('description', tester.TestDataType.UTF8Char),
+ ('unstructured_json', tester.TestDataType.JSON),
+ ])
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('robotaccountmetadata')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py b/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py
new file mode 100644
index 000000000..d76c8e018
--- /dev/null
+++ b/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py
@@ -0,0 +1,37 @@
+"""Change BlobUpload fields to BigIntegers to allow layers > 8GB
+
+Revision ID: b8ae68ad3e52
+Revises: 7a525c68eb13
+Create Date: 2017-02-27 11:26:49.182349
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b8ae68ad3e52'
+down_revision = '7a525c68eb13'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.alter_column('blobupload', 'byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
+ op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
+
+ # ### population of test data ### #
+ tester.populate_column('blobupload', 'byte_count', tester.TestDataType.BigInteger)
+ tester.populate_column('blobupload', 'uncompressed_byte_count', tester.TestDataType.BigInteger)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### population of test data ### #
+ tester.populate_column('blobupload', 'byte_count', tester.TestDataType.Integer)
+ tester.populate_column('blobupload', 'uncompressed_byte_count', tester.TestDataType.Integer)
+ # ### end population of test data ### #
+
+ op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
+ op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
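For scale: on most backends sa.Integer() is a signed 32-bit column, capping byte_count at 2^31 - 1 = 2,147,483,647 bytes (about 2 GiB) per upload, while sa.BigInteger() is 64-bit (2^63 - 1), which is effectively unbounded for layer sizes.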
diff --git a/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py b/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py
new file mode 100644
index 000000000..b85ae3514
--- /dev/null
+++ b/data/migrations/versions/b9045731c4de_add_lifetime_indexes_to_tag_tables.py
@@ -0,0 +1,35 @@
+"""Add lifetime end indexes to tag tables
+
+Revision ID: b9045731c4de
+Revises: e184af42242d
+Create Date: 2019-02-14 17:18:40.474310
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b9045731c4de'
+down_revision = 'e184af42242d'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('repositorytag_repository_id_lifetime_end_ts', 'repositorytag', ['repository_id', 'lifetime_end_ts'], unique=False)
+ op.create_index('tag_repository_id_lifetime_end_ms', 'tag', ['repository_id', 'lifetime_end_ms'], unique=False)
+
+ op.create_index('repositorytag_repository_id_lifetime_start_ts', 'repositorytag', ['repository_id', 'lifetime_start_ts'], unique=False)
+ op.create_index('tag_repository_id_lifetime_start_ms', 'tag', ['repository_id', 'lifetime_start_ms'], unique=False)
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('tag_repository_id_lifetime_end_ms', table_name='tag')
+ op.drop_index('repositorytag_repository_id_lifetime_end_ts', table_name='repositorytag')
+
+ op.drop_index('tag_repository_id_lifetime_start_ms', table_name='tag')
+ op.drop_index('repositorytag_repository_id_lifetime_start_ts', table_name='repositorytag')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py b/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py
new file mode 100644
index 000000000..3968abd32
--- /dev/null
+++ b/data/migrations/versions/b918abdbee43_run_full_tag_backfill.py
@@ -0,0 +1,71 @@
+"""Run full tag backfill
+
+Revision ID: b918abdbee43
+Revises: 481623ba00ba
+Create Date: 2019-03-14 13:38:03.411609
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b918abdbee43'
+down_revision = '481623ba00ba'
+
+import logging.config
+
+from app import app
+from peewee import JOIN, fn
+
+from workers.tagbackfillworker import backfill_tag
+from data.database import RepositoryTag, Repository, User, TagToRepositoryTag
+from util.log import logfile_path
+
+logger = logging.getLogger(__name__)
+
+
+def upgrade(tables, tester, progress_reporter):
+ if not app.config.get('SETUP_COMPLETE', False):
+ return
+
+ start_id = 0
+ end_id = 1000
+ size = 1000
+
+ max_id = RepositoryTag.select(fn.Max(RepositoryTag.id)).scalar()
+ if max_id is None:
+ return
+
+ logger.info("Found maximum ID %s" % max_id)
+
+ while True:
+ if start_id > max_id:
+ break
+
+ logger.info('Checking tag range %s - %s', start_id, end_id)
+ r = list(RepositoryTag
+ .select()
+ .join(Repository)
+ .switch(RepositoryTag)
+ .join(TagToRepositoryTag, JOIN.LEFT_OUTER)
+ .where(TagToRepositoryTag.id >> None)
+ .where(RepositoryTag.hidden == False,
+ RepositoryTag.id >= start_id,
+ RepositoryTag.id < end_id))
+
+ if len(r) < 1000 and size < 100000:
+ size *= 2
+
+ start_id = end_id
+ end_id = start_id + size
+
+ if not len(r):
+ continue
+
+ logger.info('Found %s tags to backfill', len(r))
+ for index, t in enumerate(r):
+ logger.info("Backfilling tag %s of %s", index, len(r))
+ backfill_tag(t)
+
+
+def downgrade(tables, tester, progress_reporter):
+ # Nothing to do.
+ pass
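A note on the query in the loop above: `TagToRepositoryTag.id >> None` is peewee's IS NULL operator, so the LEFT OUTER JOIN plus that filter forms an anti-join selecting RepositoryTag rows with no TagToRepositoryTag record yet, i.e. tags that still need backfilling. The same pattern in isolation, with hypothetical stand-in models:

    from peewee import SqliteDatabase, Model, CharField, ForeignKeyField, JOIN

    db = SqliteDatabase(':memory:')

    class OldTag(Model):          # stand-in for RepositoryTag
        name = CharField()
        class Meta:
            database = db

    class Bridge(Model):          # stand-in for TagToRepositoryTag
        old_tag = ForeignKeyField(OldTag)
        class Meta:
            database = db

    db.create_tables([OldTag, Bridge])
    a, b = OldTag.create(name='a'), OldTag.create(name='b')
    Bridge.create(old_tag=a)      # 'a' is already migrated, 'b' is not

    unmigrated = (OldTag
                  .select()
                  .join(Bridge, JOIN.LEFT_OUTER)
                  .where(Bridge.id >> None))   # IS NULL
    print([t.name for t in unmigrated])        # ['b']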
diff --git a/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py b/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py
new file mode 100644
index 000000000..62c0aba44
--- /dev/null
+++ b/data/migrations/versions/be8d1c402ce0_add_teamsync_table.py
@@ -0,0 +1,52 @@
+"""Add TeamSync table
+
+Revision ID: be8d1c402ce0
+Revises: a6c463dfb9fe
+Create Date: 2017-02-23 13:34:52.356812
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'be8d1c402ce0'
+down_revision = 'a6c463dfb9fe'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from util.migrate import UTF8LongText
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('teamsync',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('team_id', sa.Integer(), nullable=False),
+ sa.Column('transaction_id', sa.String(length=255), nullable=False),
+ sa.Column('last_updated', sa.DateTime(), nullable=True),
+ sa.Column('service_id', sa.Integer(), nullable=False),
+ sa.Column('config', UTF8LongText(), nullable=False),
+ sa.ForeignKeyConstraint(['service_id'], ['loginservice.id'], name=op.f('fk_teamsync_service_id_loginservice')),
+ sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_teamsync_team_id_team')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_teamsync'))
+ )
+ op.create_index('teamsync_last_updated', 'teamsync', ['last_updated'], unique=False)
+ op.create_index('teamsync_service_id', 'teamsync', ['service_id'], unique=False)
+ op.create_index('teamsync_team_id', 'teamsync', ['team_id'], unique=True)
+ ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_table('teamsync', [
+ ('team_id', tester.TestDataType.Foreign('team')),
+ ('transaction_id', tester.TestDataType.String),
+ ('last_updated', tester.TestDataType.DateTime),
+ ('service_id', tester.TestDataType.Foreign('loginservice')),
+ ('config', tester.TestDataType.JSON),
+ ])
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('teamsync')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/c00a1f15968b_add_schema2_media_types.py b/data/migrations/versions/c00a1f15968b_add_schema2_media_types.py
new file mode 100644
index 000000000..2d2a050df
--- /dev/null
+++ b/data/migrations/versions/c00a1f15968b_add_schema2_media_types.py
@@ -0,0 +1,34 @@
+from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
+
+"""Add schema2 media types
+
+Revision ID: c00a1f15968b
+Revises: 67f0abd172ae
+Create Date: 2018-11-13 09:20:21.968503
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c00a1f15968b'
+down_revision = '67f0abd172ae'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
+ op.bulk_insert(tables.mediatype,
+ [
+ {'name': media_type},
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
+ op.execute(tables
+ .mediatype
+ .delete()
+ .where(tables.
+ mediatype.c.name == op.inline_literal(media_type)))
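Several migrations in this patch (build_cancelled above, the schema 1 and schema 2 media types) repeat the same seed-on-upgrade / delete-on-downgrade shape for name-keyed lookup rows. Factored out, the pattern looks roughly like this (the helper names are illustrative only; no such helpers exist in the patch):

    def seed_names(op, table, names):
        # One row per name in a lookup table such as tables.mediatype.
        op.bulk_insert(table, [{'name': name} for name in names])

    def unseed_names(op, table, names):
        # Remove exactly the seeded rows; inline_literal embeds the value in
        # the emitted SQL, so the statement also works in offline --sql mode.
        for name in names:
            op.execute(table
                       .delete()
                       .where(table.c.name == op.inline_literal(name)))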
diff --git a/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py b/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py
new file mode 100644
index 000000000..15d1ac8b6
--- /dev/null
+++ b/data/migrations/versions/c059b952ed76_remove_unencrypted_fields_and_data.py
@@ -0,0 +1,82 @@
+"""Remove unencrypted fields and data
+
+Revision ID: c059b952ed76
+Revises: 703298a825c2
+Create Date: 2019-08-19 16:31:00.952773
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c059b952ed76'
+down_revision = '703298a825c2'
+
+import uuid
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+from data.database import FederatedLogin, User, RobotAccountToken
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('oauthaccesstoken_refresh_token', table_name='oauthaccesstoken')
+ op.drop_column(u'oauthaccesstoken', 'refresh_token')
+
+ op.drop_column('accesstoken', 'code')
+
+ op.drop_column('appspecificauthtoken', 'token_code')
+
+ op.drop_column('oauthaccesstoken', 'access_token')
+ op.drop_column('oauthapplication', 'client_secret')
+
+ op.drop_column('oauthauthorizationcode', 'code')
+
+ op.drop_column('repositorybuildtrigger', 'private_key')
+ op.drop_column('repositorybuildtrigger', 'auth_token')
+ # ### end Alembic commands ###
+
+ # Overwrite all plaintext robot credentials.
+ while True:
+ try:
+ robot_account_token = RobotAccountToken.get(fully_migrated=False)
+ robot_account = robot_account_token.robot_account
+
+ robot_account.email = str(uuid.uuid4())
+ robot_account.save()
+
+ federated_login = FederatedLogin.get(user=robot_account)
+ federated_login.service_ident = 'robot:%s' % robot_account.id
+ federated_login.save()
+
+ robot_account_token.fully_migrated = True
+ robot_account_token.save()
+ except RobotAccountToken.DoesNotExist:
+ break
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column(u'oauthaccesstoken', sa.Column('refresh_token', sa.String(length=255), nullable=True))
+ op.create_index('oauthaccesstoken_refresh_token', 'oauthaccesstoken', ['refresh_token'], unique=False)
+
+ op.add_column('repositorybuildtrigger', sa.Column('auth_token', sa.String(length=255), nullable=True))
+ op.add_column('repositorybuildtrigger', sa.Column('private_key', sa.Text(), nullable=True))
+
+ op.add_column('oauthauthorizationcode', sa.Column('code', sa.String(length=255), nullable=True))
+ op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=True)
+
+ op.add_column('oauthapplication', sa.Column('client_secret', sa.String(length=255), nullable=True))
+ op.add_column('oauthaccesstoken', sa.Column('access_token', sa.String(length=255), nullable=True))
+
+ op.create_index('oauthaccesstoken_access_token', 'oauthaccesstoken', ['access_token'], unique=False)
+
+ op.add_column('appspecificauthtoken', sa.Column('token_code', sa.String(length=255), nullable=True))
+ op.create_index('appspecificauthtoken_token_code', 'appspecificauthtoken', ['token_code'], unique=True)
+
+ op.add_column('accesstoken', sa.Column('code', sa.String(length=255), nullable=True))
+ op.create_index('accesstoken_code', 'accesstoken', ['code'], unique=True)
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py b/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py
new file mode 100644
index 000000000..15ecabd00
--- /dev/null
+++ b/data/migrations/versions/c13c8052f7a6_add_new_fields_and_tables_for_encrypted_.py
@@ -0,0 +1,104 @@
+"""Add new fields and tables for encrypted tokens
+
+Revision ID: c13c8052f7a6
+Revises: 5248ddf35167
+Create Date: 2019-08-19 15:59:36.269155
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c13c8052f7a6'
+down_revision = '5248ddf35167'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('robotaccounttoken',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('robot_account_id', sa.Integer(), nullable=False),
+ sa.Column('token', sa.String(length=255), nullable=False),
+ sa.Column('fully_migrated', sa.Boolean(), nullable=False, server_default='0'),
+ sa.ForeignKeyConstraint(['robot_account_id'], ['user.id'], name=op.f('fk_robotaccounttoken_robot_account_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_robotaccounttoken'))
+ )
+ op.create_index('robotaccounttoken_robot_account_id', 'robotaccounttoken', ['robot_account_id'], unique=True)
+
+ op.add_column(u'accesstoken', sa.Column('token_code', sa.String(length=255), nullable=True))
+ op.add_column(u'accesstoken', sa.Column('token_name', sa.String(length=255), nullable=True))
+ op.create_index('accesstoken_token_name', 'accesstoken', ['token_name'], unique=True)
+
+ op.add_column(u'appspecificauthtoken', sa.Column('token_name', sa.String(length=255), nullable=True))
+ op.add_column(u'appspecificauthtoken', sa.Column('token_secret', sa.String(length=255), nullable=True))
+ op.create_index('appspecificauthtoken_token_name', 'appspecificauthtoken', ['token_name'], unique=True)
+
+ op.add_column(u'emailconfirmation', sa.Column('verification_code', sa.String(length=255), nullable=True))
+
+ op.add_column(u'oauthaccesstoken', sa.Column('token_code', sa.String(length=255), nullable=True))
+ op.add_column(u'oauthaccesstoken', sa.Column('token_name', sa.String(length=255), nullable=True))
+ op.create_index('oauthaccesstoken_token_name', 'oauthaccesstoken', ['token_name'], unique=True)
+
+ op.add_column(u'oauthapplication', sa.Column('secure_client_secret', sa.String(length=255), nullable=True))
+ op.add_column(u'oauthapplication', sa.Column('fully_migrated', sa.Boolean(), server_default='0', nullable=False))
+
+ op.add_column(u'oauthauthorizationcode', sa.Column('code_credential', sa.String(length=255), nullable=True))
+ op.add_column(u'oauthauthorizationcode', sa.Column('code_name', sa.String(length=255), nullable=True))
+ op.create_index('oauthauthorizationcode_code_name', 'oauthauthorizationcode', ['code_name'], unique=True)
+ op.drop_index('oauthauthorizationcode_code', table_name='oauthauthorizationcode')
+ op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=True)
+
+ op.add_column(u'repositorybuildtrigger', sa.Column('secure_auth_token', sa.String(length=255), nullable=True))
+ op.add_column(u'repositorybuildtrigger', sa.Column('secure_private_key', sa.Text(), nullable=True))
+ op.add_column(u'repositorybuildtrigger', sa.Column('fully_migrated', sa.Boolean(), server_default='0', nullable=False))
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_table('robotaccounttoken', [
+ ('robot_account_id', tester.TestDataType.Foreign('user')),
+ ('token', tester.TestDataType.Token),
+ ('fully_migrated', tester.TestDataType.Boolean),
+ ])
+
+ tester.populate_column('accesstoken', 'code', tester.TestDataType.Token)
+
+ tester.populate_column('appspecificauthtoken', 'token_code', tester.TestDataType.Token)
+
+ tester.populate_column('emailconfirmation', 'verification_code', tester.TestDataType.Token)
+
+ tester.populate_column('oauthaccesstoken', 'token_code', tester.TestDataType.Token)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column(u'repositorybuildtrigger', 'secure_private_key')
+ op.drop_column(u'repositorybuildtrigger', 'secure_auth_token')
+
+ op.drop_index('oauthauthorizationcode_code', table_name='oauthauthorizationcode')
+ op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=False)
+ op.drop_index('oauthauthorizationcode_code_name', table_name='oauthauthorizationcode')
+ op.drop_column(u'oauthauthorizationcode', 'code_name')
+ op.drop_column(u'oauthauthorizationcode', 'code_credential')
+
+ op.drop_column(u'oauthapplication', 'secure_client_secret')
+
+ op.drop_index('oauthaccesstoken_token_name', table_name='oauthaccesstoken')
+ op.drop_column(u'oauthaccesstoken', 'token_name')
+ op.drop_column(u'oauthaccesstoken', 'token_code')
+
+ op.drop_column(u'emailconfirmation', 'verification_code')
+
+ op.drop_index('appspecificauthtoken_token_name', table_name='appspecificauthtoken')
+ op.drop_column(u'appspecificauthtoken', 'token_secret')
+ op.drop_column(u'appspecificauthtoken', 'token_name')
+
+ op.drop_index('accesstoken_token_name', table_name='accesstoken')
+ op.drop_column(u'accesstoken', 'token_name')
+ op.drop_column(u'accesstoken', 'token_code')
+
+ op.drop_table('robotaccounttoken')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py b/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py
new file mode 100644
index 000000000..3277f5ae6
--- /dev/null
+++ b/data/migrations/versions/c156deb8845d_reset_our_migrations_with_a_required_.py
@@ -0,0 +1,1254 @@
+"""Reset our migrations with a required update
+
+Revision ID: c156deb8845d
+Revises: None
+Create Date: 2016-11-08 11:58:11.110762
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c156deb8845d'
+down_revision = None
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from util.migrate import UTF8LongText, UTF8CharField
+from datetime import datetime
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
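+ # Render the migration's start time as a quoted SQL literal; it is reused
+ # below as the server default for blobupload.created.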
+ now = datetime.now().strftime("'%Y-%m-%d %H:%M:%S'")
+
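+ # Recreate the base schema from scratch: simple lookup tables first, then the
+ # entity tables that reference them via foreign keys.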
+ op.create_table('accesstokenkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_accesstokenkind'))
+ )
+ op.create_index('accesstokenkind_name', 'accesstokenkind', ['name'], unique=True)
+ op.create_table('buildtriggerservice',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_buildtriggerservice'))
+ )
+ op.create_index('buildtriggerservice_name', 'buildtriggerservice', ['name'], unique=True)
+ op.create_table('externalnotificationevent',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_externalnotificationevent'))
+ )
+ op.create_index('externalnotificationevent_name', 'externalnotificationevent', ['name'], unique=True)
+ op.create_table('externalnotificationmethod',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_externalnotificationmethod'))
+ )
+ op.create_index('externalnotificationmethod_name', 'externalnotificationmethod', ['name'], unique=True)
+ op.create_table('imagestorage',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('checksum', sa.String(length=255), nullable=True),
+ sa.Column('image_size', sa.BigInteger(), nullable=True),
+ sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
+ sa.Column('uploading', sa.Boolean(), nullable=True),
+ sa.Column('cas_path', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.Column('content_checksum', sa.String(length=255), nullable=True),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestorage'))
+ )
+ op.create_index('imagestorage_content_checksum', 'imagestorage', ['content_checksum'], unique=False)
+ op.create_index('imagestorage_uuid', 'imagestorage', ['uuid'], unique=True)
+ op.create_table('imagestoragelocation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragelocation'))
+ )
+ op.create_index('imagestoragelocation_name', 'imagestoragelocation', ['name'], unique=True)
+ op.create_table('imagestoragesignaturekind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignaturekind'))
+ )
+ op.create_index('imagestoragesignaturekind_name', 'imagestoragesignaturekind', ['name'], unique=True)
+ op.create_table('imagestoragetransformation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragetransformation'))
+ )
+ op.create_index('imagestoragetransformation_name', 'imagestoragetransformation', ['name'], unique=True)
+ op.create_table('labelsourcetype',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('mutable', sa.Boolean(), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_labelsourcetype'))
+ )
+ op.create_index('labelsourcetype_name', 'labelsourcetype', ['name'], unique=True)
+ op.create_table('logentrykind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_logentrykind'))
+ )
+ op.create_index('logentrykind_name', 'logentrykind', ['name'], unique=True)
+ op.create_table('loginservice',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_loginservice'))
+ )
+ op.create_index('loginservice_name', 'loginservice', ['name'], unique=True)
+ op.create_table('mediatype',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_mediatype'))
+ )
+ op.create_index('mediatype_name', 'mediatype', ['name'], unique=True)
+ op.create_table('messages',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('content', sa.Text(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=True),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_messages'))
+ )
+ op.create_table('notificationkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_notificationkind'))
+ )
+ op.create_index('notificationkind_name', 'notificationkind', ['name'], unique=True)
+ op.create_table('quayregion',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_quayregion'))
+ )
+ op.create_index('quayregion_name', 'quayregion', ['name'], unique=True)
+ op.create_table('quayservice',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_quayservice'))
+ )
+ op.create_index('quayservice_name', 'quayservice', ['name'], unique=True)
+ op.create_table('queueitem',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('queue_name', sa.String(length=1024), nullable=False),
+ sa.Column('body', sa.Text(), nullable=False),
+ sa.Column('available_after', sa.DateTime(), nullable=False),
+ sa.Column('available', sa.Boolean(), nullable=False),
+ sa.Column('processing_expires', sa.DateTime(), nullable=True),
+ sa.Column('retries_remaining', sa.Integer(), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_queueitem'))
+ )
+ op.create_index('queueitem_available', 'queueitem', ['available'], unique=False)
+ op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False)
+ op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False)
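+ # queue_name is a long VARCHAR, so the MySQL index is limited to a 767-byte
+ # prefix to fit within the engine's index key length limit.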
+ op.create_index('queueitem_queue_name', 'queueitem', ['queue_name'], unique=False, mysql_length=767)
+ op.create_index('queueitem_retries_remaining', 'queueitem', ['retries_remaining'], unique=False)
+ op.create_table('role',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_role'))
+ )
+ op.create_index('role_name', 'role', ['name'], unique=True)
+ op.create_table('servicekeyapproval',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('approver_id', sa.Integer(), nullable=True),
+ sa.Column('approval_type', sa.String(length=255), nullable=False),
+ sa.Column('approved_date', sa.DateTime(), nullable=False),
+ sa.Column('notes', UTF8LongText(), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_servicekeyapproval'))
+ )
+ op.create_index('servicekeyapproval_approval_type', 'servicekeyapproval', ['approval_type'], unique=False)
+ op.create_index('servicekeyapproval_approver_id', 'servicekeyapproval', ['approver_id'], unique=False)
+ op.create_table('teamrole',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_teamrole'))
+ )
+ op.create_index('teamrole_name', 'teamrole', ['name'], unique=False)
+ op.create_table('user',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=True),
+ sa.Column('username', sa.String(length=255), nullable=False),
+ sa.Column('password_hash', sa.String(length=255), nullable=True),
+ sa.Column('email', sa.String(length=255), nullable=False),
+ sa.Column('verified', sa.Boolean(), nullable=False),
+ sa.Column('stripe_id', sa.String(length=255), nullable=True),
+ sa.Column('organization', sa.Boolean(), nullable=False),
+ sa.Column('robot', sa.Boolean(), nullable=False),
+ sa.Column('invoice_email', sa.Boolean(), nullable=False),
+ sa.Column('invalid_login_attempts', sa.Integer(), nullable=False, server_default='0'),
+ sa.Column('last_invalid_login', sa.DateTime(), nullable=False),
+ sa.Column('removed_tag_expiration_s', sa.Integer(), nullable=False, server_default='1209600'),
+ sa.Column('enabled', sa.Boolean(), nullable=False, server_default=sa.sql.expression.true()),
+ sa.Column('invoice_email_address', sa.String(length=255), nullable=True),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_user'))
+ )
+ op.create_index('user_email', 'user', ['email'], unique=True)
+ op.create_index('user_invoice_email_address', 'user', ['invoice_email_address'], unique=False)
+ op.create_index('user_organization', 'user', ['organization'], unique=False)
+ op.create_index('user_robot', 'user', ['robot'], unique=False)
+ op.create_index('user_stripe_id', 'user', ['stripe_id'], unique=False)
+ op.create_index('user_username', 'user', ['username'], unique=True)
+ op.create_table('visibility',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_visibility'))
+ )
+ op.create_index('visibility_name', 'visibility', ['name'], unique=True)
+ op.create_table('emailconfirmation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('pw_reset', sa.Boolean(), nullable=False),
+ sa.Column('new_email', sa.String(length=255), nullable=True),
+ sa.Column('email_confirm', sa.Boolean(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_emailconfirmation_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_emailconfirmation'))
+ )
+ op.create_index('emailconfirmation_code', 'emailconfirmation', ['code'], unique=True)
+ op.create_index('emailconfirmation_user_id', 'emailconfirmation', ['user_id'], unique=False)
+ op.create_table('federatedlogin',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('service_id', sa.Integer(), nullable=False),
+ sa.Column('service_ident', sa.String(length=255), nullable=False),
+ sa.Column('metadata_json', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['service_id'], ['loginservice.id'], name=op.f('fk_federatedlogin_service_id_loginservice')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_federatedlogin_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_federatedlogin'))
+ )
+ op.create_index('federatedlogin_service_id', 'federatedlogin', ['service_id'], unique=False)
+ op.create_index('federatedlogin_service_id_service_ident', 'federatedlogin', ['service_id', 'service_ident'], unique=True)
+ op.create_index('federatedlogin_service_id_user_id', 'federatedlogin', ['service_id', 'user_id'], unique=True)
+ op.create_index('federatedlogin_user_id', 'federatedlogin', ['user_id'], unique=False)
+ op.create_table('imagestorageplacement',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('storage_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_imagestorageplacement_location_id_imagestoragelocation')),
+ sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_imagestorageplacement_storage_id_imagestorage')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestorageplacement'))
+ )
+ op.create_index('imagestorageplacement_location_id', 'imagestorageplacement', ['location_id'], unique=False)
+ op.create_index('imagestorageplacement_storage_id', 'imagestorageplacement', ['storage_id'], unique=False)
+ op.create_index('imagestorageplacement_storage_id_location_id', 'imagestorageplacement', ['storage_id', 'location_id'], unique=True)
+ op.create_table('imagestoragesignature',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('storage_id', sa.Integer(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('signature', sa.Text(), nullable=True),
+ sa.Column('uploading', sa.Boolean(), nullable=True),
+ sa.ForeignKeyConstraint(['kind_id'], ['imagestoragesignaturekind.id'], name=op.f('fk_imagestoragesignature_kind_id_imagestoragesignaturekind')),
+ sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_imagestoragesignature_storage_id_imagestorage')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_imagestoragesignature'))
+ )
+ op.create_index('imagestoragesignature_kind_id', 'imagestoragesignature', ['kind_id'], unique=False)
+ op.create_index('imagestoragesignature_kind_id_storage_id', 'imagestoragesignature', ['kind_id', 'storage_id'], unique=True)
+ op.create_index('imagestoragesignature_storage_id', 'imagestoragesignature', ['storage_id'], unique=False)
+ op.create_table('label',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('key', UTF8CharField(length=255), nullable=False),
+ sa.Column('value', UTF8LongText(), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('source_type_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_label_media_type_id_mediatype')),
+ sa.ForeignKeyConstraint(['source_type_id'], ['labelsourcetype.id'], name=op.f('fk_label_source_type_id_labelsourcetype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_label'))
+ )
+ op.create_index('label_key', 'label', ['key'], unique=False)
+ op.create_index('label_media_type_id', 'label', ['media_type_id'], unique=False)
+ op.create_index('label_source_type_id', 'label', ['source_type_id'], unique=False)
+ op.create_index('label_uuid', 'label', ['uuid'], unique=True)
+ op.create_table('logentry',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('account_id', sa.Integer(), nullable=False),
+ sa.Column('performer_id', sa.Integer(), nullable=True),
+ sa.Column('repository_id', sa.Integer(), nullable=True),
+ sa.Column('datetime', sa.DateTime(), nullable=False),
+ sa.Column('ip', sa.String(length=255), nullable=True),
+ sa.Column('metadata_json', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['kind_id'], ['logentrykind.id'], name=op.f('fk_logentry_kind_id_logentrykind')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_logentry'))
+ )
+ op.create_index('logentry_account_id', 'logentry', ['account_id'], unique=False)
+ op.create_index('logentry_account_id_datetime', 'logentry', ['account_id', 'datetime'], unique=False)
+ op.create_index('logentry_datetime', 'logentry', ['datetime'], unique=False)
+ op.create_index('logentry_kind_id', 'logentry', ['kind_id'], unique=False)
+ op.create_index('logentry_performer_id', 'logentry', ['performer_id'], unique=False)
+ op.create_index('logentry_performer_id_datetime', 'logentry', ['performer_id', 'datetime'], unique=False)
+ op.create_index('logentry_repository_id', 'logentry', ['repository_id'], unique=False)
+ op.create_index('logentry_repository_id_datetime', 'logentry', ['repository_id', 'datetime'], unique=False)
+ op.create_index('logentry_repository_id_datetime_kind_id', 'logentry', ['repository_id', 'datetime', 'kind_id'], unique=False)
+ op.create_table('notification',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=False),
+ sa.Column('target_id', sa.Integer(), nullable=False),
+ sa.Column('metadata_json', sa.Text(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.Column('dismissed', sa.Boolean(), nullable=False),
+ sa.Column('lookup_path', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['kind_id'], ['notificationkind.id'], name=op.f('fk_notification_kind_id_notificationkind')),
+ sa.ForeignKeyConstraint(['target_id'], ['user.id'], name=op.f('fk_notification_target_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_notification'))
+ )
+ op.create_index('notification_created', 'notification', ['created'], unique=False)
+ op.create_index('notification_kind_id', 'notification', ['kind_id'], unique=False)
+ op.create_index('notification_lookup_path', 'notification', ['lookup_path'], unique=False)
+ op.create_index('notification_target_id', 'notification', ['target_id'], unique=False)
+ op.create_index('notification_uuid', 'notification', ['uuid'], unique=False)
+ op.create_table('oauthapplication',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('client_id', sa.String(length=255), nullable=False),
+ sa.Column('client_secret', sa.String(length=255), nullable=False),
+ sa.Column('redirect_uri', sa.String(length=255), nullable=False),
+ sa.Column('application_uri', sa.String(length=255), nullable=False),
+ sa.Column('organization_id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('description', sa.Text(), nullable=False),
+ sa.Column('gravatar_email', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['organization_id'], ['user.id'], name=op.f('fk_oauthapplication_organization_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_oauthapplication'))
+ )
+ op.create_index('oauthapplication_client_id', 'oauthapplication', ['client_id'], unique=False)
+ op.create_index('oauthapplication_organization_id', 'oauthapplication', ['organization_id'], unique=False)
+ op.create_table('quayrelease',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('service_id', sa.Integer(), nullable=False),
+ sa.Column('version', sa.String(length=255), nullable=False),
+ sa.Column('region_id', sa.Integer(), nullable=False),
+ sa.Column('reverted', sa.Boolean(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['region_id'], ['quayregion.id'], name=op.f('fk_quayrelease_region_id_quayregion')),
+ sa.ForeignKeyConstraint(['service_id'], ['quayservice.id'], name=op.f('fk_quayrelease_service_id_quayservice')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_quayrelease'))
+ )
+ op.create_index('quayrelease_created', 'quayrelease', ['created'], unique=False)
+ op.create_index('quayrelease_region_id', 'quayrelease', ['region_id'], unique=False)
+ op.create_index('quayrelease_service_id', 'quayrelease', ['service_id'], unique=False)
+ op.create_index('quayrelease_service_id_region_id_created', 'quayrelease', ['service_id', 'region_id', 'created'], unique=False)
+ op.create_index('quayrelease_service_id_version_region_id', 'quayrelease', ['service_id', 'version', 'region_id'], unique=True)
+ op.create_table('repository',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('namespace_user_id', sa.Integer(), nullable=True),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('visibility_id', sa.Integer(), nullable=False),
+ sa.Column('description', sa.Text(), nullable=True),
+ sa.Column('badge_token', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['namespace_user_id'], ['user.id'], name=op.f('fk_repository_namespace_user_id_user')),
+ sa.ForeignKeyConstraint(['visibility_id'], ['visibility.id'], name=op.f('fk_repository_visibility_id_visibility')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repository'))
+ )
+ op.create_index('repository_namespace_user_id', 'repository', ['namespace_user_id'], unique=False)
+ op.create_index('repository_namespace_user_id_name', 'repository', ['namespace_user_id', 'name'], unique=True)
+ op.create_index('repository_visibility_id', 'repository', ['visibility_id'], unique=False)
+ op.create_table('servicekey',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('kid', sa.String(length=255), nullable=False),
+ sa.Column('service', sa.String(length=255), nullable=False),
+ sa.Column('jwk', UTF8LongText(), nullable=False),
+ sa.Column('metadata', UTF8LongText(), nullable=False),
+ sa.Column('created_date', sa.DateTime(), nullable=False),
+ sa.Column('expiration_date', sa.DateTime(), nullable=True),
+ sa.Column('rotation_duration', sa.Integer(), nullable=True),
+ sa.Column('approval_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['approval_id'], ['servicekeyapproval.id'], name=op.f('fk_servicekey_approval_id_servicekeyapproval')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_servicekey'))
+ )
+ op.create_index('servicekey_approval_id', 'servicekey', ['approval_id'], unique=False)
+ op.create_index('servicekey_kid', 'servicekey', ['kid'], unique=True)
+ op.create_index('servicekey_service', 'servicekey', ['service'], unique=False)
+ op.create_table('team',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('organization_id', sa.Integer(), nullable=False),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.Column('description', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['organization_id'], ['user.id'], name=op.f('fk_team_organization_id_user')),
+ sa.ForeignKeyConstraint(['role_id'], ['teamrole.id'], name=op.f('fk_team_role_id_teamrole')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_team'))
+ )
+ op.create_index('team_name', 'team', ['name'], unique=False)
+ op.create_index('team_name_organization_id', 'team', ['name', 'organization_id'], unique=True)
+ op.create_index('team_organization_id', 'team', ['organization_id'], unique=False)
+ op.create_index('team_role_id', 'team', ['role_id'], unique=False)
+ op.create_table('torrentinfo',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('storage_id', sa.Integer(), nullable=False),
+ sa.Column('piece_length', sa.Integer(), nullable=False),
+ sa.Column('pieces', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_torrentinfo_storage_id_imagestorage')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_torrentinfo'))
+ )
+ op.create_index('torrentinfo_storage_id', 'torrentinfo', ['storage_id'], unique=False)
+ op.create_index('torrentinfo_storage_id_piece_length', 'torrentinfo', ['storage_id', 'piece_length'], unique=True)
+ op.create_table('userregion',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_userregion_location_id_imagestoragelocation')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_userregion_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_userregion'))
+ )
+ op.create_index('userregion_location_id', 'userregion', ['location_id'], unique=False)
+ op.create_index('userregion_user_id', 'userregion', ['user_id'], unique=False)
+ op.create_table('accesstoken',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('friendly_name', sa.String(length=255), nullable=True),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.Column('temporary', sa.Boolean(), nullable=False),
+ sa.Column('kind_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['kind_id'], ['accesstokenkind.id'], name=op.f('fk_accesstoken_kind_id_accesstokenkind')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_accesstoken_repository_id_repository')),
+ sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_accesstoken_role_id_role')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_accesstoken'))
+ )
+ op.create_index('accesstoken_code', 'accesstoken', ['code'], unique=True)
+ op.create_index('accesstoken_kind_id', 'accesstoken', ['kind_id'], unique=False)
+ op.create_index('accesstoken_repository_id', 'accesstoken', ['repository_id'], unique=False)
+ op.create_index('accesstoken_role_id', 'accesstoken', ['role_id'], unique=False)
+ op.create_table('blobupload',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('byte_count', sa.Integer(), nullable=False),
+ sa.Column('sha_state', sa.Text(), nullable=True),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.Column('storage_metadata', sa.Text(), nullable=True),
+ sa.Column('chunk_count', sa.Integer(), nullable=False, server_default='0'),
+ sa.Column('uncompressed_byte_count', sa.Integer(), nullable=True),
+ sa.Column('created', sa.DateTime(), nullable=False, server_default=sa.text(now)),
+ sa.Column('piece_sha_state', UTF8LongText(), nullable=True),
+ sa.Column('piece_hashes', UTF8LongText(), nullable=True),
+ sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_blobupload_location_id_imagestoragelocation')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobupload_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobupload'))
+ )
+ op.create_index('blobupload_created', 'blobupload', ['created'], unique=False)
+ op.create_index('blobupload_location_id', 'blobupload', ['location_id'], unique=False)
+ op.create_index('blobupload_repository_id', 'blobupload', ['repository_id'], unique=False)
+ op.create_index('blobupload_repository_id_uuid', 'blobupload', ['repository_id', 'uuid'], unique=True)
+ op.create_index('blobupload_uuid', 'blobupload', ['uuid'], unique=True)
+ op.create_table('image',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('docker_image_id', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('ancestors', sa.String(length=60535), nullable=True),
+ sa.Column('storage_id', sa.Integer(), nullable=True),
+ sa.Column('created', sa.DateTime(), nullable=True),
+ sa.Column('comment', UTF8LongText(), nullable=True),
+ sa.Column('command', sa.Text(), nullable=True),
+ sa.Column('aggregate_size', sa.BigInteger(), nullable=True),
+ sa.Column('v1_json_metadata', UTF8LongText(), nullable=True),
+ sa.Column('v1_checksum', sa.String(length=255), nullable=True),
+ sa.Column('security_indexed', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.Column('security_indexed_engine', sa.Integer(), nullable=False, server_default='-1'),
+ sa.Column('parent_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_image_repository_id_repository')),
+ sa.ForeignKeyConstraint(['storage_id'], ['imagestorage.id'], name=op.f('fk_image_storage_id_imagestorage')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_image'))
+ )
+ op.create_index('image_ancestors', 'image', ['ancestors'], unique=False, mysql_length=767)
+ op.create_index('image_docker_image_id', 'image', ['docker_image_id'], unique=False)
+ op.create_index('image_parent_id', 'image', ['parent_id'], unique=False)
+ op.create_index('image_repository_id', 'image', ['repository_id'], unique=False)
+ op.create_index('image_repository_id_docker_image_id', 'image', ['repository_id', 'docker_image_id'], unique=True)
+ op.create_index('image_security_indexed', 'image', ['security_indexed'], unique=False)
+ op.create_index('image_security_indexed_engine', 'image', ['security_indexed_engine'], unique=False)
+ op.create_index('image_security_indexed_engine_security_indexed', 'image', ['security_indexed_engine', 'security_indexed'], unique=False)
+ op.create_index('image_storage_id', 'image', ['storage_id'], unique=False)
+ op.create_table('oauthaccesstoken',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('application_id', sa.Integer(), nullable=False),
+ sa.Column('authorized_user_id', sa.Integer(), nullable=False),
+ sa.Column('scope', sa.String(length=255), nullable=False),
+ sa.Column('access_token', sa.String(length=255), nullable=False),
+ sa.Column('token_type', sa.String(length=255), nullable=False),
+ sa.Column('expires_at', sa.DateTime(), nullable=False),
+ sa.Column('refresh_token', sa.String(length=255), nullable=True),
+ sa.Column('data', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['application_id'], ['oauthapplication.id'], name=op.f('fk_oauthaccesstoken_application_id_oauthapplication')),
+ sa.ForeignKeyConstraint(['authorized_user_id'], ['user.id'], name=op.f('fk_oauthaccesstoken_authorized_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_oauthaccesstoken'))
+ )
+ op.create_index('oauthaccesstoken_access_token', 'oauthaccesstoken', ['access_token'], unique=False)
+ op.create_index('oauthaccesstoken_application_id', 'oauthaccesstoken', ['application_id'], unique=False)
+ op.create_index('oauthaccesstoken_authorized_user_id', 'oauthaccesstoken', ['authorized_user_id'], unique=False)
+ op.create_index('oauthaccesstoken_refresh_token', 'oauthaccesstoken', ['refresh_token'], unique=False)
+ op.create_index('oauthaccesstoken_uuid', 'oauthaccesstoken', ['uuid'], unique=False)
+ op.create_table('oauthauthorizationcode',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('application_id', sa.Integer(), nullable=False),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('scope', sa.String(length=255), nullable=False),
+ sa.Column('data', sa.Text(), nullable=False),
+ sa.ForeignKeyConstraint(['application_id'], ['oauthapplication.id'], name=op.f('fk_oauthauthorizationcode_application_id_oauthapplication')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_oauthauthorizationcode'))
+ )
+ op.create_index('oauthauthorizationcode_application_id', 'oauthauthorizationcode', ['application_id'], unique=False)
+ op.create_index('oauthauthorizationcode_code', 'oauthauthorizationcode', ['code'], unique=False)
+ op.create_table('permissionprototype',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('org_id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('activating_user_id', sa.Integer(), nullable=True),
+ sa.Column('delegate_user_id', sa.Integer(), nullable=True),
+ sa.Column('delegate_team_id', sa.Integer(), nullable=True),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['activating_user_id'], ['user.id'], name=op.f('fk_permissionprototype_activating_user_id_user')),
+ sa.ForeignKeyConstraint(['delegate_team_id'], ['team.id'], name=op.f('fk_permissionprototype_delegate_team_id_team')),
+ sa.ForeignKeyConstraint(['delegate_user_id'], ['user.id'], name=op.f('fk_permissionprototype_delegate_user_id_user')),
+ sa.ForeignKeyConstraint(['org_id'], ['user.id'], name=op.f('fk_permissionprototype_org_id_user')),
+ sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_permissionprototype_role_id_role')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_permissionprototype'))
+ )
+ op.create_index('permissionprototype_activating_user_id', 'permissionprototype', ['activating_user_id'], unique=False)
+ op.create_index('permissionprototype_delegate_team_id', 'permissionprototype', ['delegate_team_id'], unique=False)
+ op.create_index('permissionprototype_delegate_user_id', 'permissionprototype', ['delegate_user_id'], unique=False)
+ op.create_index('permissionprototype_org_id', 'permissionprototype', ['org_id'], unique=False)
+ op.create_index('permissionprototype_org_id_activating_user_id', 'permissionprototype', ['org_id', 'activating_user_id'], unique=False)
+ op.create_index('permissionprototype_role_id', 'permissionprototype', ['role_id'], unique=False)
+ op.create_table('repositoryactioncount',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('count', sa.Integer(), nullable=False),
+ sa.Column('date', sa.Date(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositoryactioncount_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositoryactioncount'))
+ )
+ op.create_index('repositoryactioncount_date', 'repositoryactioncount', ['date'], unique=False)
+ op.create_index('repositoryactioncount_repository_id', 'repositoryactioncount', ['repository_id'], unique=False)
+ op.create_index('repositoryactioncount_repository_id_date', 'repositoryactioncount', ['repository_id', 'date'], unique=True)
+ op.create_table('repositoryauthorizedemail',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('email', sa.String(length=255), nullable=False),
+ sa.Column('code', sa.String(length=255), nullable=False),
+ sa.Column('confirmed', sa.Boolean(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositoryauthorizedemail_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositoryauthorizedemail'))
+ )
+ op.create_index('repositoryauthorizedemail_code', 'repositoryauthorizedemail', ['code'], unique=True)
+ op.create_index('repositoryauthorizedemail_email_repository_id', 'repositoryauthorizedemail', ['email', 'repository_id'], unique=True)
+ op.create_index('repositoryauthorizedemail_repository_id', 'repositoryauthorizedemail', ['repository_id'], unique=False)
+ op.create_table('repositorynotification',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('event_id', sa.Integer(), nullable=False),
+ sa.Column('method_id', sa.Integer(), nullable=False),
+ sa.Column('title', sa.String(length=255), nullable=True),
+ sa.Column('config_json', sa.Text(), nullable=False),
+ sa.Column('event_config_json', UTF8LongText(), nullable=False),
+ sa.ForeignKeyConstraint(['event_id'], ['externalnotificationevent.id'], name=op.f('fk_repositorynotification_event_id_externalnotificationevent')),
+ sa.ForeignKeyConstraint(['method_id'], ['externalnotificationmethod.id'], name=op.f('fk_repositorynotification_method_id_externalnotificationmethod')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorynotification_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorynotification'))
+ )
+ op.create_index('repositorynotification_event_id', 'repositorynotification', ['event_id'], unique=False)
+ op.create_index('repositorynotification_method_id', 'repositorynotification', ['method_id'], unique=False)
+ op.create_index('repositorynotification_repository_id', 'repositorynotification', ['repository_id'], unique=False)
+ op.create_index('repositorynotification_uuid', 'repositorynotification', ['uuid'], unique=False)
+ op.create_table('repositorypermission',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('team_id', sa.Integer(), nullable=True),
+ sa.Column('user_id', sa.Integer(), nullable=True),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('role_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorypermission_repository_id_repository')),
+ sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_repositorypermission_role_id_role')),
+ sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_repositorypermission_team_id_team')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_repositorypermission_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorypermission'))
+ )
+ op.create_index('repositorypermission_repository_id', 'repositorypermission', ['repository_id'], unique=False)
+ op.create_index('repositorypermission_role_id', 'repositorypermission', ['role_id'], unique=False)
+ op.create_index('repositorypermission_team_id', 'repositorypermission', ['team_id'], unique=False)
+ op.create_index('repositorypermission_team_id_repository_id', 'repositorypermission', ['team_id', 'repository_id'], unique=True)
+ op.create_index('repositorypermission_user_id', 'repositorypermission', ['user_id'], unique=False)
+ op.create_index('repositorypermission_user_id_repository_id', 'repositorypermission', ['user_id', 'repository_id'], unique=True)
+ op.create_table('star',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('created', sa.DateTime(), nullable=False),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_star_repository_id_repository')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_star_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_star'))
+ )
+ op.create_index('star_repository_id', 'star', ['repository_id'], unique=False)
+ op.create_index('star_user_id', 'star', ['user_id'], unique=False)
+ op.create_index('star_user_id_repository_id', 'star', ['user_id', 'repository_id'], unique=True)
+ op.create_table('teammember',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('team_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_teammember_team_id_team')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_teammember_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_teammember'))
+ )
+ op.create_index('teammember_team_id', 'teammember', ['team_id'], unique=False)
+ op.create_index('teammember_user_id', 'teammember', ['user_id'], unique=False)
+ op.create_index('teammember_user_id_team_id', 'teammember', ['user_id', 'team_id'], unique=True)
+ op.create_table('teammemberinvite',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=True),
+ sa.Column('email', sa.String(length=255), nullable=True),
+ sa.Column('team_id', sa.Integer(), nullable=False),
+ sa.Column('inviter_id', sa.Integer(), nullable=False),
+ sa.Column('invite_token', sa.String(length=255), nullable=False),
+ sa.ForeignKeyConstraint(['inviter_id'], ['user.id'], name=op.f('fk_teammemberinvite_inviter_id_user')),
+ sa.ForeignKeyConstraint(['team_id'], ['team.id'], name=op.f('fk_teammemberinvite_team_id_team')),
+ sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_teammemberinvite_user_id_user')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_teammemberinvite'))
+ )
+ op.create_index('teammemberinvite_inviter_id', 'teammemberinvite', ['inviter_id'], unique=False)
+ op.create_index('teammemberinvite_team_id', 'teammemberinvite', ['team_id'], unique=False)
+ op.create_index('teammemberinvite_user_id', 'teammemberinvite', ['user_id'], unique=False)
+ op.create_table('derivedstorageforimage',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('source_image_id', sa.Integer(), nullable=False),
+ sa.Column('derivative_id', sa.Integer(), nullable=False),
+ sa.Column('transformation_id', sa.Integer(), nullable=False),
+ sa.Column('uniqueness_hash', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['derivative_id'], ['imagestorage.id'], name=op.f('fk_derivedstorageforimage_derivative_id_imagestorage')),
+ sa.ForeignKeyConstraint(['source_image_id'], ['image.id'], name=op.f('fk_derivedstorageforimage_source_image_id_image')),
+ sa.ForeignKeyConstraint(['transformation_id'], ['imagestoragetransformation.id'], name=op.f('fk_derivedstorageforimage_transformation_constraint')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_derivedstorageforimage'))
+ )
+ op.create_index('derivedstorageforimage_derivative_id', 'derivedstorageforimage', ['derivative_id'], unique=False)
+ op.create_index('derivedstorageforimage_source_image_id', 'derivedstorageforimage', ['source_image_id'], unique=False)
+ op.create_index('uniqueness_index', 'derivedstorageforimage', ['source_image_id', 'transformation_id', 'uniqueness_hash'], unique=True)
+ op.create_index('derivedstorageforimage_transformation_id', 'derivedstorageforimage', ['transformation_id'], unique=False)
+ op.create_table('repositorybuildtrigger',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('service_id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('connected_user_id', sa.Integer(), nullable=False),
+ sa.Column('auth_token', sa.String(length=255), nullable=True),
+ sa.Column('private_key', sa.Text(), nullable=True),
+ sa.Column('config', sa.Text(), nullable=False),
+ sa.Column('write_token_id', sa.Integer(), nullable=True),
+ sa.Column('pull_robot_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['connected_user_id'], ['user.id'], name=op.f('fk_repositorybuildtrigger_connected_user_id_user')),
+ sa.ForeignKeyConstraint(['pull_robot_id'], ['user.id'], name=op.f('fk_repositorybuildtrigger_pull_robot_id_user')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorybuildtrigger_repository_id_repository')),
+ sa.ForeignKeyConstraint(['service_id'], ['buildtriggerservice.id'], name=op.f('fk_repositorybuildtrigger_service_id_buildtriggerservice')),
+ sa.ForeignKeyConstraint(['write_token_id'], ['accesstoken.id'], name=op.f('fk_repositorybuildtrigger_write_token_id_accesstoken')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorybuildtrigger'))
+ )
+ op.create_index('repositorybuildtrigger_connected_user_id', 'repositorybuildtrigger', ['connected_user_id'], unique=False)
+ op.create_index('repositorybuildtrigger_pull_robot_id', 'repositorybuildtrigger', ['pull_robot_id'], unique=False)
+ op.create_index('repositorybuildtrigger_repository_id', 'repositorybuildtrigger', ['repository_id'], unique=False)
+ op.create_index('repositorybuildtrigger_service_id', 'repositorybuildtrigger', ['service_id'], unique=False)
+ op.create_index('repositorybuildtrigger_write_token_id', 'repositorybuildtrigger', ['write_token_id'], unique=False)
+ op.create_table('repositorytag',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.Column('image_id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('lifetime_start_ts', sa.Integer(), nullable=False, server_default='0'),
+ sa.Column('lifetime_end_ts', sa.Integer(), nullable=True),
+ sa.Column('hidden', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.Column('reversion', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.ForeignKeyConstraint(['image_id'], ['image.id'], name=op.f('fk_repositorytag_image_id_image')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorytag_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorytag'))
+ )
+ op.create_index('repositorytag_image_id', 'repositorytag', ['image_id'], unique=False)
+ op.create_index('repositorytag_lifetime_end_ts', 'repositorytag', ['lifetime_end_ts'], unique=False)
+ op.create_index('repositorytag_repository_id', 'repositorytag', ['repository_id'], unique=False)
+ op.create_index('repositorytag_repository_id_name', 'repositorytag', ['repository_id', 'name'], unique=False)
+ op.create_index('repositorytag_repository_id_name_lifetime_end_ts', 'repositorytag', ['repository_id', 'name', 'lifetime_end_ts'], unique=True)
+ op.create_table('repositorybuild',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=255), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('access_token_id', sa.Integer(), nullable=False),
+ sa.Column('resource_key', sa.String(length=255), nullable=True),
+ sa.Column('job_config', sa.Text(), nullable=False),
+ sa.Column('phase', sa.String(length=255), nullable=False),
+ sa.Column('started', sa.DateTime(), nullable=False),
+ sa.Column('display_name', sa.String(length=255), nullable=False),
+ sa.Column('trigger_id', sa.Integer(), nullable=True),
+ sa.Column('pull_robot_id', sa.Integer(), nullable=True),
+ sa.Column('logs_archived', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()),
+ sa.Column('queue_id', sa.String(length=255), nullable=True),
+ sa.ForeignKeyConstraint(['access_token_id'], ['accesstoken.id'], name=op.f('fk_repositorybuild_access_token_id_accesstoken')),
+ sa.ForeignKeyConstraint(['pull_robot_id'], ['user.id'], name=op.f('fk_repositorybuild_pull_robot_id_user')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorybuild_repository_id_repository')),
+ sa.ForeignKeyConstraint(['trigger_id'], ['repositorybuildtrigger.id'], name=op.f('fk_repositorybuild_trigger_id_repositorybuildtrigger')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorybuild'))
+ )
+ op.create_index('repositorybuild_access_token_id', 'repositorybuild', ['access_token_id'], unique=False)
+ op.create_index('repositorybuild_pull_robot_id', 'repositorybuild', ['pull_robot_id'], unique=False)
+ op.create_index('repositorybuild_queue_id', 'repositorybuild', ['queue_id'], unique=False)
+ op.create_index('repositorybuild_repository_id', 'repositorybuild', ['repository_id'], unique=False)
+ op.create_index('repositorybuild_repository_id_started_phase', 'repositorybuild', ['repository_id', 'started', 'phase'], unique=False)
+ op.create_index('repositorybuild_resource_key', 'repositorybuild', ['resource_key'], unique=False)
+ op.create_index('repositorybuild_started', 'repositorybuild', ['started'], unique=False)
+ op.create_index('repositorybuild_started_logs_archived_phase', 'repositorybuild', ['started', 'logs_archived', 'phase'], unique=False)
+ op.create_index('repositorybuild_trigger_id', 'repositorybuild', ['trigger_id'], unique=False)
+ op.create_index('repositorybuild_uuid', 'repositorybuild', ['uuid'], unique=False)
+ op.create_table('tagmanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('tag_id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('json_data', UTF8LongText(), nullable=False),
+ sa.ForeignKeyConstraint(['tag_id'], ['repositorytag.id'], name=op.f('fk_tagmanifest_tag_id_repositorytag')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifest'))
+ )
+ op.create_index('tagmanifest_digest', 'tagmanifest', ['digest'], unique=False)
+ op.create_index('tagmanifest_tag_id', 'tagmanifest', ['tag_id'], unique=True)
+ op.create_table('tagmanifestlabel',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('annotated_id', sa.Integer(), nullable=False),
+ sa.Column('label_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['annotated_id'], ['tagmanifest.id'], name=op.f('fk_tagmanifestlabel_annotated_id_tagmanifest')),
+ sa.ForeignKeyConstraint(['label_id'], ['label.id'], name=op.f('fk_tagmanifestlabel_label_id_label')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tagmanifestlabel_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagmanifestlabel'))
+ )
+ op.create_index('tagmanifestlabel_annotated_id', 'tagmanifestlabel', ['annotated_id'], unique=False)
+ op.create_index('tagmanifestlabel_annotated_id_label_id', 'tagmanifestlabel', ['annotated_id', 'label_id'], unique=True)
+ op.create_index('tagmanifestlabel_label_id', 'tagmanifestlabel', ['label_id'], unique=False)
+ op.create_index('tagmanifestlabel_repository_id', 'tagmanifestlabel', ['repository_id'], unique=False)
+
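+ # Seed the lookup tables with their initial named values.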
+ op.bulk_insert(tables.accesstokenkind,
+ [
+ {'name':'build-worker'},
+ {'name':'pushpull-token'},
+ ])
+
+ op.bulk_insert(tables.buildtriggerservice,
+ [
+ {'name':'github'},
+ {'name':'gitlab'},
+ {'name':'bitbucket'},
+ {'name':'custom-git'},
+ ])
+
+ op.bulk_insert(tables.externalnotificationevent,
+ [
+ {'name':'build_failure'},
+ {'name':'build_queued'},
+ {'name':'build_start'},
+ {'name':'build_success'},
+ {'name':'repo_push'},
+ {'name':'vulnerability_found'},
+ ])
+
+ op.bulk_insert(tables.externalnotificationmethod,
+ [
+ {'name':'email'},
+ {'name':'flowdock'},
+ {'name':'hipchat'},
+ {'name':'quay_notification'},
+ {'name':'slack'},
+ {'name':'webhook'},
+ ])
+
+ op.bulk_insert(tables.imagestoragelocation,
+ [
+ {'name':'s3_us_east_1'},
+ {'name':'s3_eu_west_1'},
+ {'name':'s3_ap_southeast_1'},
+ {'name':'s3_ap_southeast_2'},
+ {'name':'s3_ap_northeast_1'},
+ {'name':'s3_sa_east_1'},
+ {'name':'local'},
+ {'name':'s3_us_west_1'},
+ ])
+
+ op.bulk_insert(tables.imagestoragesignaturekind,
+ [
+ {'name':'gpg2'},
+ ])
+
+ op.bulk_insert(tables.imagestoragetransformation,
+ [
+ {'name':'squash'},
+ {'name':'aci'},
+ ])
+
+ op.bulk_insert(tables.labelsourcetype,
+ [
+ {'name':'manifest', 'mutable': False},
+ {'name':'api', 'mutable': True},
+ {'name':'internal', 'mutable': False},
+ ])
+
+ op.bulk_insert(tables.logentrykind,
+ [
+ {'name':'account_change_cc'},
+ {'name':'account_change_password'},
+ {'name':'account_change_plan'},
+ {'name':'account_convert'},
+ {'name':'add_repo_accesstoken'},
+ {'name':'add_repo_notification'},
+ {'name':'add_repo_permission'},
+ {'name':'add_repo_webhook'},
+ {'name':'build_dockerfile'},
+ {'name':'change_repo_permission'},
+ {'name':'change_repo_visibility'},
+ {'name':'create_application'},
+ {'name':'create_prototype_permission'},
+ {'name':'create_repo'},
+ {'name':'create_robot'},
+ {'name':'create_tag'},
+ {'name':'delete_application'},
+ {'name':'delete_prototype_permission'},
+ {'name':'delete_repo'},
+ {'name':'delete_repo_accesstoken'},
+ {'name':'delete_repo_notification'},
+ {'name':'delete_repo_permission'},
+ {'name':'delete_repo_trigger'},
+ {'name':'delete_repo_webhook'},
+ {'name':'delete_robot'},
+ {'name':'delete_tag'},
+ {'name':'manifest_label_add'},
+ {'name':'manifest_label_delete'},
+ {'name':'modify_prototype_permission'},
+ {'name':'move_tag'},
+ {'name':'org_add_team_member'},
+ {'name':'org_create_team'},
+ {'name':'org_delete_team'},
+ {'name':'org_delete_team_member_invite'},
+ {'name':'org_invite_team_member'},
+ {'name':'org_remove_team_member'},
+ {'name':'org_set_team_description'},
+ {'name':'org_set_team_role'},
+ {'name':'org_team_member_invite_accepted'},
+ {'name':'org_team_member_invite_declined'},
+ {'name':'pull_repo'},
+ {'name':'push_repo'},
+ {'name':'regenerate_robot_token'},
+ {'name':'repo_verb'},
+ {'name':'reset_application_client_secret'},
+ {'name':'revert_tag'},
+ {'name':'service_key_approve'},
+ {'name':'service_key_create'},
+ {'name':'service_key_delete'},
+ {'name':'service_key_extend'},
+ {'name':'service_key_modify'},
+ {'name':'service_key_rotate'},
+ {'name':'setup_repo_trigger'},
+ {'name':'set_repo_description'},
+ {'name':'take_ownership'},
+ {'name':'update_application'},
+ ])
+
+ op.bulk_insert(tables.loginservice,
+ [
+ {'name':'github'},
+ {'name':'quayrobot'},
+ {'name':'ldap'},
+ {'name':'google'},
+ {'name':'keystone'},
+ {'name':'dex'},
+ {'name':'jwtauthn'},
+ ])
+
+ op.bulk_insert(tables.mediatype,
+ [
+ {'name':'text/plain'},
+ {'name':'application/json'},
+ ])
+
+ op.bulk_insert(tables.notificationkind,
+ [
+ {'name':'build_failure'},
+ {'name':'build_queued'},
+ {'name':'build_start'},
+ {'name':'build_success'},
+ {'name':'expiring_license'},
+ {'name':'maintenance'},
+ {'name':'org_team_invite'},
+ {'name':'over_private_usage'},
+ {'name':'password_required'},
+ {'name':'repo_push'},
+ {'name':'service_key_submitted'},
+ {'name':'vulnerability_found'},
+ ])
+
+ op.bulk_insert(tables.role,
+ [
+ {'name':'admin'},
+ {'name':'write'},
+ {'name':'read'},
+ ])
+
+ op.bulk_insert(tables.teamrole,
+ [
+ {'name':'admin'},
+ {'name':'creator'},
+ {'name':'member'},
+ ])
+
+ op.bulk_insert(tables.visibility,
+ [
+ {'name':'public'},
+ {'name':'private'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_table('user', [
+ ('uuid', tester.TestDataType.UUID),
+ ('username', tester.TestDataType.String),
+ ('password_hash', tester.TestDataType.String),
+ ('email', tester.TestDataType.String),
+ ('verified', tester.TestDataType.Boolean),
+ ('organization', tester.TestDataType.Boolean),
+ ('robot', tester.TestDataType.Boolean),
+ ('invoice_email', tester.TestDataType.Boolean),
+ ('invalid_login_attempts', tester.TestDataType.Integer),
+ ('last_invalid_login', tester.TestDataType.DateTime),
+ ('removed_tag_expiration_s', tester.TestDataType.Integer),
+ ('enabled', tester.TestDataType.Boolean),
+ ('invoice_email_address', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('repository', [
+ ('namespace_user_id', tester.TestDataType.Foreign('user')),
+ ('name', tester.TestDataType.String),
+ ('visibility_id', tester.TestDataType.Foreign('visibility')),
+ ('description', tester.TestDataType.String),
+ ('badge_token', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('emailconfirmation', [
+ ('code', tester.TestDataType.String),
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('pw_reset', tester.TestDataType.Boolean),
+ ('email_confirm', tester.TestDataType.Boolean),
+ ('created', tester.TestDataType.DateTime),
+ ])
+
+ tester.populate_table('federatedlogin', [
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('service_id', tester.TestDataType.Foreign('loginservice')),
+ ('service_ident', tester.TestDataType.String),
+ ('metadata_json', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('imagestorage', [
+ ('uuid', tester.TestDataType.UUID),
+ ('checksum', tester.TestDataType.String),
+ ('image_size', tester.TestDataType.BigInteger),
+ ('uncompressed_size', tester.TestDataType.BigInteger),
+ ('uploading', tester.TestDataType.Boolean),
+ ('cas_path', tester.TestDataType.Boolean),
+ ('content_checksum', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('image', [
+ ('docker_image_id', tester.TestDataType.UUID),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('ancestors', tester.TestDataType.String),
+ ('storage_id', tester.TestDataType.Foreign('imagestorage')),
+ ('security_indexed', tester.TestDataType.Boolean),
+ ('security_indexed_engine', tester.TestDataType.Integer),
+ ])
+
+ tester.populate_table('imagestorageplacement', [
+ ('storage_id', tester.TestDataType.Foreign('imagestorage')),
+ ('location_id', tester.TestDataType.Foreign('imagestoragelocation')),
+ ])
+
+ tester.populate_table('messages', [
+ ('content', tester.TestDataType.String),
+ ('uuid', tester.TestDataType.UUID),
+ ])
+
+ tester.populate_table('queueitem', [
+ ('queue_name', tester.TestDataType.String),
+ ('body', tester.TestDataType.JSON),
+ ('available_after', tester.TestDataType.DateTime),
+ ('available', tester.TestDataType.Boolean),
+ ('processing_expires', tester.TestDataType.DateTime),
+ ('retries_remaining', tester.TestDataType.Integer),
+ ])
+
+ tester.populate_table('servicekeyapproval', [
+ ('approver_id', tester.TestDataType.Foreign('user')),
+ ('approval_type', tester.TestDataType.String),
+ ('approved_date', tester.TestDataType.DateTime),
+ ('notes', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('servicekey', [
+ ('name', tester.TestDataType.String),
+ ('kid', tester.TestDataType.String),
+ ('service', tester.TestDataType.String),
+ ('jwk', tester.TestDataType.JSON),
+ ('metadata', tester.TestDataType.JSON),
+ ('created_date', tester.TestDataType.DateTime),
+ ('approval_id', tester.TestDataType.Foreign('servicekeyapproval')),
+ ])
+
+ tester.populate_table('label', [
+ ('uuid', tester.TestDataType.UUID),
+ ('key', tester.TestDataType.UTF8Char),
+ ('value', tester.TestDataType.JSON),
+ ('media_type_id', tester.TestDataType.Foreign('mediatype')),
+ ('source_type_id', tester.TestDataType.Foreign('labelsourcetype')),
+ ])
+
+ tester.populate_table('logentry', [
+ ('kind_id', tester.TestDataType.Foreign('logentrykind')),
+ ('account_id', tester.TestDataType.Foreign('user')),
+ ('performer_id', tester.TestDataType.Foreign('user')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('datetime', tester.TestDataType.DateTime),
+ ('ip', tester.TestDataType.String),
+ ('metadata_json', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('notification', [
+ ('uuid', tester.TestDataType.UUID),
+ ('kind_id', tester.TestDataType.Foreign('notificationkind')),
+ ('target_id', tester.TestDataType.Foreign('user')),
+ ('metadata_json', tester.TestDataType.JSON),
+ ('created', tester.TestDataType.DateTime),
+ ('dismissed', tester.TestDataType.Boolean),
+ ('lookup_path', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('oauthapplication', [
+ ('client_id', tester.TestDataType.String),
+ ('client_secret', tester.TestDataType.String),
+ ('redirect_uri', tester.TestDataType.String),
+ ('application_uri', tester.TestDataType.String),
+ ('organization_id', tester.TestDataType.Foreign('user')),
+ ('name', tester.TestDataType.String),
+ ('description', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('team', [
+ ('name', tester.TestDataType.String),
+ ('organization_id', tester.TestDataType.Foreign('user')),
+ ('role_id', tester.TestDataType.Foreign('teamrole')),
+ ('description', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('torrentinfo', [
+ ('storage_id', tester.TestDataType.Foreign('imagestorage')),
+ ('piece_length', tester.TestDataType.Integer),
+ ('pieces', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('userregion', [
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('location_id', tester.TestDataType.Foreign('imagestoragelocation')),
+ ])
+
+ tester.populate_table('accesstoken', [
+ ('friendly_name', tester.TestDataType.String),
+ ('code', tester.TestDataType.Token),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('created', tester.TestDataType.DateTime),
+ ('role_id', tester.TestDataType.Foreign('role')),
+ ('temporary', tester.TestDataType.Boolean),
+ ('kind_id', tester.TestDataType.Foreign('accesstokenkind')),
+ ])
+
+ tester.populate_table('blobupload', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('uuid', tester.TestDataType.UUID),
+ ('byte_count', tester.TestDataType.Integer),
+ ('sha_state', tester.TestDataType.String),
+ ('location_id', tester.TestDataType.Foreign('imagestoragelocation')),
+ ('chunk_count', tester.TestDataType.Integer),
+ ('created', tester.TestDataType.DateTime),
+ ])
+
+ tester.populate_table('oauthaccesstoken', [
+ ('uuid', tester.TestDataType.UUID),
+ ('application_id', tester.TestDataType.Foreign('oauthapplication')),
+ ('authorized_user_id', tester.TestDataType.Foreign('user')),
+ ('scope', tester.TestDataType.String),
+ ('access_token', tester.TestDataType.Token),
+ ('token_type', tester.TestDataType.String),
+ ('expires_at', tester.TestDataType.DateTime),
+ ('data', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('oauthauthorizationcode', [
+ ('application_id', tester.TestDataType.Foreign('oauthapplication')),
+ ('code', tester.TestDataType.Token),
+ ('scope', tester.TestDataType.String),
+ ('data', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('permissionprototype', [
+ ('org_id', tester.TestDataType.Foreign('user')),
+ ('uuid', tester.TestDataType.UUID),
+ ('activating_user_id', tester.TestDataType.Foreign('user')),
+ ('delegate_user_id', tester.TestDataType.Foreign('user')),
+ ('role_id', tester.TestDataType.Foreign('role')),
+ ])
+
+ tester.populate_table('repositoryactioncount', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('count', tester.TestDataType.Integer),
+ ('date', tester.TestDataType.Date),
+ ])
+
+ tester.populate_table('repositoryauthorizedemail', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('email', tester.TestDataType.String),
+ ('code', tester.TestDataType.String),
+ ('confirmed', tester.TestDataType.Boolean),
+ ])
+
+ tester.populate_table('repositorynotification', [
+ ('uuid', tester.TestDataType.UUID),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('event_id', tester.TestDataType.Foreign('externalnotificationevent')),
+ ('method_id', tester.TestDataType.Foreign('externalnotificationmethod')),
+ ('title', tester.TestDataType.String),
+ ('config_json', tester.TestDataType.JSON),
+ ('event_config_json', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('repositorypermission', [
+ ('team_id', tester.TestDataType.Foreign('team')),
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('role_id', tester.TestDataType.Foreign('role')),
+ ])
+
+ tester.populate_table('star', [
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('created', tester.TestDataType.DateTime),
+ ])
+
+ tester.populate_table('teammember', [
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('team_id', tester.TestDataType.Foreign('team')),
+ ])
+
+ tester.populate_table('teammemberinvite', [
+ ('user_id', tester.TestDataType.Foreign('user')),
+ ('email', tester.TestDataType.String),
+ ('team_id', tester.TestDataType.Foreign('team')),
+ ('inviter_id', tester.TestDataType.Foreign('user')),
+ ('invite_token', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('derivedstorageforimage', [
+ ('source_image_id', tester.TestDataType.Foreign('image')),
+ ('derivative_id', tester.TestDataType.Foreign('imagestorage')),
+ ('transformation_id', tester.TestDataType.Foreign('imagestoragetransformation')),
+ ('uniqueness_hash', tester.TestDataType.String),
+ ])
+
+ tester.populate_table('repositorybuildtrigger', [
+ ('uuid', tester.TestDataType.UUID),
+ ('service_id', tester.TestDataType.Foreign('buildtriggerservice')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('connected_user_id', tester.TestDataType.Foreign('user')),
+ ('auth_token', tester.TestDataType.String),
+ ('config', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('repositorytag', [
+ ('name', tester.TestDataType.String),
+ ('image_id', tester.TestDataType.Foreign('image')),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('lifetime_start_ts', tester.TestDataType.Integer),
+ ('hidden', tester.TestDataType.Boolean),
+ ('reversion', tester.TestDataType.Boolean),
+ ])
+
+ tester.populate_table('repositorybuild', [
+ ('uuid', tester.TestDataType.UUID),
+ ('phase', tester.TestDataType.String),
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('access_token_id', tester.TestDataType.Foreign('accesstoken')),
+ ('resource_key', tester.TestDataType.String),
+ ('job_config', tester.TestDataType.JSON),
+ ('started', tester.TestDataType.DateTime),
+ ('display_name', tester.TestDataType.JSON),
+ ('trigger_id', tester.TestDataType.Foreign('repositorybuildtrigger')),
+ ('logs_archived', tester.TestDataType.Boolean),
+ ])
+
+ tester.populate_table('tagmanifest', [
+ ('tag_id', tester.TestDataType.Foreign('repositorytag')),
+ ('digest', tester.TestDataType.String),
+ ('json_data', tester.TestDataType.JSON),
+ ])
+
+ tester.populate_table('tagmanifestlabel', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('annotated_id', tester.TestDataType.Foreign('tagmanifest')),
+ ('label_id', tester.TestDataType.Foreign('label')),
+ ])
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.drop_table('tagmanifestlabel')
+ op.drop_table('tagmanifest')
+ op.drop_table('repositorybuild')
+ op.drop_table('repositorytag')
+ op.drop_table('repositorybuildtrigger')
+ op.drop_table('derivedstorageforimage')
+ op.drop_table('teammemberinvite')
+ op.drop_table('teammember')
+ op.drop_table('star')
+ op.drop_table('repositorypermission')
+ op.drop_table('repositorynotification')
+ op.drop_table('repositoryauthorizedemail')
+ op.drop_table('repositoryactioncount')
+ op.drop_table('permissionprototype')
+ op.drop_table('oauthauthorizationcode')
+ op.drop_table('oauthaccesstoken')
+ op.drop_table('image')
+ op.drop_table('blobupload')
+ op.drop_table('accesstoken')
+ op.drop_table('userregion')
+ op.drop_table('torrentinfo')
+ op.drop_table('team')
+ op.drop_table('servicekey')
+ op.drop_table('repository')
+ op.drop_table('quayrelease')
+ op.drop_table('oauthapplication')
+ op.drop_table('notification')
+ op.drop_table('logentry')
+ op.drop_table('label')
+ op.drop_table('imagestoragesignature')
+ op.drop_table('imagestorageplacement')
+ op.drop_table('federatedlogin')
+ op.drop_table('emailconfirmation')
+ op.drop_table('visibility')
+ op.drop_table('user')
+ op.drop_table('teamrole')
+ op.drop_table('servicekeyapproval')
+ op.drop_table('role')
+ op.drop_table('queueitem')
+ op.drop_table('quayservice')
+ op.drop_table('quayregion')
+ op.drop_table('notificationkind')
+ op.drop_table('messages')
+ op.drop_table('mediatype')
+ op.drop_table('loginservice')
+ op.drop_table('logentrykind')
+ op.drop_table('labelsourcetype')
+ op.drop_table('imagestoragetransformation')
+ op.drop_table('imagestoragesignaturekind')
+ op.drop_table('imagestoragelocation')
+ op.drop_table('imagestorage')
+ op.drop_table('externalnotificationmethod')
+ op.drop_table('externalnotificationevent')
+ op.drop_table('buildtriggerservice')
+ op.drop_table('accesstokenkind')
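
The populate_table calls in the upgrade above drive the migration test harness: each (column, TestDataType) pair tells the tester what kind of synthetic value to write so the freshly created schema can be exercised in both directions. The harness API is internal to the migrations package; the sketch below only illustrates the general shape of such a column-driven row generator and is not the tester's actual implementation.

# Hypothetical sketch of a (column, data-type)-driven row generator; the real
# tester's type names, foreign-key handling, and insertion logic may differ.
import json
import uuid
from datetime import datetime

GENERATORS = {
    'String': lambda: 'test-value',
    'Boolean': lambda: True,
    'Integer': lambda: 42,
    'BigInteger': lambda: 2 ** 40,
    'DateTime': datetime.utcnow,
    'UUID': lambda: str(uuid.uuid4()),
    'JSON': lambda: json.dumps({'key': 'value'}),
}

def make_test_row(columns):
    # Foreign keys would normally be resolved against previously inserted rows;
    # unknown kinds are stubbed to None here.
    return {name: GENERATORS.get(kind, lambda: None)() for name, kind in columns}

row = make_test_row([('name', 'String'), ('enabled', 'Boolean'), ('created', 'DateTime')])
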
diff --git a/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py b/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py
new file mode 100644
index 000000000..8e0a8ab8c
--- /dev/null
+++ b/data/migrations/versions/c3d4b7ebcdf7_backfill_repositorysearchscore_table.py
@@ -0,0 +1,26 @@
+"""Backfill RepositorySearchScore table
+
+Revision ID: c3d4b7ebcdf7
+Revises: f30984525c86
+Create Date: 2017-04-13 12:01:59.572775
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c3d4b7ebcdf7'
+down_revision = 'f30984525c86'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # Add a 0 entry into the RepositorySearchScore table for each repository that isn't present
+ conn = op.get_bind()
+ conn.execute("insert into repositorysearchscore (repository_id, score) SELECT id, 0 FROM " +
+ "repository WHERE id not in (select repository_id from repositorysearchscore)")
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ pass
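
The upgrade above backfills the missing rows with a single INSERT ... SELECT, so every repository without a score gets a zero entry in one statement rather than one query per repository. Purely as an illustration, the same statement can be wrapped in a SQLAlchemy text() construct; the migration itself keeps the plain string passed to conn.execute().

import sqlalchemy as sa

def backfill_missing_scores(conn):
    # Insert a zero score for every repository that has no score row yet.
    conn.execute(sa.text(
        "INSERT INTO repositorysearchscore (repository_id, score) "
        "SELECT id, 0 FROM repository "
        "WHERE id NOT IN (SELECT repository_id FROM repositorysearchscore)"))
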
diff --git a/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py b/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py
new file mode 100644
index 000000000..dc1567bd5
--- /dev/null
+++ b/data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py
@@ -0,0 +1,25 @@
+"""Drop checksum on ImageStorage
+
+Revision ID: c91c564aad34
+Revises: 152bb29a1bb3
+Create Date: 2018-02-21 12:17:52.405644
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'c91c564aad34'
+down_revision = '152bb29a1bb3'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.drop_column('imagestorage', 'checksum')
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.add_column('imagestorage', sa.Column('checksum', sa.String(length=255), nullable=True))
diff --git a/data/migrations/versions/cbc8177760d9_add_user_location_field.py b/data/migrations/versions/cbc8177760d9_add_user_location_field.py
new file mode 100644
index 000000000..cbdc87706
--- /dev/null
+++ b/data/migrations/versions/cbc8177760d9_add_user_location_field.py
@@ -0,0 +1,30 @@
+"""Add user location field
+
+Revision ID: cbc8177760d9
+Revises: 7367229b38d9
+Create Date: 2018-02-02 17:39:16.589623
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'cbc8177760d9'
+down_revision = '7367229b38d9'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+from util.migrate import UTF8CharField
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.add_column('user', sa.Column('location', UTF8CharField(length=255), nullable=True))
+
+ # ### population of test data ### #
+ tester.populate_column('user', 'location', tester.TestDataType.UTF8Char)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.drop_column('user', 'location')
diff --git a/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py b/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py
new file mode 100644
index 000000000..a44704eec
--- /dev/null
+++ b/data/migrations/versions/cc6778199cdb_repository_mirror_notification.py
@@ -0,0 +1,68 @@
+"""repository mirror notification
+
+Revision ID: cc6778199cdb
+Revises: c059b952ed76
+Create Date: 2019-10-03 17:41:23.316914
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'cc6778199cdb'
+down_revision = 'c059b952ed76'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+
+ op.bulk_insert(tables.notificationkind,
+ [
+ {'name': 'repo_mirror_sync_started'},
+ {'name': 'repo_mirror_sync_success'},
+ {'name': 'repo_mirror_sync_failed'},
+ ])
+ op.bulk_insert(tables.externalnotificationevent,
+ [
+ {'name': 'repo_mirror_sync_started'},
+ {'name': 'repo_mirror_sync_success'},
+ {'name': 'repo_mirror_sync_failed'},
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+
+ op.execute(tables
+ .notificationkind
+ .delete()
+ .where(tables.
+ notificationkind.c.name == op.inline_literal('repo_mirror_sync_started')))
+ op.execute(tables
+ .notificationkind
+ .delete()
+ .where(tables.
+ notificationkind.c.name == op.inline_literal('repo_mirror_sync_success')))
+ op.execute(tables
+ .notificationkind
+ .delete()
+ .where(tables.
+ notificationkind.c.name == op.inline_literal('repo_mirror_sync_failed')))
+
+ op.execute(tables
+ .externalnotificationevent
+ .delete()
+ .where(tables.
+ externalnotificationevent.c.name == op.inline_literal('repo_mirror_sync_started')))
+ op.execute(tables
+ .externalnotificationevent
+ .delete()
+ .where(tables.
+ externalnotificationevent.c.name == op.inline_literal('repo_mirror_sync_success')))
+ op.execute(tables
+ .externalnotificationevent
+ .delete()
+ .where(tables.
+ externalnotificationevent.c.name == op.inline_literal('repo_mirror_sync_failed')))
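
The downgrade above removes each notification kind and external event with its own DELETE. A more compact formulation, shown here only as a sketch of the equivalent SQLAlchemy expression, filters on the full set of names in one statement per table:

MIRROR_EVENT_NAMES = [
    'repo_mirror_sync_started',
    'repo_mirror_sync_success',
    'repo_mirror_sync_failed',
]

def delete_mirror_rows(op, tables):
    # One DELETE per table instead of one per event name.
    op.execute(tables.notificationkind.delete()
               .where(tables.notificationkind.c.name.in_(MIRROR_EVENT_NAMES)))
    op.execute(tables.externalnotificationevent.delete()
               .where(tables.externalnotificationevent.c.name.in_(MIRROR_EVENT_NAMES)))
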
diff --git a/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py b/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py
new file mode 100644
index 000000000..9e847e8e2
--- /dev/null
+++ b/data/migrations/versions/d17c695859ea_delete_old_appr_tables.py
@@ -0,0 +1,192 @@
+"""Delete old Appr tables
+
+Revision ID: d17c695859ea
+Revises: 5d463ea1e8a8
+Create Date: 2018-07-16 15:21:11.593040
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'd17c695859ea'
+down_revision = '5d463ea1e8a8'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.sql import table, column
+from util.migrate import UTF8LongText, UTF8CharField
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('tag')
+ op.drop_table('manifestlistmanifest')
+ op.drop_table('manifestlist')
+ op.drop_table('manifestblob')
+ op.drop_table('manifest')
+ op.drop_table('blobplacement')
+ op.drop_table('blob')
+ op.drop_table('blobplacementlocation')
+ op.drop_table('tagkind')
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ 'tagkind',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tagkind'))
+ )
+ op.create_index('tagkind_name', 'tagkind', ['name'], unique=True)
+
+ op.create_table(
+ 'blobplacementlocation',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=False),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacementlocation'))
+ )
+ op.create_index('blobplacementlocation_name', 'blobplacementlocation', ['name'], unique=True)
+
+ op.create_table(
+ 'blob',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('size', sa.BigInteger(), nullable=False),
+ sa.Column('uncompressed_size', sa.BigInteger(), nullable=True),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_blob_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blob'))
+ )
+ op.create_index('blob_digest', 'blob', ['digest'], unique=True)
+ op.create_index('blob_media_type_id', 'blob', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'manifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_json', UTF8LongText, nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifest_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifest'))
+ )
+ op.create_index('manifest_digest', 'manifest', ['digest'], unique=True)
+ op.create_index('manifest_media_type_id', 'manifest', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'manifestlist',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('digest', sa.String(length=255), nullable=False),
+ sa.Column('manifest_list_json', UTF8LongText, nullable=False),
+ sa.Column('schema_version', UTF8CharField(length=255), nullable=False),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlist_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlist'))
+ )
+ op.create_index('manifestlist_digest', 'manifestlist', ['digest'], unique=True)
+ op.create_index('manifestlist_media_type_id', 'manifestlist', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'blobplacement',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.Column('location_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_blobplacement_blob_id_blob')),
+ sa.ForeignKeyConstraint(['location_id'], ['blobplacementlocation.id'], name=op.f('fk_blobplacement_location_id_blobplacementlocation')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_blobplacement'))
+ )
+ op.create_index('blobplacement_blob_id', 'blobplacement', ['blob_id'], unique=False)
+ op.create_index('blobplacement_blob_id_location_id', 'blobplacement', ['blob_id', 'location_id'], unique=True)
+ op.create_index('blobplacement_location_id', 'blobplacement', ['location_id'], unique=False)
+
+ op.create_table(
+ 'manifestblob',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('blob_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['blob_id'], ['blob.id'], name=op.f('fk_manifestblob_blob_id_blob')),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestblob_manifest_id_manifest')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestblob'))
+ )
+ op.create_index('manifestblob_blob_id', 'manifestblob', ['blob_id'], unique=False)
+ op.create_index('manifestblob_manifest_id', 'manifestblob', ['manifest_id'], unique=False)
+ op.create_index('manifestblob_manifest_id_blob_id', 'manifestblob', ['manifest_id', 'blob_id'], unique=True)
+
+ op.create_table(
+ 'manifestlistmanifest',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('manifest_list_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_id', sa.Integer(), nullable=False),
+ sa.Column('operating_system', UTF8CharField(length=255), nullable=True),
+ sa.Column('architecture', UTF8CharField(length=255), nullable=True),
+ sa.Column('platform_json', UTF8LongText, nullable=True),
+ sa.Column('media_type_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['manifest_id'], ['manifest.id'], name=op.f('fk_manifestlistmanifest_manifest_id_manifest')),
+ sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_manifestlistmanifest_manifest_list_id_manifestlist')),
+ sa.ForeignKeyConstraint(['media_type_id'], ['mediatype.id'], name=op.f('fk_manifestlistmanifest_media_type_id_mediatype')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_manifestlistmanifest'))
+ )
+ op.create_index('manifestlistmanifest_manifest_id', 'manifestlistmanifest', ['manifest_id'], unique=False)
+ op.create_index('manifestlistmanifest_manifest_list_id', 'manifestlistmanifest', ['manifest_list_id'], unique=False)
+ op.create_index('manifestlistmanifest_manifest_listid_os_arch_mtid', 'manifestlistmanifest', ['manifest_list_id', 'operating_system', 'architecture', 'media_type_id'], unique=False)
+ op.create_index('manifestlistmanifest_manifest_listid_mtid', 'manifestlistmanifest', ['manifest_list_id', 'media_type_id'], unique=False)
+ op.create_index('manifestlistmanifest_media_type_id', 'manifestlistmanifest', ['media_type_id'], unique=False)
+
+ op.create_table(
+ 'tag',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', UTF8CharField(length=190), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('manifest_list_id', sa.Integer(), nullable=True),
+ sa.Column('lifetime_start', sa.BigInteger(), nullable=False),
+ sa.Column('lifetime_end', sa.BigInteger(), nullable=True),
+ sa.Column('hidden', sa.Boolean(), nullable=False),
+ sa.Column('reverted', sa.Boolean(), nullable=False),
+ sa.Column('protected', sa.Boolean(), nullable=False),
+ sa.Column('tag_kind_id', sa.Integer(), nullable=False),
+ sa.Column('linked_tag_id', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['linked_tag_id'], ['tag.id'], name=op.f('fk_tag_linked_tag_id_tag')),
+ sa.ForeignKeyConstraint(['manifest_list_id'], ['manifestlist.id'], name=op.f('fk_tag_manifest_list_id_manifestlist')),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_tag_repository_id_repository')),
+ sa.ForeignKeyConstraint(['tag_kind_id'], ['tagkind.id'], name=op.f('fk_tag_tag_kind_id_tagkind')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_tag'))
+ )
+ op.create_index('tag_lifetime_end', 'tag', ['lifetime_end'], unique=False)
+ op.create_index('tag_linked_tag_id', 'tag', ['linked_tag_id'], unique=False)
+ op.create_index('tag_manifest_list_id', 'tag', ['manifest_list_id'], unique=False)
+ op.create_index('tag_repository_id', 'tag', ['repository_id'], unique=False)
+ op.create_index('tag_repository_id_name_hidden', 'tag', ['repository_id', 'name', 'hidden'], unique=False)
+ op.create_index('tag_repository_id_name_lifetime_end', 'tag', ['repository_id', 'name', 'lifetime_end'], unique=True)
+ op.create_index('tag_repository_id_name', 'tag', ['repository_id', 'name'], unique=False)
+ op.create_index('tag_tag_kind_id', 'tag', ['tag_kind_id'], unique=False)
+
+ # ### end Alembic commands ###
+
+ blobplacementlocation_table = table('blobplacementlocation',
+ column('id', sa.Integer()),
+ column('name', sa.String()),
+ )
+
+ op.bulk_insert(
+ blobplacementlocation_table,
+ [
+ {'name': 'local_eu'},
+ {'name': 'local_us'},
+ ],
+ )
+
+ tagkind_table = table('tagkind',
+ column('id', sa.Integer()),
+ column('name', sa.String()),
+ )
+
+ op.bulk_insert(
+ tagkind_table,
+ [
+ {'id': 1, 'name': 'tag'},
+ {'id': 2, 'name': 'release'},
+ {'id': 3, 'name': 'channel'},
+ ]
+ )
\ No newline at end of file
diff --git a/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py b/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py
new file mode 100644
index 000000000..24a65b8a4
--- /dev/null
+++ b/data/migrations/versions/d42c175b439a_backfill_state_id_and_make_it_unique.py
@@ -0,0 +1,36 @@
+"""Backfill state_id and make it unique
+
+Revision ID: d42c175b439a
+Revises: 3e8cc74a1e7b
+Create Date: 2017-01-18 15:11:01.635632
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'd42c175b439a'
+down_revision = '3e8cc74a1e7b'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # Backfill the queueitem table's state_id field with unique values for all entries which are
+ # empty.
+ conn = op.get_bind()
+ conn.execute("update queueitem set state_id = id where state_id = ''")
+
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('queueitem_state_id', table_name='queueitem')
+ op.create_index('queueitem_state_id', 'queueitem', ['state_id'], unique=True)
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('queueitem_state_id', table_name='queueitem')
+ op.create_index('queueitem_state_id', 'queueitem', ['state_id'], unique=False)
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py b/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py
new file mode 100644
index 000000000..42ec883eb
--- /dev/null
+++ b/data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py
@@ -0,0 +1,28 @@
+"""Add change_tag_expiration log type
+
+Revision ID: d8989249f8f6
+Revises: dc4af11a5f90
+Create Date: 2017-06-21 21:18:25.948689
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'd8989249f8f6'
+down_revision = 'dc4af11a5f90'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.bulk_insert(tables.logentrykind, [
+ {'name': 'change_tag_expiration'},
+ ])
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.execute(tables
+ .logentrykind
+ .delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('change_tag_expiration')))
diff --git a/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py b/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py
new file mode 100644
index 000000000..dc8512026
--- /dev/null
+++ b/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py
@@ -0,0 +1,39 @@
+"""add notification number of failures column
+
+Revision ID: dc4af11a5f90
+Revises: 53e2ac668296
+Create Date: 2017-05-16 17:24:02.630365
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'dc4af11a5f90'
+down_revision = '53e2ac668296'
+
+import sqlalchemy as sa
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.add_column('repositorynotification', sa.Column('number_of_failures',
+ sa.Integer(),
+ nullable=False,
+ server_default='0'))
+ op.bulk_insert(tables.logentrykind, [
+ {'name': 'reset_repo_notification'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_column('repositorynotification', 'number_of_failures', tester.TestDataType.Integer)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ op.drop_column('repositorynotification', 'number_of_failures')
+ op.execute(tables
+ .logentrykind
+ .delete()
+ .where(tables.logentrykind.c.name == op.inline_literal('reset_repo_notification')))
diff --git a/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py b/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py
new file mode 100644
index 000000000..b4513ce6d
--- /dev/null
+++ b/data/migrations/versions/e184af42242d_add_missing_index_on_uuid_fields.py
@@ -0,0 +1,31 @@
+"""Add missing index on UUID fields
+
+Revision ID: e184af42242d
+Revises: 6ec8726c0ace
+Create Date: 2019-02-14 16:35:47.768086
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'e184af42242d'
+down_revision = '6ec8726c0ace'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('permissionprototype_uuid', 'permissionprototype', ['uuid'], unique=False)
+ op.create_index('repositorybuildtrigger_uuid', 'repositorybuildtrigger', ['uuid'], unique=False)
+ op.create_index('user_uuid', 'user', ['uuid'], unique=False)
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('user_uuid', table_name='user')
+ op.drop_index('repositorybuildtrigger_uuid', table_name='repositorybuildtrigger')
+ op.drop_index('permissionprototype_uuid', table_name='permissionprototype')
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py b/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py
new file mode 100644
index 000000000..13ed12ba5
--- /dev/null
+++ b/data/migrations/versions/e2894a3a3c19_add_full_text_search_indexing_for_repo_.py
@@ -0,0 +1,31 @@
+"""Add full text search indexing for repo name and description
+
+Revision ID: e2894a3a3c19
+Revises: d42c175b439a
+Create Date: 2017-01-11 13:55:54.890774
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'e2894a3a3c19'
+down_revision = 'd42c175b439a'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('repository_description__fulltext', 'repository', ['description'], unique=False, postgresql_using='gin', postgresql_ops={'description': 'gin_trgm_ops'}, mysql_prefix='FULLTEXT')
+ op.create_index('repository_name__fulltext', 'repository', ['name'], unique=False, postgresql_using='gin', postgresql_ops={'name': 'gin_trgm_ops'}, mysql_prefix='FULLTEXT')
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('repository_name__fulltext', table_name='repository')
+ op.drop_index('repository_description__fulltext', table_name='repository')
+ # ### end Alembic commands ###
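
The two indices above are declared once but carry dialect-specific options: on PostgreSQL they become GIN indices over the pg_trgm trigram operator class (the pg_trgm extension must be installed), while on MySQL the FULLTEXT prefix produces full-text indices. How they are queried therefore differs by backend; the snippet below sketches only the MySQL side, with an assumed search term.

import sqlalchemy as sa

def search_repositories_mysql(conn, term):
    # MySQL full-text lookup served by the repository_name__fulltext index;
    # on PostgreSQL the GIN/gin_trgm_ops index instead accelerates ILIKE and
    # trigram-similarity filters.
    return conn.execute(
        sa.text("SELECT id, name FROM repository "
                "WHERE MATCH(name) AGAINST (:term IN NATURAL LANGUAGE MODE)"),
        {'term': term}).fetchall()
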
diff --git a/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py b/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py
new file mode 100644
index 000000000..e2e69d99f
--- /dev/null
+++ b/data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py
@@ -0,0 +1,31 @@
+"""Remove blob_index from ManifestBlob table
+
+Revision ID: eafdeadcebc7
+Revises: 9093adccc784
+Create Date: 2018-08-07 15:57:54.001225
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'eafdeadcebc7'
+down_revision = '9093adccc784'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('manifestblob_manifest_id_blob_index', table_name='manifestblob')
+ op.drop_column('manifestblob', 'blob_index')
+ # ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('manifestblob', sa.Column('blob_index', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
+ op.create_index('manifestblob_manifest_id_blob_index', 'manifestblob', ['manifest_id', 'blob_index'], unique=True)
+ # ### end Alembic commands ###
diff --git a/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py b/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py
new file mode 100644
index 000000000..2a59ee4ec
--- /dev/null
+++ b/data/migrations/versions/ed01e313d3cb_add_trust_enabled_to_repository.py
@@ -0,0 +1,41 @@
+"""Add trust_enabled to repository
+
+Revision ID: ed01e313d3cb
+Revises: c3d4b7ebcdf7
+Create Date: 2017-04-14 17:38:03.319695
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'ed01e313d3cb'
+down_revision = 'c3d4b7ebcdf7'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('repository', sa.Column('trust_enabled', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))
+ ### end Alembic commands ###
+ op.bulk_insert(tables.logentrykind, [
+ {'name': 'change_repo_trust'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_column('repository', 'trust_enabled', tester.TestDataType.Boolean)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('repository', 'trust_enabled')
+ ### end Alembic commands ###
+
+ op.execute(tables
+ .logentrykind
+ .delete()
+ .where(tables.
+                      logentrykind.c.name == op.inline_literal('change_repo_trust')))
diff --git a/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py b/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py
new file mode 100644
index 000000000..f4a0d4045
--- /dev/null
+++ b/data/migrations/versions/f30984525c86_add_repositorysearchscore_table.py
@@ -0,0 +1,46 @@
+"""Add RepositorySearchScore table
+
+Revision ID: f30984525c86
+Revises: be8d1c402ce0
+Create Date: 2017-04-04 14:30:13.270728
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'f30984525c86'
+down_revision = 'be8d1c402ce0'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('repositorysearchscore',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('repository_id', sa.Integer(), nullable=False),
+ sa.Column('score', sa.BigInteger(), nullable=False),
+ sa.Column('last_updated', sa.DateTime(), nullable=True),
+ sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_repositorysearchscore_repository_id_repository')),
+ sa.PrimaryKeyConstraint('id', name=op.f('pk_repositorysearchscore'))
+ )
+ op.create_index('repositorysearchscore_repository_id', 'repositorysearchscore', ['repository_id'], unique=True)
+ op.create_index('repositorysearchscore_score', 'repositorysearchscore', ['score'], unique=False)
+ ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_table('repositorysearchscore', [
+ ('repository_id', tester.TestDataType.Foreign('repository')),
+ ('score', tester.TestDataType.BigInteger),
+ ('last_updated', tester.TestDataType.DateTime),
+ ])
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('repositorysearchscore')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py b/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py
new file mode 100644
index 000000000..d801764c1
--- /dev/null
+++ b/data/migrations/versions/f5167870dd66_update_queue_item_table_indices.py
@@ -0,0 +1,43 @@
+"""update queue item table indices
+
+Revision ID: f5167870dd66
+Revises: 45fd8b9869d4
+Create Date: 2016-12-08 17:26:20.333846
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'f5167870dd66'
+down_revision = '45fd8b9869d4'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('queueitem_processing_expires_available', 'queueitem', ['processing_expires', 'available'], unique=False)
+ op.create_index('queueitem_pe_aafter_qname_rremaining_available', 'queueitem', ['processing_expires', 'available_after', 'queue_name', 'retries_remaining', 'available'], unique=False)
+ op.create_index('queueitem_pexpires_aafter_rremaining_available', 'queueitem', ['processing_expires', 'available_after', 'retries_remaining', 'available'], unique=False)
+ op.create_index('queueitem_processing_expires_queue_name_available', 'queueitem', ['processing_expires', 'queue_name', 'available'], unique=False)
+ op.drop_index('queueitem_available', table_name='queueitem')
+ op.drop_index('queueitem_available_after', table_name='queueitem')
+ op.drop_index('queueitem_processing_expires', table_name='queueitem')
+ op.drop_index('queueitem_retries_remaining', table_name='queueitem')
+ ### end Alembic commands ###
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('queueitem_retries_remaining', 'queueitem', ['retries_remaining'], unique=False)
+ op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False)
+ op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False)
+ op.create_index('queueitem_available', 'queueitem', ['available'], unique=False)
+ op.drop_index('queueitem_processing_expires_queue_name_available', table_name='queueitem')
+ op.drop_index('queueitem_pexpires_aafter_rremaining_available', table_name='queueitem')
+ op.drop_index('queueitem_pe_aafter_qname_rremaining_available', table_name='queueitem')
+ op.drop_index('queueitem_processing_expires_available', table_name='queueitem')
+ ### end Alembic commands ###
diff --git a/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py b/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py
new file mode 100644
index 000000000..3e3b9b9a6
--- /dev/null
+++ b/data/migrations/versions/faf752bd2e0a_add_user_metadata_fields.py
@@ -0,0 +1,56 @@
+"""Add user metadata fields
+
+Revision ID: faf752bd2e0a
+Revises: 6c7014e84a5e
+Create Date: 2016-11-14 17:29:03.984665
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'faf752bd2e0a'
+down_revision = '6c7014e84a5e'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+
+from util.migrate import UTF8CharField
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('user', sa.Column('company', UTF8CharField(length=255), nullable=True))
+ op.add_column('user', sa.Column('family_name', UTF8CharField(length=255), nullable=True))
+ op.add_column('user', sa.Column('given_name', UTF8CharField(length=255), nullable=True))
+ ### end Alembic commands ###
+
+ op.bulk_insert(tables.userpromptkind,
+ [
+ {'name':'enter_name'},
+ {'name':'enter_company'},
+ ])
+
+ # ### population of test data ### #
+ tester.populate_column('user', 'company', tester.TestDataType.UTF8Char)
+ tester.populate_column('user', 'family_name', tester.TestDataType.UTF8Char)
+ tester.populate_column('user', 'given_name', tester.TestDataType.UTF8Char)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('user', 'given_name')
+ op.drop_column('user', 'family_name')
+ op.drop_column('user', 'company')
+ ### end Alembic commands ###
+
+ op.execute(
+ (tables.userpromptkind.delete()
+ .where(tables.userpromptkind.c.name == op.inline_literal('enter_name')))
+ )
+
+ op.execute(
+ (tables.userpromptkind.delete()
+ .where(tables.userpromptkind.c.name == op.inline_literal('enter_company')))
+ )
diff --git a/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py b/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py
new file mode 100644
index 000000000..dd0363ce3
--- /dev/null
+++ b/data/migrations/versions/fc47c1ec019f_add_state_id_field_to_queueitem.py
@@ -0,0 +1,35 @@
+"""Add state_id field to QueueItem
+
+Revision ID: fc47c1ec019f
+Revises: f5167870dd66
+Create Date: 2017-01-12 15:44:23.643016
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'fc47c1ec019f'
+down_revision = 'f5167870dd66'
+
+from alembic import op as original_op
+from data.migrations.progress import ProgressWrapper
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('queueitem', sa.Column('state_id', sa.String(length=255), nullable=False, server_default=''))
+ op.create_index('queueitem_state_id', 'queueitem', ['state_id'], unique=False)
+ # ### end Alembic commands ###
+
+ # ### population of test data ### #
+ tester.populate_column('queueitem', 'state_id', tester.TestDataType.String)
+ # ### end population of test data ### #
+
+
+def downgrade(tables, tester, progress_reporter):
+ op = ProgressWrapper(original_op, progress_reporter)
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('queueitem_state_id', table_name='queueitem')
+ op.drop_column('queueitem', 'state_id')
+ # ### end Alembic commands ###
diff --git a/data/migrationutil.py b/data/migrationutil.py
new file mode 100644
index 000000000..db34e1882
--- /dev/null
+++ b/data/migrationutil.py
@@ -0,0 +1,55 @@
+import os
+
+from abc import ABCMeta, abstractmethod, abstractproperty
+from collections import namedtuple
+from six import add_metaclass
+
+MigrationPhase = namedtuple('MigrationPhase', ['name', 'alembic_revision', 'flags'])
+
+
+@add_metaclass(ABCMeta)
+class DataMigration(object):
+ @abstractproperty
+ def alembic_migration_revision(self):
+ """ Returns the alembic migration revision corresponding to the currently configured phase.
+ """
+
+ @abstractmethod
+ def has_flag(self, flag):
+ """ Returns true if the data migration's current phase has the given flag set. """
+
+
+class NullDataMigration(DataMigration):
+ @property
+ def alembic_migration_revision(self):
+ return 'head'
+
+ def has_flag(self, flag):
+ raise NotImplementedError()
+
+
+class DefinedDataMigration(DataMigration):
+ def __init__(self, name, env_var, phases):
+ self.name = name
+ self.phases = {phase.name: phase for phase in phases}
+
+ phase_name = os.getenv(env_var)
+ if phase_name is None:
+ msg = 'Missing env var `%s` for data migration `%s`' % (env_var, self.name)
+ raise Exception(msg)
+
+ current_phase = self.phases.get(phase_name)
+ if current_phase is None:
+ msg = 'Unknown phase `%s` for data migration `%s`' % (phase_name, self.name)
+ raise Exception(msg)
+
+ self.current_phase = current_phase
+
+ @property
+ def alembic_migration_revision(self):
+ assert self.current_phase
+ return self.current_phase.alembic_revision
+
+ def has_flag(self, flag):
+ assert self.current_phase
+ return flag in self.current_phase.flags
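
DefinedDataMigration resolves its active phase once, at construction time, from an environment variable and then exposes that phase's Alembic revision and feature flags; if the variable is missing or names an unknown phase it raises immediately, failing the deployment early rather than at query time. A usage sketch follows; the phase names, env var, revisions, and flag values are invented for illustration only.

import os

from data.migrationutil import DefinedDataMigration, MigrationPhase

# Hypothetical phases for an example data migration.
os.environ['EXAMPLE_MIGRATION_PHASE'] = 'write-both'

ExampleMigration = DefinedDataMigration(
    'example',
    'EXAMPLE_MIGRATION_PHASE',
    [
        MigrationPhase('write-both', 'abc123def456', ['write-old-fields']),
        MigrationPhase('read-new-only', 'fedcba654321', []),
    ])

assert ExampleMigration.alembic_migration_revision == 'abc123def456'
assert ExampleMigration.has_flag('write-old-fields')
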
diff --git a/data/model/__init__.py b/data/model/__init__.py
new file mode 100644
index 000000000..2c9260469
--- /dev/null
+++ b/data/model/__init__.py
@@ -0,0 +1,153 @@
+from data.database import db, db_transaction
+
+
+class DataModelException(Exception):
+ pass
+
+
+class InvalidLabelKeyException(DataModelException):
+ pass
+
+
+class InvalidMediaTypeException(DataModelException):
+ pass
+
+
+class BlobDoesNotExist(DataModelException):
+ pass
+
+
+class TorrentInfoDoesNotExist(DataModelException):
+ pass
+
+
+class InvalidBlobUpload(DataModelException):
+ pass
+
+
+class InvalidEmailAddressException(DataModelException):
+ pass
+
+
+class InvalidOrganizationException(DataModelException):
+ pass
+
+
+class InvalidPasswordException(DataModelException):
+ pass
+
+
+class InvalidRobotException(DataModelException):
+ pass
+
+
+class InvalidUsernameException(DataModelException):
+ pass
+
+
+class InvalidRepositoryBuildException(DataModelException):
+ pass
+
+
+class InvalidBuildTriggerException(DataModelException):
+ pass
+
+
+class InvalidTokenException(DataModelException):
+ pass
+
+
+class InvalidNotificationException(DataModelException):
+ pass
+
+
+class InvalidImageException(DataModelException):
+ pass
+
+
+class UserAlreadyInTeam(DataModelException):
+ pass
+
+
+class InvalidTeamException(DataModelException):
+ pass
+
+
+class InvalidTeamMemberException(DataModelException):
+ pass
+
+
+class InvalidManifestException(DataModelException):
+ pass
+
+
+class ServiceKeyDoesNotExist(DataModelException):
+ pass
+
+
+class ServiceKeyAlreadyApproved(DataModelException):
+ pass
+
+
+class ServiceNameInvalid(DataModelException):
+ pass
+
+
+class TagAlreadyCreatedException(DataModelException):
+ pass
+
+class StaleTagException(DataModelException):
+ pass
+
+
+class TooManyLoginAttemptsException(Exception):
+ def __init__(self, message, retry_after):
+ super(TooManyLoginAttemptsException, self).__init__(message)
+ self.retry_after = retry_after
+
+
+class Config(object):
+ def __init__(self):
+ self.app_config = None
+ self.store = None
+ self.image_cleanup_callbacks = []
+ self.repo_cleanup_callbacks = []
+
+ def register_image_cleanup_callback(self, callback):
+ self.image_cleanup_callbacks.append(callback)
+
+ def register_repo_cleanup_callback(self, callback):
+ self.repo_cleanup_callbacks.append(callback)
+
+
+config = Config()
+
+

+# There MUST NOT be any circular dependencies between these subsections. If there are, fix them by
+# moving the minimal number of things to _basequery.
+from data.model import (
+ appspecifictoken,
+ blob,
+ build,
+ gc,
+ image,
+ label,
+ log,
+ message,
+ modelutil,
+ notification,
+ oauth,
+ organization,
+ permission,
+ repositoryactioncount,
+  release,
+  repo_mirror,
+ repository,
+ service_keys,
+ storage,
+ tag,
+ team,
+ token,
+ user,
+)
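
The module-level config object is filled in at application start (app_config, storage) and gives other components a place to register cleanup hooks, which are invoked elsewhere (for example during garbage collection). A minimal sketch of registering a hook; the arguments the callback receives depend on the caller and are only assumed here.

from data.model import config

def on_repository_cleanup(*args, **kwargs):
    # Illustrative hook body; the actual arguments passed to cleanup
    # callbacks are determined by the code that invokes them.
    print('repository cleanup callback fired')

config.register_repo_cleanup_callback(on_repository_cleanup)
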
diff --git a/data/model/_basequery.py b/data/model/_basequery.py
new file mode 100644
index 000000000..5fc1733e0
--- /dev/null
+++ b/data/model/_basequery.py
@@ -0,0 +1,198 @@
+import logging
+
+from peewee import fn, PeeweeException
+from cachetools.func import lru_cache
+
+from datetime import datetime, timedelta
+
+from data.model import DataModelException, config
+from data.readreplica import ReadOnlyModeException
+from data.database import (Repository, User, Team, TeamMember, RepositoryPermission, TeamRole,
+ Namespace, Visibility, ImageStorage, Image, RepositoryKind,
+ db_for_update)
+
+logger = logging.getLogger(__name__)
+
+def reduce_as_tree(queries_to_reduce):
+  """ Recursively splits the list of queries into halves until individual queries are reached, then
+      unions the halves back together as a balanced tree. This works around a bug in peewee SQL
+      generation where reducing the list linearly builds a chain of queries deep enough to exceed
+      the recursion depth limit once there are around 80 queries.
+ """
+ mid = len(queries_to_reduce)/2
+ left = queries_to_reduce[:mid]
+ right = queries_to_reduce[mid:]
+
+ to_reduce_right = right[0]
+ if len(right) > 1:
+ to_reduce_right = reduce_as_tree(right)
+
+ if len(left) > 1:
+ to_reduce_left = reduce_as_tree(left)
+ elif len(left) == 1:
+ to_reduce_left = left[0]
+ else:
+ return to_reduce_right
+
+ return to_reduce_left.union_all(to_reduce_right)
+
+
+def get_existing_repository(namespace_name, repository_name, for_update=False, kind_filter=None):
+ query = (Repository
+ .select(Repository, Namespace)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace_name,
+ Repository.name == repository_name))
+
+ if kind_filter:
+ query = (query
+ .switch(Repository)
+ .join(RepositoryKind)
+ .where(RepositoryKind.name == kind_filter))
+
+ if for_update:
+ query = db_for_update(query)
+
+ return query.get()
+
+
+@lru_cache(maxsize=1)
+def get_public_repo_visibility():
+ return Visibility.get(name='public')
+
+
+def _lookup_team_role(name):
+ return _lookup_team_roles()[name]
+
+
+@lru_cache(maxsize=1)
+def _lookup_team_roles():
+ return {role.name:role for role in TeamRole.select()}
+
+
+def filter_to_repos_for_user(query, user_id=None, namespace=None, repo_kind='image',
+ include_public=True, start_id=None):
+ if not include_public and not user_id:
+ return Repository.select().where(Repository.id == '-1')
+
+ # Filter on the type of repository.
+ if repo_kind is not None:
+ try:
+ query = query.where(Repository.kind == Repository.kind.get_id(repo_kind))
+ except RepositoryKind.DoesNotExist:
+ raise DataModelException('Unknown repository kind')
+
+ # Add the start ID if necessary.
+ if start_id is not None:
+ query = query.where(Repository.id >= start_id)
+
+ # Add a namespace filter if necessary.
+ if namespace:
+ query = query.where(Namespace.username == namespace)
+
+ # Build a set of queries that, when unioned together, return the full set of visible repositories
+ # for the filters specified.
+ queries = []
+
+ if include_public:
+ queries.append(query.where(Repository.visibility == get_public_repo_visibility()))
+
+ if user_id is not None:
+ AdminTeam = Team.alias()
+ AdminTeamMember = TeamMember.alias()
+
+ # Add repositories in which the user has permission.
+ queries.append(query
+ .switch(RepositoryPermission)
+ .where(RepositoryPermission.user == user_id))
+
+ # Add repositories in which the user is a member of a team that has permission.
+ queries.append(query
+ .switch(RepositoryPermission)
+ .join(Team)
+ .join(TeamMember)
+ .where(TeamMember.user == user_id))
+
+ # Add repositories under namespaces in which the user is the org admin.
+ queries.append(query
+ .switch(Repository)
+ .join(AdminTeam, on=(Repository.namespace_user == AdminTeam.organization))
+ .join(AdminTeamMember, on=(AdminTeam.id == AdminTeamMember.team))
+ .where(AdminTeam.role == _lookup_team_role('admin'))
+ .where(AdminTeamMember.user == user_id))
+
+ return reduce(lambda l, r: l | r, queries)
+
+
+def get_user_organizations(username):
+ UserAlias = User.alias()
+ return (User
+ .select()
+ .distinct()
+ .join(Team)
+ .join(TeamMember)
+ .join(UserAlias, on=(UserAlias.id == TeamMember.user))
+ .where(User.organization == True, UserAlias.username == username))
+
+
+def calculate_image_aggregate_size(ancestors_str, image_size, parent_image):
+ ancestors = ancestors_str.split('/')[1:-1]
+ if not ancestors:
+ return image_size
+
+ if parent_image is None:
+ raise DataModelException('Could not load parent image')
+
+ ancestor_size = parent_image.aggregate_size
+ if ancestor_size is not None:
+ return ancestor_size + image_size
+
+  # Fall back to a slower path if the parent doesn't have an aggregate size saved.
+ # TODO: remove this code if/when we do a full backfill.
+ ancestor_size = (ImageStorage
+ .select(fn.Sum(ImageStorage.image_size))
+ .join(Image)
+ .where(Image.id << ancestors)
+ .scalar())
+ if ancestor_size is None:
+ return None
+
+ return ancestor_size + image_size
+
+
+def update_last_accessed(token_or_user):
+ """ Updates the `last_accessed` field on the given token or user. If the existing field's value
+ is within the configured threshold, the update is skipped. """
+ if not config.app_config.get('FEATURE_USER_LAST_ACCESSED'):
+ return
+
+ threshold = timedelta(seconds=config.app_config.get('LAST_ACCESSED_UPDATE_THRESHOLD_S', 120))
+ if (token_or_user.last_accessed is not None and
+ datetime.utcnow() - token_or_user.last_accessed < threshold):
+ # Skip updating, as we don't want to put undue pressure on the database.
+ return
+
+ model_class = token_or_user.__class__
+ last_accessed = datetime.utcnow()
+
+ try:
+ (model_class
+ .update(last_accessed=last_accessed)
+ .where(model_class.id == token_or_user.id)
+ .execute())
+ token_or_user.last_accessed = last_accessed
+ except ReadOnlyModeException:
+ pass
+ except PeeweeException as ex:
+ # If there is any form of DB exception, only fail if strict logging is enabled.
+ strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
+ if strict_logging_disabled:
+ data = {
+ 'exception': ex,
+ 'token_or_user': token_or_user.id,
+ 'class': str(model_class),
+ }
+
+ logger.exception('update last_accessed for token/user failed', extra=data)
+ else:
+ raise
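
reduce_as_tree keeps the UNION ALL nesting depth at roughly log2(n) instead of n, which is what prevents peewee's SQL generation from exceeding the recursion limit once dozens of visibility subqueries are combined. The same reduction shape can be demonstrated on plain values, with no peewee involved:

def reduce_as_tree_demo(items, combine):
    # Same splitting logic as reduce_as_tree above, applied to arbitrary values
    # so the balanced nesting is easy to see.
    mid = len(items) // 2
    left, right = items[:mid], items[mid:]
    reduced_right = reduce_as_tree_demo(right, combine) if len(right) > 1 else right[0]
    if len(left) > 1:
        reduced_left = reduce_as_tree_demo(left, combine)
    elif len(left) == 1:
        reduced_left = left[0]
    else:
        return reduced_right
    return combine(reduced_left, reduced_right)

# Eight items nest three levels deep rather than seven:
# (((a|b)|(c|d))|((e|f)|(g|h)))
print(reduce_as_tree_demo(list('abcdefgh'), lambda l, r: '(%s|%s)' % (l, r)))
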
diff --git a/data/model/appspecifictoken.py b/data/model/appspecifictoken.py
new file mode 100644
index 000000000..c0ead9440
--- /dev/null
+++ b/data/model/appspecifictoken.py
@@ -0,0 +1,172 @@
+import logging
+
+from datetime import datetime
+
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from data.database import AppSpecificAuthToken, User, random_string_generator
+from data.model import config
+from data.model._basequery import update_last_accessed
+from data.fields import DecryptedValue
+from util.timedeltastring import convert_to_timedelta
+from util.unicode import remove_unicode
+
+logger = logging.getLogger(__name__)
+
+TOKEN_NAME_PREFIX_LENGTH = 60
+MINIMUM_TOKEN_SUFFIX_LENGTH = 60
+
+
+def _default_expiration_duration():
+ expiration_str = config.app_config.get('APP_SPECIFIC_TOKEN_EXPIRATION')
+ return convert_to_timedelta(expiration_str) if expiration_str else None
+
+
+# Define a "unique" value so that callers can specify an expiration of None and *not* have it
+# use the default.
+_default_expiration_duration_opt = '__deo'
+
+def create_token(user, title, expiration=_default_expiration_duration_opt):
+  """ Creates and returns an app specific token for the given user. If no expiration is specified,
+      the default from config is used; passing `None` explicitly creates a token that never expires. """
+ if expiration == _default_expiration_duration_opt:
+ duration = _default_expiration_duration()
+ expiration = duration + datetime.now() if duration else None
+
+ token_code = random_string_generator(TOKEN_NAME_PREFIX_LENGTH + MINIMUM_TOKEN_SUFFIX_LENGTH)()
+ token_name = token_code[:TOKEN_NAME_PREFIX_LENGTH]
+ token_secret = token_code[TOKEN_NAME_PREFIX_LENGTH:]
+
+ assert token_name
+ assert token_secret
+
+ # TODO(remove-unenc): Remove legacy handling.
+ old_token_code = (token_code
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS)
+ else None)
+ return AppSpecificAuthToken.create(user=user,
+ title=title,
+ expiration=expiration,
+ token_name=token_name,
+ token_secret=DecryptedValue(token_secret),
+ token_code=old_token_code)
+
+
+def list_tokens(user):
+ """ Lists all tokens for the given user. """
+ return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)
+
+
+def revoke_token(token):
+ """ Revokes an app specific token by deleting it. """
+ token.delete_instance()
+
+
+def revoke_token_by_uuid(uuid, owner):
+ """ Revokes an app specific token by deleting it. """
+ try:
+ token = AppSpecificAuthToken.get(uuid=uuid, user=owner)
+ except AppSpecificAuthToken.DoesNotExist:
+ return None
+
+ revoke_token(token)
+ return token
+
+
+def get_expiring_tokens(user, soon):
+ """ Returns all tokens owned by the given user that will be expiring "soon", where soon is defined
+ by the soon parameter (a timedelta from now).
+ """
+ soon_datetime = datetime.now() + soon
+ return (AppSpecificAuthToken
+ .select()
+ .where(AppSpecificAuthToken.user == user,
+ AppSpecificAuthToken.expiration <= soon_datetime,
+ AppSpecificAuthToken.expiration > datetime.now()))
+
+
+def gc_expired_tokens(expiration_window):
+ """ Deletes all expired tokens outside of the expiration window. """
+ (AppSpecificAuthToken
+ .delete()
+ .where(AppSpecificAuthToken.expiration < (datetime.now() - expiration_window))
+ .execute())
+
+
+def get_token_by_uuid(uuid, owner=None):
+ """ Looks up an unexpired app specific token with the given uuid. Returns it if found or
+ None if none. If owner is specified, only tokens owned by the owner user will be
+ returned.
+ """
+ try:
+ query = (AppSpecificAuthToken
+ .select()
+ .where(AppSpecificAuthToken.uuid == uuid,
+ ((AppSpecificAuthToken.expiration > datetime.now()) |
+ (AppSpecificAuthToken.expiration >> None))))
+ if owner is not None:
+ query = query.where(AppSpecificAuthToken.user == owner)
+
+ return query.get()
+ except AppSpecificAuthToken.DoesNotExist:
+ return None
+
+
+def access_valid_token(token_code):
+ """ Looks up an unexpired app specific token with the given token code. If found, the token's
+ last_accessed field is set to now and the token is returned. If not found, returns None.
+ """
+ token_code = remove_unicode(token_code)
+
+ prefix = token_code[:TOKEN_NAME_PREFIX_LENGTH]
+ if len(prefix) != TOKEN_NAME_PREFIX_LENGTH:
+ return None
+
+ suffix = token_code[TOKEN_NAME_PREFIX_LENGTH:]
+
+ # Lookup the token by its prefix.
+ try:
+ token = (AppSpecificAuthToken
+ .select(AppSpecificAuthToken, User)
+ .join(User)
+ .where(AppSpecificAuthToken.token_name == prefix,
+ ((AppSpecificAuthToken.expiration > datetime.now()) |
+ (AppSpecificAuthToken.expiration >> None)))
+ .get())
+
+ if not token.token_secret.matches(suffix):
+ return None
+
+ assert len(prefix) == TOKEN_NAME_PREFIX_LENGTH
+ assert len(suffix) >= MINIMUM_TOKEN_SUFFIX_LENGTH
+ update_last_accessed(token)
+ return token
+ except AppSpecificAuthToken.DoesNotExist:
+ pass
+
+ # TODO(remove-unenc): Remove legacy handling.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ try:
+ token = (AppSpecificAuthToken
+ .select(AppSpecificAuthToken, User)
+ .join(User)
+ .where(AppSpecificAuthToken.token_code == token_code,
+ ((AppSpecificAuthToken.expiration > datetime.now()) |
+ (AppSpecificAuthToken.expiration >> None)))
+ .get())
+
+ update_last_accessed(token)
+ return token
+ except AppSpecificAuthToken.DoesNotExist:
+ return None
+
+ return None
+
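+
+# Illustrative sketch (not part of this change): a full token string is the fixed-length
+# token_name prefix followed by the secret suffix, so lookup only needs the (indexed) prefix
+# and the suffix is then verified against the encrypted token_secret.
+def _example_split_token_code(full_token_code):
+  prefix = full_token_code[:TOKEN_NAME_PREFIX_LENGTH]
+  suffix = full_token_code[TOKEN_NAME_PREFIX_LENGTH:]
+  return prefix, suffix
+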
+
+def get_full_token_string(token):
+ # TODO(remove-unenc): Remove legacy handling.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ if not token.token_name:
+ return token.token_code
+
+ assert token.token_name
+ return '%s%s' % (token.token_name, token.token_secret.decrypt())
diff --git a/data/model/blob.py b/data/model/blob.py
new file mode 100644
index 000000000..ac14891e8
--- /dev/null
+++ b/data/model/blob.py
@@ -0,0 +1,237 @@
+import logging
+
+from datetime import datetime
+from uuid import uuid4
+
+from data.model import (tag, _basequery, BlobDoesNotExist, InvalidBlobUpload, db_transaction,
+ storage as storage_model, InvalidImageException)
+from data.database import (Repository, Namespace, ImageStorage, Image, ImageStoragePlacement,
+ BlobUpload, ImageStorageLocation, db_random_func)
+
+
+logger = logging.getLogger(__name__)
+
+
+def get_repository_blob_by_digest(repository, blob_digest):
+ """ Find the content-addressable blob linked to the specified repository.
+ """
+ assert blob_digest
+ try:
+ storage = (ImageStorage
+ .select(ImageStorage.uuid)
+ .join(Image)
+ .where(Image.repository == repository,
+ ImageStorage.content_checksum == blob_digest,
+ ImageStorage.uploading == False)
+ .get())
+
+ return storage_model.get_storage_by_uuid(storage.uuid)
+ except (ImageStorage.DoesNotExist, InvalidImageException):
+ raise BlobDoesNotExist('Blob does not exist with digest: {0}'.format(blob_digest))
+
+
+def get_repo_blob_by_digest(namespace, repo_name, blob_digest):
+ """ Find the content-addressable blob linked to the specified repository.
+ """
+ assert blob_digest
+ try:
+ storage = (ImageStorage
+ .select(ImageStorage.uuid)
+ .join(Image)
+ .join(Repository)
+ .join(Namespace, on=(Namespace.id == Repository.namespace_user))
+ .where(Repository.name == repo_name, Namespace.username == namespace,
+ ImageStorage.content_checksum == blob_digest,
+ ImageStorage.uploading == False)
+ .get())
+
+ return storage_model.get_storage_by_uuid(storage.uuid)
+ except (ImageStorage.DoesNotExist, InvalidImageException):
+ raise BlobDoesNotExist('Blob does not exist with digest: {0}'.format(blob_digest))
+
+
+def store_blob_record_and_temp_link(namespace, repo_name, blob_digest, location_obj, byte_count,
+ link_expiration_s, uncompressed_byte_count=None):
+ repo = _basequery.get_existing_repository(namespace, repo_name)
+ assert repo
+
+ return store_blob_record_and_temp_link_in_repo(repo.id, blob_digest, location_obj, byte_count,
+ link_expiration_s, uncompressed_byte_count)
+
+
+def store_blob_record_and_temp_link_in_repo(repository_id, blob_digest, location_obj, byte_count,
+ link_expiration_s, uncompressed_byte_count=None):
+ """ Store a record of the blob and temporarily link it to the specified repository.
+ """
+ assert blob_digest
+ assert byte_count is not None
+
+ with db_transaction():
+ try:
+ storage = ImageStorage.get(content_checksum=blob_digest)
+ save_changes = False
+
+ if storage.image_size is None:
+ storage.image_size = byte_count
+ save_changes = True
+
+ if storage.uncompressed_size is None and uncompressed_byte_count is not None:
+ storage.uncompressed_size = uncompressed_byte_count
+ save_changes = True
+
+ if save_changes:
+ storage.save()
+
+ ImageStoragePlacement.get(storage=storage, location=location_obj)
+ except ImageStorage.DoesNotExist:
+ storage = ImageStorage.create(content_checksum=blob_digest, uploading=False,
+ image_size=byte_count,
+ uncompressed_size=uncompressed_byte_count)
+ ImageStoragePlacement.create(storage=storage, location=location_obj)
+ except ImageStoragePlacement.DoesNotExist:
+ ImageStoragePlacement.create(storage=storage, location=location_obj)
+
+ _temp_link_blob(repository_id, storage, link_expiration_s)
+ return storage
+
+
+def temp_link_blob(repository_id, blob_digest, link_expiration_s):
+ """ Temporarily links to the blob record from the given repository. If the blob record is not
+ found, return None.
+ """
+ assert blob_digest
+
+ with db_transaction():
+ try:
+ storage = ImageStorage.get(content_checksum=blob_digest)
+ except ImageStorage.DoesNotExist:
+ return None
+
+ _temp_link_blob(repository_id, storage, link_expiration_s)
+ return storage
+
+
+def _temp_link_blob(repository_id, storage, link_expiration_s):
+ """ Note: Should *always* be called by a parent under a transaction. """
+ random_image_name = str(uuid4())
+
+ # Create a temporary link into the repository, to be replaced by the v1 metadata later
+ # and create a temporary tag to reference it
+ image = Image.create(storage=storage, docker_image_id=random_image_name, repository=repository_id)
+ tag.create_temporary_hidden_tag(repository_id, image, link_expiration_s)
+
+
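+# Illustrative sketch (not part of this change): during a blob push, a handler might record the
+# blob and keep it referenced for an hour via the temporary hidden tag created above. The digest,
+# size and location values here are hypothetical.
+def _example_record_pushed_blob(repo_id, location_obj):
+  digest = 'sha256:' + '0' * 64
+  return store_blob_record_and_temp_link_in_repo(repo_id, digest, location_obj,
+                                                 byte_count=1024, link_expiration_s=3600)
+
+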
+def get_stale_blob_upload(stale_timespan):
+ """ Returns a random blob upload which was created before the stale timespan. """
+ stale_threshold = datetime.now() - stale_timespan
+
+ try:
+ candidates = (BlobUpload
+ .select()
+ .where(BlobUpload.created <= stale_threshold)
+ .limit(500)
+ .distinct()
+ .alias('candidates'))
+
+ found = (BlobUpload
+ .select(candidates.c.id)
+ .from_(candidates)
+ .order_by(db_random_func())
+ .get())
+ if not found:
+ return None
+
+ return (BlobUpload
+ .select(BlobUpload, ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .where(BlobUpload.id == found.id)
+ .get())
+ except BlobUpload.DoesNotExist:
+ return None
+
+
+def get_blob_upload_by_uuid(upload_uuid):
+ """ Loads the upload with the given UUID, if any. """
+ try:
+ return (BlobUpload
+ .select()
+ .where(BlobUpload.uuid == upload_uuid)
+ .get())
+ except BlobUpload.DoesNotExist:
+ return None
+
+
+def get_blob_upload(namespace, repo_name, upload_uuid):
+ """ Load the upload which is already in progress.
+ """
+ try:
+ return (BlobUpload
+ .select(BlobUpload, ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .switch(BlobUpload)
+ .join(Repository)
+ .join(Namespace, on=(Namespace.id == Repository.namespace_user))
+ .where(Repository.name == repo_name, Namespace.username == namespace,
+ BlobUpload.uuid == upload_uuid)
+ .get())
+ except BlobUpload.DoesNotExist:
+ raise InvalidBlobUpload()
+
+
+def initiate_upload(namespace, repo_name, uuid, location_name, storage_metadata):
+ """ Initiates a blob upload for the repository with the given namespace and name,
+ in a specific location. """
+ repo = _basequery.get_existing_repository(namespace, repo_name)
+ return initiate_upload_for_repo(repo, uuid, location_name, storage_metadata)
+
+
+def initiate_upload_for_repo(repo, uuid, location_name, storage_metadata):
+ """ Initiates a blob upload for a specific repository object, in a specific location. """
+ location = storage_model.get_image_location_for_name(location_name)
+ return BlobUpload.create(repository=repo, location=location.id, uuid=uuid,
+ storage_metadata=storage_metadata)
+
+
+def get_shared_blob(digest):
+ """ Returns the ImageStorage blob with the given digest or, if not present,
+ returns None. This method is *only* to be used for shared blobs that are
+ globally accessible, such as the special empty gzipped tar layer that Docker
+ no longer pushes to us.
+ """
+ assert digest
+ try:
+ return ImageStorage.get(content_checksum=digest, uploading=False)
+ except ImageStorage.DoesNotExist:
+ return None
+
+
+def get_or_create_shared_blob(digest, byte_data, storage):
+ """ Returns the ImageStorage blob with the given digest or, if not present,
+ adds a row and writes the given byte data to the storage engine.
+ This method is *only* to be used for shared blobs that are globally
+ accessible, such as the special empty gzipped tar layer that Docker
+ no longer pushes to us.
+ """
+ assert digest
+ assert byte_data is not None
+ assert storage
+
+ try:
+ return ImageStorage.get(content_checksum=digest, uploading=False)
+ except ImageStorage.DoesNotExist:
+ record = ImageStorage.create(image_size=len(byte_data), content_checksum=digest,
+ cas_path=True, uploading=True)
+ preferred = storage.preferred_locations[0]
+ location_obj = ImageStorageLocation.get(name=preferred)
+ try:
+ storage.put_content([preferred], storage_model.get_layer_path(record), byte_data)
+ ImageStoragePlacement.create(storage=record, location=location_obj)
+
+ record.uploading = False
+ record.save()
+ except:
+ logger.exception('Exception when trying to write special layer %s', digest)
+ record.delete_instance()
+ raise
+
+ return record
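+
+
+# Illustrative sketch (not part of this change): ensuring a globally shared blob (such as the
+# empty gzipped tar layer) exists. `app_storage` is a hypothetical distributed-storage driver,
+# and the digest would normally be computed from `layer_bytes`.
+def _example_ensure_shared_layer(app_storage, layer_bytes, layer_digest):
+  existing = get_shared_blob(layer_digest)
+  return existing or get_or_create_shared_blob(layer_digest, layer_bytes, app_storage)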
diff --git a/data/model/build.py b/data/model/build.py
new file mode 100644
index 000000000..79e282509
--- /dev/null
+++ b/data/model/build.py
@@ -0,0 +1,323 @@
+import json
+
+from datetime import timedelta, datetime
+
+from peewee import JOIN
+
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from data.database import (BuildTriggerService, RepositoryBuildTrigger, Repository, Namespace, User,
+ RepositoryBuild, BUILD_PHASE, db_random_func, UseThenDisconnect,
+ TRIGGER_DISABLE_REASON)
+from data.model import (InvalidBuildTriggerException, InvalidRepositoryBuildException,
+ db_transaction, user as user_model, config)
+from data.fields import DecryptedValue
+
+
+PRESUMED_DEAD_BUILD_AGE = timedelta(days=15)
+PHASES_NOT_ALLOWED_TO_CANCEL_FROM = (BUILD_PHASE.PUSHING, BUILD_PHASE.COMPLETE,
+ BUILD_PHASE.ERROR, BUILD_PHASE.INTERNAL_ERROR)
+
+ARCHIVABLE_BUILD_PHASES = [BUILD_PHASE.COMPLETE, BUILD_PHASE.ERROR, BUILD_PHASE.CANCELLED]
+
+
+def update_build_trigger(trigger, config, auth_token=None, write_token=None):
+ trigger.config = json.dumps(config or {})
+
+ # TODO(remove-unenc): Remove legacy field.
+ if auth_token is not None:
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ trigger.auth_token = auth_token
+
+ trigger.secure_auth_token = auth_token
+
+ if write_token is not None:
+ trigger.write_token = write_token
+
+ trigger.save()
+
+
+def create_build_trigger(repo, service_name, auth_token, user, pull_robot=None, config=None):
+ service = BuildTriggerService.get(name=service_name)
+
+ # TODO(remove-unenc): Remove legacy field.
+ old_auth_token = None
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ old_auth_token = auth_token
+
+ secure_auth_token = DecryptedValue(auth_token) if auth_token else None
+ trigger = RepositoryBuildTrigger.create(repository=repo, service=service,
+ auth_token=old_auth_token,
+ secure_auth_token=secure_auth_token,
+ connected_user=user,
+ pull_robot=pull_robot,
+ config=json.dumps(config or {}))
+ return trigger
+
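+# Illustrative sketch (not part of this change): while the remove-unenc migration is in its
+# dual-write phase, a newly created trigger carries the OAuth token both in the legacy plaintext
+# column and in the encrypted secure_auth_token column. The 'github' service name and the
+# `repo`, `user` and `token` values here are assumptions for illustration.
+def _example_create_github_trigger(repo, user, token):
+  return create_build_trigger(repo, 'github', token, user, config={'build_source': 'org/repo'})
+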
+
+def get_build_trigger(trigger_uuid):
+ try:
+ return (RepositoryBuildTrigger
+ .select(RepositoryBuildTrigger, BuildTriggerService, Repository, Namespace)
+ .join(BuildTriggerService)
+ .switch(RepositoryBuildTrigger)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(RepositoryBuildTrigger)
+ .join(User, on=(RepositoryBuildTrigger.connected_user == User.id))
+ .where(RepositoryBuildTrigger.uuid == trigger_uuid)
+ .get())
+ except RepositoryBuildTrigger.DoesNotExist:
+ msg = 'No build trigger with uuid: %s' % trigger_uuid
+ raise InvalidBuildTriggerException(msg)
+
+
+def list_build_triggers(namespace_name, repository_name):
+ return (RepositoryBuildTrigger
+ .select(RepositoryBuildTrigger, BuildTriggerService, Repository)
+ .join(BuildTriggerService)
+ .switch(RepositoryBuildTrigger)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
+
+
+def list_trigger_builds(namespace_name, repository_name, trigger_uuid,
+ limit):
+ return (list_repository_builds(namespace_name, repository_name, limit)
+ .where(RepositoryBuildTrigger.uuid == trigger_uuid))
+
+
+def get_repository_for_resource(resource_key):
+ try:
+ return (Repository
+ .select(Repository, Namespace)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Repository)
+ .join(RepositoryBuild)
+ .where(RepositoryBuild.resource_key == resource_key)
+ .get())
+ except Repository.DoesNotExist:
+ return None
+
+
+def _get_build_base_query():
+ return (RepositoryBuild
+ .select(RepositoryBuild, RepositoryBuildTrigger, BuildTriggerService, Repository,
+ Namespace, User)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(RepositoryBuild)
+ .join(User, JOIN.LEFT_OUTER)
+ .switch(RepositoryBuild)
+ .join(RepositoryBuildTrigger, JOIN.LEFT_OUTER)
+ .join(BuildTriggerService, JOIN.LEFT_OUTER)
+ .order_by(RepositoryBuild.started.desc()))
+
+
+def get_repository_build(build_uuid):
+ try:
+ return _get_build_base_query().where(RepositoryBuild.uuid == build_uuid).get()
+
+ except RepositoryBuild.DoesNotExist:
+ msg = 'Unable to locate a build by id: %s' % build_uuid
+ raise InvalidRepositoryBuildException(msg)
+
+
+def list_repository_builds(namespace_name, repository_name, limit,
+ include_inactive=True, since=None):
+ query = (_get_build_base_query()
+ .where(Repository.name == repository_name, Namespace.username == namespace_name)
+ .limit(limit))
+
+ if since is not None:
+ query = query.where(RepositoryBuild.started >= since)
+
+ if not include_inactive:
+ query = query.where(RepositoryBuild.phase != BUILD_PHASE.ERROR,
+ RepositoryBuild.phase != BUILD_PHASE.COMPLETE)
+
+ return query
+
+
+def get_recent_repository_build(namespace_name, repository_name):
+ query = list_repository_builds(namespace_name, repository_name, 1)
+ try:
+ return query.get()
+ except RepositoryBuild.DoesNotExist:
+ return None
+
+
+def create_repository_build(repo, access_token, job_config_obj, dockerfile_id,
+ display_name, trigger=None, pull_robot_name=None):
+ pull_robot = None
+ if pull_robot_name:
+ pull_robot = user_model.lookup_robot(pull_robot_name)
+
+ return RepositoryBuild.create(repository=repo, access_token=access_token,
+ job_config=json.dumps(job_config_obj),
+ display_name=display_name, trigger=trigger,
+ resource_key=dockerfile_id,
+ pull_robot=pull_robot)
+
+
+def get_pull_robot_name(trigger):
+ if not trigger.pull_robot:
+ return None
+
+ return trigger.pull_robot.username
+
+
+def _get_build_row(build_uuid):
+ return RepositoryBuild.select().where(RepositoryBuild.uuid == build_uuid).get()
+
+
+def update_phase_then_close(build_uuid, phase):
+ """ A function to change the phase of a build """
+ with UseThenDisconnect(config.app_config):
+ try:
+ build = _get_build_row(build_uuid)
+ except RepositoryBuild.DoesNotExist:
+ return False
+
+ # Can't update a cancelled build
+ if build.phase == BUILD_PHASE.CANCELLED:
+ return False
+
+ updated = (RepositoryBuild
+ .update(phase=phase)
+ .where(RepositoryBuild.id == build.id, RepositoryBuild.phase == build.phase)
+ .execute())
+
+ return updated > 0
+
+
+def create_cancel_build_in_queue(build_phase, build_queue_id, build_queue):
+ """ A function to cancel a build before it leaves the queue """
+
+ def cancel_build():
+ cancelled = False
+
+ if build_queue_id is not None:
+ cancelled = build_queue.cancel(build_queue_id)
+
+ if build_phase != BUILD_PHASE.WAITING:
+ return False
+
+ return cancelled
+
+ return cancel_build
+
+
+def create_cancel_build_in_manager(build_phase, build_uuid, build_canceller):
+ """ A function to cancel the build before it starts to push """
+
+ def cancel_build():
+ if build_phase in PHASES_NOT_ALLOWED_TO_CANCEL_FROM:
+ return False
+
+ return build_canceller.try_cancel_build(build_uuid)
+
+ return cancel_build
+
+
+def cancel_repository_build(build, build_queue):
+ """ This tries to cancel the build returns true if request is successful false
+ if it can't be cancelled """
+ from app import build_canceller
+ from buildman.jobutil.buildjob import BuildJobNotifier
+
+ cancel_builds = [create_cancel_build_in_queue(build.phase, build.queue_id, build_queue),
+ create_cancel_build_in_manager(build.phase, build.uuid, build_canceller), ]
+ for cancelled in cancel_builds:
+ if cancelled():
+ updated = update_phase_then_close(build.uuid, BUILD_PHASE.CANCELLED)
+ if updated:
+ BuildJobNotifier(build.uuid).send_notification("build_cancelled")
+
+ return updated
+
+ return False
+
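+# Illustrative sketch (not part of this change): cancellation is attempted in two stages, first
+# against the work queue (only effective while the build is still WAITING) and then against the
+# build manager; cancel_repository_build only flips the phase to CANCELLED if one stage succeeds.
+# `build`, `build_queue` and `build_canceller` are hypothetical objects of the kinds used above.
+def _example_cancel_stages(build, build_queue, build_canceller):
+  in_queue = create_cancel_build_in_queue(build.phase, build.queue_id, build_queue)
+  in_manager = create_cancel_build_in_manager(build.phase, build.uuid, build_canceller)
+  return any(cancel() for cancel in (in_queue, in_manager))
+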
+
+def get_archivable_build():
+ presumed_dead_date = datetime.utcnow() - PRESUMED_DEAD_BUILD_AGE
+
+ candidates = (RepositoryBuild
+ .select(RepositoryBuild.id)
+ .where((RepositoryBuild.phase << ARCHIVABLE_BUILD_PHASES) |
+ (RepositoryBuild.started < presumed_dead_date),
+ RepositoryBuild.logs_archived == False)
+ .limit(50)
+ .alias('candidates'))
+
+ try:
+ found_id = (RepositoryBuild
+ .select(candidates.c.id)
+ .from_(candidates)
+ .order_by(db_random_func())
+ .get())
+ return RepositoryBuild.get(id=found_id)
+ except RepositoryBuild.DoesNotExist:
+ return None
+
+
+def mark_build_archived(build_uuid):
+ """ Mark a build as archived, and return True if we were the ones who actually
+ updated the row. """
+ return (RepositoryBuild
+ .update(logs_archived=True)
+ .where(RepositoryBuild.uuid == build_uuid,
+ RepositoryBuild.logs_archived == False)
+ .execute()) > 0
+
+
+def toggle_build_trigger(trigger, enabled, reason=TRIGGER_DISABLE_REASON.USER_TOGGLED):
+ """ Toggles the enabled status of a build trigger. """
+ trigger.enabled = enabled
+
+ if not enabled:
+ trigger.disabled_reason = RepositoryBuildTrigger.disabled_reason.get_id(reason)
+ trigger.disabled_datetime = datetime.utcnow()
+
+ trigger.save()
+
+
+def update_trigger_disable_status(trigger, final_phase):
+ """ Updates the disable status of the given build trigger. If the build trigger had a
+ failure, then the counter is increased and, if we've reached the limit, the trigger is
+ automatically disabled. Otherwise, if the trigger succeeded, its counters are reset. This
+ ensures that triggers that continue to error are eventually automatically disabled.
+ """
+ with db_transaction():
+ try:
+ trigger = RepositoryBuildTrigger.get(id=trigger.id)
+ except RepositoryBuildTrigger.DoesNotExist:
+ # Already deleted.
+ return
+
+ # If the build completed successfully, then reset the successive counters.
+ if final_phase == BUILD_PHASE.COMPLETE:
+ trigger.successive_failure_count = 0
+ trigger.successive_internal_error_count = 0
+ trigger.save()
+ return
+
+ # Otherwise, increment the counters and check for trigger disable.
+ if final_phase == BUILD_PHASE.ERROR:
+ trigger.successive_failure_count = trigger.successive_failure_count + 1
+ trigger.successive_internal_error_count = 0
+ elif final_phase == BUILD_PHASE.INTERNAL_ERROR:
+ trigger.successive_internal_error_count = trigger.successive_internal_error_count + 1
+
+ # Check if we need to disable the trigger.
+ failure_threshold = config.app_config.get('SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD')
+ error_threshold = config.app_config.get('SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD')
+
+ if failure_threshold and trigger.successive_failure_count >= failure_threshold:
+ toggle_build_trigger(trigger, False, TRIGGER_DISABLE_REASON.BUILD_FALURES)
+ elif (error_threshold and
+ trigger.successive_internal_error_count >= error_threshold):
+ toggle_build_trigger(trigger, False, TRIGGER_DISABLE_REASON.INTERNAL_ERRORS)
+ else:
+ # Save the trigger changes.
+ trigger.save()
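+
+
+# Illustrative sketch (not part of this change): with a hypothetical
+# SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD of 3, three consecutive builds ending in
+# BUILD_PHASE.ERROR disable the trigger (reason BUILD_FALURES), while a COMPLETE build in
+# between resets the counters.
+def _example_successive_failures(trigger):
+  for _ in range(3):
+    update_trigger_disable_status(trigger, BUILD_PHASE.ERROR)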
diff --git a/data/model/gc.py b/data/model/gc.py
new file mode 100644
index 000000000..7f898bec8
--- /dev/null
+++ b/data/model/gc.py
@@ -0,0 +1,554 @@
+import logging
+
+from data.model import config, db_transaction, storage, _basequery, tag as pre_oci_tag
+from data.model.oci import tag as oci_tag
+from data.database import Repository, db_for_update
+from data.database import ApprTag
+from data.database import (Tag, Manifest, ManifestBlob, ManifestChild, ManifestLegacyImage,
+ ManifestLabel, Label, TagManifestLabel)
+from data.database import RepositoryTag, TagManifest, Image, DerivedStorageForImage
+from data.database import TagManifestToManifest, TagToRepositoryTag, TagManifestLabelMap
+
+logger = logging.getLogger(__name__)
+
+class _GarbageCollectorContext(object):
+ def __init__(self, repository):
+ self.repository = repository
+ self.manifest_ids = set()
+ self.label_ids = set()
+ self.blob_ids = set()
+ self.legacy_image_ids = set()
+
+ def add_manifest_id(self, manifest_id):
+ self.manifest_ids.add(manifest_id)
+
+ def add_label_id(self, label_id):
+ self.label_ids.add(label_id)
+
+ def add_blob_id(self, blob_id):
+ self.blob_ids.add(blob_id)
+
+ def add_legacy_image_id(self, legacy_image_id):
+ self.legacy_image_ids.add(legacy_image_id)
+
+ def mark_label_id_removed(self, label_id):
+ self.label_ids.remove(label_id)
+
+ def mark_manifest_removed(self, manifest):
+ self.manifest_ids.remove(manifest.id)
+
+ def mark_legacy_image_removed(self, legacy_image):
+ self.legacy_image_ids.remove(legacy_image.id)
+
+ def mark_blob_id_removed(self, blob_id):
+ self.blob_ids.remove(blob_id)
+
+
+def purge_repository(namespace_name, repository_name):
+ """ Completely delete all traces of the repository. Will return True upon
+ complete success, and False upon partial or total failure. Garbage
+ collection is incremental and repeatable, so this return value does
+ not need to be checked or responded to.
+ """
+ try:
+ repo = _basequery.get_existing_repository(namespace_name, repository_name)
+ except Repository.DoesNotExist:
+ return False
+
+ assert repo.name == repository_name
+
+ # Delete the repository of all Appr-referenced entries.
+ # Note that new-model Tags must be deleted in *two* passes, as they can reference parent tags,
+ # and MySQL is... particular... about such relationships when deleting.
+ if repo.kind.name == 'application':
+ ApprTag.delete().where(ApprTag.repository == repo, ~(ApprTag.linked_tag >> None)).execute()
+ ApprTag.delete().where(ApprTag.repository == repo).execute()
+ else:
+ # GC to remove the images and storage.
+ _purge_repository_contents(repo)
+
+ # Ensure there are no additional tags, manifests, images or blobs in the repository.
+ assert ApprTag.select().where(ApprTag.repository == repo).count() == 0
+ assert Tag.select().where(Tag.repository == repo).count() == 0
+ assert RepositoryTag.select().where(RepositoryTag.repository == repo).count() == 0
+ assert Manifest.select().where(Manifest.repository == repo).count() == 0
+ assert ManifestBlob.select().where(ManifestBlob.repository == repo).count() == 0
+ assert Image.select().where(Image.repository == repo).count() == 0
+
+ # Delete the rest of the repository metadata.
+ try:
+ # Make sure the repository still exists.
+ fetched = _basequery.get_existing_repository(namespace_name, repository_name)
+ except Repository.DoesNotExist:
+ return False
+
+ fetched.delete_instance(recursive=True, delete_nullable=False)
+
+ # Run callbacks
+ for callback in config.repo_cleanup_callbacks:
+ callback(namespace_name, repository_name)
+
+ return True
+
+
+def _chunk_iterate_for_deletion(query, chunk_size=10):
+ """ Returns an iterator that loads the rows returned by the given query in chunks. Note that
+ order is not guaranteed here, so this will only work (i.e. not return duplicates) if
+ the rows returned are being deleted between calls.
+ """
+ while True:
+ results = list(query.limit(chunk_size))
+ if not results:
+ raise StopIteration
+
+ yield results
+
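+# Illustrative sketch (not part of this change): the helper re-runs the query for every chunk, so
+# it only terminates (and avoids duplicates) if the caller removes the returned rows before asking
+# for the next chunk, as the purge helpers below do. A minimal destructive consumer:
+def _example_consume_in_chunks(query):
+  deleted = 0
+  for rows in _chunk_iterate_for_deletion(query, chunk_size=50):
+    for row in rows:
+      row.delete_instance()
+      deleted += 1
+  return deleted
+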
+
+def _purge_repository_contents(repo):
+ """ Purges all the contents of a repository, removing all of its tags,
+ manifests and images.
+ """
+ logger.debug('Purging repository %s', repo)
+
+ # Purge via all the tags.
+ while True:
+ found = False
+ for tags in _chunk_iterate_for_deletion(Tag.select().where(Tag.repository == repo)):
+ logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
+ found = True
+ context = _GarbageCollectorContext(repo)
+ for tag in tags:
+ logger.debug('Deleting tag %s under repository %s', tag, repo)
+ assert tag.repository_id == repo.id
+ _purge_oci_tag(tag, context, allow_non_expired=True)
+
+ _run_garbage_collection(context)
+
+ if not found:
+ break
+
+ # TODO: remove this once we're fully on the OCI data model.
+ while True:
+ found = False
+ repo_tag_query = RepositoryTag.select().where(RepositoryTag.repository == repo)
+ for tags in _chunk_iterate_for_deletion(repo_tag_query):
+ logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
+ found = True
+ context = _GarbageCollectorContext(repo)
+
+ for tag in tags:
+ logger.debug('Deleting tag %s under repository %s', tag, repo)
+ assert tag.repository_id == repo.id
+ _purge_pre_oci_tag(tag, context, allow_non_expired=True)
+
+ _run_garbage_collection(context)
+
+ if not found:
+ break
+
+ # Add all remaining images to a new context. We do this here to minimize the number of images
+ # we need to load.
+ while True:
+ found_image = False
+ image_context = _GarbageCollectorContext(repo)
+ for image in Image.select().where(Image.repository == repo):
+ found_image = True
+ logger.debug('Deleting image %s under repository %s', image, repo)
+ assert image.repository_id == repo.id
+ image_context.add_legacy_image_id(image.id)
+
+ _run_garbage_collection(image_context)
+
+ if not found_image:
+ break
+
+
+def garbage_collect_repo(repo):
+ """ Performs garbage collection over the contents of a repository. """
+ # Purge expired tags.
+ had_changes = False
+
+ for tags in _chunk_iterate_for_deletion(oci_tag.lookup_unrecoverable_tags(repo)):
+ logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
+ context = _GarbageCollectorContext(repo)
+ for tag in tags:
+ logger.debug('Deleting tag %s under repository %s', tag, repo)
+ assert tag.repository_id == repo.id
+ assert tag.lifetime_end_ms is not None
+ _purge_oci_tag(tag, context)
+
+ _run_garbage_collection(context)
+ had_changes = True
+
+ for tags in _chunk_iterate_for_deletion(pre_oci_tag.lookup_unrecoverable_tags(repo)):
+ logger.debug('Found %s tags to GC under repository %s', len(tags), repo)
+ context = _GarbageCollectorContext(repo)
+ for tag in tags:
+ logger.debug('Deleting tag %s under repository %s', tag, repo)
+ assert tag.repository_id == repo.id
+ assert tag.lifetime_end_ts is not None
+ _purge_pre_oci_tag(tag, context)
+
+ _run_garbage_collection(context)
+ had_changes = True
+
+ return had_changes
+
+
+def _run_garbage_collection(context):
+ """ Runs the garbage collection loop, deleting manifests, images, labels and blobs
+ in an iterative fashion.
+ """
+ has_changes = True
+
+ while has_changes:
+ has_changes = False
+
+ # GC all manifests encountered.
+ for manifest_id in list(context.manifest_ids):
+ if _garbage_collect_manifest(manifest_id, context):
+ has_changes = True
+
+ # GC all images encountered.
+ for image_id in list(context.legacy_image_ids):
+ if _garbage_collect_legacy_image(image_id, context):
+ has_changes = True
+
+ # GC all labels encountered.
+ for label_id in list(context.label_ids):
+ if _garbage_collect_label(label_id, context):
+ has_changes = True
+
+ # GC any blobs encountered.
+ if context.blob_ids:
+ storage_ids_removed = set(storage.garbage_collect_storage(context.blob_ids))
+ for blob_removed_id in storage_ids_removed:
+ context.mark_blob_id_removed(blob_removed_id)
+ has_changes = True
+
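+# Illustrative sketch (not part of this change): the loop above runs to a fixed point because
+# removing a manifest can expose newly unreferenced images, labels and blobs, which are picked up
+# on the next pass. A caller typically just seeds the context and runs it:
+def _example_gc_single_manifest(repo, manifest_id):
+  context = _GarbageCollectorContext(repo)
+  context.add_manifest_id(manifest_id)
+  _run_garbage_collection(context)
+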
+
+def _purge_oci_tag(tag, context, allow_non_expired=False):
+ assert tag.repository_id == context.repository.id
+
+ if not allow_non_expired:
+ assert tag.lifetime_end_ms is not None
+ assert tag.lifetime_end_ms <= oci_tag.get_epoch_timestamp_ms()
+
+ # Add the manifest to be GCed.
+ context.add_manifest_id(tag.manifest_id)
+
+ with db_transaction():
+ # Reload the tag and verify its lifetime_end_ms has not changed.
+ try:
+ reloaded_tag = db_for_update(Tag.select().where(Tag.id == tag.id)).get()
+ except Tag.DoesNotExist:
+ return False
+
+ assert reloaded_tag.id == tag.id
+ assert reloaded_tag.repository_id == context.repository.id
+ if reloaded_tag.lifetime_end_ms != tag.lifetime_end_ms:
+ return False
+
+ # Delete mapping rows.
+ TagToRepositoryTag.delete().where(TagToRepositoryTag.tag == tag).execute()
+
+ # Delete the tag.
+ tag.delete_instance()
+
+
+def _purge_pre_oci_tag(tag, context, allow_non_expired=False):
+ assert tag.repository_id == context.repository.id
+
+ if not allow_non_expired:
+ assert tag.lifetime_end_ts is not None
+ assert tag.lifetime_end_ts <= pre_oci_tag.get_epoch_timestamp()
+
+ # If it exists, GC the tag manifest.
+ try:
+ tag_manifest = TagManifest.select().where(TagManifest.tag == tag).get()
+ _garbage_collect_legacy_manifest(tag_manifest.id, context)
+ except TagManifest.DoesNotExist:
+ pass
+
+ # Add the tag's legacy image to be GCed.
+ context.add_legacy_image_id(tag.image_id)
+
+ with db_transaction():
+ # Reload the tag and verify its lifetime_end_ts has not changed.
+ try:
+ reloaded_tag = db_for_update(RepositoryTag.select().where(RepositoryTag.id == tag.id)).get()
+ except RepositoryTag.DoesNotExist:
+ return False
+
+ assert reloaded_tag.id == tag.id
+ assert reloaded_tag.repository_id == context.repository.id
+ if reloaded_tag.lifetime_end_ts != tag.lifetime_end_ts:
+ return False
+
+ # Delete mapping rows.
+ TagToRepositoryTag.delete().where(TagToRepositoryTag.repository_tag == reloaded_tag).execute()
+
+ # Delete the tag.
+ reloaded_tag.delete_instance()
+
+
+def _check_manifest_used(manifest_id):
+ assert manifest_id is not None
+
+ with db_transaction():
+ # Check if the manifest is referenced by any other tag.
+ try:
+ Tag.select().where(Tag.manifest == manifest_id).get()
+ return True
+ except Tag.DoesNotExist:
+ pass
+
+ # Check if the manifest is referenced as a child of another manifest.
+ try:
+ ManifestChild.select().where(ManifestChild.child_manifest == manifest_id).get()
+ return True
+ except ManifestChild.DoesNotExist:
+ pass
+
+ return False
+
+
+def _garbage_collect_manifest(manifest_id, context):
+ assert manifest_id is not None
+
+ # Make sure the manifest isn't referenced.
+ if _check_manifest_used(manifest_id):
+ return False
+
+ # Add the manifest's blobs to the context to be GCed.
+ for manifest_blob in ManifestBlob.select().where(ManifestBlob.manifest == manifest_id):
+ context.add_blob_id(manifest_blob.blob_id)
+
+ # Retrieve the manifest's associated image, if any.
+ try:
+ legacy_image_id = ManifestLegacyImage.get(manifest=manifest_id).image_id
+ context.add_legacy_image_id(legacy_image_id)
+ except ManifestLegacyImage.DoesNotExist:
+ legacy_image_id = None
+
+ # Add child manifests to be GCed.
+ for connector in ManifestChild.select().where(ManifestChild.manifest == manifest_id):
+ context.add_manifest_id(connector.child_manifest_id)
+
+ # Add the labels to be GCed.
+ for manifest_label in ManifestLabel.select().where(ManifestLabel.manifest == manifest_id):
+ context.add_label_id(manifest_label.label_id)
+
+ # Delete the manifest.
+ with db_transaction():
+ try:
+ manifest = Manifest.select().where(Manifest.id == manifest_id).get()
+ except Manifest.DoesNotExist:
+ return False
+
+ assert manifest.id == manifest_id
+ assert manifest.repository_id == context.repository.id
+ if _check_manifest_used(manifest_id):
+ return False
+
+ # Delete any label mappings.
+ (TagManifestLabelMap
+ .delete()
+ .where(TagManifestLabelMap.manifest == manifest_id)
+ .execute())
+
+ # Delete any mapping rows for the manifest.
+ TagManifestToManifest.delete().where(TagManifestToManifest.manifest == manifest_id).execute()
+
+ # Delete any label rows.
+ ManifestLabel.delete().where(ManifestLabel.manifest == manifest_id,
+ ManifestLabel.repository == context.repository).execute()
+
+ # Delete any child manifest rows.
+ ManifestChild.delete().where(ManifestChild.manifest == manifest_id,
+ ManifestChild.repository == context.repository).execute()
+
+ # Delete the manifest blobs for the manifest.
+ ManifestBlob.delete().where(ManifestBlob.manifest == manifest_id,
+ ManifestBlob.repository == context.repository).execute()
+
+ # Delete the manifest legacy image row.
+ if legacy_image_id:
+ (ManifestLegacyImage
+ .delete()
+ .where(ManifestLegacyImage.manifest == manifest_id,
+ ManifestLegacyImage.repository == context.repository)
+ .execute())
+
+ # Delete the manifest.
+ manifest.delete_instance()
+
+ context.mark_manifest_removed(manifest)
+ return True
+
+
+def _garbage_collect_legacy_manifest(legacy_manifest_id, context):
+ assert legacy_manifest_id is not None
+
+ # Add the labels to be GCed.
+ query = TagManifestLabel.select().where(TagManifestLabel.annotated == legacy_manifest_id)
+ for manifest_label in query:
+ context.add_label_id(manifest_label.label_id)
+
+ # Delete the tag manifest.
+ with db_transaction():
+ try:
+ tag_manifest = TagManifest.select().where(TagManifest.id == legacy_manifest_id).get()
+ except TagManifest.DoesNotExist:
+ return False
+
+ assert tag_manifest.id == legacy_manifest_id
+ assert tag_manifest.tag.repository_id == context.repository.id
+
+ # Delete any label mapping rows.
+ (TagManifestLabelMap
+ .delete()
+ .where(TagManifestLabelMap.tag_manifest == legacy_manifest_id)
+ .execute())
+
+ # Delete the label rows.
+ TagManifestLabel.delete().where(TagManifestLabel.annotated == legacy_manifest_id).execute()
+
+ # Delete the mapping row if it exists.
+ try:
+ tmt = (TagManifestToManifest
+ .select()
+ .where(TagManifestToManifest.tag_manifest == tag_manifest)
+ .get())
+ context.add_manifest_id(tmt.manifest_id)
+ tmt.delete_instance()
+ except TagManifestToManifest.DoesNotExist:
+ pass
+
+ # Delete the tag manifest.
+ tag_manifest.delete_instance()
+
+ return True
+
+
+def _check_image_used(legacy_image_id):
+ assert legacy_image_id is not None
+
+ with db_transaction():
+ # Check if the image is referenced by a manifest.
+ try:
+ ManifestLegacyImage.select().where(ManifestLegacyImage.image == legacy_image_id).get()
+ return True
+ except ManifestLegacyImage.DoesNotExist:
+ pass
+
+ # Check if the image is referenced by a tag.
+ try:
+ RepositoryTag.select().where(RepositoryTag.image == legacy_image_id).get()
+ return True
+ except RepositoryTag.DoesNotExist:
+ pass
+
+ # Check if the image is referenced by another image.
+ try:
+ Image.select().where(Image.parent == legacy_image_id).get()
+ return True
+ except Image.DoesNotExist:
+ pass
+
+ return False
+
+
+def _garbage_collect_legacy_image(legacy_image_id, context):
+ assert legacy_image_id is not None
+
+ # Check if the image is referenced.
+ if _check_image_used(legacy_image_id):
+ return False
+
+ # We have an unreferenced image. We can now delete it.
+ # Grab any derived storage for the image.
+ for derived in (DerivedStorageForImage
+ .select()
+ .where(DerivedStorageForImage.source_image == legacy_image_id)):
+ context.add_blob_id(derived.derivative_id)
+
+ try:
+ image = Image.select().where(Image.id == legacy_image_id).get()
+ except Image.DoesNotExist:
+ return False
+
+ assert image.repository_id == context.repository.id
+
+ # Add the image's blob to be GCed.
+ context.add_blob_id(image.storage_id)
+
+ # If the image has a parent ID, add the parent for GC.
+ if image.parent_id is not None:
+ context.add_legacy_image_id(image.parent_id)
+
+ # Delete the image.
+ with db_transaction():
+ if _check_image_used(legacy_image_id):
+ return False
+
+ try:
+ image = Image.select().where(Image.id == legacy_image_id).get()
+ except Image.DoesNotExist:
+ return False
+
+ assert image.id == legacy_image_id
+ assert image.repository_id == context.repository.id
+
+ # Delete any derived storage for the image.
+ (DerivedStorageForImage
+ .delete()
+ .where(DerivedStorageForImage.source_image == legacy_image_id)
+ .execute())
+
+ # Delete the image itself.
+ image.delete_instance()
+
+ context.mark_legacy_image_removed(image)
+
+ if config.image_cleanup_callbacks:
+ for callback in config.image_cleanup_callbacks:
+ callback([image])
+
+ return True
+
+
+def _check_label_used(label_id):
+ assert label_id is not None
+
+ with db_transaction():
+ # Check if the label is referenced by another manifest or tag manifest.
+ try:
+ ManifestLabel.select().where(ManifestLabel.label == label_id).get()
+ return True
+ except ManifestLabel.DoesNotExist:
+ pass
+
+ try:
+ TagManifestLabel.select().where(TagManifestLabel.label == label_id).get()
+ return True
+ except TagManifestLabel.DoesNotExist:
+ pass
+
+ return False
+
+
+def _garbage_collect_label(label_id, context):
+ assert label_id is not None
+
+ # We can now delete the label.
+ with db_transaction():
+ if _check_label_used(label_id):
+ return False
+
+ result = Label.delete().where(Label.id == label_id).execute() == 1
+
+ if result:
+ context.mark_label_id_removed(label_id)
+
+ return result
diff --git a/data/model/health.py b/data/model/health.py
new file mode 100644
index 000000000..b40cee025
--- /dev/null
+++ b/data/model/health.py
@@ -0,0 +1,22 @@
+import logging
+
+from data.database import TeamRole, validate_database_url
+
+logger = logging.getLogger(__name__)
+
+def check_health(app_config):
+ # Attempt to connect to the database first. If the DB is not responding,
+ # validate_database_url will time out quickly, as opposed to a normal
+ # connect, which would simply hang (thus breaking the health check).
+ try:
+ validate_database_url(app_config['DB_URI'], {}, connect_timeout=3)
+ except Exception as ex:
+ return (False, 'Could not connect to the database: %s' % ex.message)
+
+ # Now connect to the DB and check that it contains some team role kinds.
+ try:
+ okay = bool(list(TeamRole.select().limit(1)))
+ return (okay, 'Could not connect to the database' if not okay else None)
+ except Exception as ex:
+ return (False, 'Could not connect to the database: %s' % ex.message)
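+
+
+# Illustrative sketch (not part of this change): a health endpoint might surface the tuple
+# returned above directly. `app` is a hypothetical Flask application object.
+def _example_health_view(app):
+  healthy, message = check_health(app.config)
+  return ('OK' if healthy else message or 'unhealthy'), (200 if healthy else 503)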
diff --git a/data/model/image.py b/data/model/image.py
new file mode 100644
index 000000000..1c6f1b952
--- /dev/null
+++ b/data/model/image.py
@@ -0,0 +1,516 @@
+import logging
+import hashlib
+import json
+
+from collections import defaultdict
+from datetime import datetime
+import dateutil.parser
+
+from peewee import JOIN, IntegrityError, fn
+
+from data.model import (DataModelException, db_transaction, _basequery, storage,
+ InvalidImageException)
+from data.database import (Image, Repository, ImageStoragePlacement, Namespace, ImageStorage,
+ ImageStorageLocation, RepositoryPermission, DerivedStorageForImage,
+ ImageStorageTransformation, User)
+
+from util.canonicaljson import canonicalize
+
+logger = logging.getLogger(__name__)
+
+def _namespace_id_for_username(username):
+ try:
+ return User.get(username=username).id
+ except User.DoesNotExist:
+ return None
+
+
+def get_image_with_storage(docker_image_id, storage_uuid):
+ """ Returns the image with the given docker image ID and storage uuid or None if none.
+ """
+ try:
+ return (Image
+ .select(Image, ImageStorage)
+ .join(ImageStorage)
+ .where(Image.docker_image_id == docker_image_id,
+ ImageStorage.uuid == storage_uuid)
+ .get())
+ except Image.DoesNotExist:
+ return None
+
+
+def get_parent_images(namespace_name, repository_name, image_obj):
+ """ Returns a list of parent Image objects starting with the most recent parent
+ and ending with the base layer. The images in this query will include the storage.
+ """
+ parents = image_obj.ancestors
+
+ # Ancestors are in the format /<root id>/<intermediate id>/.../<parent id>/, with each path
+ # section containing the database Id of the image row.
+ parent_db_ids = parents.strip('/').split('/')
+ if parent_db_ids == ['']:
+ return []
+
+ def filter_to_parents(query):
+ return query.where(Image.id << parent_db_ids)
+
+ parents = _get_repository_images_and_storages(namespace_name, repository_name,
+ filter_to_parents)
+ id_to_image = {unicode(image.id): image for image in parents}
+ try:
+ return [id_to_image[parent_id] for parent_id in reversed(parent_db_ids)]
+ except KeyError as ke:
+ logger.exception('Could not find an expected parent image for image %s', image_obj.id)
+ raise DataModelException('Unknown parent image')
+
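+# Illustrative sketch (not part of this change): an ancestors string such as '/1/2/3/' denotes
+# root image 1 down to parent image 3, so stripping the slashes and reversing yields the parent
+# chain from the most recent parent back to the base layer.
+def _example_parent_ids(ancestors_string):
+  parent_db_ids = ancestors_string.strip('/').split('/')
+  return list(reversed(parent_db_ids)) if parent_db_ids != [''] else []
+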
+
+def get_placements_for_images(images):
+ """ Returns the placements for the given images, as a map from image storage ID to placements. """
+ if not images:
+ return {}
+
+ query = (ImageStoragePlacement
+ .select(ImageStoragePlacement, ImageStorageLocation, ImageStorage)
+ .join(ImageStorageLocation)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage)
+ .where(ImageStorage.id << [image.storage_id for image in images]))
+
+ placement_map = defaultdict(list)
+ for placement in query:
+ placement_map[placement.storage.id].append(placement)
+
+ return dict(placement_map)
+
+
+def get_image_and_placements(namespace_name, repo_name, docker_image_id):
+ """ Returns the repo image (with a storage object) and storage placements for the image
+ or (None, None) if none is found.
+ """
+ repo_image = get_repo_image_and_storage(namespace_name, repo_name, docker_image_id)
+ if repo_image is None:
+ return (None, None)
+
+ query = (ImageStoragePlacement
+ .select(ImageStoragePlacement, ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage)
+ .where(ImageStorage.id == repo_image.storage_id))
+
+ return repo_image, list(query)
+
+
+def get_repo_image(namespace_name, repository_name, docker_image_id):
+ """ Returns the repository image with the given Docker image ID or None if none.
+ Does not include the storage object.
+ """
+ def limit_to_image_id(query):
+ return query.where(Image.docker_image_id == docker_image_id).limit(1)
+
+ query = _get_repository_images(namespace_name, repository_name, limit_to_image_id)
+ try:
+ return query.get()
+ except Image.DoesNotExist:
+ return None
+
+
+def get_repo_image_and_storage(namespace_name, repository_name, docker_image_id):
+ """ Returns the repository image with the given Docker image ID or None if none.
+ Includes the storage object.
+ """
+ def limit_to_image_id(query):
+ return query.where(Image.docker_image_id == docker_image_id)
+
+ images = _get_repository_images_and_storages(namespace_name, repository_name, limit_to_image_id)
+ if not images:
+ return None
+
+ return images[0]
+
+
+def get_image_by_id(namespace_name, repository_name, docker_image_id):
+ """ Returns the repository image with the given Docker image ID or raises if not found.
+ Includes the storage object.
+ """
+ image = get_repo_image_and_storage(namespace_name, repository_name, docker_image_id)
+ if not image:
+ raise InvalidImageException('Unable to find image \'%s\' for repo \'%s/%s\'' %
+ (docker_image_id, namespace_name, repository_name))
+ return image
+
+
+def _get_repository_images_and_storages(namespace_name, repository_name, query_modifier):
+ query = (Image
+ .select(Image, ImageStorage)
+ .join(ImageStorage)
+ .switch(Image)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.name == repository_name, Namespace.username == namespace_name))
+
+ query = query_modifier(query)
+ return query
+
+
+def _get_repository_images(namespace_name, repository_name, query_modifier):
+ query = (Image
+ .select()
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.name == repository_name, Namespace.username == namespace_name))
+
+ query = query_modifier(query)
+ return query
+
+
+def lookup_repository_images(repo, docker_image_ids):
+ return (Image
+ .select(Image, ImageStorage)
+ .join(ImageStorage)
+ .where(Image.repository == repo, Image.docker_image_id << docker_image_ids))
+
+
+def get_repository_images_without_placements(repo_obj, with_ancestor=None):
+ query = (Image
+ .select(Image, ImageStorage)
+ .join(ImageStorage)
+ .where(Image.repository == repo_obj))
+
+ if with_ancestor:
+ ancestors_string = '%s%s/' % (with_ancestor.ancestors, with_ancestor.id)
+ query = query.where((Image.ancestors ** (ancestors_string + '%')) |
+ (Image.id == with_ancestor.id))
+
+ return query
+
+
+def get_repository_images(namespace_name, repository_name):
+ """ Returns all the repository images in the repository. Does not include storage objects. """
+ return _get_repository_images(namespace_name, repository_name, lambda q: q)
+
+
+def __translate_ancestry(old_ancestry, translations, repo_obj, username, preferred_location):
+ if old_ancestry == '/':
+ return '/'
+
+ def translate_id(old_id, docker_image_id):
+ logger.debug('Translating id: %s', old_id)
+ if old_id not in translations:
+ image_in_repo = find_create_or_link_image(docker_image_id, repo_obj, username, translations,
+ preferred_location)
+ translations[old_id] = image_in_repo.id
+ return translations[old_id]
+
+ # Select all the ancestor Docker IDs in a single query.
+ old_ids = [int(id_str) for id_str in old_ancestry.split('/')[1:-1]]
+ query = Image.select(Image.id, Image.docker_image_id).where(Image.id << old_ids)
+ old_images = {i.id: i.docker_image_id for i in query}
+
+ # Translate the old images into new ones.
+ new_ids = [str(translate_id(old_id, old_images[old_id])) for old_id in old_ids]
+ return '/%s/' % '/'.join(new_ids)
+
+
+def _find_or_link_image(existing_image, repo_obj, username, translations, preferred_location):
+ with db_transaction():
+ # Check for an existing image, under the transaction, to make sure it doesn't already exist.
+ repo_image = get_repo_image(repo_obj.namespace_user.username, repo_obj.name,
+ existing_image.docker_image_id)
+ if repo_image:
+ return repo_image
+
+ # Make sure the existing base image still exists.
+ try:
+ to_copy = Image.select().join(ImageStorage).where(Image.id == existing_image.id).get()
+
+ msg = 'Linking image to existing storage with docker id: %s and uuid: %s'
+ logger.debug(msg, existing_image.docker_image_id, to_copy.storage.uuid)
+
+ new_image_ancestry = __translate_ancestry(to_copy.ancestors, translations, repo_obj,
+ username, preferred_location)
+
+ copied_storage = to_copy.storage
+
+ translated_parent_id = None
+ if new_image_ancestry != '/':
+ translated_parent_id = int(new_image_ancestry.split('/')[-2])
+
+ new_image = Image.create(docker_image_id=existing_image.docker_image_id,
+ repository=repo_obj,
+ storage=copied_storage,
+ ancestors=new_image_ancestry,
+ command=existing_image.command,
+ created=existing_image.created,
+ comment=existing_image.comment,
+ v1_json_metadata=existing_image.v1_json_metadata,
+ aggregate_size=existing_image.aggregate_size,
+ parent=translated_parent_id,
+ v1_checksum=existing_image.v1_checksum)
+
+
+ logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id)
+ translations[existing_image.id] = new_image.id
+ return new_image
+ except Image.DoesNotExist:
+ return None
+
+
+def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
+ preferred_location):
+
+ # First check for the image existing in the repository. If found, we simply return it.
+ repo_image = get_repo_image(repo_obj.namespace_user.username, repo_obj.name,
+ docker_image_id)
+ if repo_image:
+ return repo_image
+
+ # We next check to see if there is an existing storage the new image can link to.
+ existing_image_query = (Image
+ .select(Image, ImageStorage)
+ .distinct()
+ .join(ImageStorage)
+ .switch(Image)
+ .join(Repository)
+ .join(RepositoryPermission, JOIN.LEFT_OUTER)
+ .switch(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(ImageStorage.uploading == False,
+ Image.docker_image_id == docker_image_id))
+
+ existing_image_query = _basequery.filter_to_repos_for_user(existing_image_query,
+ _namespace_id_for_username(username))
+
+ # If there is an existing image, we try to translate its ancestry and copy its storage.
+ new_image = None
+ try:
+ logger.debug('Looking up existing image for ID: %s', docker_image_id)
+ existing_image = existing_image_query.get()
+
+ logger.debug('Existing image %s found for ID: %s', existing_image.id, docker_image_id)
+ new_image = _find_or_link_image(existing_image, repo_obj, username, translations,
+ preferred_location)
+ if new_image:
+ return new_image
+ except Image.DoesNotExist:
+ logger.debug('No existing image found for ID: %s', docker_image_id)
+
+ # Otherwise, create a new storage directly.
+ with db_transaction():
+ # Final check for an existing image, under the transaction.
+ repo_image = get_repo_image(repo_obj.namespace_user.username, repo_obj.name,
+ docker_image_id)
+ if repo_image:
+ return repo_image
+
+ logger.debug('Creating new storage for docker id: %s', docker_image_id)
+ new_storage = storage.create_v1_storage(preferred_location)
+
+ return Image.create(docker_image_id=docker_image_id,
+ repository=repo_obj, storage=new_storage,
+ ancestors='/')
+
+
+def set_image_metadata(docker_image_id, namespace_name, repository_name, created_date_str, comment,
+ command, v1_json_metadata, parent=None):
+ """ Sets metadata that is specific to how a binary piece of storage fits into the layer tree.
+ """
+ with db_transaction():
+ try:
+ fetched = (Image
+ .select(Image, ImageStorage)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Image)
+ .join(ImageStorage)
+ .where(Repository.name == repository_name, Namespace.username == namespace_name,
+ Image.docker_image_id == docker_image_id)
+ .get())
+ except Image.DoesNotExist:
+ raise DataModelException('No image with specified id and repository')
+
+ fetched.created = datetime.now()
+ if created_date_str is not None:
+ try:
+ fetched.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
+ except:
+ # parse raises different exceptions, so we cannot use a specific kind of handler here.
+ pass
+
+ # We clean up any old checksum in case this is a retry after a failure.
+ fetched.v1_checksum = None
+ fetched.comment = comment
+ fetched.command = command
+ fetched.v1_json_metadata = v1_json_metadata
+
+ if parent:
+ fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
+ fetched.parent = parent
+
+ fetched.save()
+ return fetched
+
+
+def get_image(repo, docker_image_id):
+ try:
+ return (Image
+ .select(Image, ImageStorage)
+ .join(ImageStorage)
+ .where(Image.docker_image_id == docker_image_id, Image.repository == repo)
+ .get())
+ except Image.DoesNotExist:
+ return None
+
+
+def get_image_by_db_id(id):
+ try:
+ return Image.get(id=id)
+ except Image.DoesNotExist:
+ return None
+
+
+def synthesize_v1_image(repo, image_storage_id, storage_image_size, docker_image_id,
+ created_date_str, comment, command, v1_json_metadata, parent_image=None):
+ """ Find an existing image with this docker image id, and if none exists, write one with the
+ specified metadata.
+ """
+ ancestors = '/'
+ if parent_image is not None:
+ ancestors = '{0}{1}/'.format(parent_image.ancestors, parent_image.id)
+
+ created = None
+ if created_date_str is not None:
+ try:
+ created = dateutil.parser.parse(created_date_str).replace(tzinfo=None)
+ except:
+ # parse raises different exceptions, so we cannot use a specific kind of handler here.
+ pass
+
+ # Get the aggregate size for the image.
+ aggregate_size = _basequery.calculate_image_aggregate_size(ancestors, storage_image_size,
+ parent_image)
+
+ try:
+ return Image.create(docker_image_id=docker_image_id, ancestors=ancestors, comment=comment,
+ command=command, v1_json_metadata=v1_json_metadata, created=created,
+ storage=image_storage_id, repository=repo, parent=parent_image,
+ aggregate_size=aggregate_size)
+ except IntegrityError:
+ return Image.get(docker_image_id=docker_image_id, repository=repo)
+
+
+def ensure_image_locations(*names):
+ with db_transaction():
+ locations = ImageStorageLocation.select().where(ImageStorageLocation.name << names)
+
+ insert_names = list(names)
+
+ for location in locations:
+ insert_names.remove(location.name)
+
+ if not insert_names:
+ return
+
+ data = [{'name': name} for name in insert_names]
+ ImageStorageLocation.insert_many(data).execute()
+
+
+def get_max_id_for_sec_scan():
+ """ Gets the maximum id for a clair sec scan """
+ return Image.select(fn.Max(Image.id)).scalar()
+
+
+def get_min_id_for_sec_scan(version):
+ """ Gets the minimum id for a clair sec scan """
+ return (Image
+ .select(fn.Min(Image.id))
+ .where(Image.security_indexed_engine < version)
+ .scalar())
+
+
+def total_image_count():
+ """ Returns the total number of images in DB """
+ return Image.select().count()
+
+
+def get_image_pk_field():
+ """ Returns the primary key for Image DB model """
+ return Image.id
+
+
+def get_images_eligible_for_scan(clair_version):
+ """ Returns a query that gives all images eligible for a clair scan """
+ return (get_image_with_storage_and_parent_base()
+ .where(Image.security_indexed_engine < clair_version)
+ .where(ImageStorage.uploading == False))
+
+
+def get_image_with_storage_and_parent_base():
+ Parent = Image.alias()
+ ParentImageStorage = ImageStorage.alias()
+
+ return (Image
+ .select(Image, ImageStorage, Parent, ParentImageStorage)
+ .join(ImageStorage)
+ .switch(Image)
+ .join(Parent, JOIN.LEFT_OUTER, on=(Image.parent == Parent.id))
+ .join(ParentImageStorage, JOIN.LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage)))
+
+
+def set_secscan_status(image, indexed, version):
+ return (Image
+ .update(security_indexed=indexed, security_indexed_engine=version)
+ .where(Image.id == image.id)
+ .where((Image.security_indexed_engine != version) | (Image.security_indexed != indexed))
+ .execute()) != 0
+
+
+def _get_uniqueness_hash(varying_metadata):
+ if not varying_metadata:
+ return None
+
+ return hashlib.sha256(json.dumps(canonicalize(varying_metadata))).hexdigest()
+
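+# Illustrative sketch (not part of this change): canonicalizing before hashing makes the
+# uniqueness hash independent of dictionary key ordering, so logically equal metadata maps to the
+# same derived storage row.
+def _example_uniqueness_hash_is_order_independent():
+  first = _get_uniqueness_hash({'os': 'linux', 'arch': 'amd64'})
+  second = _get_uniqueness_hash({'arch': 'amd64', 'os': 'linux'})
+  return first == second  # expected True
+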
+
+def find_or_create_derived_storage(source_image, transformation_name, preferred_location,
+ varying_metadata=None):
+ existing = find_derived_storage_for_image(source_image, transformation_name, varying_metadata)
+ if existing is not None:
+ return existing
+
+ uniqueness_hash = _get_uniqueness_hash(varying_metadata)
+ trans = ImageStorageTransformation.get(name=transformation_name)
+ new_storage = storage.create_v1_storage(preferred_location)
+
+ try:
+ derived = DerivedStorageForImage.create(source_image=source_image, derivative=new_storage,
+ transformation=trans, uniqueness_hash=uniqueness_hash)
+ except IntegrityError:
+ # Storage was created while this method executed. Just return the existing.
+ ImageStoragePlacement.delete().where(ImageStoragePlacement.storage == new_storage).execute()
+ new_storage.delete_instance()
+ return find_derived_storage_for_image(source_image, transformation_name, varying_metadata)
+
+ return derived
+
+
+def find_derived_storage_for_image(source_image, transformation_name, varying_metadata=None):
+ uniqueness_hash = _get_uniqueness_hash(varying_metadata)
+
+ try:
+ found = (DerivedStorageForImage
+ .select(ImageStorage, DerivedStorageForImage)
+ .join(ImageStorage)
+ .switch(DerivedStorageForImage)
+ .join(ImageStorageTransformation)
+ .where(DerivedStorageForImage.source_image == source_image,
+ ImageStorageTransformation.name == transformation_name,
+ DerivedStorageForImage.uniqueness_hash == uniqueness_hash)
+ .get())
+ return found
+ except DerivedStorageForImage.DoesNotExist:
+ return None
+
+
+def delete_derived_storage(derived_storage):
+ derived_storage.derivative.delete_instance(recursive=True)
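
Taken together, the three functions above give a race-safe find-or-create for derived storage: a concurrent creator that loses the IntegrityError race cleans up its placements and storage row and falls back to the winner's row. A usage sketch, assuming an existing `source_image` row, a transformation named 'squash' registered in ImageStorageTransformation, and a storage location named 'local_us' (both names are placeholders):

    # Find or create the derived storage for this (image, transformation, metadata) triple.
    derived = find_or_create_derived_storage(source_image, 'squash', 'local_us',
                                             varying_metadata={'tag': 'latest'})

    # A later lookup with the same arguments resolves to the same row...
    assert find_derived_storage_for_image(source_image, 'squash',
                                          varying_metadata={'tag': 'latest'}) is not None

    # ...and deleting it removes the derivative storage (and its placements) recursively.
    delete_derived_storage(derived)
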
diff --git a/data/model/label.py b/data/model/label.py
new file mode 100644
index 000000000..fce7479ba
--- /dev/null
+++ b/data/model/label.py
@@ -0,0 +1,143 @@
+import logging
+
+from cachetools.func import lru_cache
+
+from data.database import (Label, TagManifestLabel, MediaType, LabelSourceType, db_transaction,
+ ManifestLabel, TagManifestLabelMap, TagManifestToManifest)
+from data.model import InvalidLabelKeyException, InvalidMediaTypeException, DataModelException
+from data.text import prefix_search
+from util.validation import validate_label_key
+from util.validation import is_json
+
+logger = logging.getLogger(__name__)
+
+
+@lru_cache(maxsize=1)
+def get_label_source_types():
+ source_type_map = {}
+ for kind in LabelSourceType.select():
+ source_type_map[kind.id] = kind.name
+ source_type_map[kind.name] = kind.id
+
+ return source_type_map
+
+
+@lru_cache(maxsize=1)
+def get_media_types():
+ media_type_map = {}
+ for kind in MediaType.select():
+ media_type_map[kind.id] = kind.name
+ media_type_map[kind.name] = kind.id
+
+ return media_type_map
+
+
+def _get_label_source_type_id(name):
+ kinds = get_label_source_types()
+ return kinds[name]
+
+
+def _get_media_type_id(name):
+ kinds = get_media_types()
+ return kinds[name]
+
+
+def create_manifest_label(tag_manifest, key, value, source_type_name, media_type_name=None):
+ """ Creates a new manifest label on a specific tag manifest. """
+ if not key:
+ raise InvalidLabelKeyException()
+
+  # Note that we don't prevent invalid label names coming from the manifest from being stored, as
+  # Docker does not currently prevent them from being put into such manifests.
+ if not validate_label_key(key) and source_type_name != 'manifest':
+ raise InvalidLabelKeyException()
+
+ # Find the matching media type. If none specified, we infer.
+ if media_type_name is None:
+ media_type_name = 'text/plain'
+ if is_json(value):
+ media_type_name = 'application/json'
+
+ media_type_id = _get_media_type_id(media_type_name)
+ if media_type_id is None:
+ raise InvalidMediaTypeException()
+
+ source_type_id = _get_label_source_type_id(source_type_name)
+
+ with db_transaction():
+ label = Label.create(key=key, value=value, source_type=source_type_id, media_type=media_type_id)
+ tag_manifest_label = TagManifestLabel.create(annotated=tag_manifest, label=label,
+ repository=tag_manifest.tag.repository)
+ try:
+ mapping_row = TagManifestToManifest.get(tag_manifest=tag_manifest)
+ if mapping_row.manifest:
+ manifest_label = ManifestLabel.create(manifest=mapping_row.manifest, label=label,
+ repository=tag_manifest.tag.repository)
+ TagManifestLabelMap.create(manifest_label=manifest_label,
+ tag_manifest_label=tag_manifest_label,
+ label=label,
+ manifest=mapping_row.manifest,
+ tag_manifest=tag_manifest)
+ except TagManifestToManifest.DoesNotExist:
+ pass
+
+ return label
+
+
+def list_manifest_labels(tag_manifest, prefix_filter=None):
+ """ Lists all labels found on the given tag manifest. """
+ query = (Label.select(Label, MediaType)
+ .join(MediaType)
+ .switch(Label)
+ .join(LabelSourceType)
+ .switch(Label)
+ .join(TagManifestLabel)
+ .where(TagManifestLabel.annotated == tag_manifest))
+
+ if prefix_filter is not None:
+ query = query.where(prefix_search(Label.key, prefix_filter))
+
+ return query
+
+
+def get_manifest_label(label_uuid, tag_manifest):
+ """ Retrieves the manifest label on the tag manifest with the given ID. """
+ try:
+ return (Label.select(Label, LabelSourceType)
+ .join(LabelSourceType)
+ .where(Label.uuid == label_uuid)
+ .switch(Label)
+ .join(TagManifestLabel)
+ .where(TagManifestLabel.annotated == tag_manifest)
+ .get())
+ except Label.DoesNotExist:
+ return None
+
+
+def delete_manifest_label(label_uuid, tag_manifest):
+ """ Deletes the manifest label on the tag manifest with the given ID. """
+
+ # Find the label itself.
+ label = get_manifest_label(label_uuid, tag_manifest)
+ if label is None:
+ return None
+
+ if not label.source_type.mutable:
+ raise DataModelException('Cannot delete immutable label')
+
+ # Delete the mapping records and label.
+ (TagManifestLabelMap
+ .delete()
+ .where(TagManifestLabelMap.label == label)
+ .execute())
+
+ deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()
+ if deleted_count != 1:
+    logger.warning('Unexpected number of labels deleted for matching label %s', label_uuid)
+
+ deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()
+ if deleted_count != 1:
+    logger.warning('Unexpected number of labels deleted for matching label %s', label_uuid)
+
+ label.delete_instance(recursive=False)
+ return label
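
A short sketch of how these helpers compose, assuming `tag_manifest` is an existing TagManifest row and that an 'api' label source type exists and is mutable; the label key is illustrative. When no media type is passed, create_manifest_label infers 'application/json' for JSON values and 'text/plain' otherwise:

    # Create a label; the media type is inferred from the value.
    label = create_manifest_label(tag_manifest, 'com.example.expires-after', '2w', 'api')

    # List labels, optionally filtered by key prefix.
    for row in list_manifest_labels(tag_manifest, prefix_filter='com.example'):
        logger.debug('%s=%s (%s)', row.key, row.value, row.media_type.name)

    # Mutable labels can be removed again by UUID.
    delete_manifest_label(label.uuid, tag_manifest)
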
diff --git a/data/model/log.py b/data/model/log.py
new file mode 100644
index 000000000..e78ec4b1b
--- /dev/null
+++ b/data/model/log.py
@@ -0,0 +1,299 @@
+import json
+import logging
+
+from datetime import datetime, timedelta
+from calendar import timegm
+from cachetools.func import lru_cache
+
+from peewee import JOIN, fn, PeeweeException
+
+from data.database import LogEntryKind, User, RepositoryActionCount, db, LogEntry3
+from data.model import config, user, DataModelException
+
+logger = logging.getLogger(__name__)
+
+ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING = ['pull_repo']
+
+
+def _logs_query(selections, start_time=None, end_time=None, performer=None, repository=None,
+ namespace=None, ignore=None, model=LogEntry3, id_range=None):
+ """ Returns a query for selecting logs from the table, with various options and filters. """
+ assert (start_time is not None and end_time is not None) or (id_range is not None)
+ joined = (model.select(*selections).switch(model))
+
+ if id_range is not None:
+ joined = joined.where(model.id >= id_range[0], model.id <= id_range[1])
+ else:
+ joined = joined.where(model.datetime >= start_time, model.datetime < end_time)
+
+ if repository:
+ joined = joined.where(model.repository == repository)
+
+ if performer:
+ joined = joined.where(model.performer == performer)
+
+ if namespace and not repository:
+ namespace_user = user.get_user_or_org(namespace)
+ if namespace_user is None:
+ raise DataModelException('Invalid namespace requested')
+
+ joined = joined.where(model.account == namespace_user.id)
+
+ if ignore:
+ kind_map = get_log_entry_kinds()
+ ignore_ids = [kind_map[kind_name] for kind_name in ignore]
+ joined = joined.where(~(model.kind << ignore_ids))
+
+ return joined
+
+
+def _latest_logs_query(selections, performer=None, repository=None, namespace=None, ignore=None,
+ model=LogEntry3, size=None):
+ """ Returns a query for selecting the latest logs from the table, with various options and
+ filters. """
+ query = (model.select(*selections).switch(model))
+
+ if repository:
+ query = query.where(model.repository == repository)
+
+ if performer:
+    query = query.where(model.performer == performer)
+
+ if namespace and not repository:
+ namespace_user = user.get_user_or_org(namespace)
+ if namespace_user is None:
+ raise DataModelException('Invalid namespace requested')
+
+ query = query.where(model.account == namespace_user.id)
+
+ if ignore:
+ kind_map = get_log_entry_kinds()
+ ignore_ids = [kind_map[kind_name] for kind_name in ignore]
+ query = query.where(~(model.kind << ignore_ids))
+
+ query = query.order_by(model.datetime.desc(), model.id)
+
+ if size:
+ query = query.limit(size)
+
+ return query
+
+
+@lru_cache(maxsize=1)
+def get_log_entry_kinds():
+ kind_map = {}
+ for kind in LogEntryKind.select():
+ kind_map[kind.id] = kind.name
+ kind_map[kind.name] = kind.id
+
+ return kind_map
+
+
+def _get_log_entry_kind(name):
+ kinds = get_log_entry_kinds()
+ return kinds[name]
+
+
+def get_aggregated_logs(start_time, end_time, performer=None, repository=None, namespace=None,
+ ignore=None, model=LogEntry3):
+ """ Returns the count of logs, by kind and day, for the logs matching the given filters. """
+ date = db.extract_date('day', model.datetime)
+ selections = [model.kind, date.alias('day'), fn.Count(model.id).alias('count')]
+ query = _logs_query(selections, start_time, end_time, performer, repository, namespace, ignore,
+ model=model)
+ return query.group_by(date, model.kind)
+
+
+def get_logs_query(start_time=None, end_time=None, performer=None, repository=None, namespace=None,
+ ignore=None, model=LogEntry3, id_range=None):
+ """ Returns the logs matching the given filters. """
+ Performer = User.alias()
+ Account = User.alias()
+ selections = [model, Performer]
+
+ if namespace is None and repository is None:
+ selections.append(Account)
+
+ query = _logs_query(selections, start_time, end_time, performer, repository, namespace, ignore,
+ model=model, id_range=id_range)
+ query = (query.switch(model).join(Performer, JOIN.LEFT_OUTER,
+ on=(model.performer == Performer.id).alias('performer')))
+
+ if namespace is None and repository is None:
+ query = (query.switch(model).join(Account, JOIN.LEFT_OUTER,
+ on=(model.account == Account.id).alias('account')))
+
+ return query
+
+
+def get_latest_logs_query(performer=None, repository=None, namespace=None, ignore=None,
+ model=LogEntry3, size=None):
+ """ Returns the latest logs matching the given filters. """
+ Performer = User.alias()
+ Account = User.alias()
+ selections = [model, Performer]
+
+ if namespace is None and repository is None:
+ selections.append(Account)
+
+ query = _latest_logs_query(selections, performer, repository, namespace, ignore, model=model,
+ size=size)
+ query = (query.switch(model).join(Performer, JOIN.LEFT_OUTER,
+ on=(model.performer == Performer.id).alias('performer')))
+
+ if namespace is None and repository is None:
+ query = (query.switch(model).join(Account, JOIN.LEFT_OUTER,
+ on=(model.account == Account.id).alias('account')))
+
+ return query
+
+
+def _json_serialize(obj):
+ if isinstance(obj, datetime):
+ return timegm(obj.utctimetuple())
+
+ return obj
+
+
+def log_action(kind_name, user_or_organization_name, performer=None, repository=None, ip=None,
+ metadata={}, timestamp=None):
+ """ Logs an entry in the LogEntry table. """
+ if not timestamp:
+ timestamp = datetime.today()
+
+ account = None
+ if user_or_organization_name is not None:
+ account = User.get(User.username == user_or_organization_name).id
+ else:
+ account = config.app_config.get('SERVICE_LOG_ACCOUNT_ID')
+ if account is None:
+ account = user.get_minimum_user_id()
+
+ if performer is not None:
+ performer = performer.id
+
+ if repository is not None:
+ repository = repository.id
+
+ kind = _get_log_entry_kind(kind_name)
+ metadata_json = json.dumps(metadata, default=_json_serialize)
+ log_data = {
+ 'kind': kind,
+ 'account': account,
+ 'performer': performer,
+ 'repository': repository,
+ 'ip': ip,
+ 'metadata_json': metadata_json,
+ 'datetime': timestamp
+ }
+
+ try:
+ LogEntry3.create(**log_data)
+ except PeeweeException as ex:
+ strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
+ if strict_logging_disabled and kind_name in ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING:
+      logger.exception('log_action failed', extra=dict(log_data, exception=ex))
+ else:
+ raise
+
+
+def get_stale_logs_start_id(model):
+ """ Gets the oldest log entry. """
+ try:
+ return (model.select(fn.Min(model.id)).tuples())[0][0]
+ except IndexError:
+ return None
+
+
+def get_stale_logs(start_id, end_id, model, cutoff_date):
+ """ Returns all the logs with IDs between start_id and end_id inclusively. """
+ return model.select().where((model.id >= start_id),
+ (model.id <= end_id),
+ model.datetime <= cutoff_date)
+
+
+def delete_stale_logs(start_id, end_id, model):
+ """ Deletes all the logs with IDs between start_id and end_id. """
+ model.delete().where((model.id >= start_id), (model.id <= end_id)).execute()
+
+
+def get_repository_action_counts(repo, start_date):
+ """ Returns the daily aggregated action counts for the given repository, starting at the given
+ start date.
+ """
+ return RepositoryActionCount.select().where(RepositoryActionCount.repository == repo,
+ RepositoryActionCount.date >= start_date)
+
+
+def get_repositories_action_sums(repository_ids):
+ """ Returns a map from repository ID to total actions within that repository in the last week. """
+ if not repository_ids:
+ return {}
+
+ # Filter the join to recent entries only.
+ last_week = datetime.now() - timedelta(weeks=1)
+ tuples = (RepositoryActionCount.select(RepositoryActionCount.repository,
+ fn.Sum(RepositoryActionCount.count))
+ .where(RepositoryActionCount.repository << repository_ids)
+ .where(RepositoryActionCount.date >= last_week)
+ .group_by(RepositoryActionCount.repository).tuples())
+
+ action_count_map = {}
+ for record in tuples:
+ action_count_map[record[0]] = record[1]
+
+ return action_count_map
+
+
+def get_minimum_id_for_logs(start_time, repository_id=None, namespace_id=None, model=LogEntry3):
+ """ Returns the minimum ID for logs matching the given repository or namespace in
+ the logs table, starting at the given start time.
+ """
+ # First try bounded by a day. Most repositories will meet this criteria, and therefore
+ # can make a much faster query.
+ day_after = start_time + timedelta(days=1)
+ result = _get_bounded_id(fn.Min, model.datetime >= start_time,
+ repository_id, namespace_id, model.datetime < day_after, model=model)
+ if result is not None:
+ return result
+
+ return _get_bounded_id(fn.Min, model.datetime >= start_time, repository_id, namespace_id,
+ model=model)
+
+
+def get_maximum_id_for_logs(end_time, repository_id=None, namespace_id=None, model=LogEntry3):
+ """ Returns the maximum ID for logs matching the given repository or namespace in
+ the logs table, ending at the given end time.
+ """
+ # First try bounded by a day. Most repositories will meet this criteria, and therefore
+ # can make a much faster query.
+ day_before = end_time - timedelta(days=1)
+ result = _get_bounded_id(fn.Max, model.datetime <= end_time,
+ repository_id, namespace_id, model.datetime > day_before, model=model)
+ if result is not None:
+ return result
+
+ return _get_bounded_id(fn.Max, model.datetime <= end_time, repository_id, namespace_id,
+ model=model)
+
+
+def _get_bounded_id(fn, filter_clause, repository_id, namespace_id, reduction_clause=None,
+ model=LogEntry3):
+ assert (namespace_id is not None) or (repository_id is not None)
+ query = (model
+ .select(fn(model.id))
+ .where(filter_clause))
+
+ if reduction_clause is not None:
+ query = query.where(reduction_clause)
+
+ if repository_id is not None:
+ query = query.where(model.repository == repository_id)
+ else:
+ query = query.where(model.account == namespace_id)
+
+ row = query.tuples()[0]
+ if not row:
+ return None
+
+ return row[0]
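
The min/max helpers above exist so that workers can walk the log tables by primary-key range instead of filtering purely by datetime, which is much cheaper at scale. A sketch of that pattern, assuming `repo` is an existing Repository row and the default LogEntry3 model (datetime and timedelta are already imported at the top of this module; the `handle` consumer is hypothetical):

    end = datetime.utcnow()
    start = end - timedelta(days=7)

    min_id = get_minimum_id_for_logs(start, repository_id=repo.id)
    max_id = get_maximum_id_for_logs(end, repository_id=repo.id)
    if min_id is not None and max_id is not None:
        for entry in get_logs_query(id_range=(min_id, max_id), repository=repo):
            handle(entry)
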
diff --git a/data/model/message.py b/data/model/message.py
new file mode 100644
index 000000000..24df4d0ba
--- /dev/null
+++ b/data/model/message.py
@@ -0,0 +1,24 @@
+from data.database import Messages, MediaType
+
+
+def get_messages():
+ """Query the data base for messages and returns a container of database message objects"""
+ return Messages.select(Messages, MediaType).join(MediaType)
+
+def create(messages):
+ """Insert messages into the database."""
+ inserted = []
+ for message in messages:
+ severity = message['severity']
+ media_type_name = message['media_type']
+ media_type = MediaType.get(name=media_type_name)
+
+ inserted.append(Messages.create(content=message['content'], media_type=media_type,
+ severity=severity))
+ return inserted
+
+def delete_message(uuids):
+ """Delete message from the database"""
+ if not uuids:
+ return
+ Messages.delete().where(Messages.uuid << uuids).execute()
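
A minimal usage sketch of the message helpers; the media type and severity values are assumptions and must match rows/values the deployment actually supports:

    created = create([{'content': 'Maintenance window at 02:00 UTC',
                       'media_type': 'text/markdown',
                       'severity': 'warning'}])

    all_messages = list(get_messages())
    delete_message([message.uuid for message in created])
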
diff --git a/data/model/modelutil.py b/data/model/modelutil.py
new file mode 100644
index 000000000..4048e4eff
--- /dev/null
+++ b/data/model/modelutil.py
@@ -0,0 +1,77 @@
+import dateutil.parser
+
+from datetime import datetime
+
+from peewee import SQL
+
+
+def paginate(query, model, descending=False, page_token=None, limit=50, sort_field_alias=None,
+ max_page=None, sort_field_name=None):
+ """ Paginates the given query using an field range, starting at the optional page_token.
+ Returns a *list* of matching results along with an unencrypted page_token for the
+ next page, if any. If descending is set to True, orders by the field descending rather
+ than ascending.
+ """
+ # Note: We use the sort_field_alias for the order_by, but not the where below. The alias is
+ # necessary for certain queries that use unions in MySQL, as it gets confused on which field
+ # to order by. The where clause, on the other hand, cannot use the alias because Postgres does
+ # not allow aliases in where clauses.
+ sort_field_name = sort_field_name or 'id'
+ sort_field = getattr(model, sort_field_name)
+
+ if sort_field_alias is not None:
+ sort_field_name = sort_field_alias
+ sort_field = SQL(sort_field_alias)
+
+ if descending:
+ query = query.order_by(sort_field.desc())
+ else:
+ query = query.order_by(sort_field)
+
+ start_index = pagination_start(page_token)
+ if start_index is not None:
+ if descending:
+ query = query.where(sort_field <= start_index)
+ else:
+ query = query.where(sort_field >= start_index)
+
+ query = query.limit(limit + 1)
+
+ page_number = (page_token.get('page_number') or None) if page_token else None
+ if page_number is not None and max_page is not None and page_number > max_page:
+ return [], None
+
+ return paginate_query(query, limit=limit, sort_field_name=sort_field_name,
+ page_number=page_number)
+
+
+def pagination_start(page_token=None):
+ """ Returns the start index for pagination for the given page token. Will return None if None. """
+ if page_token is not None:
+ start_index = page_token.get('start_index')
+ if page_token.get('is_datetime'):
+ start_index = dateutil.parser.parse(start_index)
+ return start_index
+ return None
+
+
+def paginate_query(query, limit=50, sort_field_name=None, page_number=None):
+ """ Executes the given query and returns a page's worth of results, as well as the page token
+ for the next page (if any).
+ """
+ results = list(query)
+ page_token = None
+ if len(results) > limit:
+ start_index = getattr(results[limit], sort_field_name or 'id')
+ is_datetime = False
+ if isinstance(start_index, datetime):
+ start_index = start_index.isoformat() + "Z"
+ is_datetime = True
+
+ page_token = {
+ 'start_index': start_index,
+ 'page_number': page_number + 1 if page_number else 1,
+ 'is_datetime': is_datetime,
+ }
+
+ return results[0:limit], page_token
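
A sketch of the intended calling pattern for paginate(): feed the returned page token back in until it comes back as None. The query and model are whatever the caller is paging over:

    def iterate_all(query, model, limit=50):
        """ Yields every row of `query`, one page at a time. """
        page_token = None
        while True:
            results, page_token = paginate(query, model, limit=limit, page_token=page_token)
            for result in results:
                yield result

            if page_token is None:
                break
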
diff --git a/data/model/notification.py b/data/model/notification.py
new file mode 100644
index 000000000..11a84fea7
--- /dev/null
+++ b/data/model/notification.py
@@ -0,0 +1,220 @@
+import json
+
+from peewee import SQL
+
+from data.database import (Notification, NotificationKind, User, Team, TeamMember, TeamRole,
+ RepositoryNotification, ExternalNotificationEvent, Repository,
+ ExternalNotificationMethod, Namespace, db_for_update)
+from data.model import InvalidNotificationException, db_transaction
+
+
+def create_notification(kind_name, target, metadata={}, lookup_path=None):
+ kind_ref = NotificationKind.get(name=kind_name)
+ notification = Notification.create(kind=kind_ref, target=target,
+ metadata_json=json.dumps(metadata),
+ lookup_path=lookup_path)
+ return notification
+
+
+def create_unique_notification(kind_name, target, metadata={}):
+ with db_transaction():
+ if list_notifications(target, kind_name).count() == 0:
+ create_notification(kind_name, target, metadata)
+
+
+def lookup_notification(user, uuid):
+ results = list(list_notifications(user, id_filter=uuid, include_dismissed=True, limit=1))
+ if not results:
+ return None
+
+ return results[0]
+
+
+def lookup_notifications_by_path_prefix(prefix):
+ return list((Notification
+ .select()
+ .where(Notification.lookup_path % prefix)))
+
+
+def list_notifications(user, kind_name=None, id_filter=None, include_dismissed=False,
+ page=None, limit=None):
+
+ base_query = (Notification
+ .select(Notification.id,
+ Notification.uuid,
+ Notification.kind,
+ Notification.metadata_json,
+ Notification.dismissed,
+ Notification.lookup_path,
+ Notification.created,
+ Notification.created.alias('cd'),
+ Notification.target)
+ .join(NotificationKind))
+
+ if kind_name is not None:
+ base_query = base_query.where(NotificationKind.name == kind_name)
+
+ if id_filter is not None:
+ base_query = base_query.where(Notification.uuid == id_filter)
+
+ if not include_dismissed:
+ base_query = base_query.where(Notification.dismissed == False)
+
+ # Lookup directly for the user.
+ user_direct = base_query.clone().where(Notification.target == user)
+
+ # Lookup via organizations admined by the user.
+ Org = User.alias()
+ AdminTeam = Team.alias()
+ AdminTeamMember = TeamMember.alias()
+ AdminUser = User.alias()
+
+ via_orgs = (base_query.clone()
+ .join(Org, on=(Org.id == Notification.target))
+ .join(AdminTeam, on=(Org.id == AdminTeam.organization))
+ .join(TeamRole, on=(AdminTeam.role == TeamRole.id))
+ .switch(AdminTeam)
+ .join(AdminTeamMember, on=(AdminTeam.id == AdminTeamMember.team))
+ .join(AdminUser, on=(AdminTeamMember.user == AdminUser.id))
+ .where((AdminUser.id == user) & (TeamRole.name == 'admin')))
+
+ query = user_direct | via_orgs
+
+ if page:
+ query = query.paginate(page, limit)
+ elif limit:
+ query = query.limit(limit)
+
+ return query.order_by(SQL('cd desc'))
+
+
+def delete_all_notifications_by_path_prefix(prefix):
+ (Notification
+ .delete()
+ .where(Notification.lookup_path ** (prefix + '%'))
+ .execute())
+
+
+def delete_all_notifications_by_kind(kind_name):
+ kind_ref = NotificationKind.get(name=kind_name)
+ (Notification
+ .delete()
+ .where(Notification.kind == kind_ref)
+ .execute())
+
+
+def delete_notifications_by_kind(target, kind_name):
+ kind_ref = NotificationKind.get(name=kind_name)
+ Notification.delete().where(Notification.target == target,
+ Notification.kind == kind_ref).execute()
+
+
+def delete_matching_notifications(target, kind_name, **kwargs):
+ kind_ref = NotificationKind.get(name=kind_name)
+
+ # Load all notifications for the user with the given kind.
+ notifications = (Notification
+ .select()
+ .where(Notification.target == target,
+ Notification.kind == kind_ref))
+
+ # For each, match the metadata to the specified values.
+ for notification in notifications:
+ matches = True
+ try:
+ metadata = json.loads(notification.metadata_json)
+ except:
+ continue
+
+ for (key, value) in kwargs.iteritems():
+      if key not in metadata or metadata[key] != value:
+ matches = False
+ break
+
+ if not matches:
+ continue
+
+ notification.delete_instance()
+
+
+def increment_notification_failure_count(uuid):
+ """ This increments the number of failures by one """
+ (RepositoryNotification
+ .update(number_of_failures=RepositoryNotification.number_of_failures + 1)
+ .where(RepositoryNotification.uuid == uuid)
+ .execute())
+
+
+def reset_notification_number_of_failures(namespace_name, repository_name, uuid):
+ """ This resets the number of failures for a repo notification to 0 """
+ try:
+ notification = RepositoryNotification.select().where(RepositoryNotification.uuid == uuid).get()
+ if (notification.repository.namespace_user.username != namespace_name or
+ notification.repository.name != repository_name):
+ raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+ reset_number_of_failures_to_zero(notification.id)
+ return notification
+ except RepositoryNotification.DoesNotExist:
+ return None
+
+
+def reset_number_of_failures_to_zero(notification_id):
+ """ This resets the number of failures for a repo notification to 0 """
+ RepositoryNotification.update(number_of_failures=0).where(RepositoryNotification.id == notification_id).execute()
+
+
+def create_repo_notification(repo, event_name, method_name, method_config, event_config, title=None):
+ event = ExternalNotificationEvent.get(ExternalNotificationEvent.name == event_name)
+ method = ExternalNotificationMethod.get(ExternalNotificationMethod.name == method_name)
+
+ return RepositoryNotification.create(repository=repo, event=event, method=method,
+ config_json=json.dumps(method_config), title=title,
+ event_config_json=json.dumps(event_config))
+
+
+def _base_get_notification(uuid):
+ """ This is a base query for get statements """
+ return (RepositoryNotification
+ .select(RepositoryNotification, Repository, Namespace)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(RepositoryNotification.uuid == uuid))
+
+
+def get_enabled_notification(uuid):
+ """ This returns a notification with less than 3 failures """
+ try:
+ return _base_get_notification(uuid).where(RepositoryNotification.number_of_failures < 3).get()
+ except RepositoryNotification.DoesNotExist:
+ raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+
+
+def get_repo_notification(uuid):
+ try:
+ return _base_get_notification(uuid).get()
+ except RepositoryNotification.DoesNotExist:
+ raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+
+
+def delete_repo_notification(namespace_name, repository_name, uuid):
+ found = get_repo_notification(uuid)
+ if found.repository.namespace_user.username != namespace_name or found.repository.name != repository_name:
+    raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+ found.delete_instance()
+ return found
+
+
+def list_repo_notifications(namespace_name, repository_name, event_name=None):
+ query = (RepositoryNotification
+ .select(RepositoryNotification, Repository, Namespace)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
+
+ if event_name:
+ query = (query
+ .switch(RepositoryNotification)
+ .join(ExternalNotificationEvent)
+ .where(ExternalNotificationEvent.name == event_name))
+
+ return query
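
A brief sketch of how the notification helpers compose; `some_user` is an existing User row, and the kind name and metadata keys are illustrative (the kind must exist in NotificationKind):

    # Create a notification only if an undismissed one of this kind does not already exist.
    create_unique_notification('example_kind', some_user, metadata={'days_left': 7})

    # Returns notifications targeted directly at the user as well as those targeted at
    # organizations the user administers, newest first.
    recent = list(list_notifications(some_user, kind_name='example_kind', limit=10))

    # Delete only the notifications whose metadata matches the given key/values.
    delete_matching_notifications(some_user, 'example_kind', days_left=7)
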
diff --git a/data/model/oauth.py b/data/model/oauth.py
new file mode 100644
index 000000000..182c08f32
--- /dev/null
+++ b/data/model/oauth.py
@@ -0,0 +1,434 @@
+import logging
+import json
+
+from flask import url_for
+from datetime import datetime, timedelta
+from oauth2lib.provider import AuthorizationProvider
+from oauth2lib import utils
+
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from data.database import (OAuthApplication, OAuthAuthorizationCode, OAuthAccessToken, User,
+ random_string_generator)
+from data.fields import DecryptedValue, Credential
+from data.model import user, config
+from auth import scopes
+from util import get_app_url
+
+
+logger = logging.getLogger(__name__)
+
+ACCESS_TOKEN_PREFIX_LENGTH = 20
+ACCESS_TOKEN_MINIMUM_CODE_LENGTH = 20
+AUTHORIZATION_CODE_PREFIX_LENGTH = 20
+
+
+class DatabaseAuthorizationProvider(AuthorizationProvider):
+ def get_authorized_user(self):
+ raise NotImplementedError('Subclasses must fill in the ability to get the authorized_user.')
+
+ def _generate_data_string(self):
+ return json.dumps({'username': self.get_authorized_user().username})
+
+ @property
+ def token_expires_in(self):
+ """Property method to get the token expiration time in seconds.
+ """
+ return int(60*60*24*365.25*10) # 10 Years
+
+ def validate_client_id(self, client_id):
+ return self.get_application_for_client_id(client_id) is not None
+
+ def get_application_for_client_id(self, client_id):
+ try:
+ return OAuthApplication.get(client_id=client_id)
+ except OAuthApplication.DoesNotExist:
+ return None
+
+ def validate_client_secret(self, client_id, client_secret):
+ try:
+ application = OAuthApplication.get(client_id=client_id)
+
+ # TODO(remove-unenc): Remove legacy check.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ if application.secure_client_secret is None:
+ return application.client_secret == client_secret
+
+ assert application.secure_client_secret is not None
+ return application.secure_client_secret.matches(client_secret)
+ except OAuthApplication.DoesNotExist:
+ return False
+
+ def validate_redirect_uri(self, client_id, redirect_uri):
+ internal_redirect_url = '%s%s' % (get_app_url(config.app_config),
+ url_for('web.oauth_local_handler'))
+
+ if redirect_uri == internal_redirect_url:
+ return True
+
+ try:
+ oauth_app = OAuthApplication.get(client_id=client_id)
+ if (oauth_app.redirect_uri and redirect_uri and
+ redirect_uri.startswith(oauth_app.redirect_uri)):
+ return True
+ return False
+ except OAuthApplication.DoesNotExist:
+ return False
+
+ def validate_scope(self, client_id, scopes_string):
+ return scopes.validate_scope_string(scopes_string)
+
+ def validate_access(self):
+ return self.get_authorized_user() is not None
+
+ def load_authorized_scope_string(self, client_id, username):
+ found = (OAuthAccessToken
+ .select()
+ .join(OAuthApplication)
+ .switch(OAuthAccessToken)
+ .join(User)
+ .where(OAuthApplication.client_id == client_id, User.username == username,
+ OAuthAccessToken.expires_at > datetime.utcnow()))
+ found = list(found)
+ logger.debug('Found %s matching tokens.', len(found))
+ long_scope_string = ','.join([token.scope for token in found])
+ logger.debug('Computed long scope string: %s', long_scope_string)
+ return long_scope_string
+
+ def validate_has_scopes(self, client_id, username, scope):
+ long_scope_string = self.load_authorized_scope_string(client_id, username)
+
+ # Make sure the token contains the given scopes (at least).
+ return scopes.is_subset_string(long_scope_string, scope)
+
+ def from_authorization_code(self, client_id, full_code, scope):
+ code_name = full_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
+ code_credential = full_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]
+
+ try:
+ found = (OAuthAuthorizationCode
+ .select()
+ .join(OAuthApplication)
+ .where(OAuthApplication.client_id == client_id,
+ OAuthAuthorizationCode.code_name == code_name,
+ OAuthAuthorizationCode.scope == scope)
+ .get())
+ if not found.code_credential.matches(code_credential):
+ return None
+
+ logger.debug('Returning data: %s', found.data)
+ return found.data
+ except OAuthAuthorizationCode.DoesNotExist:
+ # Fallback to the legacy lookup of the full code.
+ # TODO(remove-unenc): Remove legacy fallback.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ try:
+ found = (OAuthAuthorizationCode
+ .select()
+ .join(OAuthApplication)
+ .where(OAuthApplication.client_id == client_id,
+ OAuthAuthorizationCode.code == full_code,
+ OAuthAuthorizationCode.scope == scope)
+ .get())
+ logger.debug('Returning data: %s', found.data)
+ return found.data
+ except OAuthAuthorizationCode.DoesNotExist:
+ return None
+ else:
+ return None
+
+ def persist_authorization_code(self, client_id, full_code, scope):
+ oauth_app = OAuthApplication.get(client_id=client_id)
+ data = self._generate_data_string()
+
+ assert len(full_code) >= (AUTHORIZATION_CODE_PREFIX_LENGTH * 2)
+ code_name = full_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
+ code_credential = full_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]
+
+ # TODO(remove-unenc): Remove legacy fallback.
+ full_code = None
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ full_code = code_name + code_credential
+
+ OAuthAuthorizationCode.create(application=oauth_app,
+ code=full_code,
+ scope=scope,
+ code_name=code_name,
+ code_credential=Credential.from_string(code_credential),
+ data=data)
+
+ def persist_token_information(self, client_id, scope, access_token, token_type,
+ expires_in, refresh_token, data):
+ assert not refresh_token
+ found = user.get_user(json.loads(data)['username'])
+ if not found:
+ raise RuntimeError('Username must be in the data field')
+
+ token_name = access_token[:ACCESS_TOKEN_PREFIX_LENGTH]
+ token_code = access_token[ACCESS_TOKEN_PREFIX_LENGTH:]
+
+ assert token_name
+ assert token_code
+ assert len(token_name) == ACCESS_TOKEN_PREFIX_LENGTH
+ assert len(token_code) >= ACCESS_TOKEN_MINIMUM_CODE_LENGTH
+
+ oauth_app = OAuthApplication.get(client_id=client_id)
+ expires_at = datetime.utcnow() + timedelta(seconds=expires_in)
+ OAuthAccessToken.create(application=oauth_app,
+ authorized_user=found,
+ scope=scope,
+ token_name=token_name,
+ token_code=Credential.from_string(token_code),
+ access_token='',
+ token_type=token_type,
+ expires_at=expires_at,
+ data=data)
+
+ def get_auth_denied_response(self, response_type, client_id, redirect_uri, **params):
+ # Ensure proper response_type
+ if response_type != 'token':
+ err = 'unsupported_response_type'
+ return self._make_redirect_error_response(redirect_uri, err)
+
+ # Check redirect URI
+ is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
+ if not is_valid_redirect_uri:
+ return self._invalid_redirect_uri_response()
+
+ return self._make_redirect_error_response(redirect_uri, 'authorization_denied')
+
+ def get_token_response(self, response_type, client_id, redirect_uri, **params):
+ # Ensure proper response_type
+ if response_type != 'token':
+ err = 'unsupported_response_type'
+ return self._make_redirect_error_response(redirect_uri, err)
+
+ # Check for a valid client ID.
+ is_valid_client_id = self.validate_client_id(client_id)
+ if not is_valid_client_id:
+ err = 'unauthorized_client'
+ return self._make_redirect_error_response(redirect_uri, err)
+
+ # Check for a valid redirect URI.
+ is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
+ if not is_valid_redirect_uri:
+ return self._invalid_redirect_uri_response()
+
+ # Check conditions
+ is_valid_access = self.validate_access()
+ scope = params.get('scope', '')
+ are_valid_scopes = self.validate_scope(client_id, scope)
+
+ # Return proper error responses on invalid conditions
+ if not is_valid_access:
+ err = 'access_denied'
+ return self._make_redirect_error_response(redirect_uri, err)
+
+ if not are_valid_scopes:
+ err = 'invalid_scope'
+ return self._make_redirect_error_response(redirect_uri, err)
+
+ # Make sure we have enough random data in the token to have a public
+ # prefix and a private encrypted suffix.
+ access_token = str(self.generate_access_token())
+ assert len(access_token) - ACCESS_TOKEN_PREFIX_LENGTH >= 20
+
+ token_type = self.token_type
+ expires_in = self.token_expires_in
+
+ data = self._generate_data_string()
+ self.persist_token_information(client_id=client_id,
+ scope=scope,
+ access_token=access_token,
+ token_type=token_type,
+ expires_in=expires_in,
+ refresh_token=None,
+ data=data)
+
+ url = utils.build_url(redirect_uri, params)
+ url += '#access_token=%s&token_type=%s&expires_in=%s' % (access_token, token_type, expires_in)
+
+ return self._make_response(headers={'Location': url}, status_code=302)
+
+ def from_refresh_token(self, client_id, refresh_token, scope):
+ raise NotImplementedError()
+
+ def discard_authorization_code(self, client_id, full_code):
+ code_name = full_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
+ try:
+ found = (OAuthAuthorizationCode
+ .select()
+ .join(OAuthApplication)
+ .where(OAuthApplication.client_id == client_id,
+ OAuthAuthorizationCode.code_name == code_name)
+ .get())
+ found.delete_instance()
+ return
+ except OAuthAuthorizationCode.DoesNotExist:
+ pass
+
+ # Legacy: full code.
+ # TODO(remove-unenc): Remove legacy fallback.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ try:
+ found = (OAuthAuthorizationCode
+ .select()
+ .join(OAuthApplication)
+ .where(OAuthApplication.client_id == client_id,
+ OAuthAuthorizationCode.code == full_code)
+ .get())
+ found.delete_instance()
+ except OAuthAuthorizationCode.DoesNotExist:
+ pass
+
+ def discard_refresh_token(self, client_id, refresh_token):
+ raise NotImplementedError()
+
+
+def create_application(org, name, application_uri, redirect_uri, **kwargs):
+ client_secret = kwargs.pop('client_secret', random_string_generator(length=40)())
+
+ # TODO(remove-unenc): Remove legacy field.
+ old_client_secret = None
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ old_client_secret = client_secret
+
+ return OAuthApplication.create(organization=org,
+ name=name,
+ application_uri=application_uri,
+ redirect_uri=redirect_uri,
+ client_secret=old_client_secret,
+ secure_client_secret=DecryptedValue(client_secret),
+ **kwargs)
+
+
+def validate_access_token(access_token):
+ assert isinstance(access_token, basestring)
+ token_name = access_token[:ACCESS_TOKEN_PREFIX_LENGTH]
+ if not token_name:
+ return None
+
+ token_code = access_token[ACCESS_TOKEN_PREFIX_LENGTH:]
+ if not token_code:
+ return None
+
+ try:
+ found = (OAuthAccessToken
+ .select(OAuthAccessToken, User)
+ .join(User)
+ .where(OAuthAccessToken.token_name == token_name)
+ .get())
+
+ if found.token_code is None or not found.token_code.matches(token_code):
+ return None
+
+ return found
+ except OAuthAccessToken.DoesNotExist:
+ pass
+
+ # Legacy lookup.
+ # TODO(remove-unenc): Remove this once migrated.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ try:
+ assert access_token
+ found = (OAuthAccessToken
+ .select(OAuthAccessToken, User)
+ .join(User)
+ .where(OAuthAccessToken.access_token == access_token)
+ .get())
+ return found
+ except OAuthAccessToken.DoesNotExist:
+ return None
+
+ return None
+
+
+def get_application_for_client_id(client_id):
+ try:
+ return OAuthApplication.get(client_id=client_id)
+ except OAuthApplication.DoesNotExist:
+ return None
+
+
+def reset_client_secret(application):
+ client_secret = random_string_generator(length=40)()
+
+ # TODO(remove-unenc): Remove legacy field.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ application.client_secret = client_secret
+
+ application.secure_client_secret = DecryptedValue(client_secret)
+ application.save()
+ return application
+
+
+def lookup_application(org, client_id):
+ try:
+ return OAuthApplication.get(organization=org, client_id=client_id)
+ except OAuthApplication.DoesNotExist:
+ return None
+
+
+def delete_application(org, client_id):
+ application = lookup_application(org, client_id)
+ if not application:
+ return
+
+ application.delete_instance(recursive=True, delete_nullable=True)
+ return application
+
+
+def lookup_access_token_by_uuid(token_uuid):
+ try:
+ return OAuthAccessToken.get(OAuthAccessToken.uuid == token_uuid)
+ except OAuthAccessToken.DoesNotExist:
+ return None
+
+
+def lookup_access_token_for_user(user_obj, token_uuid):
+ try:
+ return OAuthAccessToken.get(OAuthAccessToken.authorized_user == user_obj,
+ OAuthAccessToken.uuid == token_uuid)
+ except OAuthAccessToken.DoesNotExist:
+ return None
+
+
+def list_access_tokens_for_user(user_obj):
+ query = (OAuthAccessToken
+ .select()
+ .join(OAuthApplication)
+ .switch(OAuthAccessToken)
+ .join(User)
+ .where(OAuthAccessToken.authorized_user == user_obj))
+
+ return query
+
+
+def list_applications_for_org(org):
+ query = (OAuthApplication
+ .select()
+ .join(User)
+ .where(OAuthApplication.organization == org))
+
+ return query
+
+
+def create_access_token_for_testing(user_obj, client_id, scope, access_token=None, expires_in=9000):
+ access_token = access_token or random_string_generator(length=40)()
+ token_name = access_token[:ACCESS_TOKEN_PREFIX_LENGTH]
+ token_code = access_token[ACCESS_TOKEN_PREFIX_LENGTH:]
+
+ assert len(token_name) == ACCESS_TOKEN_PREFIX_LENGTH
+ assert len(token_code) >= ACCESS_TOKEN_MINIMUM_CODE_LENGTH
+
+ expires_at = datetime.utcnow() + timedelta(seconds=expires_in)
+ application = get_application_for_client_id(client_id)
+ created = OAuthAccessToken.create(application=application,
+ authorized_user=user_obj,
+ scope=scope,
+ token_type='token',
+ access_token='',
+ token_code=Credential.from_string(token_code),
+ token_name=token_name,
+ expires_at=expires_at,
+ data='')
+ return created, access_token
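
Every token in this module follows the same layout: a short public prefix (the token_name, used for lookup) followed by a credential suffix that is stored only in hashed/encrypted form. A sketch of how a presented bearer token might be resolved to a user with validate_access_token above; the expiration handling shown is an assumption about how callers use expires_at:

    def user_for_bearer_token(access_token):
        """ Returns the authorizing User for the token, or None if unknown or expired. """
        found = validate_access_token(access_token)
        if found is None:
            return None

        if found.expires_at is not None and found.expires_at <= datetime.utcnow():
            return None

        return found.authorized_user
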
diff --git a/data/model/oci/__init__.py b/data/model/oci/__init__.py
new file mode 100644
index 000000000..39bcef2eb
--- /dev/null
+++ b/data/model/oci/__init__.py
@@ -0,0 +1,9 @@
+# There MUST NOT be any circular dependencies between these submodules. If there are, fix it by
+# moving the minimal number of things to `shared`.
+from data.model.oci import (
+ blob,
+ label,
+ manifest,
+ shared,
+ tag,
+)
diff --git a/data/model/oci/blob.py b/data/model/oci/blob.py
new file mode 100644
index 000000000..f7739c21b
--- /dev/null
+++ b/data/model/oci/blob.py
@@ -0,0 +1,26 @@
+from data.database import ImageStorage, ManifestBlob
+from data.model import BlobDoesNotExist
+from data.model.storage import get_storage_by_uuid, InvalidImageException
+from data.model.blob import get_repository_blob_by_digest as legacy_get
+
+def get_repository_blob_by_digest(repository, blob_digest):
+ """ Find the content-addressable blob linked to the specified repository and
+ returns it or None if none.
+ """
+ try:
+ storage = (ImageStorage
+ .select(ImageStorage.uuid)
+ .join(ManifestBlob)
+ .where(ManifestBlob.repository == repository,
+ ImageStorage.content_checksum == blob_digest,
+ ImageStorage.uploading == False)
+ .get())
+
+ return get_storage_by_uuid(storage.uuid)
+ except (ImageStorage.DoesNotExist, InvalidImageException):
+ # TODO: Remove once we are no longer using the legacy tables.
+ # Try the legacy call.
+ try:
+ return legacy_get(repository, blob_digest)
+ except BlobDoesNotExist:
+ return None
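
A short usage sketch; `repository` is an existing Repository row and the digest is the usual `sha256:<hex>` content checksum (the value below is illustrative):

    blob_digest = 'sha256:' + 'ab' * 32
    blob = get_repository_blob_by_digest(repository, blob_digest)
    if blob is None:
        # Not linked to this repository, neither via ManifestBlob nor via the
        # legacy image tables consulted by the fallback.
        raise BlobDoesNotExist()
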
diff --git a/data/model/oci/label.py b/data/model/oci/label.py
new file mode 100644
index 000000000..d019e6d2d
--- /dev/null
+++ b/data/model/oci/label.py
@@ -0,0 +1,142 @@
+import logging
+
+
+from data.model import InvalidLabelKeyException, InvalidMediaTypeException, DataModelException
+from data.database import (Label, Manifest, TagManifestLabel, MediaType, LabelSourceType,
+ db_transaction, ManifestLabel, TagManifestLabelMap,
+ TagManifestToManifest, Repository, TagManifest)
+from data.text import prefix_search
+from util.validation import validate_label_key
+from util.validation import is_json
+
+logger = logging.getLogger(__name__)
+
+def list_manifest_labels(manifest_id, prefix_filter=None):
+ """ Lists all labels found on the given manifest, with an optional filter by key prefix. """
+ query = (Label
+ .select(Label, MediaType)
+ .join(MediaType)
+ .switch(Label)
+ .join(LabelSourceType)
+ .switch(Label)
+ .join(ManifestLabel)
+ .where(ManifestLabel.manifest == manifest_id))
+
+ if prefix_filter is not None:
+ query = query.where(prefix_search(Label.key, prefix_filter))
+
+ return query
+
+
+def get_manifest_label(label_uuid, manifest):
+ """ Retrieves the manifest label on the manifest with the given UUID or None if none. """
+ try:
+ return (Label
+ .select(Label, LabelSourceType)
+ .join(LabelSourceType)
+ .where(Label.uuid == label_uuid)
+ .switch(Label)
+ .join(ManifestLabel)
+ .where(ManifestLabel.manifest == manifest)
+ .get())
+ except Label.DoesNotExist:
+ return None
+
+
+def create_manifest_label(manifest_id, key, value, source_type_name, media_type_name=None,
+ adjust_old_model=True):
+ """ Creates a new manifest label on a specific tag manifest. """
+ if not key:
+ raise InvalidLabelKeyException()
+
+  # Note that we don't prevent invalid label names coming from the manifest from being stored, as
+  # Docker does not currently prevent them from being put into such manifests.
+ if not validate_label_key(key) and source_type_name != 'manifest':
+ raise InvalidLabelKeyException('Key `%s` is invalid' % key)
+
+ # Find the matching media type. If none specified, we infer.
+ if media_type_name is None:
+ media_type_name = 'text/plain'
+ if is_json(value):
+ media_type_name = 'application/json'
+
+ try:
+ media_type_id = Label.media_type.get_id(media_type_name)
+ except MediaType.DoesNotExist:
+ raise InvalidMediaTypeException()
+
+ source_type_id = Label.source_type.get_id(source_type_name)
+
+ # Ensure the manifest exists.
+ try:
+ manifest = (Manifest
+ .select(Manifest, Repository)
+ .join(Repository)
+ .where(Manifest.id == manifest_id)
+ .get())
+ except Manifest.DoesNotExist:
+ return None
+
+ repository = manifest.repository
+
+ # TODO: Remove this code once the TagManifest table is gone.
+ tag_manifest = None
+ if adjust_old_model:
+ try:
+ mapping_row = (TagManifestToManifest
+ .select(TagManifestToManifest, TagManifest)
+ .join(TagManifest)
+ .where(TagManifestToManifest.manifest == manifest)
+ .get())
+ tag_manifest = mapping_row.tag_manifest
+ except TagManifestToManifest.DoesNotExist:
+ tag_manifest = None
+
+ with db_transaction():
+ label = Label.create(key=key, value=value, source_type=source_type_id, media_type=media_type_id)
+ manifest_label = ManifestLabel.create(manifest=manifest_id, label=label, repository=repository)
+
+ # If there exists a mapping to a TagManifest, add the old-style label.
+ # TODO: Remove this code once the TagManifest table is gone.
+ if tag_manifest:
+ tag_manifest_label = TagManifestLabel.create(annotated=tag_manifest, label=label,
+ repository=repository)
+ TagManifestLabelMap.create(manifest_label=manifest_label,
+ tag_manifest_label=tag_manifest_label,
+ label=label,
+ manifest=manifest,
+ tag_manifest=tag_manifest)
+
+ return label
+
+
+def delete_manifest_label(label_uuid, manifest):
+ """ Deletes the manifest label on the tag manifest with the given ID. Returns the label deleted
+ or None if none.
+ """
+ # Find the label itself.
+ label = get_manifest_label(label_uuid, manifest)
+ if label is None:
+ return None
+
+ if not label.source_type.mutable:
+ raise DataModelException('Cannot delete immutable label')
+
+ # Delete the mapping records and label.
+ # TODO: Remove this code once the TagManifest table is gone.
+ with db_transaction():
+ (TagManifestLabelMap
+ .delete()
+ .where(TagManifestLabelMap.label == label)
+ .execute())
+
+ deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()
+ if deleted_count != 1:
+      logger.warning('Unexpected number of labels deleted for matching label %s', label_uuid)
+
+ deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()
+ if deleted_count != 1:
+      logger.warning('Unexpected number of labels deleted for matching label %s', label_uuid)
+
+ label.delete_instance(recursive=False)
+ return label
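
Usage mirrors the pre-OCI label module, except that labels hang off Manifest rows and, while the legacy TagManifest tables still exist, adjust_old_model=True keeps both representations in sync. A sketch, where `manifest` is an existing Manifest row and the 'api' source type and label key are assumptions:

    label = create_manifest_label(manifest.id, 'com.example.notes', 'built by CI', 'api')
    if label is not None:
        assert get_manifest_label(label.uuid, manifest.id) is not None
        delete_manifest_label(label.uuid, manifest.id)
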
diff --git a/data/model/oci/manifest.py b/data/model/oci/manifest.py
new file mode 100644
index 000000000..85b66efc5
--- /dev/null
+++ b/data/model/oci/manifest.py
@@ -0,0 +1,321 @@
+import logging
+
+from collections import namedtuple
+
+from peewee import IntegrityError
+
+from data.database import (Tag, Manifest, ManifestBlob, ManifestLegacyImage, ManifestChild,
+ db_transaction)
+from data.model import BlobDoesNotExist
+from data.model.blob import get_or_create_shared_blob, get_shared_blob
+from data.model.oci.tag import filter_to_alive_tags, create_temporary_tag_if_necessary
+from data.model.oci.label import create_manifest_label
+from data.model.oci.retriever import RepositoryContentRetriever
+from data.model.storage import lookup_repo_storages_by_content_checksum
+from data.model.image import lookup_repository_images, get_image, synthesize_v1_image
+from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES
+from image.docker.schema1 import ManifestException
+from image.docker.schema2.list import MalformedSchema2ManifestList
+from util.validation import is_json
+
+
+TEMP_TAG_EXPIRATION_SEC = 300 # 5 minutes
+
+
+logger = logging.getLogger(__name__)
+
+CreatedManifest = namedtuple('CreatedManifest', ['manifest', 'newly_created', 'labels_to_apply'])
+
+
+class CreateManifestException(Exception):
+ """ Exception raised when creating a manifest fails and explicit exception
+ raising is requested. """
+
+
+def lookup_manifest(repository_id, manifest_digest, allow_dead=False, require_available=False,
+ temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC):
+ """ Returns the manifest with the specified digest under the specified repository
+ or None if none. If allow_dead is True, then manifests referenced by only
+ dead tags will also be returned. If require_available is True, the manifest
+ will be marked with a temporary tag to ensure it remains available.
+ """
+ if not require_available:
+ return _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)
+
+ with db_transaction():
+ found = _lookup_manifest(repository_id, manifest_digest, allow_dead=allow_dead)
+ if found is None:
+ return None
+
+ create_temporary_tag_if_necessary(found, temp_tag_expiration_sec)
+ return found
+
+
+def _lookup_manifest(repository_id, manifest_digest, allow_dead=False):
+ query = (Manifest
+ .select()
+ .where(Manifest.repository == repository_id)
+ .where(Manifest.digest == manifest_digest))
+
+ if allow_dead:
+ try:
+ return query.get()
+ except Manifest.DoesNotExist:
+ return None
+
+ # Try first to filter to those manifests referenced by an alive tag,
+ try:
+ return filter_to_alive_tags(query.join(Tag)).get()
+ except Manifest.DoesNotExist:
+ pass
+
+ # Try referenced as the child of a manifest that has an alive tag.
+ query = (query
+ .join(ManifestChild, on=(ManifestChild.child_manifest == Manifest.id))
+ .join(Tag, on=(Tag.manifest == ManifestChild.manifest)))
+
+ query = filter_to_alive_tags(query)
+
+ try:
+ return query.get()
+ except Manifest.DoesNotExist:
+ return None
+
+
+def get_or_create_manifest(repository_id, manifest_interface_instance, storage,
+ temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
+ for_tagging=False, raise_on_error=False):
+ """ Returns a CreatedManifest for the manifest in the specified repository with the matching
+ digest (if it already exists) or, if not yet created, creates and returns the manifest.
+
+ Returns None if there was an error creating the manifest, unless raise_on_error is specified,
+    in which case a CreateManifestException will be raised instead to provide more context
+    about the error.
+
+    Note that *all* blobs referenced by the manifest must already exist in the repository, or this
+    method will return None.
+ """
+ existing = lookup_manifest(repository_id, manifest_interface_instance.digest, allow_dead=True,
+ require_available=True,
+ temp_tag_expiration_sec=temp_tag_expiration_sec)
+ if existing is not None:
+ return CreatedManifest(manifest=existing, newly_created=False, labels_to_apply=None)
+
+ return _create_manifest(repository_id, manifest_interface_instance, storage,
+ temp_tag_expiration_sec, for_tagging=for_tagging,
+ raise_on_error=raise_on_error)
+
+
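
A sketch of the calling convention: `parsed_manifest` stands in for a parsed manifest object implementing the manifest interface (digest, blob digests, labels, child manifests) and `storage` for the application's storage driver; the helper functions invoked on the result are hypothetical:

    try:
        created = get_or_create_manifest(repo.id, parsed_manifest, storage, raise_on_error=True)
    except CreateManifestException as cme:
        handle_invalid_manifest(cme)  # hypothetical error handler
    else:
        if created is not None and created.newly_created and created.labels_to_apply:
            # Act on labels shared by the manifest (or by all of its child manifests).
            apply_label_side_effects(created.manifest, created.labels_to_apply)  # hypothetical
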
+def _create_manifest(repository_id, manifest_interface_instance, storage,
+ temp_tag_expiration_sec=TEMP_TAG_EXPIRATION_SEC,
+ for_tagging=False, raise_on_error=False):
+ # Validate the manifest.
+ retriever = RepositoryContentRetriever.for_repository(repository_id, storage)
+ try:
+ manifest_interface_instance.validate(retriever)
+ except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError) as ex:
+ logger.exception('Could not validate manifest `%s`', manifest_interface_instance.digest)
+ if raise_on_error:
+ raise CreateManifestException(ex)
+
+ return None
+
+ # Load, parse and get/create the child manifests, if any.
+ child_manifest_refs = manifest_interface_instance.child_manifests(retriever)
+ child_manifest_rows = {}
+ child_manifest_label_dicts = []
+
+ if child_manifest_refs is not None:
+ for child_manifest_ref in child_manifest_refs:
+ # Load and parse the child manifest.
+ try:
+ child_manifest = child_manifest_ref.manifest_obj
+ except (ManifestException, MalformedSchema2ManifestList, BlobDoesNotExist, IOError) as ex:
+ logger.exception('Could not load manifest list for manifest `%s`',
+ manifest_interface_instance.digest)
+ if raise_on_error:
+ raise CreateManifestException(ex)
+
+ return None
+
+ # Retrieve its labels.
+ labels = child_manifest.get_manifest_labels(retriever)
+ if labels is None:
+ logger.exception('Could not load manifest labels for child manifest')
+ return None
+
+ # Get/create the child manifest in the database.
+ child_manifest_info = get_or_create_manifest(repository_id, child_manifest, storage,
+ raise_on_error=raise_on_error)
+ if child_manifest_info is None:
+ logger.error('Could not get/create child manifest')
+ return None
+
+ child_manifest_rows[child_manifest_info.manifest.digest] = child_manifest_info.manifest
+ child_manifest_label_dicts.append(labels)
+
+ # Ensure all the blobs in the manifest exist.
+ digests = set(manifest_interface_instance.local_blob_digests)
+ blob_map = {}
+
+ # If the special empty layer is required, simply load it directly. This is much faster
+ # than trying to load it on a per repository basis, and that is unnecessary anyway since
+ # this layer is predefined.
+ if EMPTY_LAYER_BLOB_DIGEST in digests:
+ digests.remove(EMPTY_LAYER_BLOB_DIGEST)
+ blob_map[EMPTY_LAYER_BLOB_DIGEST] = get_shared_blob(EMPTY_LAYER_BLOB_DIGEST)
+ if not blob_map[EMPTY_LAYER_BLOB_DIGEST]:
+ logger.warning('Could not find the special empty blob in storage')
+ return None
+
+ if digests:
+ query = lookup_repo_storages_by_content_checksum(repository_id, digests)
+ blob_map.update({s.content_checksum: s for s in query})
+ for digest_str in digests:
+ if digest_str not in blob_map:
+ logger.warning('Unknown blob `%s` under manifest `%s` for repository `%s`', digest_str,
+ manifest_interface_instance.digest, repository_id)
+
+ if raise_on_error:
+ raise CreateManifestException('Unknown blob `%s`' % digest_str)
+
+ return None
+
+ # Special check: If the empty layer blob is needed for this manifest, add it to the
+ # blob map. This is necessary because Docker decided to elide sending of this special
+ # empty layer in schema version 2, but we need to have it referenced for GC and schema version 1.
+ if EMPTY_LAYER_BLOB_DIGEST not in blob_map:
+ if manifest_interface_instance.get_requires_empty_layer_blob(retriever):
+ shared_blob = get_or_create_shared_blob(EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES, storage)
+ assert not shared_blob.uploading
+ assert shared_blob.content_checksum == EMPTY_LAYER_BLOB_DIGEST
+ blob_map[EMPTY_LAYER_BLOB_DIGEST] = shared_blob
+
+ # Determine and populate the legacy image if necessary. Manifest lists will not have a legacy
+ # image.
+ legacy_image = None
+ if manifest_interface_instance.has_legacy_image:
+ legacy_image_id = _populate_legacy_image(repository_id, manifest_interface_instance, blob_map,
+ retriever)
+ if legacy_image_id is None:
+ return None
+
+ legacy_image = get_image(repository_id, legacy_image_id)
+ if legacy_image is None:
+ return None
+
+ # Create the manifest and its blobs.
+ media_type = Manifest.media_type.get_id(manifest_interface_instance.media_type)
+ storage_ids = {storage.id for storage in blob_map.values()}
+
+ with db_transaction():
+ # Check for the manifest. This is necessary because Postgres doesn't handle IntegrityErrors
+ # well under transactions.
+ try:
+ manifest = Manifest.get(repository=repository_id, digest=manifest_interface_instance.digest)
+ return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
+ except Manifest.DoesNotExist:
+ pass
+
+ # Create the manifest.
+ try:
+ manifest = Manifest.create(repository=repository_id,
+ digest=manifest_interface_instance.digest,
+ media_type=media_type,
+ manifest_bytes=manifest_interface_instance.bytes.as_encoded_str())
+ except IntegrityError:
+ manifest = Manifest.get(repository=repository_id, digest=manifest_interface_instance.digest)
+ return CreatedManifest(manifest=manifest, newly_created=False, labels_to_apply=None)
+
+ # Insert the blobs.
+ blobs_to_insert = [dict(manifest=manifest, repository=repository_id,
+ blob=storage_id) for storage_id in storage_ids]
+ if blobs_to_insert:
+ ManifestBlob.insert_many(blobs_to_insert).execute()
+
+ # Set the legacy image (if applicable).
+ if legacy_image is not None:
+ ManifestLegacyImage.create(repository=repository_id, image=legacy_image, manifest=manifest)
+
+ # Insert the manifest child rows (if applicable).
+ if child_manifest_rows:
+ children_to_insert = [dict(manifest=manifest, child_manifest=child_manifest,
+ repository=repository_id)
+ for child_manifest in child_manifest_rows.values()]
+ ManifestChild.insert_many(children_to_insert).execute()
+
+  # If this manifest is not being created for immediate tagging, add a temporary tag to the
+  # manifest to ensure it isn't GCed in the meantime. If the manifest *is* for tagging, then since
+  # we're creating a new one here, it cannot be GCed (since it isn't referenced by anything yet),
+  # so it's safe to elide the temp tag operation. If we ever change the GC code to collect *all*
+  # manifests in a repository, we will have to reevaluate this optimization at that time.
+ if not for_tagging:
+ create_temporary_tag_if_necessary(manifest, temp_tag_expiration_sec)
+
+ # Define the labels for the manifest (if any).
+ labels = manifest_interface_instance.get_manifest_labels(retriever)
+ if labels:
+ for key, value in labels.iteritems():
+ media_type = 'application/json' if is_json(value) else 'text/plain'
+ create_manifest_label(manifest, key, value, 'manifest', media_type)
+
+ # Return the dictionary of labels to apply (i.e. those labels that cause an action to be taken
+ # on the manifest or its resulting tags). We only return those labels either defined on
+ # the manifest or shared amongst all the child manifests. We intersect amongst all child manifests
+ # to ensure that any action performed is defined in all manifests.
+ labels_to_apply = labels or {}
+ if child_manifest_label_dicts:
+ labels_to_apply = child_manifest_label_dicts[0].viewitems()
+ for child_manifest_label_dict in child_manifest_label_dicts[1:]:
+ # Intersect the key+values of the labels to ensure we get the exact same result
+ # for all the child manifests.
+ labels_to_apply = labels_to_apply & child_manifest_label_dict.viewitems()
+
+ labels_to_apply = dict(labels_to_apply)
+
+ return CreatedManifest(manifest=manifest, newly_created=True, labels_to_apply=labels_to_apply)
+
+
+def _populate_legacy_image(repository_id, manifest_interface_instance, blob_map, retriever):
+ # Lookup all the images and their parent images (if any) inside the manifest.
+ # This will let us know which v1 images we need to synthesize and which ones are invalid.
+ docker_image_ids = list(manifest_interface_instance.get_legacy_image_ids(retriever))
+ images_query = lookup_repository_images(repository_id, docker_image_ids)
+ image_storage_map = {i.docker_image_id: i.storage for i in images_query}
+
+ # Rewrite any v1 image IDs that do not match the checksum in the database.
+ try:
+ rewritten_images = manifest_interface_instance.generate_legacy_layers(image_storage_map,
+ retriever)
+ rewritten_images = list(rewritten_images)
+ parent_image_map = {}
+
+ for rewritten_image in rewritten_images:
+      if rewritten_image.image_id not in image_storage_map:
+ parent_image = None
+ if rewritten_image.parent_image_id:
+ parent_image = parent_image_map.get(rewritten_image.parent_image_id)
+ if parent_image is None:
+ parent_image = get_image(repository_id, rewritten_image.parent_image_id)
+ if parent_image is None:
+ return None
+
+ storage_reference = blob_map[rewritten_image.content_checksum]
+ synthesized = synthesize_v1_image(
+ repository_id,
+ storage_reference.id,
+ storage_reference.image_size,
+ rewritten_image.image_id,
+ rewritten_image.created,
+ rewritten_image.comment,
+ rewritten_image.command,
+ rewritten_image.compat_json,
+ parent_image,
+ )
+
+ parent_image_map[rewritten_image.image_id] = synthesized
+ except ManifestException:
+ logger.exception("exception when rewriting v1 metadata")
+ return None
+
+ return rewritten_images[-1].image_id
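
For orientation, a minimal, hedged sketch of how the creation path above is exercised (it mirrors the tests later in this diff rather than defining any new API; the repository name and blob contents are illustrative only, and the referenced blobs must already be stored in the repository for the call to succeed):

    import json

    from app import storage
    from data.model.oci.manifest import get_or_create_manifest
    from data.model.repository import get_repository
    from digest.digest_tools import sha256_digest
    from image.docker.schema2.manifest import DockerSchema2ManifestBuilder

    config_json = json.dumps({'config': {}, 'rootfs': {'type': 'layers', 'diff_ids': []}, 'history': []})
    layer_data = 'hello world'

    # Build a schema 2 manifest referencing the (already uploaded) config and layer blobs.
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(sha256_digest(config_json), len(config_json))
    builder.add_layer(sha256_digest(layer_data), len(layer_data))
    manifest = builder.build()

    repository = get_repository('devtable', 'simple')
    created = get_or_create_manifest(repository, manifest, storage)
    if created is not None:
      # CreatedManifest carries the Manifest row, whether it was newly written, and any
      # labels that should trigger follow-up actions on the manifest or its tags.
      assert created.manifest.digest == manifest.digest
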
diff --git a/data/model/oci/retriever.py b/data/model/oci/retriever.py
new file mode 100644
index 000000000..b6e9633e0
--- /dev/null
+++ b/data/model/oci/retriever.py
@@ -0,0 +1,37 @@
+from image.docker.interfaces import ContentRetriever
+from data.database import Manifest
+from data.model.oci.blob import get_repository_blob_by_digest
+from data.model.storage import get_layer_path
+
+class RepositoryContentRetriever(ContentRetriever):
+ """ Implementation of the ContentRetriever interface for manifests that retrieves
+ config blobs and child manifests for the specified repository.
+ """
+ def __init__(self, repository_id, storage):
+ self.repository_id = repository_id
+ self.storage = storage
+
+ @classmethod
+ def for_repository(cls, repository_id, storage):
+ return RepositoryContentRetriever(repository_id, storage)
+
+ def get_manifest_bytes_with_digest(self, digest):
+ """ Returns the bytes of the manifest with the given digest or None if none found. """
+ query = (Manifest
+ .select()
+ .where(Manifest.repository == self.repository_id)
+ .where(Manifest.digest == digest))
+
+ try:
+ return query.get().manifest_bytes
+ except Manifest.DoesNotExist:
+ return None
+
+ def get_blob_bytes_with_digest(self, digest):
+ """ Returns the bytes of the blob with the given digest or None if none found. """
+ blob = get_repository_blob_by_digest(self.repository_id, digest)
+ if blob is None:
+ return None
+
+ assert blob.locations is not None
+ return self.storage.get_content(blob.locations, get_layer_path(blob))
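
A brief sketch of the intended call pattern for RepositoryContentRetriever (the digest variables are placeholders; both lookups are scoped to the given repository and return None when nothing matches):

    from app import storage
    from data.model.oci.retriever import RepositoryContentRetriever
    from data.model.repository import get_repository

    repository = get_repository('devtable', 'simple')
    retriever = RepositoryContentRetriever.for_repository(repository, storage)

    # `config_digest` and `child_digest` are placeholder values for illustration.
    config_bytes = retriever.get_blob_bytes_with_digest(config_digest)
    child_manifest_bytes = retriever.get_manifest_bytes_with_digest(child_digest)
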
diff --git a/data/model/oci/shared.py b/data/model/oci/shared.py
new file mode 100644
index 000000000..887eda383
--- /dev/null
+++ b/data/model/oci/shared.py
@@ -0,0 +1,24 @@
+from data.database import Manifest, ManifestLegacyImage, Image
+
+def get_legacy_image_for_manifest(manifest_id):
+ """ Returns the legacy image associated with the given manifest, if any, or None if none. """
+ try:
+ query = (ManifestLegacyImage
+ .select(ManifestLegacyImage, Image)
+ .join(Image)
+ .where(ManifestLegacyImage.manifest == manifest_id))
+ return query.get().image
+ except ManifestLegacyImage.DoesNotExist:
+ return None
+
+
+def get_manifest_for_legacy_image(image_id):
+ """ Returns a manifest that is associated with the given image, if any, or None if none. """
+ try:
+ query = (ManifestLegacyImage
+ .select(ManifestLegacyImage, Manifest)
+ .join(Manifest)
+ .where(ManifestLegacyImage.image == image_id))
+ return query.get().manifest
+ except ManifestLegacyImage.DoesNotExist:
+ return None
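
These two helpers are the forward and reverse lookups over the ManifestLegacyImage linkage table; a small sketch with placeholder IDs:

    from data.model.oci.shared import (get_legacy_image_for_manifest,
                                       get_manifest_for_legacy_image)

    # `manifest_id` and `image_id` are placeholders for real row IDs.
    legacy_image = get_legacy_image_for_manifest(manifest_id)  # None when no legacy image is linked
    manifest = get_manifest_for_legacy_image(image_id)         # None when no manifest references the image
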
diff --git a/data/model/oci/tag.py b/data/model/oci/tag.py
new file mode 100644
index 000000000..4ad1b8c18
--- /dev/null
+++ b/data/model/oci/tag.py
@@ -0,0 +1,505 @@
+import uuid
+import logging
+
+from calendar import timegm
+from peewee import fn
+
+from data.database import (Tag, Manifest, ManifestLegacyImage, Image, ImageStorage,
+ MediaType, RepositoryTag, TagManifest, TagManifestToManifest,
+ get_epoch_timestamp_ms, db_transaction, Repository,
+ TagToRepositoryTag, Namespace, RepositoryNotification,
+ ExternalNotificationEvent)
+from data.model.oci.shared import get_legacy_image_for_manifest
+from data.model import config
+from image.docker.schema1 import (DOCKER_SCHEMA1_CONTENT_TYPES, DockerSchema1Manifest,
+ MalformedSchema1Manifest)
+from util.bytes import Bytes
+from util.timedeltastring import convert_to_timedelta
+
+logger = logging.getLogger(__name__)
+
+
+def get_tag_by_id(tag_id):
+ """ Returns the tag with the given ID, joined with its manifest or None if none. """
+ try:
+ return Tag.select(Tag, Manifest).join(Manifest).where(Tag.id == tag_id).get()
+ except Tag.DoesNotExist:
+ return None
+
+
+def get_tag(repository_id, tag_name):
+ """ Returns the alive, non-hidden tag with the given name under the specified repository or
+ None if none. The tag is returned joined with its manifest.
+ """
+ query = (Tag
+ .select(Tag, Manifest)
+ .join(Manifest)
+ .where(Tag.repository == repository_id)
+ .where(Tag.name == tag_name))
+
+ query = filter_to_alive_tags(query)
+
+ try:
+ found = query.get()
+ assert not found.hidden
+ return found
+ except Tag.DoesNotExist:
+ return None
+
+
+def lookup_alive_tags_shallow(repository_id, start_pagination_id=None, limit=None):
+ """ Returns a list of the tags alive in the specified repository. Note that the tags returned
+ *only* contain their ID and name. Also note that the Tags are returned ordered by ID.
+ """
+ query = (Tag
+ .select(Tag.id, Tag.name)
+ .where(Tag.repository == repository_id)
+ .order_by(Tag.id))
+
+ if start_pagination_id is not None:
+ query = query.where(Tag.id >= start_pagination_id)
+
+ if limit is not None:
+ query = query.limit(limit)
+
+ return filter_to_alive_tags(query)
+
+
+def list_alive_tags(repository_id):
+ """ Returns a list of all the tags alive in the specified repository.
+      Tags returned are joined with their manifests.
+ """
+ query = (Tag
+ .select(Tag, Manifest)
+ .join(Manifest)
+ .where(Tag.repository == repository_id))
+
+ return filter_to_alive_tags(query)
+
+
+def list_repository_tag_history(repository_id, page, page_size, specific_tag_name=None,
+ active_tags_only=False, since_time_ms=None):
+ """ Returns a tuple of the full set of tags found in the specified repository, including those
+ that are no longer alive (unless active_tags_only is True), and whether additional tags exist.
+      If specific_tag_name is given, the tags are further filtered by name. If since_time_ms is
+      given, tags are further filtered to those newer than that time.
+
+ Note that the returned Manifest will not contain the manifest contents.
+ """
+ query = (Tag
+ .select(Tag, Manifest.id, Manifest.digest, Manifest.media_type)
+ .join(Manifest)
+ .where(Tag.repository == repository_id)
+ .order_by(Tag.lifetime_start_ms.desc(), Tag.name)
+ .limit(page_size + 1)
+ .offset(page_size * (page - 1)))
+
+ if specific_tag_name is not None:
+ query = query.where(Tag.name == specific_tag_name)
+
+ if since_time_ms is not None:
+ query = query.where((Tag.lifetime_start_ms > since_time_ms) | (Tag.lifetime_end_ms > since_time_ms))
+
+ if active_tags_only:
+ query = filter_to_alive_tags(query)
+
+ query = filter_to_visible_tags(query)
+ results = list(query)
+
+ return results[0:page_size], len(results) > page_size
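
Note that the query above intentionally fetches page_size + 1 rows so callers can detect a following page without an extra COUNT; a usage sketch (repository_id is a placeholder):

    results, has_more = list_repository_tag_history(repository_id, page=1, page_size=50)
    if has_more:
      next_page, _ = list_repository_tag_history(repository_id, page=2, page_size=50)
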
+
+
+def get_legacy_images_for_tags(tags):
+ """ Returns a map from tag ID to the legacy image for the tag. """
+ if not tags:
+ return {}
+
+ query = (ManifestLegacyImage
+ .select(ManifestLegacyImage, Image, ImageStorage)
+ .join(Image)
+ .join(ImageStorage)
+ .where(ManifestLegacyImage.manifest << [tag.manifest_id for tag in tags]))
+
+ by_manifest = {mli.manifest_id: mli.image for mli in query}
+ return {tag.id: by_manifest[tag.manifest_id] for tag in tags if tag.manifest_id in by_manifest}
+
+
+def find_matching_tag(repository_id, tag_names, tag_kinds=None):
+ """ Finds an alive tag in the specified repository with one of the specified tag names and
+      returns it or None if none. Tags returned are joined with their manifests.
+ """
+ assert repository_id
+ assert tag_names
+
+ query = (Tag
+ .select(Tag, Manifest)
+ .join(Manifest)
+ .where(Tag.repository == repository_id)
+ .where(Tag.name << tag_names))
+
+ if tag_kinds:
+ query = query.where(Tag.tag_kind << tag_kinds)
+
+ try:
+ found = filter_to_alive_tags(query).get()
+ assert not found.hidden
+ return found
+ except Tag.DoesNotExist:
+ return None
+
+
+def get_most_recent_tag_lifetime_start(repository_ids):
+ """ Returns a map from repo ID to the timestamp of the most recently pushed alive tag
+      for each specified repository. Repositories without an alive tag are omitted from the map.
+ """
+ assert len(repository_ids) > 0 and None not in repository_ids
+
+ query = (Tag.select(Tag.repository, fn.Max(Tag.lifetime_start_ms))
+ .where(Tag.repository << [repo_id for repo_id in repository_ids])
+ .group_by(Tag.repository))
+ tuples = filter_to_alive_tags(query).tuples()
+
+ return {repo_id: timestamp for repo_id, timestamp in tuples}
+
+
+def get_most_recent_tag(repository_id):
+ """ Returns the most recently pushed alive tag in the specified repository or None if none.
+ The Tag returned is joined with its manifest.
+ """
+ assert repository_id
+
+ query = (Tag
+ .select(Tag, Manifest)
+ .join(Manifest)
+ .where(Tag.repository == repository_id)
+ .order_by(Tag.lifetime_start_ms.desc()))
+
+ try:
+ found = filter_to_alive_tags(query).get()
+ assert not found.hidden
+ return found
+ except Tag.DoesNotExist:
+ return None
+
+
+def get_expired_tag(repository_id, tag_name):
+ """ Returns a tag with the given name that is expired in the repository or None if none.
+ """
+ try:
+ return (Tag
+ .select()
+ .where(Tag.name == tag_name, Tag.repository == repository_id)
+ .where(~(Tag.lifetime_end_ms >> None))
+ .where(Tag.lifetime_end_ms <= get_epoch_timestamp_ms())
+ .get())
+ except Tag.DoesNotExist:
+ return None
+
+
+def create_temporary_tag_if_necessary(manifest, expiration_sec):
+ """ Creates a temporary tag pointing to the given manifest, with the given expiration in seconds,
+ unless there is an existing tag that will keep the manifest around.
+ """
+ tag_name = '$temp-%s' % str(uuid.uuid4())
+ now_ms = get_epoch_timestamp_ms()
+ end_ms = now_ms + (expiration_sec * 1000)
+
+ # Check if there is an existing tag on the manifest that won't expire within the
+ # timeframe. If so, no need for a temporary tag.
+ with db_transaction():
+ try:
+ (Tag
+ .select()
+ .where(Tag.manifest == manifest,
+ (Tag.lifetime_end_ms >> None) | (Tag.lifetime_end_ms >= end_ms))
+ .get())
+ return None
+ except Tag.DoesNotExist:
+ pass
+
+ return Tag.create(name=tag_name,
+ repository=manifest.repository_id,
+ lifetime_start_ms=now_ms,
+ lifetime_end_ms=end_ms,
+ reversion=False,
+ hidden=True,
+ manifest=manifest,
+ tag_kind=Tag.tag_kind.get_id('tag'))
+
+
+def retarget_tag(tag_name, manifest_id, is_reversion=False, now_ms=None, adjust_old_model=True):
+ """ Creates or updates a tag with the specified name to point to the given manifest under
+ its repository. If this action is a reversion to a previous manifest, is_reversion
+ should be set to True. Returns the newly created tag row or None on error.
+ """
+ try:
+ manifest = (Manifest
+ .select(Manifest, MediaType)
+ .join(MediaType)
+ .where(Manifest.id == manifest_id)
+ .get())
+ except Manifest.DoesNotExist:
+ return None
+
+ # CHECK: Make sure that we are not mistargeting a schema 1 manifest to a tag with a different
+ # name.
+ if manifest.media_type.name in DOCKER_SCHEMA1_CONTENT_TYPES:
+ try:
+ parsed = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest.manifest_bytes),
+ validate=False)
+ if parsed.tag != tag_name:
+        logger.error('Tried to re-target schema1 manifest with tag `%s` to tag `%s`', parsed.tag,
+ tag_name)
+ return None
+ except MalformedSchema1Manifest:
+ logger.exception('Could not parse schema1 manifest')
+ return None
+
+ legacy_image = get_legacy_image_for_manifest(manifest)
+ now_ms = now_ms or get_epoch_timestamp_ms()
+ now_ts = int(now_ms / 1000)
+
+ with db_transaction():
+ # Lookup an existing tag in the repository with the same name and, if present, mark it
+ # as expired.
+ existing_tag = get_tag(manifest.repository_id, tag_name)
+ if existing_tag is not None:
+ _, okay = set_tag_end_ms(existing_tag, now_ms)
+
+ # TODO: should we retry here and/or use a for-update?
+ if not okay:
+ return None
+
+ # Create a new tag pointing to the manifest with a lifetime start of now.
+ created = Tag.create(name=tag_name, repository=manifest.repository_id, lifetime_start_ms=now_ms,
+ reversion=is_reversion, manifest=manifest,
+ tag_kind=Tag.tag_kind.get_id('tag'))
+
+ # TODO: Remove the linkage code once RepositoryTag is gone.
+ # If this is a schema 1 manifest, then add a TagManifest linkage to it. Otherwise, it will only
+ # be pullable via the new OCI model.
+ if adjust_old_model:
+ if manifest.media_type.name in DOCKER_SCHEMA1_CONTENT_TYPES and legacy_image is not None:
+ old_style_tag = RepositoryTag.create(repository=manifest.repository_id, image=legacy_image,
+ name=tag_name, lifetime_start_ts=now_ts,
+ reversion=is_reversion)
+ TagToRepositoryTag.create(tag=created, repository_tag=old_style_tag,
+ repository=manifest.repository_id)
+
+ tag_manifest = TagManifest.create(tag=old_style_tag, digest=manifest.digest,
+ json_data=manifest.manifest_bytes)
+ TagManifestToManifest.create(tag_manifest=tag_manifest, manifest=manifest,
+ repository=manifest.repository_id)
+
+ return created
+
+
+def delete_tag(repository_id, tag_name):
+ """ Deletes the alive tag with the given name in the specified repository and returns the deleted
+ tag. If the tag did not exist, returns None.
+ """
+ tag = get_tag(repository_id, tag_name)
+ if tag is None:
+ return None
+
+ return _delete_tag(tag, get_epoch_timestamp_ms())
+
+
+def _delete_tag(tag, now_ms):
+ """ Deletes the given tag by marking it as expired. """
+ now_ts = int(now_ms / 1000)
+
+ with db_transaction():
+ updated = (Tag
+ .update(lifetime_end_ms=now_ms)
+ .where(Tag.id == tag.id, Tag.lifetime_end_ms == tag.lifetime_end_ms)
+ .execute())
+ if updated != 1:
+ return None
+
+ # TODO: Remove the linkage code once RepositoryTag is gone.
+ try:
+ old_style_tag = (TagToRepositoryTag
+ .select(TagToRepositoryTag, RepositoryTag)
+ .join(RepositoryTag)
+ .where(TagToRepositoryTag.tag == tag)
+ .get()).repository_tag
+
+ old_style_tag.lifetime_end_ts = now_ts
+ old_style_tag.save()
+ except TagToRepositoryTag.DoesNotExist:
+ pass
+
+ return tag
+
+
+def delete_tags_for_manifest(manifest):
+ """ Deletes all tags pointing to the given manifest. Returns the list of tags
+ deleted.
+ """
+ query = Tag.select().where(Tag.manifest == manifest)
+ query = filter_to_alive_tags(query)
+ query = filter_to_visible_tags(query)
+
+ tags = list(query)
+ now_ms = get_epoch_timestamp_ms()
+
+ with db_transaction():
+ for tag in tags:
+ _delete_tag(tag, now_ms)
+
+ return tags
+
+
+def filter_to_visible_tags(query):
+ """ Adjusts the specified Tag query to only return those tags that are visible.
+ """
+ return query.where(Tag.hidden == False)
+
+
+def filter_to_alive_tags(query, now_ms=None, model=Tag):
+ """ Adjusts the specified Tag query to only return those tags alive. If now_ms is specified,
+      the given timestamp (in ms) is used in place of the current timestamp for determining whether
+ a tag is alive.
+ """
+ if now_ms is None:
+ now_ms = get_epoch_timestamp_ms()
+
+ return (query.where((model.lifetime_end_ms >> None) | (model.lifetime_end_ms > now_ms))
+ .where(model.hidden == False))
+
+
+def set_tag_expiration_sec_for_manifest(manifest_id, expiration_seconds):
+ """ Sets the tag expiration for any tags that point to the given manifest ID. """
+ query = Tag.select().where(Tag.manifest == manifest_id)
+ query = filter_to_alive_tags(query)
+ tags = list(query)
+ for tag in tags:
+ assert not tag.hidden
+ set_tag_end_ms(tag, tag.lifetime_start_ms + (expiration_seconds * 1000))
+
+ return tags
+
+
+def set_tag_expiration_for_manifest(manifest_id, expiration_datetime):
+ """ Sets the tag expiration for any tags that point to the given manifest ID. """
+ query = Tag.select().where(Tag.manifest == manifest_id)
+ query = filter_to_alive_tags(query)
+ tags = list(query)
+ for tag in tags:
+ assert not tag.hidden
+ change_tag_expiration(tag, expiration_datetime)
+
+ return tags
+
+
+def change_tag_expiration(tag_id, expiration_datetime):
+ """ Changes the expiration of the specified tag to the given expiration datetime. If
+ the expiration datetime is None, then the tag is marked as not expiring. Returns
+ a tuple of the previous expiration timestamp in seconds (if any), and whether the
+ operation succeeded.
+ """
+ try:
+ tag = Tag.get(id=tag_id)
+ except Tag.DoesNotExist:
+ return (None, False)
+
+ new_end_ms = None
+ min_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MINIMUM', '1h'))
+ max_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MAXIMUM', '104w'))
+
+ if expiration_datetime is not None:
+ lifetime_start_ts = int(tag.lifetime_start_ms / 1000)
+
+ offset = timegm(expiration_datetime.utctimetuple()) - lifetime_start_ts
+ offset = min(max(offset, min_expire_sec.total_seconds()), max_expire_sec.total_seconds())
+ new_end_ms = tag.lifetime_start_ms + (offset * 1000)
+
+ if new_end_ms == tag.lifetime_end_ms:
+ return (None, True)
+
+ return set_tag_end_ms(tag, new_end_ms)
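
Under the default LABELED_EXPIRATION_MINIMUM ('1h') and LABELED_EXPIRATION_MAXIMUM ('104w'), the clamp above behaves as follows (worked examples matching the test cases later in this diff):

    # Requested expiration 1 second after the tag's start    -> stored as start + 1 hour
    # Requested expiration 300 weeks after the tag's start   -> stored as start + 104 weeks
    # Requested expiration 1 week after the tag's start      -> stored as requested
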
+
+
+def lookup_unrecoverable_tags(repo):
+ """ Returns the tags in a repository that are expired and past their time machine recovery
+ period. """
+ expired_clause = get_epoch_timestamp_ms() - (Namespace.removed_tag_expiration_s * 1000)
+ return (Tag
+ .select()
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Tag.repository == repo)
+ .where(~(Tag.lifetime_end_ms >> None), Tag.lifetime_end_ms <= expired_clause))
+
+
+def set_tag_end_ms(tag, end_ms):
+ """ Sets the end timestamp for a tag. Should only be called by change_tag_expiration
+ or tests.
+ """
+
+ with db_transaction():
+ updated = (Tag
+ .update(lifetime_end_ms=end_ms)
+ .where(Tag.id == tag)
+ .where(Tag.lifetime_end_ms == tag.lifetime_end_ms)
+ .execute())
+ if updated != 1:
+ return (None, False)
+
+ # TODO: Remove the linkage code once RepositoryTag is gone.
+ try:
+ old_style_tag = (TagToRepositoryTag
+ .select(TagToRepositoryTag, RepositoryTag)
+ .join(RepositoryTag)
+ .where(TagToRepositoryTag.tag == tag)
+ .get()).repository_tag
+
+ old_style_tag.lifetime_end_ts = end_ms / 1000 if end_ms is not None else None
+ old_style_tag.save()
+ except TagToRepositoryTag.DoesNotExist:
+ pass
+
+ return (tag.lifetime_end_ms, True)
+
+
+def tags_containing_legacy_image(image):
+ """ Yields all alive Tags containing the given image as a legacy image, somewhere in its
+ legacy image hierarchy.
+ """
+ ancestors_str = '%s%s/%%' % (image.ancestors, image.id)
+ tags = (Tag
+ .select()
+ .join(Repository)
+ .switch(Tag)
+ .join(Manifest)
+ .join(ManifestLegacyImage)
+ .join(Image)
+ .where(Tag.repository == image.repository_id)
+ .where(Image.repository == image.repository_id)
+ .where((Image.id == image.id) |
+ (Image.ancestors ** ancestors_str)))
+ return filter_to_alive_tags(tags)
+
+
+def lookup_notifiable_tags_for_legacy_image(docker_image_id, storage_uuid, event_name):
+ """ Yields any alive Tags found in repositories with an event with the given name registered
+ and whose legacy Image has the given docker image ID and storage UUID.
+ """
+ event = ExternalNotificationEvent.get(name=event_name)
+ images = (Image
+ .select()
+ .join(ImageStorage)
+ .where(Image.docker_image_id == docker_image_id,
+ ImageStorage.uuid == storage_uuid))
+
+ for image in list(images):
+ # Ensure the image is under a repository that supports the event.
+ try:
+ RepositoryNotification.get(repository=image.repository_id, event=event)
+ except RepositoryNotification.DoesNotExist:
+ continue
+
+ # If found in a repository with the valid event, yield the tag(s) that contains the image.
+ for tag in tags_containing_legacy_image(image):
+ yield tag
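
Tying the helpers in this file together, a hedged end-to-end sketch (repository_id and manifest_id are placeholders for real rows obtained elsewhere; retarget_tag returns None on error):

    from data.model.oci.tag import retarget_tag, get_tag, change_tag_expiration, delete_tag

    # Point (or move) the 'latest' tag at a manifest row.
    tag = retarget_tag('latest', manifest_id)
    assert get_tag(repository_id, 'latest') == tag

    # Clear any expiration, then expire the tag by deleting it.
    previous_end_ms, okay = change_tag_expiration(tag, None)
    assert delete_tag(repository_id, 'latest') is not None
    assert get_tag(repository_id, 'latest') is None
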
diff --git a/data/model/oci/test/__init__.py b/data/model/oci/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/data/model/oci/test/test_oci_label.py b/data/model/oci/test/test_oci_label.py
new file mode 100644
index 000000000..2ba04521b
--- /dev/null
+++ b/data/model/oci/test/test_oci_label.py
@@ -0,0 +1,87 @@
+import pytest
+
+from playhouse.test_utils import assert_query_count
+
+from data.database import Manifest, ManifestLabel
+from data.model.oci.label import (create_manifest_label, list_manifest_labels, get_manifest_label,
+ delete_manifest_label, DataModelException)
+
+from test.fixtures import *
+
+
+@pytest.mark.parametrize('key, value, source_type, expected_error', [
+ ('foo', 'bar', 'manifest', None),
+
+ pytest.param('..foo', 'bar', 'manifest', None, id='invalid key on manifest'),
+ pytest.param('..foo', 'bar', 'api', 'is invalid', id='invalid key on api'),
+])
+def test_create_manifest_label(key, value, source_type, expected_error, initialized_db):
+ manifest = Manifest.get()
+
+ if expected_error:
+ with pytest.raises(DataModelException) as ex:
+ create_manifest_label(manifest, key, value, source_type)
+
+ assert ex.match(expected_error)
+ return
+
+ label = create_manifest_label(manifest, key, value, source_type)
+ labels = [ml.label_id for ml in ManifestLabel.select().where(ManifestLabel.manifest == manifest)]
+ assert label.id in labels
+
+ with assert_query_count(1):
+ assert label in list_manifest_labels(manifest)
+
+ assert label not in list_manifest_labels(manifest, 'someprefix')
+ assert label in list_manifest_labels(manifest, key[0:2])
+
+ with assert_query_count(1):
+ assert get_manifest_label(label.uuid, manifest) == label
+
+
+def test_list_manifest_labels(initialized_db):
+ manifest = Manifest.get()
+
+ label1 = create_manifest_label(manifest, 'foo', '1', 'manifest')
+ label2 = create_manifest_label(manifest, 'bar', '2', 'api')
+ label3 = create_manifest_label(manifest, 'baz', '3', 'internal')
+
+ assert label1 in list_manifest_labels(manifest)
+ assert label2 in list_manifest_labels(manifest)
+ assert label3 in list_manifest_labels(manifest)
+
+ other_manifest = Manifest.select().where(Manifest.id != manifest.id).get()
+ assert label1 not in list_manifest_labels(other_manifest)
+ assert label2 not in list_manifest_labels(other_manifest)
+ assert label3 not in list_manifest_labels(other_manifest)
+
+
+def test_get_manifest_label(initialized_db):
+ found = False
+ for manifest_label in ManifestLabel.select():
+ assert (get_manifest_label(manifest_label.label.uuid, manifest_label.manifest) ==
+ manifest_label.label)
+ assert manifest_label.label in list_manifest_labels(manifest_label.manifest)
+ found = True
+
+ assert found
+
+
+def test_delete_manifest_label(initialized_db):
+ found = False
+ for manifest_label in list(ManifestLabel.select()):
+ assert (get_manifest_label(manifest_label.label.uuid, manifest_label.manifest) ==
+ manifest_label.label)
+ assert manifest_label.label in list_manifest_labels(manifest_label.manifest)
+
+ if manifest_label.label.source_type.mutable:
+ assert delete_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
+ assert manifest_label.label not in list_manifest_labels(manifest_label.manifest)
+ assert get_manifest_label(manifest_label.label.uuid, manifest_label.manifest) is None
+ else:
+ with pytest.raises(DataModelException):
+ delete_manifest_label(manifest_label.label.uuid, manifest_label.manifest)
+
+ found = True
+
+ assert found
diff --git a/data/model/oci/test/test_oci_manifest.py b/data/model/oci/test/test_oci_manifest.py
new file mode 100644
index 000000000..4c5d6ed3b
--- /dev/null
+++ b/data/model/oci/test/test_oci_manifest.py
@@ -0,0 +1,560 @@
+import json
+
+from playhouse.test_utils import assert_query_count
+
+from app import docker_v2_signing_key, storage
+
+from digest.digest_tools import sha256_digest
+from data.database import (Tag, ManifestBlob, ImageStorageLocation, ManifestChild,
+ ImageStorage, Image, RepositoryTag, get_epoch_timestamp_ms)
+from data.model.oci.manifest import lookup_manifest, get_or_create_manifest
+from data.model.oci.tag import filter_to_alive_tags, get_tag
+from data.model.oci.shared import get_legacy_image_for_manifest
+from data.model.oci.label import list_manifest_labels
+from data.model.oci.retriever import RepositoryContentRetriever
+from data.model.repository import get_repository, create_repository
+from data.model.image import find_create_or_link_image
+from data.model.blob import store_blob_record_and_temp_link
+from data.model.storage import get_layer_path
+from image.docker.schema1 import DockerSchema1ManifestBuilder, DockerSchema1Manifest
+from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
+from image.docker.schema2.list import DockerSchema2ManifestListBuilder
+from util.bytes import Bytes
+
+from test.fixtures import *
+
+def test_lookup_manifest(initialized_db):
+ found = False
+ for tag in filter_to_alive_tags(Tag.select()):
+ found = True
+ repo = tag.repository
+ digest = tag.manifest.digest
+ with assert_query_count(1):
+ assert lookup_manifest(repo, digest) == tag.manifest
+
+ assert found
+
+ for tag in Tag.select():
+ repo = tag.repository
+ digest = tag.manifest.digest
+ with assert_query_count(1):
+ assert lookup_manifest(repo, digest, allow_dead=True) == tag.manifest
+
+
+def test_lookup_manifest_dead_tag(initialized_db):
+ dead_tag = Tag.select().where(Tag.lifetime_end_ms <= get_epoch_timestamp_ms()).get()
+ assert dead_tag.lifetime_end_ms <= get_epoch_timestamp_ms()
+
+ assert lookup_manifest(dead_tag.repository, dead_tag.manifest.digest) is None
+ assert (lookup_manifest(dead_tag.repository, dead_tag.manifest.digest, allow_dead=True) ==
+ dead_tag.manifest)
+
+
+def create_manifest_for_testing(repository, differentiation_field='1'):
+ # Populate a manifest.
+ layer_json = json.dumps({
+ 'config': {},
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [],
+ })
+
+ # Add a blob containing the config.
+ _, config_digest = _populate_blob(layer_json)
+
+ remote_digest = sha256_digest('something')
+ builder = DockerSchema2ManifestBuilder()
+ builder.set_config_digest(config_digest, len(layer_json))
+ builder.add_layer(remote_digest, 1234, urls=['http://hello/world' + differentiation_field])
+ manifest = builder.build()
+
+ created = get_or_create_manifest(repository, manifest, storage)
+ assert created
+ return created.manifest, manifest
+
+
+def test_lookup_manifest_child_tag(initialized_db):
+ repository = create_repository('devtable', 'newrepo', None)
+ manifest, manifest_impl = create_manifest_for_testing(repository)
+
+ # Mark the hidden tag as dead.
+ hidden_tag = Tag.get(manifest=manifest, hidden=True)
+ hidden_tag.lifetime_end_ms = hidden_tag.lifetime_start_ms
+ hidden_tag.save()
+
+ # Ensure the manifest cannot currently be looked up, as it is not pointed to by an alive tag.
+ assert lookup_manifest(repository, manifest.digest) is None
+ assert lookup_manifest(repository, manifest.digest, allow_dead=True) is not None
+
+ # Populate a manifest list.
+ list_builder = DockerSchema2ManifestListBuilder()
+ list_builder.add_manifest(manifest_impl, 'amd64', 'linux')
+ manifest_list = list_builder.build()
+
+ # Write the manifest list, which should also write the manifests themselves.
+ created_tuple = get_or_create_manifest(repository, manifest_list, storage)
+ assert created_tuple is not None
+
+ # Since the manifests are not yet referenced by a tag, they cannot be found.
+ assert lookup_manifest(repository, manifest.digest) is None
+ assert lookup_manifest(repository, manifest_list.digest) is None
+
+ # Unless we ask for "dead" manifests.
+ assert lookup_manifest(repository, manifest.digest, allow_dead=True) is not None
+ assert lookup_manifest(repository, manifest_list.digest, allow_dead=True) is not None
+
+
+def _populate_blob(content):
+ digest = str(sha256_digest(content))
+ location = ImageStorageLocation.get(name='local_us')
+ blob = store_blob_record_and_temp_link('devtable', 'newrepo', digest, location,
+ len(content), 120)
+ storage.put_content(['local_us'], get_layer_path(blob), content)
+ return blob, digest
+
+
+@pytest.mark.parametrize('schema_version', [
+ 1,
+ 2,
+])
+def test_get_or_create_manifest(schema_version, initialized_db):
+ repository = create_repository('devtable', 'newrepo', None)
+
+ expected_labels = {
+ 'Foo': 'Bar',
+ 'Baz': 'Meh',
+ }
+
+ layer_json = json.dumps({
+ 'id': 'somelegacyid',
+ 'config': {
+ 'Labels': expected_labels,
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ ],
+ })
+
+ # Create a legacy image.
+ find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
+
+ # Add a blob containing the config.
+ _, config_digest = _populate_blob(layer_json)
+
+ # Add a blob of random data.
+ random_data = 'hello world'
+ _, random_digest = _populate_blob(random_data)
+
+ # Build the manifest.
+ if schema_version == 1:
+ builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
+ builder.add_layer(random_digest, layer_json)
+ sample_manifest_instance = builder.build(docker_v2_signing_key)
+ elif schema_version == 2:
+ builder = DockerSchema2ManifestBuilder()
+ builder.set_config_digest(config_digest, len(layer_json))
+ builder.add_layer(random_digest, len(random_data))
+ sample_manifest_instance = builder.build()
+
+ # Create a new manifest.
+ created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
+ created = created_manifest.manifest
+ newly_created = created_manifest.newly_created
+
+ assert newly_created
+ assert created is not None
+ assert created.media_type.name == sample_manifest_instance.media_type
+ assert created.digest == sample_manifest_instance.digest
+ assert created.manifest_bytes == sample_manifest_instance.bytes.as_encoded_str()
+ assert created_manifest.labels_to_apply == expected_labels
+
+ # Verify it has a temporary tag pointing to it.
+ assert Tag.get(manifest=created, hidden=True).lifetime_end_ms
+
+ # Verify the legacy image.
+ legacy_image = get_legacy_image_for_manifest(created)
+ assert legacy_image is not None
+ assert legacy_image.storage.content_checksum == random_digest
+
+ # Verify the linked blobs.
+ blob_digests = [mb.blob.content_checksum for mb
+ in ManifestBlob.select().where(ManifestBlob.manifest == created)]
+
+ assert random_digest in blob_digests
+ if schema_version == 2:
+ assert config_digest in blob_digests
+
+ # Retrieve it again and ensure it is the same manifest.
+ created_manifest2 = get_or_create_manifest(repository, sample_manifest_instance, storage)
+ created2 = created_manifest2.manifest
+ newly_created2 = created_manifest2.newly_created
+
+ assert not newly_created2
+ assert created2 == created
+
+ # Ensure it again has a temporary tag.
+ assert Tag.get(manifest=created2, hidden=True).lifetime_end_ms
+
+ # Ensure the labels were added.
+ labels = list(list_manifest_labels(created))
+ assert len(labels) == 2
+
+ labels_dict = {label.key: label.value for label in labels}
+ assert labels_dict == expected_labels
+
+
+def test_get_or_create_manifest_invalid_image(initialized_db):
+ repository = get_repository('devtable', 'simple')
+
+ latest_tag = get_tag(repository, 'latest')
+ parsed = DockerSchema1Manifest(Bytes.for_string_or_unicode(latest_tag.manifest.manifest_bytes),
+ validate=False)
+
+ builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
+ builder.add_layer(parsed.blob_digests[0], '{"id": "foo", "parent": "someinvalidimageid"}')
+ sample_manifest_instance = builder.build(docker_v2_signing_key)
+
+ created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
+ assert created_manifest is None
+
+
+def test_get_or_create_manifest_list(initialized_db):
+ repository = create_repository('devtable', 'newrepo', None)
+
+ expected_labels = {
+ 'Foo': 'Bar',
+ 'Baz': 'Meh',
+ }
+
+ layer_json = json.dumps({
+ 'id': 'somelegacyid',
+ 'config': {
+ 'Labels': expected_labels,
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ ],
+ })
+
+ # Create a legacy image.
+ find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
+
+ # Add a blob containing the config.
+ _, config_digest = _populate_blob(layer_json)
+
+ # Add a blob of random data.
+ random_data = 'hello world'
+ _, random_digest = _populate_blob(random_data)
+
+ # Build the manifests.
+ v1_builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
+ v1_builder.add_layer(random_digest, layer_json)
+ v1_manifest = v1_builder.build(docker_v2_signing_key).unsigned()
+
+ v2_builder = DockerSchema2ManifestBuilder()
+ v2_builder.set_config_digest(config_digest, len(layer_json))
+ v2_builder.add_layer(random_digest, len(random_data))
+ v2_manifest = v2_builder.build()
+
+ # Write the manifests.
+ v1_created = get_or_create_manifest(repository, v1_manifest, storage)
+ assert v1_created
+ assert v1_created.manifest.digest == v1_manifest.digest
+
+ v2_created = get_or_create_manifest(repository, v2_manifest, storage)
+ assert v2_created
+ assert v2_created.manifest.digest == v2_manifest.digest
+
+ # Build the manifest list.
+ list_builder = DockerSchema2ManifestListBuilder()
+ list_builder.add_manifest(v1_manifest, 'amd64', 'linux')
+ list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
+ manifest_list = list_builder.build()
+
+ # Write the manifest list, which should also write the manifests themselves.
+ created_tuple = get_or_create_manifest(repository, manifest_list, storage)
+ assert created_tuple is not None
+
+ created_list = created_tuple.manifest
+ assert created_list
+ assert created_list.media_type.name == manifest_list.media_type
+ assert created_list.digest == manifest_list.digest
+
+ # Ensure the child manifest links exist.
+ child_manifests = {cm.child_manifest.digest: cm.child_manifest
+ for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
+ assert len(child_manifests) == 2
+ assert v1_manifest.digest in child_manifests
+ assert v2_manifest.digest in child_manifests
+
+ assert child_manifests[v1_manifest.digest].media_type.name == v1_manifest.media_type
+ assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type
+
+
+def test_get_or_create_manifest_list_duplicate_child_manifest(initialized_db):
+ repository = create_repository('devtable', 'newrepo', None)
+
+ expected_labels = {
+ 'Foo': 'Bar',
+ 'Baz': 'Meh',
+ }
+
+ layer_json = json.dumps({
+ 'id': 'somelegacyid',
+ 'config': {
+ 'Labels': expected_labels,
+ },
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ ],
+ })
+
+ # Create a legacy image.
+ find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')
+
+ # Add a blob containing the config.
+ _, config_digest = _populate_blob(layer_json)
+
+ # Add a blob of random data.
+ random_data = 'hello world'
+ _, random_digest = _populate_blob(random_data)
+
+ # Build the manifest.
+ v2_builder = DockerSchema2ManifestBuilder()
+ v2_builder.set_config_digest(config_digest, len(layer_json))
+ v2_builder.add_layer(random_digest, len(random_data))
+ v2_manifest = v2_builder.build()
+
+ # Write the manifest.
+ v2_created = get_or_create_manifest(repository, v2_manifest, storage)
+ assert v2_created
+ assert v2_created.manifest.digest == v2_manifest.digest
+
+ # Build the manifest list, with the child manifest repeated.
+ list_builder = DockerSchema2ManifestListBuilder()
+ list_builder.add_manifest(v2_manifest, 'amd64', 'linux')
+ list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
+ manifest_list = list_builder.build()
+
+ # Write the manifest list, which should also write the manifests themselves.
+ created_tuple = get_or_create_manifest(repository, manifest_list, storage)
+ assert created_tuple is not None
+
+ created_list = created_tuple.manifest
+ assert created_list
+ assert created_list.media_type.name == manifest_list.media_type
+ assert created_list.digest == manifest_list.digest
+
+ # Ensure the child manifest links exist.
+ child_manifests = {cm.child_manifest.digest: cm.child_manifest
+ for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
+ assert len(child_manifests) == 1
+ assert v2_manifest.digest in child_manifests
+ assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type
+
+ # Try to create again and ensure we get back the same manifest list.
+ created2_tuple = get_or_create_manifest(repository, manifest_list, storage)
+ assert created2_tuple is not None
+ assert created2_tuple.manifest == created_list
+
+
+def test_get_or_create_manifest_with_remote_layers(initialized_db):
+ repository = create_repository('devtable', 'newrepo', None)
+
+ layer_json = json.dumps({
+ 'config': {},
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ ],
+ })
+
+ # Add a blob containing the config.
+ _, config_digest = _populate_blob(layer_json)
+
+ # Add a blob of random data.
+ random_data = 'hello world'
+ _, random_digest = _populate_blob(random_data)
+
+ remote_digest = sha256_digest('something')
+
+ builder = DockerSchema2ManifestBuilder()
+ builder.set_config_digest(config_digest, len(layer_json))
+ builder.add_layer(remote_digest, 1234, urls=['http://hello/world'])
+ builder.add_layer(random_digest, len(random_data))
+ manifest = builder.build()
+
+ assert remote_digest in manifest.blob_digests
+ assert remote_digest not in manifest.local_blob_digests
+
+ assert manifest.has_remote_layer
+ assert not manifest.has_legacy_image
+ assert manifest.get_schema1_manifest('foo', 'bar', 'baz', None) is None
+
+ # Write the manifest.
+ created_tuple = get_or_create_manifest(repository, manifest, storage)
+ assert created_tuple is not None
+
+ created_manifest = created_tuple.manifest
+ assert created_manifest
+ assert created_manifest.media_type.name == manifest.media_type
+ assert created_manifest.digest == manifest.digest
+
+ # Verify the legacy image.
+ legacy_image = get_legacy_image_for_manifest(created_manifest)
+ assert legacy_image is None
+
+ # Verify the linked blobs.
+ blob_digests = {mb.blob.content_checksum for mb
+ in ManifestBlob.select().where(ManifestBlob.manifest == created_manifest)}
+
+ assert random_digest in blob_digests
+ assert config_digest in blob_digests
+ assert remote_digest not in blob_digests
+
+
+def create_manifest_for_testing(repository, differentiation_field='1', include_shared_blob=False):
+ # Populate a manifest.
+ layer_json = json.dumps({
+ 'config': {},
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [],
+ })
+
+ # Add a blob containing the config.
+ _, config_digest = _populate_blob(layer_json)
+
+ remote_digest = sha256_digest('something')
+ builder = DockerSchema2ManifestBuilder()
+ builder.set_config_digest(config_digest, len(layer_json))
+ builder.add_layer(remote_digest, 1234, urls=['http://hello/world' + differentiation_field])
+
+ if include_shared_blob:
+ _, blob_digest = _populate_blob('some data here')
+ builder.add_layer(blob_digest, 4567)
+
+ manifest = builder.build()
+
+ created = get_or_create_manifest(repository, manifest, storage)
+ assert created
+ return created.manifest, manifest
+
+
+def test_retriever(initialized_db):
+ repository = create_repository('devtable', 'newrepo', None)
+
+ layer_json = json.dumps({
+ 'config': {},
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ ],
+ })
+
+ # Add a blob containing the config.
+ _, config_digest = _populate_blob(layer_json)
+
+ # Add a blob of random data.
+ random_data = 'hello world'
+ _, random_digest = _populate_blob(random_data)
+
+ # Add another blob of random data.
+ other_random_data = 'hi place'
+ _, other_random_digest = _populate_blob(other_random_data)
+
+ remote_digest = sha256_digest('something')
+
+ builder = DockerSchema2ManifestBuilder()
+ builder.set_config_digest(config_digest, len(layer_json))
+ builder.add_layer(other_random_digest, len(other_random_data))
+ builder.add_layer(random_digest, len(random_data))
+ manifest = builder.build()
+
+ assert config_digest in manifest.blob_digests
+ assert random_digest in manifest.blob_digests
+ assert other_random_digest in manifest.blob_digests
+
+ assert config_digest in manifest.local_blob_digests
+ assert random_digest in manifest.local_blob_digests
+ assert other_random_digest in manifest.local_blob_digests
+
+ # Write the manifest.
+ created_tuple = get_or_create_manifest(repository, manifest, storage)
+ assert created_tuple is not None
+
+ created_manifest = created_tuple.manifest
+ assert created_manifest
+ assert created_manifest.media_type.name == manifest.media_type
+ assert created_manifest.digest == manifest.digest
+
+ # Verify the linked blobs.
+ blob_digests = {mb.blob.content_checksum for mb
+ in ManifestBlob.select().where(ManifestBlob.manifest == created_manifest)}
+
+ assert random_digest in blob_digests
+ assert other_random_digest in blob_digests
+ assert config_digest in blob_digests
+
+ # Delete any Image rows linking to the blobs from temp tags.
+ for blob_digest in blob_digests:
+ storage_row = ImageStorage.get(content_checksum=blob_digest)
+ for image in list(Image.select().where(Image.storage == storage_row)):
+ all_temp = all([rt.hidden for rt
+ in RepositoryTag.select().where(RepositoryTag.image == image)])
+ if all_temp:
+ RepositoryTag.delete().where(RepositoryTag.image == image).execute()
+ image.delete_instance(recursive=True)
+
+ # Verify the blobs in the retriever.
+ retriever = RepositoryContentRetriever(repository, storage)
+ assert (retriever.get_manifest_bytes_with_digest(created_manifest.digest) ==
+ manifest.bytes.as_encoded_str())
+
+ for blob_digest in blob_digests:
+ assert retriever.get_blob_bytes_with_digest(blob_digest) is not None
diff --git a/data/model/oci/test/test_oci_tag.py b/data/model/oci/test/test_oci_tag.py
new file mode 100644
index 000000000..d37828cf7
--- /dev/null
+++ b/data/model/oci/test/test_oci_tag.py
@@ -0,0 +1,378 @@
+from calendar import timegm
+from datetime import timedelta, datetime
+
+from playhouse.test_utils import assert_query_count
+
+from data.database import (Tag, ManifestLegacyImage, TagToRepositoryTag, TagManifestToManifest,
+ TagManifest, Manifest, Repository)
+from data.model.oci.test.test_oci_manifest import create_manifest_for_testing
+from data.model.oci.tag import (find_matching_tag, get_most_recent_tag,
+ get_most_recent_tag_lifetime_start, list_alive_tags,
+ get_legacy_images_for_tags, filter_to_alive_tags,
+ filter_to_visible_tags, list_repository_tag_history,
+ get_expired_tag, get_tag, delete_tag,
+ delete_tags_for_manifest, change_tag_expiration,
+ set_tag_expiration_for_manifest, retarget_tag,
+ create_temporary_tag_if_necessary,
+ lookup_alive_tags_shallow,
+ lookup_unrecoverable_tags,
+ get_epoch_timestamp_ms)
+from data.model.repository import get_repository, create_repository
+
+from test.fixtures import *
+
+@pytest.mark.parametrize('namespace_name, repo_name, tag_names, expected', [
+ ('devtable', 'simple', ['latest'], 'latest'),
+ ('devtable', 'simple', ['unknown', 'latest'], 'latest'),
+ ('devtable', 'simple', ['unknown'], None),
+])
+def test_find_matching_tag(namespace_name, repo_name, tag_names, expected, initialized_db):
+ repo = get_repository(namespace_name, repo_name)
+ if expected is not None:
+ with assert_query_count(1):
+ found = find_matching_tag(repo, tag_names)
+
+ assert found is not None
+ assert found.name == expected
+ assert not found.lifetime_end_ms
+ else:
+ with assert_query_count(1):
+ assert find_matching_tag(repo, tag_names) is None
+
+
+def test_get_most_recent_tag_lifetime_start(initialized_db):
+ repo = get_repository('devtable', 'simple')
+ tag = get_most_recent_tag(repo)
+
+ with assert_query_count(1):
+ tags = get_most_recent_tag_lifetime_start([repo])
+ assert tags[repo.id] == tag.lifetime_start_ms
+
+
+def test_get_most_recent_tag(initialized_db):
+ repo = get_repository('outsideorg', 'coolrepo')
+
+ with assert_query_count(1):
+ assert get_most_recent_tag(repo).name == 'latest'
+
+
+def test_get_most_recent_tag_empty_repo(initialized_db):
+ empty_repo = create_repository('devtable', 'empty', None)
+
+ with assert_query_count(1):
+ assert get_most_recent_tag(empty_repo) is None
+
+
+def test_list_alive_tags(initialized_db):
+ found = False
+ for tag in filter_to_visible_tags(filter_to_alive_tags(Tag.select())):
+ tags = list_alive_tags(tag.repository)
+ assert tag in tags
+
+ with assert_query_count(1):
+ legacy_images = get_legacy_images_for_tags(tags)
+
+ for tag in tags:
+ assert ManifestLegacyImage.get(manifest=tag.manifest).image == legacy_images[tag.id]
+
+ found = True
+
+ assert found
+
+ # Ensure hidden tags cannot be listed.
+ tag = Tag.get()
+ tag.hidden = True
+ tag.save()
+
+ tags = list_alive_tags(tag.repository)
+ assert tag not in tags
+
+
+def test_lookup_alive_tags_shallow(initialized_db):
+ found = False
+ for tag in filter_to_visible_tags(filter_to_alive_tags(Tag.select())):
+ tags = lookup_alive_tags_shallow(tag.repository)
+ found = True
+ assert tag in tags
+
+ assert found
+
+ # Ensure hidden tags cannot be listed.
+ tag = Tag.get()
+ tag.hidden = True
+ tag.save()
+
+ tags = lookup_alive_tags_shallow(tag.repository)
+ assert tag not in tags
+
+
+def test_get_tag(initialized_db):
+ found = False
+ for tag in filter_to_visible_tags(filter_to_alive_tags(Tag.select())):
+ repo = tag.repository
+
+ with assert_query_count(1):
+ assert get_tag(repo, tag.name) == tag
+ found = True
+
+ assert found
+
+
+@pytest.mark.parametrize('namespace_name, repo_name', [
+ ('devtable', 'simple'),
+ ('devtable', 'complex'),
+])
+def test_list_repository_tag_history(namespace_name, repo_name, initialized_db):
+ repo = get_repository(namespace_name, repo_name)
+
+ with assert_query_count(1):
+ results, has_more = list_repository_tag_history(repo, 1, 100)
+
+ assert results
+ assert not has_more
+
+
+def test_list_repository_tag_history_with_history(initialized_db):
+ repo = get_repository('devtable', 'history')
+
+ with assert_query_count(1):
+ results, _ = list_repository_tag_history(repo, 1, 100)
+
+ assert len(results) == 2
+ assert results[0].lifetime_end_ms is None
+ assert results[1].lifetime_end_ms is not None
+
+ with assert_query_count(1):
+ results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
+
+ assert len(results) == 2
+ assert results[0].lifetime_end_ms is None
+ assert results[1].lifetime_end_ms is not None
+
+ with assert_query_count(1):
+ results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='foobar')
+
+ assert len(results) == 0
+
+
+def test_list_repository_tag_history_all_tags(initialized_db):
+ for tag in Tag.select():
+ repo = tag.repository
+ with assert_query_count(1):
+ results, _ = list_repository_tag_history(repo, 1, 1000)
+
+ assert (tag in results) == (not tag.hidden)
+
+
+@pytest.mark.parametrize('namespace_name, repo_name, tag_name, expected', [
+ ('devtable', 'simple', 'latest', False),
+ ('devtable', 'simple', 'unknown', False),
+ ('devtable', 'complex', 'latest', False),
+
+ ('devtable', 'history', 'latest', True),
+])
+def test_get_expired_tag(namespace_name, repo_name, tag_name, expected, initialized_db):
+ repo = get_repository(namespace_name, repo_name)
+
+ with assert_query_count(1):
+ assert bool(get_expired_tag(repo, tag_name)) == expected
+
+
+def test_delete_tag(initialized_db):
+ found = False
+ for tag in list(filter_to_visible_tags(filter_to_alive_tags(Tag.select()))):
+ repo = tag.repository
+
+ assert get_tag(repo, tag.name) == tag
+ assert tag.lifetime_end_ms is None
+
+ with assert_query_count(4):
+ assert delete_tag(repo, tag.name) == tag
+
+ assert get_tag(repo, tag.name) is None
+ found = True
+
+ assert found
+
+
+def test_delete_tags_for_manifest(initialized_db):
+ for tag in list(filter_to_visible_tags(filter_to_alive_tags(Tag.select()))):
+ repo = tag.repository
+ assert get_tag(repo, tag.name) == tag
+
+ with assert_query_count(5):
+ assert delete_tags_for_manifest(tag.manifest) == [tag]
+
+ assert get_tag(repo, tag.name) is None
+
+
+def test_delete_tags_for_manifest_same_manifest(initialized_db):
+ new_repo = model.repository.create_repository('devtable', 'newrepo', None)
+ manifest_1, _ = create_manifest_for_testing(new_repo, '1')
+ manifest_2, _ = create_manifest_for_testing(new_repo, '2')
+
+ assert manifest_1.digest != manifest_2.digest
+
+ # Add some tag history, moving a tag back and forth between two manifests.
+ retarget_tag('latest', manifest_1)
+ retarget_tag('latest', manifest_2)
+ retarget_tag('latest', manifest_1)
+ retarget_tag('latest', manifest_2)
+
+ retarget_tag('another1', manifest_1)
+ retarget_tag('another2', manifest_2)
+
+ # Delete all tags pointing to the first manifest.
+ delete_tags_for_manifest(manifest_1)
+
+ assert get_tag(new_repo, 'latest').manifest == manifest_2
+ assert get_tag(new_repo, 'another1') is None
+ assert get_tag(new_repo, 'another2').manifest == manifest_2
+
+ # Delete all tags pointing to the second manifest, which should actually delete the `latest`
+ # tag now.
+ delete_tags_for_manifest(manifest_2)
+ assert get_tag(new_repo, 'latest') is None
+ assert get_tag(new_repo, 'another1') is None
+ assert get_tag(new_repo, 'another2') is None
+
+
+@pytest.mark.parametrize('timedelta, expected_timedelta', [
+ pytest.param(timedelta(seconds=1), timedelta(hours=1), id='less than minimum'),
+  pytest.param(timedelta(weeks=300), timedelta(weeks=104), id='more than maximum'),
+ pytest.param(timedelta(weeks=1), timedelta(weeks=1), id='within range'),
+])
+def test_change_tag_expiration(timedelta, expected_timedelta, initialized_db):
+ now = datetime.utcnow()
+ now_ms = timegm(now.utctimetuple()) * 1000
+
+ tag = Tag.get()
+ tag.lifetime_start_ms = now_ms
+ tag.save()
+
+ original_end_ms, okay = change_tag_expiration(tag, now + timedelta)
+ assert okay
+ assert original_end_ms == tag.lifetime_end_ms
+
+ updated_tag = Tag.get(id=tag.id)
+ offset = expected_timedelta.total_seconds() * 1000
+ expected_ms = (updated_tag.lifetime_start_ms + offset)
+ assert updated_tag.lifetime_end_ms == expected_ms
+
+ original_end_ms, okay = change_tag_expiration(tag, None)
+ assert okay
+ assert original_end_ms == expected_ms
+
+ updated_tag = Tag.get(id=tag.id)
+ assert updated_tag.lifetime_end_ms is None
+
+
+def test_set_tag_expiration_for_manifest(initialized_db):
+ tag = Tag.get()
+ manifest = tag.manifest
+ assert manifest is not None
+
+ set_tag_expiration_for_manifest(manifest, datetime.utcnow() + timedelta(weeks=1))
+
+ updated_tag = Tag.get(id=tag.id)
+ assert updated_tag.lifetime_end_ms is not None
+
+
+def test_create_temporary_tag_if_necessary(initialized_db):
+ tag = Tag.get()
+ manifest = tag.manifest
+ assert manifest is not None
+
+ # Ensure no tag is created, since an existing one is present.
+ created = create_temporary_tag_if_necessary(manifest, 60)
+ assert created is None
+
+ # Mark the tag as deleted.
+ tag.lifetime_end_ms = 1
+ tag.save()
+
+ # Now create a temp tag.
+ created = create_temporary_tag_if_necessary(manifest, 60)
+ assert created is not None
+ assert created.hidden
+ assert created.name.startswith('$temp-')
+ assert created.manifest == manifest
+ assert created.lifetime_end_ms is not None
+ assert created.lifetime_end_ms == (created.lifetime_start_ms + 60000)
+
+ # Try again and ensure it is not created.
+ created = create_temporary_tag_if_necessary(manifest, 30)
+ assert created is None
+
+
+def test_retarget_tag(initialized_db):
+ repo = get_repository('devtable', 'history')
+ results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
+
+ assert len(results) == 2
+ assert results[0].lifetime_end_ms is None
+ assert results[1].lifetime_end_ms is not None
+
+ # Revert back to the original manifest.
+ created = retarget_tag('latest', results[0].manifest, is_reversion=True,
+ now_ms=results[1].lifetime_end_ms + 10000)
+ assert created.lifetime_end_ms is None
+ assert created.reversion
+ assert created.name == 'latest'
+ assert created.manifest == results[0].manifest
+
+ # Verify in the history.
+ results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
+
+ assert len(results) == 3
+ assert results[0].lifetime_end_ms is None
+ assert results[1].lifetime_end_ms is not None
+ assert results[2].lifetime_end_ms is not None
+
+ assert results[0] == created
+
+ # Verify old-style tables.
+ repository_tag = TagToRepositoryTag.get(tag=created).repository_tag
+ assert repository_tag.lifetime_start_ts == int(created.lifetime_start_ms / 1000)
+
+ tag_manifest = TagManifest.get(tag=repository_tag)
+ assert TagManifestToManifest.get(tag_manifest=tag_manifest).manifest == created.manifest
+
+
+def test_retarget_tag_wrong_name(initialized_db):
+ repo = get_repository('devtable', 'history')
+ results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
+ assert len(results) == 2
+
+ created = retarget_tag('someothername', results[1].manifest, is_reversion=True)
+ assert created is None
+
+ results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
+ assert len(results) == 2
+
+
+def test_lookup_unrecoverable_tags(initialized_db):
+ # Ensure no existing tags are found.
+ for repo in Repository.select():
+ assert not list(lookup_unrecoverable_tags(repo))
+
+ # Mark a tag as outside the expiration window and ensure it is found.
+ repo = get_repository('devtable', 'history')
+ results, _ = list_repository_tag_history(repo, 1, 100, specific_tag_name='latest')
+ assert len(results) == 2
+
+ results[1].lifetime_end_ms = 1
+ results[1].save()
+
+ # Ensure the tag is now found.
+ found = list(lookup_unrecoverable_tags(repo))
+ assert found
+ assert len(found) == 1
+ assert found[0] == results[1]
+
+ # Mark the tag as expiring in the future and ensure it is no longer found.
+ results[1].lifetime_end_ms = get_epoch_timestamp_ms() + 1000000
+ results[1].save()
+
+ found = list(lookup_unrecoverable_tags(repo))
+ assert not found
diff --git a/data/model/organization.py b/data/model/organization.py
new file mode 100644
index 000000000..b42f0d454
--- /dev/null
+++ b/data/model/organization.py
@@ -0,0 +1,167 @@
+
+from data.database import (User, FederatedLogin, TeamMember, Team, TeamRole, RepositoryPermission,
+ Repository, Namespace, DeletedNamespace)
+from data.model import (user, team, DataModelException, InvalidOrganizationException,
+ InvalidUsernameException, db_transaction, _basequery)
+
+
+def create_organization(name, email, creating_user, email_required=True, is_possible_abuser=False):
+ with db_transaction():
+ try:
+ # Create the org
+ new_org = user.create_user_noverify(name, email, email_required=email_required,
+ is_possible_abuser=is_possible_abuser)
+ new_org.organization = True
+ new_org.save()
+
+ # Create a team for the owners
+ owners_team = team.create_team('owners', new_org, 'admin')
+
+ # Add the user who created the org to the owners team
+ team.add_user_to_team(creating_user, owners_team)
+
+ return new_org
+ except InvalidUsernameException as iue:
+ raise InvalidOrganizationException(iue.message)
+
+
+def get_organization(name):
+ try:
+ return User.get(username=name, organization=True)
+ except User.DoesNotExist:
+ raise InvalidOrganizationException('Organization does not exist: %s' %
+ name)
+
+
+def convert_user_to_organization(user_obj, admin_user):
+ if user_obj.robot:
+ raise DataModelException('Cannot convert a robot into an organization')
+
+ with db_transaction():
+ # Change the user to an organization and disable this account for login.
+ user_obj.organization = True
+ user_obj.password_hash = None
+ user_obj.save()
+
+ # Clear any federated auth pointing to this user.
+ FederatedLogin.delete().where(FederatedLogin.user == user_obj).execute()
+
+ # Delete any user-specific permissions on repositories.
+ (RepositoryPermission.delete()
+ .where(RepositoryPermission.user == user_obj)
+ .execute())
+
+ # Create a team for the owners
+ owners_team = team.create_team('owners', user_obj, 'admin')
+
+ # Add the user who will admin the org to the owners team
+ team.add_user_to_team(admin_user, owners_team)
+
+ return user_obj
+
+
+def get_user_organizations(username):
+ return _basequery.get_user_organizations(username)
+
+def get_organization_team_members(teamid):
+ joined = User.select().join(TeamMember).join(Team)
+ query = joined.where(Team.id == teamid)
+ return query
+
+
+def __get_org_admin_users(org):
+ return (User
+ .select()
+ .join(TeamMember)
+ .join(Team)
+ .join(TeamRole)
+ .where(Team.organization == org, TeamRole.name == 'admin', User.robot == False)
+ .distinct())
+
+def get_admin_users(org):
+ """ Returns the owner users for the organization. """
+ return __get_org_admin_users(org)
+
+def remove_organization_member(org, user_obj):
+ org_admins = [u.username for u in __get_org_admin_users(org)]
+ if len(org_admins) == 1 and user_obj.username in org_admins:
+ raise DataModelException('Cannot remove user as they are the only organization admin')
+
+ with db_transaction():
+ # Find and remove the user from any repositories under the org.
+ permissions = list(RepositoryPermission
+ .select(RepositoryPermission.id)
+ .join(Repository)
+ .where(Repository.namespace_user == org,
+ RepositoryPermission.user == user_obj))
+
+ if permissions:
+ RepositoryPermission.delete().where(RepositoryPermission.id << permissions).execute()
+
+ # Find and remove the user from any teams under the org.
+ members = list(TeamMember
+ .select(TeamMember.id)
+ .join(Team)
+ .where(Team.organization == org, TeamMember.user == user_obj))
+
+ if members:
+ TeamMember.delete().where(TeamMember.id << members).execute()
+
+
+def get_organization_member_set(org, include_robots=False, users_filter=None):
+ """ Returns the set of all member usernames under the given organization, with optional
+ filtering by robots and/or by a specific set of User objects.
+ """
+ org_users = (User
+ .select(User.username)
+ .join(TeamMember)
+ .join(Team)
+ .where(Team.organization == org)
+ .distinct())
+
+ if not include_robots:
+ org_users = org_users.where(User.robot == False)
+
+ if users_filter is not None:
+ ids_list = [u.id for u in users_filter if u is not None]
+ if not ids_list:
+ return set()
+
+ org_users = org_users.where(User.id << ids_list)
+
+ return {user.username for user in org_users}
+
+
+def get_all_repo_users_transitive_via_teams(namespace_name, repository_name):
+ return (User
+ .select()
+ .distinct()
+ .join(TeamMember)
+ .join(Team)
+ .join(RepositoryPermission)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
+
+
+def get_organizations(deleted=False):
+ query = User.select().where(User.organization == True, User.robot == False)
+
+ if not deleted:
+ query = query.where(User.id.not_in(DeletedNamespace.select(DeletedNamespace.namespace)))
+
+ return query
+
+
+def get_active_org_count():
+ return get_organizations().count()
+
+
+def add_user_as_admin(user_obj, org_obj):
+ try:
+ admin_role = TeamRole.get(name='admin')
+ admin_team = Team.select().where(Team.role == admin_role, Team.organization == org_obj).get()
+ team.add_user_to_team(user_obj, admin_team)
+ except team.UserAlreadyInTeam:
+ pass
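+
+
+# A minimal usage sketch for the helpers above, assuming `creating_user` and `another_user`
+# are existing, non-robot User rows and the organization name is chosen only for illustration:
+#
+#   org = create_organization('buynlarge', 'owner@example.com', creating_user)
+#   assert get_organization('buynlarge').id == org.id
+#   add_user_as_admin(another_user, org)
+#   assert another_user.username in get_organization_member_set(org)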
diff --git a/data/model/permission.py b/data/model/permission.py
new file mode 100644
index 000000000..e38584561
--- /dev/null
+++ b/data/model/permission.py
@@ -0,0 +1,322 @@
+from peewee import JOIN
+
+from data.database import (RepositoryPermission, User, Repository, Visibility, Role, TeamMember,
+ PermissionPrototype, Team, TeamRole, Namespace)
+from data.model import DataModelException, _basequery
+from util.names import parse_robot_username
+
+def list_team_permissions(team):
+ return (RepositoryPermission
+ .select(RepositoryPermission)
+ .join(Repository)
+ .join(Visibility)
+ .switch(RepositoryPermission)
+ .join(Role)
+ .switch(RepositoryPermission)
+ .where(RepositoryPermission.team == team))
+
+
+def list_robot_permissions(robot_name):
+ return (RepositoryPermission
+ .select(RepositoryPermission, User, Repository)
+ .join(Repository)
+ .join(Visibility)
+ .switch(RepositoryPermission)
+ .join(Role)
+ .switch(RepositoryPermission)
+ .join(User)
+ .where(User.username == robot_name, User.robot == True))
+
+
+def list_organization_member_permissions(organization, limit_to_user=None):
+ query = (RepositoryPermission
+ .select(RepositoryPermission, Repository, User)
+ .join(Repository)
+ .switch(RepositoryPermission)
+ .join(User)
+ .where(Repository.namespace_user == organization))
+
+ if limit_to_user is not None:
+ query = query.where(RepositoryPermission.user == limit_to_user)
+ else:
+ query = query.where(User.robot == False)
+
+ return query
+
+
+def get_all_user_repository_permissions(user):
+ return _get_user_repo_permissions(user)
+
+
+def get_user_repo_permissions(user, repo):
+ return _get_user_repo_permissions(user, limit_to_repository_obj=repo)
+
+
+def get_user_repository_permissions(user, namespace, repo_name):
+ return _get_user_repo_permissions(user, limit_namespace=namespace, limit_repo_name=repo_name)
+
+
+def _get_user_repo_permissions(user, limit_to_repository_obj=None, limit_namespace=None,
+ limit_repo_name=None):
+ UserThroughTeam = User.alias()
+
+ base_query = (RepositoryPermission
+ .select(RepositoryPermission, Role, Repository, Namespace)
+ .join(Role)
+ .switch(RepositoryPermission)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(RepositoryPermission))
+
+ if limit_to_repository_obj is not None:
+ base_query = base_query.where(RepositoryPermission.repository == limit_to_repository_obj)
+ elif limit_namespace and limit_repo_name:
+ base_query = base_query.where(Repository.name == limit_repo_name,
+ Namespace.username == limit_namespace)
+
+ direct = (base_query
+ .clone()
+ .join(User)
+ .where(User.id == user))
+
+ team = (base_query
+ .clone()
+ .join(Team)
+ .join(TeamMember)
+ .join(UserThroughTeam, on=(UserThroughTeam.id == TeamMember.user))
+ .where(UserThroughTeam.id == user))
+
+ return direct | team
+
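+# The union above covers both permissions granted to the user directly and those inherited
+# through team membership. A small consumption sketch, assuming a 'readers' team and an
+# organization repository 'buynlarge/orgrepo' exist and `member` belongs only to that team:
+#
+#   set_team_repo_permission('readers', 'buynlarge', 'orgrepo', 'read')
+#   perms = get_user_repository_permissions(member, 'buynlarge', 'orgrepo')
+#   assert any(perm.role.name == 'read' for perm in perms)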
+
+def delete_prototype_permission(org, uid):
+ found = get_prototype_permission(org, uid)
+ if not found:
+ return None
+
+ found.delete_instance()
+ return found
+
+
+def get_prototype_permission(org, uid):
+ try:
+ return PermissionPrototype.get(PermissionPrototype.org == org,
+ PermissionPrototype.uuid == uid)
+ except PermissionPrototype.DoesNotExist:
+ return None
+
+
+def get_prototype_permissions(org):
+ ActivatingUser = User.alias()
+ DelegateUser = User.alias()
+ query = (PermissionPrototype
+ .select()
+ .where(PermissionPrototype.org == org)
+ .join(ActivatingUser, JOIN.LEFT_OUTER,
+ on=(ActivatingUser.id == PermissionPrototype.activating_user))
+ .join(DelegateUser, JOIN.LEFT_OUTER,
+ on=(DelegateUser.id == PermissionPrototype.delegate_user))
+ .join(Team, JOIN.LEFT_OUTER,
+ on=(Team.id == PermissionPrototype.delegate_team))
+ .join(Role, JOIN.LEFT_OUTER, on=(Role.id == PermissionPrototype.role)))
+ return query
+
+
+def update_prototype_permission(org, uid, role_name):
+ found = get_prototype_permission(org, uid)
+ if not found:
+ return None
+
+ new_role = Role.get(Role.name == role_name)
+ found.role = new_role
+ found.save()
+ return found
+
+
+def add_prototype_permission(org, role_name, activating_user,
+ delegate_user=None, delegate_team=None):
+ new_role = Role.get(Role.name == role_name)
+ return PermissionPrototype.create(org=org, role=new_role, activating_user=activating_user,
+ delegate_user=delegate_user, delegate_team=delegate_team)
+
+
+def get_org_wide_permissions(user, org_filter=None):
+ Org = User.alias()
+ team_with_role = Team.select(Team, Org, TeamRole).join(TeamRole)
+  with_org = team_with_role.switch(Team).join(Org, on=(Team.organization == Org.id))
+ with_user = with_org.switch(Team).join(TeamMember).join(User)
+
+  if org_filter:
+    with_user = with_user.where(Org.username == org_filter)
+
+ return with_user.where(User.id == user, Org.organization == True)
+
+
+def get_all_repo_teams(namespace_name, repository_name):
+ return (RepositoryPermission
+ .select(Team.name, Role.name, RepositoryPermission)
+ .join(Team)
+ .switch(RepositoryPermission)
+ .join(Role)
+ .switch(RepositoryPermission)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
+
+
+def apply_default_permissions(repo_obj, creating_user_obj):
+ org = repo_obj.namespace_user
+ user_clause = ((PermissionPrototype.activating_user == creating_user_obj) |
+ (PermissionPrototype.activating_user >> None))
+
+ team_protos = (PermissionPrototype
+ .select()
+ .where(PermissionPrototype.org == org, user_clause,
+ PermissionPrototype.delegate_user >> None))
+
+ def create_team_permission(team, repo, role):
+ RepositoryPermission.create(team=team, repository=repo, role=role)
+
+ __apply_permission_list(repo_obj, team_protos, 'name', create_team_permission)
+
+ user_protos = (PermissionPrototype
+ .select()
+ .where(PermissionPrototype.org == org, user_clause,
+ PermissionPrototype.delegate_team >> None))
+
+ def create_user_permission(user, repo, role):
+ # The creating user always gets admin anyway
+ if user.username == creating_user_obj.username:
+ return
+
+ RepositoryPermission.create(user=user, repository=repo, role=role)
+
+ __apply_permission_list(repo_obj, user_protos, 'username', create_user_permission)
+
+
+def __apply_permission_list(repo, proto_query, name_property, create_permission_func):
+ final_protos = {}
+ for proto in proto_query:
+ applies_to = proto.delegate_team or proto.delegate_user
+ name = getattr(applies_to, name_property)
+ # We will skip the proto if it is pre-empted by a more important proto
+ if name in final_protos and proto.activating_user is None:
+ continue
+
+ # By this point, it is either a user specific proto, or there is no
+ # proto yet, so we can safely assume it applies
+ final_protos[name] = (applies_to, proto.role)
+
+ for delegate, role in final_protos.values():
+ create_permission_func(delegate, repo, role)
+
+
+def __entity_permission_repo_query(entity_id, entity_table, entity_id_property, namespace_name,
+ repository_name):
+ """ This method works for both users and teams. """
+
+ return (RepositoryPermission
+ .select(entity_table, Repository, Namespace, Role, RepositoryPermission)
+ .join(entity_table)
+ .switch(RepositoryPermission)
+ .join(Role)
+ .switch(RepositoryPermission)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.name == repository_name, Namespace.username == namespace_name,
+ entity_id_property == entity_id))
+
+
+def get_user_reponame_permission(username, namespace_name, repository_name):
+ fetched = list(__entity_permission_repo_query(username, User, User.username, namespace_name,
+ repository_name))
+ if not fetched:
+ raise DataModelException('User does not have permission for repo.')
+
+ return fetched[0]
+
+
+def get_team_reponame_permission(team_name, namespace_name, repository_name):
+ fetched = list(__entity_permission_repo_query(team_name, Team, Team.name, namespace_name,
+ repository_name))
+ if not fetched:
+ raise DataModelException('Team does not have permission for repo.')
+
+ return fetched[0]
+
+
+def delete_user_permission(username, namespace_name, repository_name):
+ if username == namespace_name:
+ raise DataModelException('Namespace owner must always be admin.')
+
+ fetched = list(__entity_permission_repo_query(username, User, User.username, namespace_name,
+ repository_name))
+ if not fetched:
+ raise DataModelException('User does not have permission for repo.')
+
+ fetched[0].delete_instance()
+
+
+def delete_team_permission(team_name, namespace_name, repository_name):
+ fetched = list(__entity_permission_repo_query(team_name, Team, Team.name, namespace_name,
+ repository_name))
+ if not fetched:
+ raise DataModelException('Team does not have permission for repo.')
+
+ fetched[0].delete_instance()
+
+
+def __set_entity_repo_permission(entity, permission_entity_property,
+ namespace_name, repository_name, role_name):
+ repo = _basequery.get_existing_repository(namespace_name, repository_name)
+ new_role = Role.get(Role.name == role_name)
+
+ # Fetch any existing permission for this entity on the repo
+ try:
+ entity_attr = getattr(RepositoryPermission, permission_entity_property)
+ perm = RepositoryPermission.get(entity_attr == entity, RepositoryPermission.repository == repo)
+ perm.role = new_role
+ perm.save()
+ return perm
+ except RepositoryPermission.DoesNotExist:
+ set_entity_kwargs = {permission_entity_property: entity}
+ new_perm = RepositoryPermission.create(repository=repo, role=new_role, **set_entity_kwargs)
+ return new_perm
+
+
+def set_user_repo_permission(username, namespace_name, repository_name, role_name):
+ if username == namespace_name:
+ raise DataModelException('Namespace owner must always be admin.')
+
+ try:
+ user = User.get(User.username == username)
+ except User.DoesNotExist:
+ raise DataModelException('Invalid username: %s' % username)
+
+ if user.robot:
+ parts = parse_robot_username(user.username)
+ if not parts:
+ raise DataModelException('Invalid robot: %s' % username)
+
+ robot_namespace, _ = parts
+ if robot_namespace != namespace_name:
+ raise DataModelException('Cannot add robot %s under namespace %s' %
+ (username, namespace_name))
+
+ return __set_entity_repo_permission(user, 'user', namespace_name, repository_name, role_name)
+
+
+def set_team_repo_permission(team_name, namespace_name, repository_name, role_name):
+ try:
+ team = (Team
+ .select()
+ .join(User)
+ .where(Team.name == team_name, User.username == namespace_name)
+ .get())
+ except Team.DoesNotExist:
+ raise DataModelException('No team %s in organization %s' % (team_name, namespace_name))
+
+ return __set_entity_repo_permission(team, 'team', namespace_name, repository_name, role_name)
+
+
diff --git a/data/model/release.py b/data/model/release.py
new file mode 100644
index 000000000..f827eaeb0
--- /dev/null
+++ b/data/model/release.py
@@ -0,0 +1,21 @@
+from data.database import QuayRelease, QuayRegion, QuayService
+
+
+def set_region_release(service_name, region_name, version):
+ service, _ = QuayService.get_or_create(name=service_name)
+ region, _ = QuayRegion.get_or_create(name=region_name)
+
+ return QuayRelease.get_or_create(service=service, version=version, region=region)
+
+
+def get_recent_releases(service_name, region_name):
+ return (QuayRelease
+ .select(QuayRelease)
+ .join(QuayService)
+ .switch(QuayRelease)
+ .join(QuayRegion)
+ .where(QuayService.name == service_name,
+ QuayRegion.name == region_name,
+ QuayRelease.reverted == False,
+ )
+ .order_by(QuayRelease.created.desc()))
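+
+
+# A short sketch of how these two helpers pair up, with the service, region, and version
+# strings chosen purely for illustration:
+#
+#   set_region_release('quay', 'us-east-1', 'v3.0.0')
+#   latest = list(get_recent_releases('quay', 'us-east-1'))[0]
+#   assert latest.version == 'v3.0.0'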
diff --git a/data/model/repo_mirror.py b/data/model/repo_mirror.py
new file mode 100644
index 000000000..a9824f3ab
--- /dev/null
+++ b/data/model/repo_mirror.py
@@ -0,0 +1,519 @@
+import re
+
+from datetime import datetime, timedelta
+
+from peewee import IntegrityError, fn
+from jsonschema import ValidationError
+
+from data.database import (RepoMirrorConfig, RepoMirrorRule, RepoMirrorRuleType, RepoMirrorStatus,
+ RepositoryState, Repository, uuid_generator, db_transaction)
+from data.fields import DecryptedValue
+from data.model import DataModelException
+from util.names import parse_robot_username
+
+
+# TODO: Move these to the configuration
+MAX_SYNC_RETRIES = 3
+MAX_SYNC_DURATION = 60*60*2 # 2 Hours
+
+
+def get_eligible_mirrors():
+ """
+ Returns the RepoMirrorConfig that are ready to run now. This includes those that are:
+ 1. Not currently syncing but whose start time is in the past
+ 2. Status of "sync now"
+ 3. Currently marked as syncing but whose expiration time is in the past
+ """
+ now = datetime.utcnow()
+ immediate_candidates_filter = ((RepoMirrorConfig.sync_status == RepoMirrorStatus.SYNC_NOW) &
+ (RepoMirrorConfig.sync_expiration_date >> None))
+
+ ready_candidates_filter = ((RepoMirrorConfig.sync_start_date <= now) &
+ (RepoMirrorConfig.sync_retries_remaining > 0) &
+ (RepoMirrorConfig.sync_status != RepoMirrorStatus.SYNCING) &
+ (RepoMirrorConfig.sync_expiration_date >> None) &
+ (RepoMirrorConfig.is_enabled == True))
+
+ expired_candidates_filter = ((RepoMirrorConfig.sync_start_date <= now) &
+ (RepoMirrorConfig.sync_retries_remaining > 0) &
+ (RepoMirrorConfig.sync_status == RepoMirrorStatus.SYNCING) &
+ (RepoMirrorConfig.sync_expiration_date <= now) &
+ (RepoMirrorConfig.is_enabled == True))
+
+ return (RepoMirrorConfig
+ .select()
+ .join(Repository)
+ .where(Repository.state == RepositoryState.MIRROR)
+ .where(immediate_candidates_filter | ready_candidates_filter | expired_candidates_filter)
+ .order_by(RepoMirrorConfig.sync_start_date.asc()))
+
+
+def get_max_id_for_repo_mirror_config():
+ """ Gets the maximum id for repository mirroring """
+ return RepoMirrorConfig.select(fn.Max(RepoMirrorConfig.id)).scalar()
+
+
+def get_min_id_for_repo_mirror_config():
+  """ Gets the minimum id for repository mirroring """
+ return RepoMirrorConfig.select(fn.Min(RepoMirrorConfig.id)).scalar()
+
+
+def claim_mirror(mirror):
+ """
+ Attempt to create an exclusive lock on the RepoMirrorConfig and return it.
+ If unable to create the lock, `None` will be returned.
+ """
+
+ # Attempt to update the RepoMirrorConfig to mark it as "claimed"
+ now = datetime.utcnow()
+ expiration_date = now + timedelta(seconds=MAX_SYNC_DURATION)
+ query = (RepoMirrorConfig
+ .update(sync_status=RepoMirrorStatus.SYNCING,
+ sync_expiration_date=expiration_date,
+ sync_transaction_id=uuid_generator())
+ .where(RepoMirrorConfig.id == mirror.id,
+ RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id))
+
+ # If the update was successful, then it was claimed. Return the updated instance.
+ if query.execute():
+ return RepoMirrorConfig.get_by_id(mirror.id)
+
+ return None # Another process must have claimed the mirror faster.
+
+
+def release_mirror(mirror, sync_status):
+ """
+ Return a mirror to the queue and update its status.
+
+  Upon success, the next sync is moved to the next interval in the future. Failures keep the
+  current start date so that they are picked up again for a repeat attempt. After
+  MAX_SYNC_RETRIES, the next sync will be moved ahead as if it were a success, which allows a
+  daily sync, for example, to retry the next day. Without this, users would need to manually
+  run syncs to clear the failure state.
+ """
+  # Default to the current retry budget; it is only decremented on failure.
+  retries = mirror.sync_retries_remaining
+  if sync_status == RepoMirrorStatus.FAIL:
+    retries = max(0, mirror.sync_retries_remaining - 1)
+
+ if sync_status == RepoMirrorStatus.SUCCESS or retries < 1:
+ now = datetime.utcnow()
+ delta = now - mirror.sync_start_date
+ delta_seconds = (delta.days * 24 * 60 * 60) + delta.seconds
+ next_start_date = now + timedelta(seconds=mirror.sync_interval - (delta_seconds % mirror.sync_interval))
+ retries = MAX_SYNC_RETRIES
+ else:
+ next_start_date = mirror.sync_start_date
+
+ query = (RepoMirrorConfig
+ .update(sync_transaction_id=uuid_generator(),
+ sync_status=sync_status,
+ sync_start_date=next_start_date,
+ sync_expiration_date=None,
+ sync_retries_remaining=retries)
+ .where(RepoMirrorConfig.id == mirror.id,
+ RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id))
+
+ if query.execute():
+ return RepoMirrorConfig.get_by_id(mirror.id)
+
+ # Unable to release Mirror. Has it been claimed by another process?
+ return None
+
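+# A rough sketch of the claim/release cycle a mirroring worker would follow, where
+# `perform_sync(mirror)` stands in for the actual synchronization logic:
+#
+#   for candidate in get_eligible_mirrors():
+#     mirror = claim_mirror(candidate)
+#     if mirror is None:
+#       continue  # another worker claimed it first
+#     okay = perform_sync(mirror)
+#     release_mirror(mirror, RepoMirrorStatus.SUCCESS if okay else RepoMirrorStatus.FAIL)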
+
+def expire_mirror(mirror):
+ """
+ Set the mirror to synchronize ASAP and reset its failure count.
+ """
+
+ # Set the next-sync date to now
+ # TODO: Verify the `where` conditions would not expire a currently syncing mirror.
+ query = (RepoMirrorConfig
+ .update(sync_transaction_id=uuid_generator(),
+ sync_expiration_date=datetime.utcnow(),
+ sync_retries_remaining=MAX_SYNC_RETRIES)
+ .where(RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
+ RepoMirrorConfig.id == mirror.id,
+                  RepoMirrorConfig.sync_status != RepoMirrorStatus.SYNCING))
+
+ # Fetch and return the latest updates
+ if query.execute():
+ return RepoMirrorConfig.get_by_id(mirror.id)
+
+ # Unable to update expiration date. Perhaps another process has claimed it?
+ return None # TODO: Raise some Exception?
+
+
+def create_mirroring_rule(repository, rule_value, rule_type=RepoMirrorRuleType.TAG_GLOB_CSV):
+ """
+ Create a RepoMirrorRule for a given Repository.
+ """
+
+ if rule_type != RepoMirrorRuleType.TAG_GLOB_CSV:
+ raise ValidationError('validation failed: rule_type must be TAG_GLOB_CSV')
+
+ if not isinstance(rule_value, list) or len(rule_value) < 1:
+ raise ValidationError('validation failed: rule_value for TAG_GLOB_CSV must be a list with at least one rule')
+
+ rule = RepoMirrorRule.create(repository=repository, rule_type=rule_type, rule_value=rule_value)
+ return rule
+
+
+def enable_mirroring_for_repository(repository,
+ root_rule,
+ internal_robot,
+ external_reference,
+ sync_interval,
+ external_registry_username=None,
+ external_registry_password=None,
+ external_registry_config=None,
+ is_enabled=True,
+ sync_start_date=None):
+ """
+ Create a RepoMirrorConfig and set the Repository to the MIRROR state.
+ """
+ assert internal_robot.robot
+
+ namespace, _ = parse_robot_username(internal_robot.username)
+ if namespace != repository.namespace_user.username:
+ raise DataModelException('Cannot use robot for mirroring')
+
+ with db_transaction():
+ # Create the RepoMirrorConfig
+ try:
+ username = DecryptedValue(external_registry_username) if external_registry_username else None
+ password = DecryptedValue(external_registry_password) if external_registry_password else None
+ mirror = RepoMirrorConfig.create(repository=repository,
+ root_rule=root_rule,
+ is_enabled=is_enabled,
+ internal_robot=internal_robot,
+ external_reference=external_reference,
+ external_registry_username=username,
+ external_registry_password=password,
+ external_registry_config=external_registry_config or {},
+ sync_interval=sync_interval,
+ sync_start_date=sync_start_date or datetime.utcnow())
+ except IntegrityError:
+ return RepoMirrorConfig.get(repository=repository)
+
+ # Change Repository state to mirroring mode as needed
+ if repository.state != RepositoryState.MIRROR:
+ query = (Repository
+ .update(state=RepositoryState.MIRROR)
+ .where(Repository.id == repository.id))
+ if not query.execute():
+ raise DataModelException('Could not change the state of the repository')
+
+ return mirror
+
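+# A minimal sketch of wiring a repository up for mirroring, assuming `repo` and `robot`
+# (a robot account in the same namespace) already exist and the tag globs are illustrative:
+#
+#   rule = create_mirroring_rule(repo, ['3.6*', 'latest'])
+#   mirror = enable_mirroring_for_repository(repo, root_rule=rule, internal_robot=robot,
+#                                            external_reference='registry.example.com/ns/repo',
+#                                            sync_interval=60 * 60 * 24)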
+
+def update_sync_status(mirror, sync_status):
+ """
+ Update the sync status
+ """
+ query = (RepoMirrorConfig
+ .update(sync_transaction_id=uuid_generator(),
+ sync_status=sync_status)
+ .where(RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
+ RepoMirrorConfig.id == mirror.id))
+ if query.execute():
+ return RepoMirrorConfig.get_by_id(mirror.id)
+
+ return None
+
+
+def update_sync_status_to_sync_now(mirror):
+ """
+ This will change the sync status to SYNC_NOW and set the retries remaining to one, if it is
+ less than one. None will be returned in cases where this is not possible, such as if the
+ mirror is in the SYNCING state.
+ """
+
+ if mirror.sync_status == RepoMirrorStatus.SYNCING:
+ return None
+
+ retries = max(mirror.sync_retries_remaining, 1)
+
+ query = (RepoMirrorConfig
+ .update(sync_transaction_id=uuid_generator(),
+ sync_status=RepoMirrorStatus.SYNC_NOW,
+ sync_expiration_date=None,
+ sync_retries_remaining=retries)
+ .where(RepoMirrorConfig.id == mirror.id,
+ RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id))
+
+ if query.execute():
+ return RepoMirrorConfig.get_by_id(mirror.id)
+
+ return None
+
+
+def update_sync_status_to_cancel(mirror):
+ """
+  If the mirror is SYNCING or SYNC_NOW, it will be force-claimed (ignoring the existing
+  transaction id) and its sync status set to NEVER_RUN. None will be returned in cases where
+  this is not possible, such as when the mirror is in neither of those states.
+ """
+
+ if mirror.sync_status != RepoMirrorStatus.SYNCING and mirror.sync_status != RepoMirrorStatus.SYNC_NOW:
+ return None
+
+ query = (RepoMirrorConfig
+ .update(sync_transaction_id=uuid_generator(),
+ sync_status=RepoMirrorStatus.NEVER_RUN,
+ sync_expiration_date=None)
+ .where(RepoMirrorConfig.id == mirror.id))
+
+ if query.execute():
+ return RepoMirrorConfig.get_by_id(mirror.id)
+
+ return None
+
+
+def update_with_transaction(mirror, **kwargs):
+ """
+ Helper function which updates a Repository's RepoMirrorConfig while also rolling its
+ sync_transaction_id for locking purposes.
+ """
+
+ # RepoMirrorConfig attributes which can be modified
+ mutable_attributes = (
+ 'is_enabled',
+ 'mirror_type',
+ 'external_reference',
+ 'external_registry_username',
+ 'external_registry_password',
+ 'external_registry_config',
+ 'sync_interval',
+ 'sync_start_date',
+ 'sync_expiration_date',
+ 'sync_retries_remaining',
+ 'sync_status',
+ 'sync_transaction_id'
+ )
+
+ # Key-Value map of changes to make
+ filtered_kwargs = {key:kwargs.pop(key) for key in mutable_attributes if key in kwargs}
+
+ # Roll the sync_transaction_id to a new value
+ filtered_kwargs['sync_transaction_id'] = uuid_generator()
+
+ # Generate the query to perform the updates
+ query = (RepoMirrorConfig
+ .update(filtered_kwargs)
+ .where(RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
+ RepoMirrorConfig.id == mirror.id))
+
+ # Apply the change(s) and return the object if successful
+ if query.execute():
+ return RepoMirrorConfig.get_by_id(mirror.id)
+ else:
+ return None
+
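+# The rolling sync_transaction_id acts as an optimistic lock: the update above only lands if no
+# other process has rolled the id since `mirror` was read. A caller sketch, assuming `mirror`
+# was previously fetched via get_mirror():
+#
+#   updated = update_with_transaction(mirror, sync_interval=3600)
+#   if updated is None:
+#     mirror = get_mirror(mirror.repository)  # lost the race; re-read before retrying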
+
+def get_mirror(repository):
+ """
+ Return the RepoMirrorConfig associated with the given Repository, or None if it doesn't exist.
+ """
+ try:
+ return RepoMirrorConfig.get(repository=repository)
+ except RepoMirrorConfig.DoesNotExist:
+ return None
+
+
+def enable_mirror(repository):
+ """
+ Enables a RepoMirrorConfig.
+ """
+ mirror = get_mirror(repository)
+ return bool(update_with_transaction(mirror, is_enabled=True))
+
+
+def disable_mirror(repository):
+ """
+ Disables a RepoMirrorConfig.
+ """
+ mirror = get_mirror(repository)
+ return bool(update_with_transaction(mirror, is_enabled=False))
+
+
+def delete_mirror(repository):
+ """
+ Delete a Repository Mirroring configuration.
+ """
+ raise NotImplementedError("TODO: Not Implemented")
+
+
+def change_remote(repository, remote_repository):
+ """
+ Update the external repository for Repository Mirroring.
+ """
+ mirror = get_mirror(repository)
+ updates = {
+ 'external_reference': remote_repository
+ }
+ return bool(update_with_transaction(mirror, **updates))
+
+
+def change_credentials(repository, username, password):
+ """
+ Update the credentials used to access the remote repository.
+ """
+ mirror = get_mirror(repository)
+ updates = {
+ 'external_registry_username': username,
+ 'external_registry_password': password,
+ }
+ return bool(update_with_transaction(mirror, **updates))
+
+
+def change_username(repository, username):
+ """
+ Update the Username used to access the external repository.
+ """
+ mirror = get_mirror(repository)
+ return bool(update_with_transaction(mirror, external_registry_username=username))
+
+
+def change_sync_interval(repository, interval):
+ """
+ Update the interval at which a repository will be synchronized.
+ """
+ mirror = get_mirror(repository)
+ return bool(update_with_transaction(mirror, sync_interval=interval))
+
+
+def change_sync_start_date(repository, dt):
+ """
+ Specify when the repository should be synchronized next.
+ """
+ mirror = get_mirror(repository)
+ return bool(update_with_transaction(mirror, sync_start_date=dt))
+
+
+def change_root_rule(repository, rule):
+ """
+ Specify which rule should be used for repository mirroring.
+ """
+ assert rule.repository == repository
+ mirror = get_mirror(repository)
+ return bool(update_with_transaction(mirror, root_rule=rule))
+
+
+def change_sync_status(repository, sync_status):
+ """
+ Change Repository's mirroring status.
+ """
+ mirror = get_mirror(repository)
+ return update_with_transaction(mirror, sync_status=sync_status)
+
+
+def change_retries_remaining(repository, retries_remaining):
+ """
+ Change the number of retries remaining for mirroring a repository.
+ """
+ mirror = get_mirror(repository)
+ return update_with_transaction(mirror, sync_retries_remaining=retries_remaining)
+
+
+def change_external_registry_config(repository, config_updates):
+ """
+ Update the 'external_registry_config' with the passed in fields. Config has:
+ verify_tls: True|False
+      proxy: JSON fields 'http_proxy', 'https_proxy', and 'no_proxy'
+ """
+ mirror = get_mirror(repository)
+ external_registry_config = mirror.external_registry_config
+
+ if 'verify_tls' in config_updates:
+ external_registry_config['verify_tls'] = config_updates['verify_tls']
+
+  if 'proxy' in config_updates:
+    proxy_updates = config_updates['proxy']
+
+    # Ensure the nested proxy dict exists before copying over any updated keys.
+    if 'proxy' not in external_registry_config:
+      external_registry_config['proxy'] = {}
+
+    for key in ('http_proxy', 'https_proxy', 'no_proxy'):
+      if key in proxy_updates:
+        external_registry_config['proxy'][key] = proxy_updates[key]
+
+ return update_with_transaction(mirror, external_registry_config=external_registry_config)
+
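+# The expected shape of `config_updates`, shown with placeholder values:
+#
+#   change_external_registry_config(repo, {
+#     'verify_tls': False,
+#     'proxy': {'http_proxy': 'http://proxy.example.com:3128', 'no_proxy': 'localhost'},
+#   })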
+
+def get_mirroring_robot(repository):
+ """
+ Return the robot used for mirroring. Returns None if the repository does not have an associated
+ RepoMirrorConfig or the robot does not exist.
+ """
+ mirror = get_mirror(repository)
+ if mirror:
+ return mirror.internal_robot
+
+ return None
+
+
+def set_mirroring_robot(repository, robot):
+ """
+ Sets the mirroring robot for the repository.
+ """
+ assert robot.robot
+ namespace, _ = parse_robot_username(robot.username)
+ if namespace != repository.namespace_user.username:
+ raise DataModelException('Cannot use robot for mirroring')
+
+ mirror = get_mirror(repository)
+ mirror.internal_robot = robot
+ mirror.save()
+
+
+# -------------------- Mirroring Rules --------------------------#
+
+
+def create_rule(repository, rule_value, rule_type=RepoMirrorRuleType.TAG_GLOB_CSV, left_child=None, right_child=None):
+ """
+ Create a new Rule for mirroring a Repository
+ """
+
+ if rule_type != RepoMirrorRuleType.TAG_GLOB_CSV:
+ raise ValidationError('validation failed: rule_type must be TAG_GLOB_CSV')
+
+ if not isinstance(rule_value, list) or len(rule_value) < 1:
+ raise ValidationError('validation failed: rule_value for TAG_GLOB_CSV must be a list with at least one rule')
+
+ rule_kwargs = {
+ 'repository': repository,
+ 'rule_value': rule_value,
+ 'rule_type': rule_type,
+ 'left_child': left_child,
+ 'right_child': right_child,
+ }
+ rule = RepoMirrorRule.create(**rule_kwargs)
+ return rule
+
+
+def list_rules(repository):
+ """
+ Returns all RepoMirrorRules associated with a Repository.
+ """
+ rules = RepoMirrorRule.select().where(RepoMirrorRule.repository == repository).all()
+ return rules
+
+
+def get_root_rule(repository):
+ """
+ Return the primary mirroring Rule
+ """
+ try:
+ rule = RepoMirrorRule.get(repository=repository)
+ return rule
+ except RepoMirrorRule.DoesNotExist:
+ return None
+
+
+def change_rule_value(rule, value):
+ """
+ Update the value of an existing rule.
+ """
+ query = (RepoMirrorRule
+ .update(rule_value=value)
+ .where(RepoMirrorRule.id == rule.id))
+ return query.execute()
diff --git a/data/model/repository.py b/data/model/repository.py
new file mode 100644
index 000000000..3400bfde8
--- /dev/null
+++ b/data/model/repository.py
@@ -0,0 +1,457 @@
+import logging
+import random
+
+from enum import Enum
+from datetime import timedelta, datetime
+from peewee import Case, JOIN, fn, SQL, IntegrityError
+from cachetools.func import ttl_cache
+
+from data.model import (
+ config, DataModelException, tag, db_transaction, storage, permission, _basequery)
+from data.database import (
+ Repository, Namespace, RepositoryTag, Star, Image, ImageStorage, User, Visibility,
+ RepositoryPermission, RepositoryActionCount, Role, RepositoryAuthorizedEmail,
+ DerivedStorageForImage, Label, db_for_update, get_epoch_timestamp,
+ db_random_func, db_concat_func, RepositorySearchScore, RepositoryKind, ApprTag,
+ ManifestLegacyImage, Manifest, ManifestChild)
+from data.text import prefix_search
+from util.itertoolrecipes import take
+
+logger = logging.getLogger(__name__)
+SEARCH_FIELDS = Enum("SearchFields", ["name", "description"])
+
+
+class RepoStateConfigException(Exception):
+ """ Repository.state value requires further configuration to operate. """
+ pass
+
+
+def get_repo_kind_name(repo):
+ return Repository.kind.get_name(repo.kind_id)
+
+
+def get_repository_count():
+ return Repository.select().count()
+
+
+def get_public_repo_visibility():
+ return _basequery.get_public_repo_visibility()
+
+
+def create_repository(namespace, name, creating_user, visibility='private', repo_kind='image',
+ description=None):
+ namespace_user = User.get(username=namespace)
+ yesterday = datetime.now() - timedelta(days=1)
+
+ with db_transaction():
+ repo = Repository.create(name=name, visibility=Repository.visibility.get_id(visibility),
+ namespace_user=namespace_user,
+ kind=Repository.kind.get_id(repo_kind),
+ description=description)
+
+ RepositoryActionCount.create(repository=repo, count=0, date=yesterday)
+ RepositorySearchScore.create(repository=repo, score=0)
+
+ # Note: We put the admin create permission under the transaction to ensure it is created.
+ if creating_user and not creating_user.organization:
+ admin = Role.get(name='admin')
+ RepositoryPermission.create(user=creating_user, repository=repo, role=admin)
+
+ # Apply default permissions (only occurs for repositories under organizations)
+ if creating_user and not creating_user.organization and creating_user.username != namespace:
+ permission.apply_default_permissions(repo, creating_user)
+
+ return repo
+
+
+def get_repository(namespace_name, repository_name, kind_filter=None):
+ try:
+ return _basequery.get_existing_repository(namespace_name, repository_name,
+ kind_filter=kind_filter)
+ except Repository.DoesNotExist:
+ return None
+
+
+def get_or_create_repository(namespace, name, creating_user, visibility='private',
+ repo_kind='image'):
+ repo = get_repository(namespace, name, repo_kind)
+ if repo is None:
+ repo = create_repository(namespace, name, creating_user, visibility, repo_kind)
+ return repo
+
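+# A small sketch combining the creation helpers, assuming `owner` is the namespace user
+# creating the repository:
+#
+#   repo = get_or_create_repository('devtable', 'newrepo', owner)
+#   set_repository_visibility(repo, 'public')
+#   assert repository_is_public('devtable', 'newrepo')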
+
+@ttl_cache(maxsize=1, ttl=600)
+def _get_gc_expiration_policies():
+ policy_tuples_query = (
+ Namespace.select(Namespace.removed_tag_expiration_s).distinct()
+      .limit(100)  # Limit to 100 distinct policies to bound memory usage
+ .tuples())
+ return [policy[0] for policy in policy_tuples_query]
+
+
+def get_random_gc_policy():
+ """ Return a single random policy from the database to use when garbage collecting.
+ """
+ return random.choice(_get_gc_expiration_policies())
+
+
+def find_repository_with_garbage(limit_to_gc_policy_s):
+ expiration_timestamp = get_epoch_timestamp() - limit_to_gc_policy_s
+
+ try:
+ candidates = (RepositoryTag.select(RepositoryTag.repository).join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(~(RepositoryTag.lifetime_end_ts >> None),
+ (RepositoryTag.lifetime_end_ts <= expiration_timestamp),
+ (Namespace.removed_tag_expiration_s == limit_to_gc_policy_s)).limit(500)
+ .distinct().alias('candidates'))
+
+ found = (RepositoryTag.select(candidates.c.repository_id).from_(candidates)
+ .order_by(db_random_func()).get())
+
+ if found is None:
+ return
+
+ return Repository.get(Repository.id == found.repository_id)
+ except RepositoryTag.DoesNotExist:
+ return None
+ except Repository.DoesNotExist:
+ return None
+
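+# A rough sketch of the polling step a garbage-collection worker could build on these helpers,
+# with `garbage_collect(repo)` standing in for the actual collection logic:
+#
+#   repo = find_repository_with_garbage(get_random_gc_policy())
+#   if repo is not None:
+#     garbage_collect(repo)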
+
+def star_repository(user, repository):
+ """ Stars a repository. """
+ star = Star.create(user=user.id, repository=repository.id)
+ star.save()
+
+
+def unstar_repository(user, repository):
+ """ Unstars a repository. """
+ try:
+ (Star.delete().where(Star.repository == repository.id, Star.user == user.id).execute())
+ except Star.DoesNotExist:
+ raise DataModelException('Star not found.')
+
+
+def set_trust(repo, trust_enabled):
+ repo.trust_enabled = trust_enabled
+ repo.save()
+
+
+def set_description(repo, description):
+ repo.description = description
+ repo.save()
+
+
+def get_user_starred_repositories(user, kind_filter='image'):
+ """ Retrieves all of the repositories a user has starred. """
+ try:
+ repo_kind = Repository.kind.get_id(kind_filter)
+ except RepositoryKind.DoesNotExist:
+ raise DataModelException('Unknown kind of repository')
+
+ query = (Repository.select(Repository, User, Visibility, Repository.id.alias('rid')).join(Star)
+ .switch(Repository).join(User).switch(Repository).join(Visibility)
+ .where(Star.user == user, Repository.kind == repo_kind))
+
+ return query
+
+
+def repository_is_starred(user, repository):
+ """ Determines whether a user has starred a repository or not. """
+ try:
+ (Star.select().where(Star.repository == repository.id, Star.user == user.id).get())
+ return True
+ except Star.DoesNotExist:
+ return False
+
+
+def get_stars(repository_ids):
+ """ Returns a map from repository ID to the number of stars for each repository in the
+ given repository IDs list.
+ """
+ if not repository_ids:
+ return {}
+
+ tuples = (Star.select(Star.repository, fn.Count(Star.id))
+ .where(Star.repository << repository_ids).group_by(Star.repository).tuples())
+
+ star_map = {}
+ for record in tuples:
+ star_map[record[0]] = record[1]
+
+ return star_map
+
+
+def get_visible_repositories(username, namespace=None, kind_filter='image', include_public=False,
+ start_id=None, limit=None):
+ """ Returns the repositories visible to the given user (if any).
+ """
+ if not include_public and not username:
+ # Short circuit by returning a query that will find no repositories. We need to return a query
+ # here, as it will be modified by other queries later on.
+ return Repository.select(Repository.id.alias('rid')).where(Repository.id == -1)
+
+ query = (Repository.select(Repository.name,
+ Repository.id.alias('rid'), Repository.description,
+ Namespace.username, Repository.visibility, Repository.kind)
+ .switch(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id)))
+
+ user_id = None
+ if username:
+ # Note: We only need the permissions table if we will filter based on a user's permissions.
+ query = query.switch(Repository).distinct().join(RepositoryPermission, JOIN.LEFT_OUTER)
+ found_namespace = _get_namespace_user(username)
+ if not found_namespace:
+ return Repository.select(Repository.id.alias('rid')).where(Repository.id == -1)
+
+ user_id = found_namespace.id
+
+ query = _basequery.filter_to_repos_for_user(query, user_id, namespace, kind_filter,
+ include_public, start_id=start_id)
+
+ if limit is not None:
+ query = query.limit(limit).order_by(SQL('rid'))
+
+ return query
+
+
+def get_app_repository(namespace_name, repository_name):
+ """ Find an application repository. """
+ try:
+ return _basequery.get_existing_repository(namespace_name, repository_name,
+ kind_filter='application')
+ except Repository.DoesNotExist:
+ return None
+
+
+def get_app_search(lookup, search_fields=None, username=None, limit=50):
+ if search_fields is None:
+ search_fields = set([SEARCH_FIELDS.name.name])
+
+ return get_filtered_matching_repositories(lookup, filter_username=username,
+ search_fields=search_fields, repo_kind='application',
+ offset=0, limit=limit)
+
+
+def _get_namespace_user(username):
+ try:
+ return User.get(username=username)
+ except User.DoesNotExist:
+ return None
+
+
+def get_filtered_matching_repositories(lookup_value, filter_username=None, repo_kind='image',
+ offset=0, limit=25, search_fields=None):
+ """ Returns an iterator of all repositories matching the given lookup value, with optional
+ filtering to a specific user. If the user is unspecified, only public repositories will
+ be returned.
+ """
+ if search_fields is None:
+ search_fields = set([SEARCH_FIELDS.description.name, SEARCH_FIELDS.name.name])
+
+ # Build the unfiltered search query.
+ unfiltered_query = _get_sorted_matching_repositories(lookup_value, repo_kind=repo_kind,
+ search_fields=search_fields,
+ include_private=filter_username is not None,
+ ids_only=filter_username is not None)
+
+ # Add a filter to the iterator, if necessary.
+ if filter_username is not None:
+ filter_user = _get_namespace_user(filter_username)
+ if filter_user is None:
+ return []
+
+ iterator = _filter_repositories_visible_to_user(unfiltered_query, filter_user.id, limit,
+ repo_kind)
+ if offset > 0:
+ take(offset, iterator)
+
+ # Return the results.
+ return list(take(limit, iterator))
+
+ return list(unfiltered_query.offset(offset).limit(limit))
+
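+# A brief search sketch, assuming `username` identifies the viewer whose visibility should
+# constrain the results and 'web' is an arbitrary lookup string:
+#
+#   matches = get_filtered_matching_repositories('web', filter_username=username,
+#                                                search_fields={SEARCH_FIELDS.name.name},
+#                                                limit=10)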
+
+def _filter_repositories_visible_to_user(unfiltered_query, filter_user_id, limit, repo_kind):
+ encountered = set()
+ chunk_count = limit * 2
+ unfiltered_page = 0
+ iteration_count = 0
+
+ while iteration_count < 10: # Just to be safe
+ # Find the next chunk's worth of repository IDs, paginated by the chunk size.
+ unfiltered_page = unfiltered_page + 1
+ found_ids = [r.id for r in unfiltered_query.paginate(unfiltered_page, chunk_count)]
+
+ # Make sure we haven't encountered these results before. This code is used to handle
+    # the case where we've previously seen a result, as pagination is not necessarily
+    # stable in SQL databases.
+ unfiltered_repository_ids = set(found_ids)
+ new_unfiltered_ids = unfiltered_repository_ids - encountered
+ if not new_unfiltered_ids:
+ break
+
+ encountered.update(new_unfiltered_ids)
+
+ # Filter the repositories found to only those visible to the current user.
+ query = (Repository
+ .select(Repository, Namespace)
+ .distinct()
+ .join(Namespace, on=(Namespace.id == Repository.namespace_user)).switch(Repository)
+ .join(RepositoryPermission).where(Repository.id << list(new_unfiltered_ids)))
+
+ filtered = _basequery.filter_to_repos_for_user(query, filter_user_id, repo_kind=repo_kind)
+
+ # Sort the filtered repositories by their initial order.
+ all_filtered_repos = list(filtered)
+ all_filtered_repos.sort(key=lambda repo: found_ids.index(repo.id))
+
+ # Yield the repositories in sorted order.
+ for filtered_repo in all_filtered_repos:
+ yield filtered_repo
+
+ # If the number of found IDs is less than the chunk count, then we're done.
+ if len(found_ids) < chunk_count:
+ break
+
+ iteration_count = iteration_count + 1
+
+
+def _get_sorted_matching_repositories(lookup_value, repo_kind='image', include_private=False,
+ search_fields=None, ids_only=False):
+ """ Returns a query of repositories matching the given lookup string, with optional inclusion of
+ private repositories. Note that this method does *not* filter results based on visibility
+ to users.
+ """
+ select_fields = [Repository.id] if ids_only else [Repository, Namespace]
+
+ if not lookup_value:
+ # This is a generic listing of repositories. Simply return the sorted repositories based
+ # on RepositorySearchScore.
+ query = (Repository
+ .select(*select_fields)
+ .join(RepositorySearchScore)
+ .order_by(RepositorySearchScore.score.desc()))
+ else:
+ if search_fields is None:
+ search_fields = set([SEARCH_FIELDS.description.name, SEARCH_FIELDS.name.name])
+
+ # Always search at least on name (init clause)
+ clause = Repository.name.match(lookup_value)
+ computed_score = RepositorySearchScore.score.alias('score')
+
+ # If the description field is in the search fields, then we need to compute a synthetic score
+ # to discount the weight of the description more than the name.
+ if SEARCH_FIELDS.description.name in search_fields:
+ clause = Repository.description.match(lookup_value) | clause
+ cases = [(Repository.name.match(lookup_value), 100 * RepositorySearchScore.score),]
+ computed_score = Case(None, cases, RepositorySearchScore.score).alias('score')
+
+ select_fields.append(computed_score)
+ query = (Repository.select(*select_fields)
+ .join(RepositorySearchScore)
+ .where(clause)
+ .order_by(SQL('score').desc()))
+
+ if repo_kind is not None:
+ query = query.where(Repository.kind == Repository.kind.get_id(repo_kind))
+
+ if not include_private:
+ query = query.where(Repository.visibility == _basequery.get_public_repo_visibility())
+
+ if not ids_only:
+ query = (query
+ .switch(Repository)
+ .join(Namespace, on=(Namespace.id == Repository.namespace_user)))
+
+ return query
+
+
+def lookup_repository(repo_id):
+ try:
+ return Repository.get(Repository.id == repo_id)
+ except Repository.DoesNotExist:
+ return None
+
+
+def is_repository_public(repository):
+ return repository.visibility_id == _basequery.get_public_repo_visibility().id
+
+
+def repository_is_public(namespace_name, repository_name):
+ try:
+ (Repository.select().join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Repository).join(Visibility).where(Namespace.username == namespace_name,
+ Repository.name == repository_name,
+ Visibility.name == 'public').get())
+ return True
+ except Repository.DoesNotExist:
+ return False
+
+
+def set_repository_visibility(repo, visibility):
+ visibility_obj = Visibility.get(name=visibility)
+ if not visibility_obj:
+ return
+
+ repo.visibility = visibility_obj
+ repo.save()
+
+
+def get_email_authorized_for_repo(namespace, repository, email):
+ try:
+ return (RepositoryAuthorizedEmail.select(RepositoryAuthorizedEmail, Repository, Namespace)
+ .join(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace, Repository.name == repository,
+ RepositoryAuthorizedEmail.email == email).get())
+ except RepositoryAuthorizedEmail.DoesNotExist:
+ return None
+
+
+def create_email_authorization_for_repo(namespace_name, repository_name, email):
+ try:
+ repo = _basequery.get_existing_repository(namespace_name, repository_name)
+ except Repository.DoesNotExist:
+ raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
+
+ return RepositoryAuthorizedEmail.create(repository=repo, email=email, confirmed=False)
+
+
+def confirm_email_authorization_for_repo(code):
+ try:
+ found = (RepositoryAuthorizedEmail.select(RepositoryAuthorizedEmail, Repository, Namespace)
+ .join(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(RepositoryAuthorizedEmail.code == code).get())
+ except RepositoryAuthorizedEmail.DoesNotExist:
+ raise DataModelException('Invalid confirmation code.')
+
+ found.confirmed = True
+ found.save()
+
+ return found
+
+
+def is_empty(namespace_name, repository_name):
+  """ Returns whether the repository referenced by the given namespace and name is empty. If the repo
+ doesn't exist, returns True.
+ """
+ try:
+ tag.list_repository_tags(namespace_name, repository_name).limit(1).get()
+ return False
+ except RepositoryTag.DoesNotExist:
+ return True
+
+
+def get_repository_state(namespace_name, repository_name):
+ """ Return the Repository State if the Repository exists. Otherwise, returns None. """
+ repo = get_repository(namespace_name, repository_name)
+ if repo:
+ return repo.state
+
+ return None
+
+
+def set_repository_state(repo, state):
+ repo.state = state
+ repo.save()
diff --git a/data/model/repositoryactioncount.py b/data/model/repositoryactioncount.py
new file mode 100644
index 000000000..759edc093
--- /dev/null
+++ b/data/model/repositoryactioncount.py
@@ -0,0 +1,129 @@
+import logging
+
+from collections import namedtuple
+from peewee import IntegrityError
+
+from datetime import date, timedelta, datetime
+from data.database import (Repository, LogEntry, LogEntry2, LogEntry3, RepositoryActionCount,
+ RepositorySearchScore, db_random_func, fn)
+
+logger = logging.getLogger(__name__)
+
+search_bucket = namedtuple('SearchBucket', ['delta', 'days', 'weight'])
+
+# Defines the various buckets for search scoring. Each bucket is computed using the given time
+# delta from today *minus the previous bucket's time period*. Once all the actions over the
+# bucket's time period have been collected, they are multiplied by the given modifier. The modifier
+# for each bucket was determined via the integral of (2/((x/183)+1)^2)/183 over the period of days
+# in the bucket; this integral over 0..183 sums to 1, so the weighted result is well normalized.
+SEARCH_BUCKETS = [
+ search_bucket(timedelta(days=1), 1, 0.010870),
+ search_bucket(timedelta(days=7), 6, 0.062815),
+ search_bucket(timedelta(days=31), 24, 0.21604),
+ search_bucket(timedelta(days=183), 152, 0.71028),
+]
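+
+# For example, a repository averaging 10 actions per day in every bucket accumulates a final
+# score of roughly 10 * (0.010870 + 0.062815 + 0.21604 + 0.71028) ~= 10 (before the x100
+# normalization applied below), i.e. a steady daily rate maps back onto itself because the
+# weights sum to approximately 1.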
+
+def find_uncounted_repository():
+ """ Returns a repository that has not yet had an entry added into the RepositoryActionCount
+ table for yesterday.
+ """
+ try:
+ # Get a random repository to count.
+ today = date.today()
+ yesterday = today - timedelta(days=1)
+ has_yesterday_actions = (RepositoryActionCount
+ .select(RepositoryActionCount.repository)
+ .where(RepositoryActionCount.date == yesterday))
+
+ to_count = (Repository
+ .select()
+ .where(~(Repository.id << (has_yesterday_actions)))
+ .order_by(db_random_func()).get())
+ return to_count
+ except Repository.DoesNotExist:
+ return None
+
+
+def count_repository_actions(to_count, day):
+ """ Aggregates repository actions from the LogEntry table for the specified day. Returns the
+ count or None on error.
+ """
+ # TODO: Clean this up a bit.
+ def lookup_action_count(model):
+ return (model
+ .select()
+ .where(model.repository == to_count,
+ model.datetime >= day,
+ model.datetime < (day + timedelta(days=1)))
+ .count())
+
+ actions = (lookup_action_count(LogEntry3) + lookup_action_count(LogEntry2) +
+ lookup_action_count(LogEntry))
+
+ return actions
+
+
+def store_repository_action_count(repository, day, action_count):
+ """ Stores the action count for a repository for a specific day. Returns False if the
+ repository already has an entry for the specified day.
+ """
+ try:
+ RepositoryActionCount.create(repository=repository, date=day, count=action_count)
+ return True
+ except IntegrityError:
+ logger.debug('Count already written for repository %s', repository.id)
+ return False
+
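+# A sketch of the nightly counting pass these helpers are designed for, with `yesterday`
+# computed by the caller:
+#
+#   repo = find_uncounted_repository()
+#   if repo is not None:
+#     count = count_repository_actions(repo, yesterday)
+#     if store_repository_action_count(repo, yesterday, count):
+#       update_repository_score(repo)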
+
+def update_repository_score(repo):
+ """ Updates the repository score entry for the given table by retrieving information from
+ the RepositoryActionCount table. Note that count_repository_actions for the repo should
+ be called first. Returns True if the row was updated and False otherwise.
+ """
+ today = date.today()
+
+ # Retrieve the counts for each bucket and calculate the final score.
+ final_score = 0.0
+ last_end_timedelta = timedelta(days=0)
+
+ for bucket in SEARCH_BUCKETS:
+ start_date = today - bucket.delta
+ end_date = today - last_end_timedelta
+ last_end_timedelta = bucket.delta
+
+ query = (RepositoryActionCount
+ .select(fn.Sum(RepositoryActionCount.count), fn.Count(RepositoryActionCount.id))
+ .where(RepositoryActionCount.date >= start_date,
+ RepositoryActionCount.date < end_date,
+ RepositoryActionCount.repository == repo))
+
+ bucket_tuple = query.tuples()[0]
+ logger.debug('Got bucket tuple %s for bucket %s for repository %s', bucket_tuple, bucket,
+ repo.id)
+
+ if bucket_tuple[0] is None:
+ continue
+
+ bucket_sum = float(bucket_tuple[0])
+ bucket_count = int(bucket_tuple[1])
+ if not bucket_count:
+ continue
+
+ bucket_score = bucket_sum / (bucket_count * 1.0)
+ final_score += bucket_score * bucket.weight
+
+ # Update the existing repo search score row or create a new one.
+ normalized_score = int(final_score * 100.0)
+ try:
+ try:
+ search_score_row = RepositorySearchScore.get(repository=repo)
+ search_score_row.last_updated = datetime.now()
+ search_score_row.score = normalized_score
+ search_score_row.save()
+ return True
+ except RepositorySearchScore.DoesNotExist:
+ RepositorySearchScore.create(repository=repo, score=normalized_score, last_updated=today)
+ return True
+ except IntegrityError:
+ logger.debug('RepositorySearchScore row already existed; skipping')
+ return False
diff --git a/data/model/service_keys.py b/data/model/service_keys.py
new file mode 100644
index 000000000..eb460299b
--- /dev/null
+++ b/data/model/service_keys.py
@@ -0,0 +1,205 @@
+import re
+
+from calendar import timegm
+from datetime import datetime, timedelta
+from peewee import JOIN
+
+from Crypto.PublicKey import RSA
+from jwkest.jwk import RSAKey
+
+from data.database import db_for_update, User, ServiceKey, ServiceKeyApproval
+from data.model import (ServiceKeyDoesNotExist, ServiceKeyAlreadyApproved, ServiceNameInvalid,
+ db_transaction, config)
+from data.model.notification import create_notification, delete_all_notifications_by_path_prefix
+from util.security.fingerprint import canonical_kid
+
+
+_SERVICE_NAME_REGEX = re.compile(r'^[a-z0-9_]+$')
+
+def _expired_keys_clause(service):
+ return ((ServiceKey.service == service) &
+ (ServiceKey.expiration_date <= datetime.utcnow()))
+
+
+def _stale_expired_keys_service_clause(service):
+ return ((ServiceKey.service == service) & _stale_expired_keys_clause())
+
+
+def _stale_expired_keys_clause():
+ expired_ttl = timedelta(seconds=config.app_config['EXPIRED_SERVICE_KEY_TTL_SEC'])
+ return (ServiceKey.expiration_date <= (datetime.utcnow() - expired_ttl))
+
+
+def _stale_unapproved_keys_clause(service):
+ unapproved_ttl = timedelta(seconds=config.app_config['UNAPPROVED_SERVICE_KEY_TTL_SEC'])
+ return ((ServiceKey.service == service) &
+ (ServiceKey.approval >> None) &
+ (ServiceKey.created_date <= (datetime.utcnow() - unapproved_ttl)))
+
+
+def _gc_expired(service):
+ ServiceKey.delete().where(_stale_expired_keys_service_clause(service) |
+ _stale_unapproved_keys_clause(service)).execute()
+
+
+def _verify_service_name(service_name):
+ if not _SERVICE_NAME_REGEX.match(service_name):
+ raise ServiceNameInvalid
+
+
+def _notify_superusers(key):
+ notification_metadata = {
+ 'name': key.name,
+ 'kid': key.kid,
+ 'service': key.service,
+ 'jwk': key.jwk,
+ 'metadata': key.metadata,
+ 'created_date': timegm(key.created_date.utctimetuple()),
+ }
+
+ if key.expiration_date is not None:
+ notification_metadata['expiration_date'] = timegm(key.expiration_date.utctimetuple())
+
+ if len(config.app_config['SUPER_USERS']) > 0:
+ superusers = User.select().where(User.username << config.app_config['SUPER_USERS'])
+ for superuser in superusers:
+ create_notification('service_key_submitted', superuser, metadata=notification_metadata,
+ lookup_path='/service_key_approval/{0}/{1}'.format(key.kid, superuser.id))
+
+
+def create_service_key(name, kid, service, jwk, metadata, expiration_date, rotation_duration=None):
+ _verify_service_name(service)
+ _gc_expired(service)
+
+ key = ServiceKey.create(name=name, kid=kid, service=service, jwk=jwk, metadata=metadata,
+ expiration_date=expiration_date, rotation_duration=rotation_duration)
+
+ _notify_superusers(key)
+ return key
+
+
+def generate_service_key(service, expiration_date, kid=None, name='', metadata=None,
+ rotation_duration=None):
+ private_key = RSA.generate(2048)
+ jwk = RSAKey(key=private_key.publickey()).serialize()
+ if kid is None:
+ kid = canonical_kid(jwk)
+
+ key = create_service_key(name, kid, service, jwk, metadata or {}, expiration_date,
+ rotation_duration=rotation_duration)
+ return (private_key, key)
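+
+# Illustrative usage (sketch, not part of the original change): generate an unapproved key for a
+# hypothetical 'quay' service, then approve it via approve_service_key. The approval_type string
+# below is a placeholder; real callers pass the approval types used elsewhere in the codebase.
+#
+# private_key, key = generate_service_key('quay', expiration_date=None, name='example')
+# approve_service_key(key.kid, approval_type='Superuser API')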
+
+
+def replace_service_key(old_kid, kid, jwk, metadata, expiration_date):
+ try:
+ with db_transaction():
+ key = db_for_update(ServiceKey.select().where(ServiceKey.kid == old_kid)).get()
+ key.metadata.update(metadata)
+
+ ServiceKey.create(name=key.name, kid=kid, service=key.service, jwk=jwk,
+ metadata=key.metadata, expiration_date=expiration_date,
+ rotation_duration=key.rotation_duration, approval=key.approval)
+ key.delete_instance()
+ except ServiceKey.DoesNotExist:
+ raise ServiceKeyDoesNotExist
+
+ _notify_superusers(key)
+ delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(old_kid))
+ _gc_expired(key.service)
+
+
+def update_service_key(kid, name=None, metadata=None):
+ try:
+ with db_transaction():
+ key = db_for_update(ServiceKey.select().where(ServiceKey.kid == kid)).get()
+ if name is not None:
+ key.name = name
+
+ if metadata is not None:
+ key.metadata.update(metadata)
+
+ key.save()
+ except ServiceKey.DoesNotExist:
+ raise ServiceKeyDoesNotExist
+
+
+def delete_service_key(kid):
+ try:
+ key = ServiceKey.get(kid=kid)
+ ServiceKey.delete().where(ServiceKey.kid == kid).execute()
+ except ServiceKey.DoesNotExist:
+ raise ServiceKeyDoesNotExist
+
+ delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(kid))
+ _gc_expired(key.service)
+ return key
+
+
+def set_key_expiration(kid, expiration_date):
+ try:
+ service_key = get_service_key(kid, alive_only=False, approved_only=False)
+ except ServiceKey.DoesNotExist:
+ raise ServiceKeyDoesNotExist
+
+ service_key.expiration_date = expiration_date
+ service_key.save()
+
+
+def approve_service_key(kid, approval_type, approver=None, notes=''):
+ try:
+ with db_transaction():
+ key = db_for_update(ServiceKey.select().where(ServiceKey.kid == kid)).get()
+ if key.approval is not None:
+ raise ServiceKeyAlreadyApproved
+
+ approval = ServiceKeyApproval.create(approver=approver, approval_type=approval_type,
+ notes=notes)
+ key.approval = approval
+ key.save()
+ except ServiceKey.DoesNotExist:
+ raise ServiceKeyDoesNotExist
+
+ delete_all_notifications_by_path_prefix('/service_key_approval/{0}'.format(kid))
+ return key
+
+
+def _list_service_keys_query(kid=None, service=None, approved_only=True, alive_only=True,
+ approval_type=None):
+ query = ServiceKey.select().join(ServiceKeyApproval, JOIN.LEFT_OUTER)
+
+ if approved_only:
+ query = query.where(~(ServiceKey.approval >> None))
+
+ if alive_only:
+ query = query.where((ServiceKey.expiration_date > datetime.utcnow()) |
+ (ServiceKey.expiration_date >> None))
+
+ if approval_type is not None:
+ query = query.where(ServiceKeyApproval.approval_type == approval_type)
+
+ if service is not None:
+ query = query.where(ServiceKey.service == service)
+ query = query.where(~(_expired_keys_clause(service)) |
+ ~(_stale_unapproved_keys_clause(service)))
+
+ if kid is not None:
+ query = query.where(ServiceKey.kid == kid)
+
+ query = query.where(~(_stale_expired_keys_clause()) | (ServiceKey.expiration_date >> None))
+ return query
+
+
+def list_all_keys():
+ return list(_list_service_keys_query(approved_only=False, alive_only=False))
+
+
+def list_service_keys(service):
+ return list(_list_service_keys_query(service=service))
+
+
+def get_service_key(kid, service=None, alive_only=True, approved_only=True):
+ try:
+ return _list_service_keys_query(kid=kid, service=service, approved_only=approved_only,
+ alive_only=alive_only).get()
+ except ServiceKey.DoesNotExist:
+ raise ServiceKeyDoesNotExist
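+
+# Illustrative lookups (sketch, not part of the original change): list_service_keys returns only
+# approved, unexpired keys for a service, while get_service_key can opt back in to unapproved or
+# expired rows. some_kid is a placeholder value.
+#
+# keys = list_service_keys('quay')
+# key = get_service_key(some_kid, approved_only=False, alive_only=False)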
diff --git a/data/model/sqlalchemybridge.py b/data/model/sqlalchemybridge.py
new file mode 100644
index 000000000..e469eff00
--- /dev/null
+++ b/data/model/sqlalchemybridge.py
@@ -0,0 +1,94 @@
+from sqlalchemy import (Table, MetaData, Column, ForeignKey, Integer, String, Boolean, Text,
+ DateTime, Date, BigInteger, Index, text)
+from peewee import (PrimaryKeyField, CharField, BooleanField, DateTimeField, TextField,
+ ForeignKeyField, BigIntegerField, IntegerField, DateField)
+
+
+OPTIONS_TO_COPY = [
+ 'null',
+ 'default',
+ 'primary_key',
+]
+
+
+OPTION_TRANSLATIONS = {
+ 'null': 'nullable',
+}
+
+def gen_sqlalchemy_metadata(peewee_model_list):
+ metadata = MetaData(naming_convention={
+ "ix": 'ix_%(column_0_label)s',
+ "uq": "uq_%(table_name)s_%(column_0_name)s",
+ "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
+ "pk": "pk_%(table_name)s"
+ })
+
+ for model in peewee_model_list:
+ meta = model._meta
+
+ all_indexes = set(meta.indexes)
+ fulltext_indexes = []
+
+ columns = []
+ for field in meta.sorted_fields:
+ alchemy_type = None
+ col_args = []
+ col_kwargs = {}
+ if isinstance(field, PrimaryKeyField):
+ alchemy_type = Integer
+ elif isinstance(field, CharField):
+ alchemy_type = String(field.max_length)
+ elif isinstance(field, BooleanField):
+ alchemy_type = Boolean
+ elif isinstance(field, DateTimeField):
+ alchemy_type = DateTime
+ elif isinstance(field, DateField):
+ alchemy_type = Date
+ elif isinstance(field, TextField):
+ alchemy_type = Text
+ elif isinstance(field, ForeignKeyField):
+ alchemy_type = Integer
+ all_indexes.add(((field.name, ), field.unique))
+ if not field.deferred:
+ target_name = '%s.%s' % (field.rel_model._meta.table_name, field.rel_field.column_name)
+ col_args.append(ForeignKey(target_name))
+ elif isinstance(field, BigIntegerField):
+ alchemy_type = BigInteger
+ elif isinstance(field, IntegerField):
+ alchemy_type = Integer
+ else:
+ raise RuntimeError('Unknown column type: %s' % field)
+
+ if hasattr(field, '__fulltext__'):
+ # Add the fulltext index for the field, based on whether we are under MySQL or Postgres.
+ fulltext_indexes.append(field.name)
+
+ for option_name in OPTIONS_TO_COPY:
+ alchemy_option_name = (OPTION_TRANSLATIONS[option_name]
+ if option_name in OPTION_TRANSLATIONS else option_name)
+ if alchemy_option_name not in col_kwargs:
+ option_val = getattr(field, option_name)
+ col_kwargs[alchemy_option_name] = option_val
+
+ if field.unique or field.index:
+ all_indexes.add(((field.name, ), field.unique))
+
+ new_col = Column(field.column_name, alchemy_type, *col_args, **col_kwargs)
+ columns.append(new_col)
+
+ new_table = Table(meta.table_name, metadata, *columns)
+
+ for col_prop_names, unique in all_indexes:
+ col_names = [meta.fields[prop_name].column_name for prop_name in col_prop_names]
+ index_name = '%s_%s' % (meta.table_name, '_'.join(col_names))
+ col_refs = [getattr(new_table.c, col_name) for col_name in col_names]
+ Index(index_name, *col_refs, unique=unique)
+
+ for col_field_name in fulltext_indexes:
+ index_name = '%s_%s__fulltext' % (meta.table_name, col_field_name)
+ col_ref = getattr(new_table.c, col_field_name)
+ Index(index_name, col_ref, postgresql_ops={col_field_name: 'gin_trgm_ops'},
+ postgresql_using='gin',
+ mysql_prefix='FULLTEXT')
+
+ return metadata
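+
+# Illustrative usage (assumption, not part of this change): build SQLAlchemy metadata from the
+# peewee models so that schema tooling (e.g. Alembic migrations) can reflect the same tables.
+# all_models is assumed here to be the list of peewee model classes exported by data.database.
+#
+# from data.database import all_models
+# metadata = gen_sqlalchemy_metadata(all_models)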
diff --git a/data/model/storage.py b/data/model/storage.py
new file mode 100644
index 000000000..adfa54cd9
--- /dev/null
+++ b/data/model/storage.py
@@ -0,0 +1,373 @@
+import logging
+
+from peewee import SQL, IntegrityError
+from cachetools.func import lru_cache
+from collections import namedtuple
+
+from data.model import (config, db_transaction, InvalidImageException, TorrentInfoDoesNotExist,
+ DataModelException, _basequery)
+from data.database import (ImageStorage, Image, ImageStoragePlacement, ImageStorageLocation,
+ ImageStorageTransformation, ImageStorageSignature,
+ ImageStorageSignatureKind, Repository, Namespace, TorrentInfo, ApprBlob,
+ ensure_under_transaction, ManifestBlob)
+
+
+logger = logging.getLogger(__name__)
+
+_Location = namedtuple('location', ['id', 'name'])
+
+@lru_cache(maxsize=1)
+def get_image_locations():
+ location_map = {}
+ for location in ImageStorageLocation.select():
+ location_tuple = _Location(location.id, location.name)
+ location_map[location.id] = location_tuple
+ location_map[location.name] = location_tuple
+
+ return location_map
+
+
+def get_image_location_for_name(location_name):
+ locations = get_image_locations()
+ return locations[location_name]
+
+
+def get_image_location_for_id(location_id):
+ locations = get_image_locations()
+ return locations[location_id]
+
+
+def add_storage_placement(storage, location_name):
+ """ Adds a storage placement for the given storage at the given location. """
+ location = get_image_location_for_name(location_name)
+ try:
+ ImageStoragePlacement.create(location=location.id, storage=storage)
+ except IntegrityError:
+ # Placement already exists. Nothing to do.
+ pass
+
+
+def _orphaned_storage_query(candidate_ids):
+ """ Returns the subset of the candidate ImageStorage IDs representing storages that are no
+ longer referenced by images.
+ """
+ # Issue a union query to find all candidate storages that are still referenced by an Image or
+ # ManifestBlob. This is much faster than the group_by and having call we used to use here.
+ nonorphaned_queries = []
+ for counter, candidate_id in enumerate(candidate_ids):
+ query_alias = 'q{0}'.format(counter)
+
+ # TODO: remove the join with Image once fully on the OCI data model.
+ storage_subq = (ImageStorage
+ .select(ImageStorage.id)
+ .join(Image)
+ .where(ImageStorage.id == candidate_id)
+ .limit(1)
+ .alias(query_alias))
+
+ nonorphaned_queries.append(ImageStorage
+ .select(SQL('*'))
+ .from_(storage_subq))
+
+ manifest_storage_subq = (ImageStorage
+ .select(ImageStorage.id)
+ .join(ManifestBlob)
+ .where(ImageStorage.id == candidate_id)
+ .limit(1)
+ .alias(query_alias))
+
+ nonorphaned_queries.append(ImageStorage
+ .select(SQL('*'))
+ .from_(manifest_storage_subq))
+
+ # Build the set of storages that are missing. These storages are orphaned.
+ nonorphaned_storage_ids = {storage.id for storage
+ in _basequery.reduce_as_tree(nonorphaned_queries)}
+ return list(candidate_ids - nonorphaned_storage_ids)
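+
+# Descriptive note (added for clarity, not part of the original change): each candidate
+# contributes two LIMIT 1 subqueries (one joined against Image, one against ManifestBlob), and
+# _basequery.reduce_as_tree combines them into a single UNION, so one round trip answers
+# "which candidates are still referenced?".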
+
+
+def garbage_collect_storage(storage_id_whitelist):
+ """ Performs GC on a possible subset of the storage's with the IDs found in the
+ whitelist. The storages in the whitelist will be checked, and any orphaned will
+ be removed, with those IDs being returned.
+ """
+ if len(storage_id_whitelist) == 0:
+ return []
+
+ def placements_to_filtered_paths_set(placements_list):
+ """ Returns the list of paths to remove from storage, filtered from the given placements
+ query by removing any CAS paths that are still referenced by storage(s) in the database.
+ """
+ with ensure_under_transaction():
+ if not placements_list:
+ return set()
+
+ # Find the content checksums not referenced by other storages. Any that are, we cannot
+ # remove.
+ content_checksums = set([placement.storage.content_checksum for placement in placements_list
+ if placement.storage.cas_path])
+
+ unreferenced_checksums = set()
+ if content_checksums:
+ # Check the current image storage.
+ query = (ImageStorage
+ .select(ImageStorage.content_checksum)
+ .where(ImageStorage.content_checksum << list(content_checksums)))
+ is_referenced_checksums = set([image_storage.content_checksum for image_storage in query])
+ if is_referenced_checksums:
+ logger.warning('GC attempted to remove CAS checksums %s, which are still ImageStorage referenced',
+ is_referenced_checksums)
+
+ # Check the ApprBlob table as well.
+ query = ApprBlob.select(ApprBlob.digest).where(ApprBlob.digest << list(content_checksums))
+ appr_blob_referenced_checksums = set([blob.digest for blob in query])
+ if appr_blob_referenced_checksums:
+ logger.warning('GC attempted to remove CAS checksums %s, which are ApprBlob referenced',
+ appr_blob_referenced_checksums)
+
+ unreferenced_checksums = (content_checksums - appr_blob_referenced_checksums -
+ is_referenced_checksums)
+
+ # Return all placements for all image storages found not at a CAS path or with a content
+ # checksum that is referenced.
+ return {(get_image_location_for_id(placement.location_id).name,
+ get_layer_path(placement.storage))
+ for placement in placements_list
+ if not placement.storage.cas_path or
+ placement.storage.content_checksum in unreferenced_checksums}
+
+ # Note: Both of these deletes must occur in the same transaction (unfortunately) because a
+ # storage without any placement is invalid, and a placement cannot exist without a storage.
+ # TODO: We might want to allow for null storages on placements, which would allow us to
+ # delete the storages, then delete the placements in a non-transaction.
+ logger.debug('Garbage collecting storages from candidates: %s', storage_id_whitelist)
+ with db_transaction():
+ orphaned_storage_ids = _orphaned_storage_query(storage_id_whitelist)
+ if len(orphaned_storage_ids) == 0:
+ # Nothing to GC.
+ return []
+
+ placements_to_remove = list(ImageStoragePlacement
+ .select(ImageStoragePlacement, ImageStorage)
+ .join(ImageStorage)
+ .where(ImageStorage.id << orphaned_storage_ids))
+
+ # Remove the placements for orphaned storages
+ if len(placements_to_remove) > 0:
+ placement_ids_to_remove = [placement.id for placement in placements_to_remove]
+ placements_removed = (ImageStoragePlacement
+ .delete()
+ .where(ImageStoragePlacement.id << placement_ids_to_remove)
+ .execute())
+ logger.debug('Removed %s image storage placements', placements_removed)
+
+ # Remove all orphaned storages
+ torrents_removed = (TorrentInfo
+ .delete()
+ .where(TorrentInfo.storage << orphaned_storage_ids)
+ .execute())
+ logger.debug('Removed %s torrent info records', torrents_removed)
+
+ signatures_removed = (ImageStorageSignature
+ .delete()
+ .where(ImageStorageSignature.storage << orphaned_storage_ids)
+ .execute())
+ logger.debug('Removed %s image storage signatures', signatures_removed)
+
+ storages_removed = (ImageStorage
+ .delete()
+ .where(ImageStorage.id << orphaned_storage_ids)
+ .execute())
+ logger.debug('Removed %s image storage records', storages_removed)
+
+ # Determine the paths to remove. We cannot simply remove all paths matching storages, as CAS
+ # can share the same path. We further filter these paths by checking for any storages still in
+ # the database with the same content checksum.
+ paths_to_remove = placements_to_filtered_paths_set(placements_to_remove)
+
+ # We are going to make the conscious decision to not delete image storage blobs inside
+ # transactions.
+ # This may end up producing garbage in s3, trading off for higher availability in the database.
+ for location_name, image_path in paths_to_remove:
+ logger.debug('Removing %s from %s', image_path, location_name)
+ config.store.remove({location_name}, image_path)
+
+ return orphaned_storage_ids
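+
+# Illustrative call (sketch, placeholder IDs): a GC worker that has collected candidate storage
+# IDs from deleted images can hand them to this function; only the IDs confirmed orphaned are
+# removed, and those IDs are returned.
+#
+# removed_ids = garbage_collect_storage({1001, 1002, 1003})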
+
+
+def create_v1_storage(location_name):
+ storage = ImageStorage.create(cas_path=False, uploading=True)
+ location = get_image_location_for_name(location_name)
+ ImageStoragePlacement.create(location=location.id, storage=storage)
+ storage.locations = {location_name}
+ return storage
+
+
+def find_or_create_storage_signature(storage, signature_kind_name):
+ found = lookup_storage_signature(storage, signature_kind_name)
+ if found is None:
+ kind = ImageStorageSignatureKind.get(name=signature_kind_name)
+ found = ImageStorageSignature.create(storage=storage, kind=kind)
+
+ return found
+
+
+def lookup_storage_signature(storage, signature_kind_name):
+ kind = ImageStorageSignatureKind.get(name=signature_kind_name)
+ try:
+ return (ImageStorageSignature
+ .select()
+ .where(ImageStorageSignature.storage == storage, ImageStorageSignature.kind == kind)
+ .get())
+ except ImageStorageSignature.DoesNotExist:
+ return None
+
+
+def _get_storage(query_modifier):
+ query = (ImageStoragePlacement
+ .select(ImageStoragePlacement, ImageStorage)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage))
+
+ placements = list(query_modifier(query))
+
+ if not placements:
+ raise InvalidImageException()
+
+ found = placements[0].storage
+ found.locations = {get_image_location_for_id(placement.location_id).name
+ for placement in placements}
+ return found
+
+
+def get_storage_by_uuid(storage_uuid):
+ def filter_to_uuid(query):
+ return query.where(ImageStorage.uuid == storage_uuid)
+
+ try:
+ return _get_storage(filter_to_uuid)
+ except InvalidImageException:
+ raise InvalidImageException('No storage found with uuid: %s' % storage_uuid)
+
+
+def get_layer_path(storage_record):
+ """ Returns the path in the storage engine to the layer data referenced by the storage row. """
+ assert storage_record.cas_path is not None
+ return get_layer_path_for_storage(storage_record.uuid, storage_record.cas_path,
+ storage_record.content_checksum)
+
+
+def get_layer_path_for_storage(storage_uuid, cas_path, content_checksum):
+ """ Returns the path in the storage engine to the layer data referenced by the storage
+ information. """
+ store = config.store
+ if not cas_path:
+ logger.debug('Serving layer from legacy v1 path for storage %s', storage_uuid)
+ return store.v1_image_layer_path(storage_uuid)
+
+ return store.blob_path(content_checksum)
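+
+# Illustrative usage (sketch, not part of the original change; some_uuid is a placeholder):
+# resolve the backing blob path and placements for a storage row.
+#
+# storage = get_storage_by_uuid(some_uuid)
+# path = get_layer_path(storage)            # CAS path when cas_path is set, v1 path otherwise
+# locations = get_storage_locations(storage.uuid)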
+
+
+def lookup_repo_storages_by_content_checksum(repo, checksums, by_manifest=False):
+ """ Looks up repository storages (without placements) matching the given repository
+ and checksum. """
+ if not checksums:
+ return []
+
+ # There may be many duplicates of the checksums, so for performance reasons we are going
+ # to use a union to select just one storage with each checksum
+ queries = []
+
+ for counter, checksum in enumerate(set(checksums)):
+ query_alias = 'q{0}'.format(counter)
+
+ # TODO: Remove once we have a new-style model for tracking temp uploaded blobs and
+ # all legacy tables have been removed.
+ if by_manifest:
+ candidate_subq = (ImageStorage
+ .select(ImageStorage.id, ImageStorage.content_checksum,
+ ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
+ ImageStorage.uncompressed_size, ImageStorage.uploading)
+ .join(ManifestBlob)
+ .where(ManifestBlob.repository == repo,
+ ImageStorage.content_checksum == checksum)
+ .limit(1)
+ .alias(query_alias))
+ else:
+ candidate_subq = (ImageStorage
+ .select(ImageStorage.id, ImageStorage.content_checksum,
+ ImageStorage.image_size, ImageStorage.uuid, ImageStorage.cas_path,
+ ImageStorage.uncompressed_size, ImageStorage.uploading)
+ .join(Image)
+ .where(Image.repository == repo, ImageStorage.content_checksum == checksum)
+ .limit(1)
+ .alias(query_alias))
+
+ queries.append(ImageStorage
+ .select(SQL('*'))
+ .from_(candidate_subq))
+
+ return _basequery.reduce_as_tree(queries)
+
+
+def set_image_storage_metadata(docker_image_id, namespace_name, repository_name, image_size,
+ uncompressed_size):
+ """ Sets metadata that is specific to the binary storage of the data, irrespective of how it
+ is used in the layer tree.
+ """
+ if image_size is None:
+ raise DataModelException('Empty image size field')
+
+ try:
+ image = (Image
+ .select(Image, ImageStorage)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Image)
+ .join(ImageStorage)
+ .where(Repository.name == repository_name, Namespace.username == namespace_name,
+ Image.docker_image_id == docker_image_id)
+ .get())
+ except ImageStorage.DoesNotExist:
+ raise InvalidImageException('No image with specified id and repository')
+
+ # We MUST do this here, it can't be done in the corresponding image call because the storage
+ # has not yet been pushed
+ image.aggregate_size = _basequery.calculate_image_aggregate_size(image.ancestors, image_size,
+ image.parent)
+ image.save()
+
+ image.storage.image_size = image_size
+ image.storage.uncompressed_size = uncompressed_size
+ image.storage.save()
+ return image.storage
+
+
+def get_storage_locations(uuid):
+ query = (ImageStoragePlacement
+ .select()
+ .join(ImageStorage)
+ .where(ImageStorage.uuid == uuid))
+
+ return [get_image_location_for_id(placement.location_id).name for placement in query]
+
+
+def save_torrent_info(storage_object, piece_length, pieces):
+ try:
+ return TorrentInfo.get(storage=storage_object, piece_length=piece_length)
+ except TorrentInfo.DoesNotExist:
+ try:
+ return TorrentInfo.create(storage=storage_object, piece_length=piece_length, pieces=pieces)
+ except IntegrityError:
+ # TorrentInfo already exists for this storage.
+ return TorrentInfo.get(storage=storage_object, piece_length=piece_length)
+
+
+def get_torrent_info(blob):
+ try:
+ return (TorrentInfo
+ .select()
+ .where(TorrentInfo.storage == blob)
+ .get())
+ except TorrentInfo.DoesNotExist:
+ raise TorrentInfoDoesNotExist
diff --git a/data/model/tag.py b/data/model/tag.py
new file mode 100644
index 000000000..437a9765b
--- /dev/null
+++ b/data/model/tag.py
@@ -0,0 +1,816 @@
+import logging
+
+from calendar import timegm
+from datetime import datetime
+from uuid import uuid4
+
+from peewee import IntegrityError, JOIN, fn
+from data.model import (image, storage, db_transaction, DataModelException, _basequery,
+ InvalidManifestException, TagAlreadyCreatedException, StaleTagException,
+ config)
+from data.database import (RepositoryTag, Repository, Image, ImageStorage, Namespace, TagManifest,
+ RepositoryNotification, Label, TagManifestLabel, get_epoch_timestamp,
+ db_for_update, Manifest, ManifestLabel, ManifestBlob,
+ ManifestLegacyImage, TagManifestToManifest,
+ TagManifestLabelMap, TagToRepositoryTag, Tag, get_epoch_timestamp_ms)
+from util.timedeltastring import convert_to_timedelta
+
+
+logger = logging.getLogger(__name__)
+
+
+def get_max_id_for_sec_scan():
+ """ Gets the maximum id for security scanning """
+ return RepositoryTag.select(fn.Max(RepositoryTag.id)).scalar()
+
+
+def get_min_id_for_sec_scan(version):
+ """ Gets the minimum id for a security scanning """
+ return _tag_alive(RepositoryTag
+ .select(fn.Min(RepositoryTag.id))
+ .join(Image)
+ .where(Image.security_indexed_engine < version)).scalar()
+
+
+def get_tag_pk_field():
+ """ Returns the primary key for Image DB model """
+ return RepositoryTag.id
+
+
+def get_tags_images_eligible_for_scan(clair_version):
+ Parent = Image.alias()
+ ParentImageStorage = ImageStorage.alias()
+
+ return _tag_alive(RepositoryTag
+ .select(Image, ImageStorage, Parent, ParentImageStorage, RepositoryTag)
+ .join(Image, on=(RepositoryTag.image == Image.id))
+ .join(ImageStorage, on=(Image.storage == ImageStorage.id))
+ .switch(Image)
+ .join(Parent, JOIN.LEFT_OUTER, on=(Image.parent == Parent.id))
+ .join(ParentImageStorage, JOIN.LEFT_OUTER, on=(ParentImageStorage.id == Parent.storage))
+ .where(RepositoryTag.hidden == False)
+ .where(Image.security_indexed_engine < clair_version))
+
+
+def _tag_alive(query, now_ts=None):
+ if now_ts is None:
+ now_ts = get_epoch_timestamp()
+ return query.where((RepositoryTag.lifetime_end_ts >> None) |
+ (RepositoryTag.lifetime_end_ts > now_ts))
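+
+# _tag_alive only adds a lifetime_end_ts filter, so it composes with any RepositoryTag query.
+# Illustrative sketch (not part of the original change): the currently live, visible tags of a
+# repository.
+#
+# _tag_alive(RepositoryTag.select()
+# .where(RepositoryTag.repository == repo_id, RepositoryTag.hidden == False))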
+
+
+def filter_has_repository_event(query, event):
+ """ Filters the query by ensuring the repositories returned have the given event. """
+ return (query
+ .join(Repository)
+ .join(RepositoryNotification)
+ .where(RepositoryNotification.event == event))
+
+
+def filter_tags_have_repository_event(query, event):
+ """ Filters the query by ensuring the repository tags live in a repository that has the given
+ event. Also returns the image storage for the tag's image and orders the results by
+ lifetime_start_ts.
+ """
+ query = filter_has_repository_event(query, event)
+ query = query.switch(RepositoryTag).join(Image).join(ImageStorage)
+ query = query.switch(RepositoryTag).order_by(RepositoryTag.lifetime_start_ts.desc())
+ return query
+
+
+_MAX_SUB_QUERIES = 100
+_MAX_IMAGE_LOOKUP_COUNT = 500
+
+def get_matching_tags_for_images(image_pairs, filter_images=None, filter_tags=None,
+ selections=None):
+ """ Returns all tags that contain the images with the given docker_image_id and storage_uuid,
+ as specified as an iterable of pairs. """
+ if not image_pairs:
+ return []
+
+ image_pairs_set = set(image_pairs)
+
+ # Find all possible matching image+storages.
+ images = []
+
+ while image_pairs:
+ image_pairs_slice = image_pairs[:_MAX_IMAGE_LOOKUP_COUNT]
+
+ ids = [pair[0] for pair in image_pairs_slice]
+ uuids = [pair[1] for pair in image_pairs_slice]
+
+ images_query = (Image
+ .select(Image.id, Image.docker_image_id, Image.ancestors, ImageStorage.uuid)
+ .join(ImageStorage)
+ .where(Image.docker_image_id << ids, ImageStorage.uuid << uuids)
+ .switch(Image))
+
+ if filter_images is not None:
+ images_query = filter_images(images_query)
+
+ images.extend(list(images_query))
+ image_pairs = image_pairs[_MAX_IMAGE_LOOKUP_COUNT:]
+
+ # Filter down to those images actually in the pairs set and build the set of queries to run.
+ individual_image_queries = []
+
+ for img in images:
+ # Make sure the image found is in the set of those requested, and that we haven't already
+ # processed it. We need this check because the query above checks for images with matching
+ # IDs OR storage UUIDs, rather than the expected ID+UUID pair. We do this for efficiency
+ # reasons, and it is highly unlikely we'll find an image with a mismatch, but we need this
+ # check to be absolutely sure.
+ pair = (img.docker_image_id, img.storage.uuid)
+ if pair not in image_pairs_set:
+ continue
+
+ # Remove the pair so we don't try it again.
+ image_pairs_set.remove(pair)
+
+ ancestors_str = '%s%s/%%' % (img.ancestors, img.id)
+ query = (Image
+ .select(Image.id)
+ .where((Image.id == img.id) | (Image.ancestors ** ancestors_str)))
+
+ individual_image_queries.append(query)
+
+ if not individual_image_queries:
+ return []
+
+ # Shard based on the max subquery count. This is used to prevent going over the DB's max query
+ # size, as well as to prevent the DB from locking up on a massive query.
+ sharded_queries = []
+ while individual_image_queries:
+ shard = individual_image_queries[:_MAX_SUB_QUERIES]
+ sharded_queries.append(_basequery.reduce_as_tree(shard))
+ individual_image_queries = individual_image_queries[_MAX_SUB_QUERIES:]
+
+ # Collect IDs of the tags found for each query.
+ tags = {}
+ for query in sharded_queries:
+ ImageAlias = Image.alias()
+ tag_query = (_tag_alive(RepositoryTag
+ .select(*(selections or []))
+ .distinct()
+ .join(ImageAlias)
+ .where(RepositoryTag.hidden == False)
+ .where(ImageAlias.id << query)
+ .switch(RepositoryTag)))
+
+ if filter_tags is not None:
+ tag_query = filter_tags(tag_query)
+
+ for tag in tag_query:
+ tags[tag.id] = tag
+
+ return tags.values()
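+
+# Illustrative call (sketch, placeholder values): find the alive tags containing either of two
+# images, restricted to repositories that have a particular notification event configured.
+#
+# pairs = [('docker-image-id-1', 'storage-uuid-1'), ('docker-image-id-2', 'storage-uuid-2')]
+# tags = get_matching_tags_for_images(
+# pairs, filter_tags=lambda query: filter_tags_have_repository_event(query, event))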
+
+
+def get_matching_tags(docker_image_id, storage_uuid, *args):
+ """ Returns a query pointing to all tags that contain the image with the
+ given docker_image_id and storage_uuid. """
+ image_row = image.get_image_with_storage(docker_image_id, storage_uuid)
+ if image_row is None:
+ return RepositoryTag.select().where(RepositoryTag.id < 0) # Empty query.
+
+ ancestors_str = '%s%s/%%' % (image_row.ancestors, image_row.id)
+ return _tag_alive(RepositoryTag
+ .select(*args)
+ .distinct()
+ .join(Image)
+ .join(ImageStorage)
+ .where(RepositoryTag.hidden == False)
+ .where((Image.id == image_row.id) |
+ (Image.ancestors ** ancestors_str)))
+
+
+def get_tags_for_image(image_id, *args):
+ return _tag_alive(RepositoryTag
+ .select(*args)
+ .distinct()
+ .where(RepositoryTag.image == image_id,
+ RepositoryTag.hidden == False))
+
+
+def get_tag_manifest_digests(tags):
+ """ Returns a map from tag ID to its associated manifest digest, if any. """
+ if not tags:
+ return dict()
+
+ manifests = (TagManifest
+ .select(TagManifest.tag, TagManifest.digest)
+ .where(TagManifest.tag << [t.id for t in tags]))
+
+ return {manifest.tag_id: manifest.digest for manifest in manifests}
+
+
+def list_active_repo_tags(repo, start_id=None, limit=None, include_images=True):
+ """ Returns all of the active, non-hidden tags in a repository, joined to they images
+ and (if present), their manifest.
+ """
+ if include_images:
+ query = _tag_alive(RepositoryTag
+ .select(RepositoryTag, Image, ImageStorage, TagManifest.digest)
+ .join(Image)
+ .join(ImageStorage)
+ .where(RepositoryTag.repository == repo, RepositoryTag.hidden == False)
+ .switch(RepositoryTag)
+ .join(TagManifest, JOIN.LEFT_OUTER)
+ .order_by(RepositoryTag.id))
+ else:
+ query = _tag_alive(RepositoryTag
+ .select(RepositoryTag)
+ .where(RepositoryTag.repository == repo, RepositoryTag.hidden == False)
+ .order_by(RepositoryTag.id))
+
+ if start_id is not None:
+ query = query.where(RepositoryTag.id >= start_id)
+
+ if limit is not None:
+ query = query.limit(limit)
+
+ return query
+
+
+def list_repository_tags(namespace_name, repository_name, include_hidden=False,
+ include_storage=False):
+ to_select = (RepositoryTag, Image)
+ if include_storage:
+ to_select = (RepositoryTag, Image, ImageStorage)
+
+ query = _tag_alive(RepositoryTag
+ .select(*to_select)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(RepositoryTag)
+ .join(Image)
+ .where(Repository.name == repository_name,
+ Namespace.username == namespace_name))
+
+ if not include_hidden:
+ query = query.where(RepositoryTag.hidden == False)
+
+ if include_storage:
+ query = query.switch(Image).join(ImageStorage)
+
+ return query
+
+
+def create_or_update_tag(namespace_name, repository_name, tag_name, tag_docker_image_id,
+ reversion=False, now_ms=None):
+ try:
+ repo = _basequery.get_existing_repository(namespace_name, repository_name)
+ except Repository.DoesNotExist:
+ raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
+
+ return create_or_update_tag_for_repo(repo.id, tag_name, tag_docker_image_id, reversion=reversion,
+ now_ms=now_ms)
+
+def create_or_update_tag_for_repo(repository_id, tag_name, tag_docker_image_id, reversion=False,
+ oci_manifest=None, now_ms=None):
+ now_ms = now_ms or get_epoch_timestamp_ms()
+ now_ts = int(now_ms / 1000)
+
+ with db_transaction():
+ try:
+ tag = db_for_update(_tag_alive(RepositoryTag
+ .select()
+ .where(RepositoryTag.repository == repository_id,
+ RepositoryTag.name == tag_name), now_ts)).get()
+ tag.lifetime_end_ts = now_ts
+ tag.save()
+
+ # Check for an OCI tag.
+ try:
+ oci_tag = db_for_update(Tag
+ .select()
+ .join(TagToRepositoryTag)
+ .where(TagToRepositoryTag.repository_tag == tag)).get()
+ oci_tag.lifetime_end_ms = now_ms
+ oci_tag.save()
+ except Tag.DoesNotExist:
+ pass
+ except RepositoryTag.DoesNotExist:
+ pass
+ except IntegrityError:
+ msg = 'Tag with name %s was stale when we tried to update it; Please retry the push'
+ raise StaleTagException(msg % tag_name)
+
+ try:
+ image_obj = Image.get(Image.docker_image_id == tag_docker_image_id,
+ Image.repository == repository_id)
+ except Image.DoesNotExist:
+ raise DataModelException('Invalid image with id: %s' % tag_docker_image_id)
+
+ try:
+ created = RepositoryTag.create(repository=repository_id, image=image_obj, name=tag_name,
+ lifetime_start_ts=now_ts, reversion=reversion)
+ if oci_manifest:
+ # Create the OCI tag as well.
+ oci_tag = Tag.create(repository=repository_id, manifest=oci_manifest, name=tag_name,
+ lifetime_start_ms=now_ms, reversion=reversion,
+ tag_kind=Tag.tag_kind.get_id('tag'))
+ TagToRepositoryTag.create(tag=oci_tag, repository_tag=created, repository=repository_id)
+
+ return created
+ except IntegrityError:
+ msg = 'Tag with name %s and lifetime start %s already exists'
+ raise TagAlreadyCreatedException(msg % (tag_name, now_ts))
+
+
+def create_temporary_hidden_tag(repo, image_obj, expiration_s):
+ """ Create a tag with a defined timeline, that will not appear in the UI or CLI. Returns the name
+ of the temporary tag. """
+ now_ts = get_epoch_timestamp()
+ expire_ts = now_ts + expiration_s
+ tag_name = str(uuid4())
+ RepositoryTag.create(repository=repo, image=image_obj, name=tag_name, lifetime_start_ts=now_ts,
+ lifetime_end_ts=expire_ts, hidden=True)
+ return tag_name
+
+
+def lookup_unrecoverable_tags(repo):
+ """ Returns the tags in a repository that are expired and past their time machine recovery
+ period. """
+ expired_clause = get_epoch_timestamp() - Namespace.removed_tag_expiration_s
+ return (RepositoryTag
+ .select()
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(RepositoryTag.repository == repo)
+ .where(~(RepositoryTag.lifetime_end_ts >> None),
+ RepositoryTag.lifetime_end_ts <= expired_clause))
+
+
+def delete_tag(namespace_name, repository_name, tag_name, now_ms=None):
+ now_ms = now_ms or get_epoch_timestamp_ms()
+ now_ts = int(now_ms / 1000)
+
+ with db_transaction():
+ try:
+ query = _tag_alive(RepositoryTag
+ .select(RepositoryTag, Repository)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Repository.name == repository_name,
+ Namespace.username == namespace_name,
+ RepositoryTag.name == tag_name), now_ts)
+ found = db_for_update(query).get()
+ except RepositoryTag.DoesNotExist:
+ msg = ('Invalid repository tag \'%s\' on repository \'%s/%s\'' %
+ (tag_name, namespace_name, repository_name))
+ raise DataModelException(msg)
+
+ found.lifetime_end_ts = now_ts
+ found.save()
+
+ try:
+ oci_tag_query = TagToRepositoryTag.select().where(TagToRepositoryTag.repository_tag == found)
+ oci_tag = db_for_update(oci_tag_query).get().tag
+ oci_tag.lifetime_end_ms = now_ms
+ oci_tag.save()
+ except TagToRepositoryTag.DoesNotExist:
+ pass
+
+ return found
+
+
+def _get_repo_tag_image(tag_name, include_storage, modifier):
+ query = Image.select().join(RepositoryTag)
+
+ if include_storage:
+ query = (Image
+ .select(Image, ImageStorage)
+ .join(ImageStorage)
+ .switch(Image)
+ .join(RepositoryTag))
+
+ images = _tag_alive(modifier(query.where(RepositoryTag.name == tag_name)))
+ if not images:
+ raise DataModelException('Unable to find image for tag.')
+ else:
+ return images[0]
+
+
+def get_repo_tag_image(repo, tag_name, include_storage=False):
+ def modifier(query):
+ return query.where(RepositoryTag.repository == repo)
+
+ return _get_repo_tag_image(tag_name, include_storage, modifier)
+
+
+def get_tag_image(namespace_name, repository_name, tag_name, include_storage=False):
+ def modifier(query):
+ return (query
+ .switch(RepositoryTag)
+ .join(Repository)
+ .join(Namespace)
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
+
+ return _get_repo_tag_image(tag_name, include_storage, modifier)
+
+
+def list_repository_tag_history(repo_obj, page=1, size=100, specific_tag=None,
+ active_tags_only=False, since_time=None):
+ # Only available on OCI model
+ if since_time is not None:
+ raise NotImplementedError
+
+ query = (RepositoryTag
+ .select(RepositoryTag, Image, ImageStorage)
+ .join(Image)
+ .join(ImageStorage)
+ .switch(RepositoryTag)
+ .where(RepositoryTag.repository == repo_obj)
+ .where(RepositoryTag.hidden == False)
+ .order_by(RepositoryTag.lifetime_start_ts.desc(), RepositoryTag.name)
+ .limit(size + 1)
+ .offset(size * (page - 1)))
+
+ if active_tags_only:
+ query = _tag_alive(query)
+
+ if specific_tag:
+ query = query.where(RepositoryTag.name == specific_tag)
+
+ tags = list(query)
+ if not tags:
+ return [], {}, False
+
+ manifest_map = get_tag_manifest_digests(tags)
+ return tags[0:size], manifest_map, len(tags) > size
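+
+# The query fetches size + 1 rows so callers can tell whether another page exists without a
+# separate count. Illustrative paging loop (sketch, not part of the original change):
+#
+# page, has_more = 1, True
+# while has_more:
+# tags, manifest_map, has_more = list_repository_tag_history(repo, page=page, size=100)
+# page += 1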
+
+
+def restore_tag_to_manifest(repo_obj, tag_name, manifest_digest):
+ """ Restores a tag to a specific manifest digest. """
+ with db_transaction():
+ # Verify that the manifest digest already existed under this repository for the
+ # given tag.
+ try:
+ tag_manifest = (TagManifest
+ .select(TagManifest, RepositoryTag, Image)
+ .join(RepositoryTag)
+ .join(Image)
+ .where(RepositoryTag.repository == repo_obj)
+ .where(RepositoryTag.name == tag_name)
+ .where(TagManifest.digest == manifest_digest)
+ .get())
+ except TagManifest.DoesNotExist:
+ raise DataModelException('Cannot restore to unknown or invalid digest')
+
+ # Lookup the existing image, if any.
+ try:
+ existing_image = get_repo_tag_image(repo_obj, tag_name)
+ except DataModelException:
+ existing_image = None
+
+ docker_image_id = tag_manifest.tag.image.docker_image_id
+ oci_manifest = None
+ try:
+ oci_manifest = Manifest.get(repository=repo_obj, digest=manifest_digest)
+ except Manifest.DoesNotExist:
+ pass
+
+ # Change the tag and tag manifest to point to the updated image.
+ updated_tag = create_or_update_tag_for_repo(repo_obj, tag_name, docker_image_id,
+ reversion=True, oci_manifest=oci_manifest)
+ tag_manifest.tag = updated_tag
+ tag_manifest.save()
+ return existing_image
+
+
+def restore_tag_to_image(repo_obj, tag_name, docker_image_id):
+ """ Restores a tag to a specific image ID. """
+ with db_transaction():
+ # Verify that the image ID already existed under this repository for the
+ # given tag.
+ try:
+ (RepositoryTag
+ .select()
+ .join(Image)
+ .where(RepositoryTag.repository == repo_obj)
+ .where(RepositoryTag.name == tag_name)
+ .where(Image.docker_image_id == docker_image_id)
+ .get())
+ except RepositoryTag.DoesNotExist:
+ raise DataModelException('Cannot restore to unknown or invalid image')
+
+ # Lookup the existing image, if any.
+ try:
+ existing_image = get_repo_tag_image(repo_obj, tag_name)
+ except DataModelException:
+ existing_image = None
+
+ create_or_update_tag_for_repo(repo_obj, tag_name, docker_image_id, reversion=True)
+ return existing_image
+
+
+def store_tag_manifest_for_testing(namespace_name, repository_name, tag_name, manifest,
+ leaf_layer_id, storage_id_map):
+ """ Stores a tag manifest for a specific tag name in the database. Returns the TagManifest
+ object, as well as a boolean indicating whether the TagManifest was created.
+ """
+ try:
+ repo = _basequery.get_existing_repository(namespace_name, repository_name)
+ except Repository.DoesNotExist:
+ raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
+
+ return store_tag_manifest_for_repo(repo.id, tag_name, manifest, leaf_layer_id, storage_id_map)
+
+
+def store_tag_manifest_for_repo(repository_id, tag_name, manifest, leaf_layer_id, storage_id_map,
+ reversion=False):
+ """ Stores a tag manifest for a specific tag name in the database. Returns the TagManifest
+ object, as well as a boolean indicating whether the TagManifest was created.
+ """
+ # Create the new-style OCI manifest and its blobs.
+ oci_manifest = _populate_manifest_and_blobs(repository_id, manifest, storage_id_map,
+ leaf_layer_id=leaf_layer_id)
+
+ # Create the tag for the tag manifest.
+ tag = create_or_update_tag_for_repo(repository_id, tag_name, leaf_layer_id,
+ reversion=reversion, oci_manifest=oci_manifest)
+
+ # Add a tag manifest pointing to that tag.
+ try:
+ manifest = TagManifest.get(digest=manifest.digest)
+ manifest.tag = tag
+ manifest.save()
+ return manifest, False
+ except TagManifest.DoesNotExist:
+ created = _associate_manifest(tag, oci_manifest)
+ return created, True
+
+
+def get_active_tag(namespace, repo_name, tag_name):
+ return _tag_alive(RepositoryTag
+ .select()
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(RepositoryTag.name == tag_name, Repository.name == repo_name,
+ Namespace.username == namespace)).get()
+
+def get_active_tag_for_repo(repo, tag_name):
+ try:
+ return _tag_alive(RepositoryTag
+ .select(RepositoryTag, Image, ImageStorage)
+ .join(Image)
+ .join(ImageStorage)
+ .where(RepositoryTag.name == tag_name,
+ RepositoryTag.repository == repo,
+ RepositoryTag.hidden == False)).get()
+ except RepositoryTag.DoesNotExist:
+ return None
+
+def get_expired_tag_in_repo(repo, tag_name):
+ return (RepositoryTag
+ .select()
+ .where(RepositoryTag.name == tag_name, RepositoryTag.repository == repo)
+ .where(~(RepositoryTag.lifetime_end_ts >> None))
+ .where(RepositoryTag.lifetime_end_ts <= get_epoch_timestamp())
+ .get())
+
+
+def get_possibly_expired_tag(namespace, repo_name, tag_name):
+ return (RepositoryTag
+ .select()
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(RepositoryTag.name == tag_name, Repository.name == repo_name,
+ Namespace.username == namespace)).get()
+
+def associate_generated_tag_manifest_with_tag(tag, manifest, storage_id_map):
+ oci_manifest = _populate_manifest_and_blobs(tag.repository, manifest, storage_id_map)
+
+ with db_transaction():
+ try:
+ (Tag
+ .select()
+ .join(TagToRepositoryTag)
+ .where(TagToRepositoryTag.repository_tag == tag)).get()
+ except Tag.DoesNotExist:
+ oci_tag = Tag.create(repository=tag.repository, manifest=oci_manifest, name=tag.name,
+ reversion=tag.reversion,
+ lifetime_start_ms=tag.lifetime_start_ts * 1000,
+ lifetime_end_ms=(tag.lifetime_end_ts * 1000
+ if tag.lifetime_end_ts else None),
+ tag_kind=Tag.tag_kind.get_id('tag'))
+ TagToRepositoryTag.create(tag=oci_tag, repository_tag=tag, repository=tag.repository)
+
+ return _associate_manifest(tag, oci_manifest)
+
+
+def _associate_manifest(tag, oci_manifest):
+ with db_transaction():
+ tag_manifest = TagManifest.create(tag=tag, digest=oci_manifest.digest,
+ json_data=oci_manifest.manifest_bytes)
+ TagManifestToManifest.create(tag_manifest=tag_manifest, manifest=oci_manifest)
+ return tag_manifest
+
+
+def _populate_manifest_and_blobs(repository, manifest, storage_id_map, leaf_layer_id=None):
+ leaf_layer_id = leaf_layer_id or manifest.leaf_layer_v1_image_id
+ try:
+ legacy_image = Image.get(Image.docker_image_id == leaf_layer_id,
+ Image.repository == repository)
+ except Image.DoesNotExist:
+ raise DataModelException('Invalid image with id: %s' % leaf_layer_id)
+
+ storage_ids = set()
+ for blob_digest in manifest.local_blob_digests:
+ image_storage_id = storage_id_map.get(blob_digest)
+ if image_storage_id is None:
+ logger.error('Missing blob for manifest `%s` in: %s', blob_digest, storage_id_map)
+ raise DataModelException('Missing blob for manifest `%s`' % blob_digest)
+
+ if image_storage_id in storage_ids:
+ continue
+
+ storage_ids.add(image_storage_id)
+
+ return populate_manifest(repository, manifest, legacy_image, storage_ids)
+
+
+def populate_manifest(repository, manifest, legacy_image, storage_ids):
+ """ Populates the rows for the manifest, including its blobs and legacy image. """
+ media_type = Manifest.media_type.get_id(manifest.media_type)
+
+ # Check for an existing manifest. If present, return it.
+ try:
+ return Manifest.get(repository=repository, digest=manifest.digest)
+ except Manifest.DoesNotExist:
+ pass
+
+ with db_transaction():
+ try:
+ manifest_row = Manifest.create(digest=manifest.digest, repository=repository,
+ manifest_bytes=manifest.bytes.as_encoded_str(),
+ media_type=media_type)
+ except IntegrityError as ie:
+ logger.debug('Got integrity error when trying to write manifest: %s', ie)
+ return Manifest.get(repository=repository, digest=manifest.digest)
+
+ ManifestLegacyImage.create(manifest=manifest_row, repository=repository, image=legacy_image)
+
+ blobs_to_insert = [dict(manifest=manifest_row, repository=repository,
+ blob=storage_id) for storage_id in storage_ids]
+ if blobs_to_insert:
+ ManifestBlob.insert_many(blobs_to_insert).execute()
+
+ return manifest_row
+
+
+def get_tag_manifest(tag):
+ try:
+ return TagManifest.get(tag=tag)
+ except TagManifest.DoesNotExist:
+ return None
+
+
+def load_tag_manifest(namespace, repo_name, tag_name):
+ try:
+ return (_load_repo_manifests(namespace, repo_name)
+ .where(RepositoryTag.name == tag_name)
+ .get())
+ except TagManifest.DoesNotExist:
+ msg = 'Manifest not found for tag {0} in repo {1}/{2}'.format(tag_name, namespace, repo_name)
+ raise InvalidManifestException(msg)
+
+
+def delete_manifest_by_digest(namespace, repo_name, digest):
+ tag_manifests = list(_load_repo_manifests(namespace, repo_name)
+ .where(TagManifest.digest == digest))
+
+ now_ms = get_epoch_timestamp_ms()
+ for tag_manifest in tag_manifests:
+ try:
+ tag = _tag_alive(RepositoryTag.select().where(RepositoryTag.id == tag_manifest.tag_id)).get()
+ delete_tag(namespace, repo_name, tag_manifest.tag.name, now_ms)
+ except RepositoryTag.DoesNotExist:
+ pass
+
+ return [tag_manifest.tag for tag_manifest in tag_manifests]
+
+
+def load_manifest_by_digest(namespace, repo_name, digest, allow_dead=False):
+ try:
+ return (_load_repo_manifests(namespace, repo_name, allow_dead=allow_dead)
+ .where(TagManifest.digest == digest)
+ .get())
+ except TagManifest.DoesNotExist:
+ msg = 'Manifest not found with digest {0} in repo {1}/{2}'.format(digest, namespace, repo_name)
+ raise InvalidManifestException(msg)
+
+
+def _load_repo_manifests(namespace, repo_name, allow_dead=False):
+ query = (TagManifest
+ .select(TagManifest, RepositoryTag)
+ .join(RepositoryTag)
+ .join(Image)
+ .join(Repository)
+ .join(Namespace, on=(Namespace.id == Repository.namespace_user))
+ .where(Repository.name == repo_name, Namespace.username == namespace))
+
+ if not allow_dead:
+ query = _tag_alive(query)
+
+ return query
+
+def change_repository_tag_expiration(namespace_name, repo_name, tag_name, expiration_date):
+ """ Changes the expiration of the tag with the given name to the given expiration datetime. If
+ the expiration datetime is None, then the tag is marked as not expiring.
+ """
+ try:
+ tag = get_active_tag(namespace_name, repo_name, tag_name)
+ return change_tag_expiration(tag, expiration_date)
+ except RepositoryTag.DoesNotExist:
+ return (None, False)
+
+
+def set_tag_expiration_for_manifest(tag_manifest, expiration_sec):
+ """
+ Changes the expiration of the tag that points to the given manifest to be its lifetime start +
+ the expiration seconds.
+ """
+ expiration_time_ts = tag_manifest.tag.lifetime_start_ts + expiration_sec
+ expiration_date = datetime.utcfromtimestamp(expiration_time_ts)
+ return change_tag_expiration(tag_manifest.tag, expiration_date)
+
+
+def change_tag_expiration(tag, expiration_date):
+ """ Changes the expiration of the given tag to the given expiration datetime. If
+ the expiration datetime is None, then the tag is marked as not expiring.
+ """
+ end_ts = None
+ min_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MINIMUM', '1h'))
+ max_expire_sec = convert_to_timedelta(config.app_config.get('LABELED_EXPIRATION_MAXIMUM', '104w'))
+
+ if expiration_date is not None:
+ offset = timegm(expiration_date.utctimetuple()) - tag.lifetime_start_ts
+ offset = min(max(offset, min_expire_sec.total_seconds()), max_expire_sec.total_seconds())
+ end_ts = tag.lifetime_start_ts + offset
+
+ if end_ts == tag.lifetime_end_ts:
+ return (None, True)
+
+ return set_tag_end_ts(tag, end_ts)
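+
+# The requested offset is clamped to the configured window. For example (illustrative, using the
+# defaults above of '1h' and '104w'): asking for an expiration ten seconds after
+# lifetime_start_ts still yields lifetime_end_ts = lifetime_start_ts + 3600.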
+
+
+def set_tag_end_ts(tag, end_ts):
+ """ Sets the end timestamp for a tag. Should only be called by change_tag_expiration
+ or tests.
+ """
+ end_ms = end_ts * 1000 if end_ts is not None else None
+
+ with db_transaction():
+ # Note: We check not just the ID of the tag but also its lifetime_end_ts, to ensure that it has
+ # not changed while we were updating its expiration.
+ result = (RepositoryTag
+ .update(lifetime_end_ts=end_ts)
+ .where(RepositoryTag.id == tag.id,
+ RepositoryTag.lifetime_end_ts == tag.lifetime_end_ts)
+ .execute())
+
+ # Check for a mapping to an OCI tag.
+ try:
+ oci_tag = (Tag
+ .select()
+ .join(TagToRepositoryTag)
+ .where(TagToRepositoryTag.repository_tag == tag)
+ .get())
+
+ (Tag
+ .update(lifetime_end_ms=end_ms)
+ .where(Tag.id == oci_tag.id,
+ Tag.lifetime_end_ms == oci_tag.lifetime_end_ms)
+ .execute())
+ except Tag.DoesNotExist:
+ pass
+
+ return (tag.lifetime_end_ts, result > 0)
+
+
+def find_matching_tag(repo_id, tag_names):
+ """ Finds the most recently pushed alive tag in the repository with one of the given names,
+ if any.
+ """
+ try:
+ return (_tag_alive(RepositoryTag
+ .select()
+ .where(RepositoryTag.repository == repo_id,
+ RepositoryTag.name << list(tag_names))
+ .order_by(RepositoryTag.lifetime_start_ts.desc()))
+ .get())
+ except RepositoryTag.DoesNotExist:
+ return None
+
+
+def get_most_recent_tag(repo_id):
+ """ Returns the most recently pushed alive tag in the repository, or None if none. """
+ try:
+ return (_tag_alive(RepositoryTag
+ .select()
+ .where(RepositoryTag.repository == repo_id, RepositoryTag.hidden == False)
+ .order_by(RepositoryTag.lifetime_start_ts.desc()))
+ .get())
+ except RepositoryTag.DoesNotExist:
+ return None
diff --git a/data/model/team.py b/data/model/team.py
new file mode 100644
index 000000000..4988d74ac
--- /dev/null
+++ b/data/model/team.py
@@ -0,0 +1,519 @@
+import json
+import re
+import uuid
+
+from datetime import datetime
+from peewee import fn
+
+from data.database import (Team, TeamMember, TeamRole, User, TeamMemberInvite, RepositoryPermission,
+ TeamSync, LoginService, FederatedLogin, db_random_func, db_transaction)
+from data.model import (DataModelException, InvalidTeamException, UserAlreadyInTeam,
+ InvalidTeamMemberException, _basequery)
+from data.text import prefix_search
+from util.validation import validate_username
+from util.morecollections import AttrDict
+
+
+MIN_TEAMNAME_LENGTH = 2
+MAX_TEAMNAME_LENGTH = 255
+
+VALID_TEAMNAME_REGEX = r'^([a-z0-9]+(?:[._-][a-z0-9]+)*)$'
+
+
+def validate_team_name(teamname):
+ if not re.match(VALID_TEAMNAME_REGEX, teamname):
+ return (False, 'Team name must match expression ' + VALID_TEAMNAME_REGEX)
+
+ length_match = (len(teamname) >= MIN_TEAMNAME_LENGTH and len(teamname) <= MAX_TEAMNAME_LENGTH)
+ if not length_match:
+ return (False, 'Team must be between %s and %s characters in length' %
+ (MIN_TEAMNAME_LENGTH, MAX_TEAMNAME_LENGTH))
+
+ return (True, '')
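+
+# Illustrative results (sketch, not part of the original change):
+# validate_team_name('owners') -> (True, ''); validate_team_name('Owners') fails the
+# lowercase-only regex; validate_team_name('a') fails the two-character minimum.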
+
+
+def create_team(name, org_obj, team_role_name, description=''):
+ (teamname_valid, teamname_issue) = validate_team_name(name)
+ if not teamname_valid:
+ raise InvalidTeamException('Invalid team name %s: %s' % (name, teamname_issue))
+
+ if not org_obj.organization:
+ raise InvalidTeamException('Specified organization %s was not an organization' %
+ org_obj.username)
+
+ team_role = TeamRole.get(TeamRole.name == team_role_name)
+ return Team.create(name=name, organization=org_obj, role=team_role,
+ description=description)
+
+
+def add_user_to_team(user_obj, team):
+ try:
+ return TeamMember.create(user=user_obj, team=team)
+ except Exception:
+ raise UserAlreadyInTeam('User %s is already a member of team %s' %
+ (user_obj.username, team.name))
+
+
+def remove_user_from_team(org_name, team_name, username, removed_by_username):
+ Org = User.alias()
+ joined = TeamMember.select().join(User).switch(TeamMember).join(Team)
+ with_role = joined.join(TeamRole)
+ with_org = with_role.switch(Team).join(Org,
+ on=(Org.id == Team.organization))
+ found = list(with_org.where(User.username == username,
+ Org.username == org_name,
+ Team.name == team_name))
+
+ if not found:
+ raise DataModelException('User %s does not belong to team %s' %
+ (username, team_name))
+
+ if username == removed_by_username:
+ admin_team_query = __get_user_admin_teams(org_name, username)
+ admin_team_names = {team.name for team in admin_team_query}
+ if team_name in admin_team_names and len(admin_team_names) <= 1:
+ msg = 'User cannot remove themselves from their only admin team.'
+ raise DataModelException(msg)
+
+ user_in_team = found[0]
+ user_in_team.delete_instance()
+
+
+def set_team_org_permission(team, team_role_name, set_by_username):
+ if team.role.name == 'admin' and team_role_name != 'admin':
+ # We need to make sure we're not removing the user's only admin role.
+ user_admin_teams = __get_user_admin_teams(team.organization.username, set_by_username)
+ admin_team_set = {admin_team.name for admin_team in user_admin_teams}
+ if team.name in admin_team_set and len(admin_team_set) <= 1:
+ msg = (('Cannot remove admin from team \'%s\' because calling user ' +
+ 'would no longer have admin on org \'%s\'') %
+ (team.name, team.organization.username))
+ raise DataModelException(msg)
+
+ new_role = TeamRole.get(TeamRole.name == team_role_name)
+ team.role = new_role
+ team.save()
+ return team
+
+
+def __get_user_admin_teams(org_name, username):
+ Org = User.alias()
+ user_teams = Team.select().join(TeamMember).join(User)
+ with_org = user_teams.switch(Team).join(Org,
+ on=(Org.id == Team.organization))
+ with_role = with_org.switch(Team).join(TeamRole)
+ admin_teams = with_role.where(User.username == username,
+ Org.username == org_name,
+ TeamRole.name == 'admin')
+ return admin_teams
+
+
+def remove_team(org_name, team_name, removed_by_username):
+ joined = Team.select(Team, TeamRole).join(User).switch(Team).join(TeamRole)
+
+ found = list(joined.where(User.organization == True,
+ User.username == org_name,
+ Team.name == team_name))
+ if not found:
+ raise InvalidTeamException('Team \'%s\' is not a team in org \'%s\'' %
+ (team_name, org_name))
+
+ team = found[0]
+ if team.role.name == 'admin':
+ admin_teams = list(__get_user_admin_teams(org_name, removed_by_username))
+ if len(admin_teams) <= 1:
+ # The team we are trying to remove is the only admin team containing this user.
+ msg = "Deleting team '%s' would remove admin ability for user '%s' in organization '%s'"
+ raise DataModelException(msg % (team_name, removed_by_username, org_name))
+
+ team.delete_instance(recursive=True, delete_nullable=True)
+
+
+def add_or_invite_to_team(inviter, team, user_obj=None, email=None, requires_invite=True):
+ # If the user is a member of the organization, then we simply add the
+ # user directly to the team. Otherwise, an invite is created for the user/email.
+ # We return None if the user was directly added and the invite object if the user was invited.
+ if user_obj and requires_invite:
+ orgname = team.organization.username
+
+ # If the user is part of the organization (or a robot), then no invite is required.
+ if user_obj.robot:
+ requires_invite = False
+ if not user_obj.username.startswith(orgname + '+'):
+ raise InvalidTeamMemberException('Cannot add the specified robot to this team, ' +
+ 'as it is not a member of the organization')
+ else:
+ query = (TeamMember
+ .select()
+ .where(TeamMember.user == user_obj)
+ .join(Team)
+ .join(User)
+ .where(User.username == orgname, User.organization == True))
+ requires_invite = not any(query)
+
+ # If we have a valid user and no invite is required, simply add the user to the team.
+ if user_obj and not requires_invite:
+ add_user_to_team(user_obj, team)
+ return None
+
+ email_address = email if not user_obj else None
+ return TeamMemberInvite.create(user=user_obj, email=email_address, team=team, inviter=inviter)
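+
+# Illustrative outcomes (sketch, not part of the original change): adding an org member or an
+# org robot returns None because the user is added to the team directly, while an outside user
+# or a bare email address yields a TeamMemberInvite row that must be confirmed out of band.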
+
+
+def get_matching_user_teams(team_prefix, user_obj, limit=10):
+ team_prefix_search = prefix_search(Team.name, team_prefix)
+ query = (Team
+ .select(Team.id.distinct(), Team)
+ .join(User)
+ .switch(Team)
+ .join(TeamMember)
+ .where(TeamMember.user == user_obj, team_prefix_search)
+ .limit(limit))
+
+ return query
+
+
+def get_organization_team(orgname, teamname):
+ joined = Team.select().join(User)
+ query = joined.where(Team.name == teamname, User.organization == True,
+ User.username == orgname).limit(1)
+ result = list(query)
+ if not result:
+ raise InvalidTeamException('Team does not exist: %s/%s' % (orgname, teamname))
+
+ return result[0]
+
+
+def get_matching_admined_teams(team_prefix, user_obj, limit=10):
+ team_prefix_search = prefix_search(Team.name, team_prefix)
+ admined_orgs = (_basequery.get_user_organizations(user_obj.username)
+ .switch(Team)
+ .join(TeamRole)
+ .where(TeamRole.name == 'admin'))
+
+ query = (Team
+ .select(Team.id.distinct(), Team)
+ .join(User)
+ .switch(Team)
+ .join(TeamMember)
+ .where(team_prefix_search, Team.organization << (admined_orgs))
+ .limit(limit))
+
+ return query
+
+
+def get_matching_teams(team_prefix, organization):
+ team_prefix_search = prefix_search(Team.name, team_prefix)
+ query = Team.select().where(team_prefix_search, Team.organization == organization)
+ return query.limit(10)
+
+
+def get_teams_within_org(organization, has_external_auth=False):
+ """ Returns a AttrDict of team info (id, name, description), its role under the org,
+ the number of repositories on which it has permission, and the number of members.
+ """
+ query = (Team.select()
+ .where(Team.organization == organization)
+ .join(TeamRole))
+
+ def _team_view(team):
+ return {
+ 'id': team.id,
+ 'name': team.name,
+ 'description': team.description,
+ 'role_name': Team.role.get_name(team.role_id),
+
+ 'repo_count': 0,
+ 'member_count': 0,
+
+ 'is_synced': False,
+ }
+
+ teams = {team.id: _team_view(team) for team in query}
+ if not teams:
+ # Just in case. Should ideally never happen.
+ return []
+
+ # Add repository permissions count.
+ permission_tuples = (RepositoryPermission.select(RepositoryPermission.team,
+ fn.Count(RepositoryPermission.id))
+ .where(RepositoryPermission.team << teams.keys())
+ .group_by(RepositoryPermission.team)
+ .tuples())
+
+ for perm_tuple in permission_tuples:
+ teams[perm_tuple[0]]['repo_count'] = perm_tuple[1]
+
+ # Add the member count.
+ members_tuples = (TeamMember.select(TeamMember.team,
+ fn.Count(TeamMember.id))
+ .where(TeamMember.team << teams.keys())
+ .group_by(TeamMember.team)
+ .tuples())
+
+ for member_tuple in members_tuples:
+ teams[member_tuple[0]]['member_count'] = member_tuple[1]
+
+ # Add syncing information.
+ if has_external_auth:
+ sync_query = TeamSync.select(TeamSync.team).where(TeamSync.team << teams.keys())
+ for team_sync in sync_query:
+ teams[team_sync.team_id]['is_synced'] = True
+
+ return [AttrDict(team_info) for team_info in teams.values()]
+
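+# Illustrative usage sketch (editorial note, not part of this change): each AttrDict returned by
+# get_teams_within_org exposes the aggregated fields computed above, e.g.
+#
+#   team_infos = get_teams_within_org(org, has_external_auth=True)
+#   synced_names = [t.name for t in team_infos if t.is_synced]
+#   counts = {t.name: (t.repo_count, t.member_count) for t in team_infos}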
+
+def get_user_teams_within_org(username, organization):
+ joined = Team.select().join(TeamMember).join(User)
+ return joined.where(Team.organization == organization,
+ User.username == username)
+
+
+def list_organization_members_by_teams(organization):
+ query = (TeamMember
+ .select(Team, User)
+ .join(Team)
+ .switch(TeamMember)
+ .join(User)
+ .where(Team.organization == organization))
+ return query
+
+
+def get_organization_team_member_invites(teamid):
+ joined = TeamMemberInvite.select().join(Team).join(User)
+ query = joined.where(Team.id == teamid)
+ return query
+
+
+def delete_team_email_invite(team, email):
+ try:
+ found = TeamMemberInvite.get(TeamMemberInvite.email == email, TeamMemberInvite.team == team)
+ except TeamMemberInvite.DoesNotExist:
+ return False
+
+ found.delete_instance()
+ return True
+
+
+def delete_team_user_invite(team, user_obj):
+ try:
+ found = TeamMemberInvite.get(TeamMemberInvite.user == user_obj, TeamMemberInvite.team == team)
+ except TeamMemberInvite.DoesNotExist:
+ return False
+
+ found.delete_instance()
+ return True
+
+
+def lookup_team_invites_by_email(email):
+ return TeamMemberInvite.select().where(TeamMemberInvite.email == email)
+
+
+def lookup_team_invites(user_obj):
+ return TeamMemberInvite.select().where(TeamMemberInvite.user == user_obj)
+
+
+def lookup_team_invite(code, user_obj=None):
+ # Lookup the invite code.
+ try:
+ found = TeamMemberInvite.get(TeamMemberInvite.invite_token == code)
+ except TeamMemberInvite.DoesNotExist:
+ raise DataModelException('Invalid confirmation code.')
+
+ if user_obj and found.user != user_obj:
+ raise DataModelException('Invalid confirmation code.')
+
+ return found
+
+
+def delete_team_invite(code, user_obj=None):
+ found = lookup_team_invite(code, user_obj)
+
+ team = found.team
+ inviter = found.inviter
+
+ found.delete_instance()
+
+ return (team, inviter)
+
+
+def find_matching_team_invite(code, user_obj):
+ """ Finds a team invite with the given code that applies to the given user and returns it or
+ raises a DataModelException if not found. """
+ found = lookup_team_invite(code)
+
+ # If the invite is for a specific user, we have to confirm that here.
+ if found.user is not None and found.user != user_obj:
+ message = """This invite is intended for user "%s".
+ Please login to that account and try again.""" % found.user.username
+ raise DataModelException(message)
+
+ return found
+
+
+def find_organization_invites(organization, user_obj):
+ """ Finds all organization team invites for the given user under the given organization. """
+ invite_check = (TeamMemberInvite.user == user_obj)
+ if user_obj.verified:
+ invite_check = invite_check | (TeamMemberInvite.email == user_obj.email)
+
+ query = (TeamMemberInvite
+ .select()
+ .join(Team)
+ .where(invite_check, Team.organization == organization))
+ return query
+
+
+def confirm_team_invite(code, user_obj):
+ """ Confirms the given team invite code for the given user by adding the user to the team
+ and deleting the code. Raises a DataModelException if the code was not found or does
+ not apply to the given user. If the user is invited to two or more teams under the
+ same organization, they are automatically confirmed for all of them. """
+ found = find_matching_team_invite(code, user_obj)
+
+ # Find all matching invitations for the user under the organization.
+ code_found = False
+ for invite in find_organization_invites(found.team.organization, user_obj):
+ # Add the user to the team.
+ try:
+ code_found = True
+ add_user_to_team(user_obj, invite.team)
+ except UserAlreadyInTeam:
+ # Ignore.
+ pass
+
+ # Delete the invite and return the team.
+ invite.delete_instance()
+
+ if not code_found:
+ if found.user:
+ message = """This invite is intended for user "%s".
+ Please login to that account and try again.""" % found.user.username
+ raise DataModelException(message)
+ else:
+ message = """This invite is intended for email "%s".
+ Please login to that account and try again.""" % found.email
+ raise DataModelException(message)
+
+ team = found.team
+ inviter = found.inviter
+ return (team, inviter)
+
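+# Illustrative flow sketch (editorial note, not part of this change): a typical caller first looks
+# up the invite for display, then confirms it once the user accepts:
+#
+#   invite = find_matching_team_invite(code, user_obj)   # raises DataModelException if not applicable
+#   team, inviter = confirm_team_invite(code, user_obj)  # adds the user and deletes the invite(s)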
+
+def get_federated_team_member_mapping(team, login_service_name):
+  """ Returns a dict of the federated IDs of all team members in the team whose users are bound
+      to the login service with the given name. The dictionary maps each federated service
+      identifier (username) to the corresponding Quay User table ID.
+  """
+ login_service = LoginService.get(name=login_service_name)
+
+ query = (FederatedLogin
+ .select(FederatedLogin.service_ident, User.id)
+ .join(User)
+ .join(TeamMember)
+ .join(Team)
+ .where(Team.id == team, User.robot == False, FederatedLogin.service == login_service))
+ return dict(query.tuples())
+
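+# Illustrative usage sketch (editorial note, not part of this change): the returned dict lets a
+# sync worker diff the external group against the team's current members, e.g. assuming an LDAP
+# login service named 'ldap':
+#
+#   mapping = get_federated_team_member_mapping(team, 'ldap')
+#   known_external_ids = set(mapping.keys())    # e.g. LDAP usernames
+#   current_member_ids = set(mapping.values())  # Quay User table ids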
+
+def list_team_users(team):
+ """ Returns an iterator of all the *users* found in a team. Does not include robots. """
+ return (User
+ .select()
+ .join(TeamMember)
+ .join(Team)
+ .where(Team.id == team, User.robot == False))
+
+
+def list_team_robots(team):
+ """ Returns an iterator of all the *robots* found in a team. Does not include users. """
+ return (User
+ .select()
+ .join(TeamMember)
+ .join(Team)
+ .where(Team.id == team, User.robot == True))
+
+
+def set_team_syncing(team, login_service_name, config):
+ """ Sets the given team to sync to the given service using the given config. """
+ login_service = LoginService.get(name=login_service_name)
+ return TeamSync.create(team=team, transaction_id='', service=login_service,
+ config=json.dumps(config))
+
+
+def remove_team_syncing(orgname, teamname):
+ """ Removes syncing on the team matching the given organization name and team name. """
+ existing = get_team_sync_information(orgname, teamname)
+ if existing:
+ existing.delete_instance()
+
+
+def get_stale_team(stale_timespan):
+  """ Returns a team that is set up to sync to an external group and that has not been synced
+      since now - stale_timespan. Returns None if no such team is found.
+  """
+ stale_at = datetime.now() - stale_timespan
+
+ try:
+ candidates = (TeamSync
+ .select(TeamSync.id)
+ .where((TeamSync.last_updated <= stale_at) | (TeamSync.last_updated >> None))
+ .limit(500)
+ .alias('candidates'))
+
+ found = (TeamSync
+ .select(candidates.c.id)
+ .from_(candidates)
+ .order_by(db_random_func())
+ .get())
+
+ if found is None:
+ return
+
+ return TeamSync.select(TeamSync, Team).join(Team).where(TeamSync.id == found.id).get()
+ except TeamSync.DoesNotExist:
+ return None
+
+
+def get_team_sync_information(orgname, teamname):
+  """ Returns the team syncing information for the team with the given name under the named
+      organization, or None if the team is not configured for syncing.
+  """
+ query = (TeamSync
+ .select(TeamSync, LoginService)
+ .join(Team)
+ .join(User)
+ .switch(TeamSync)
+ .join(LoginService)
+ .where(Team.name == teamname, User.organization == True, User.username == orgname))
+
+ try:
+ return query.get()
+ except TeamSync.DoesNotExist:
+ return None
+
+
+def update_sync_status(team_sync_info):
+ """ Attempts to update the transaction ID and last updated time on a TeamSync object. If the
+ transaction ID on the entry in the DB does not match that found on the object, this method
+ returns False, which indicates another caller updated it first.
+ """
+ new_transaction_id = str(uuid.uuid4())
+ query = (TeamSync
+ .update(transaction_id=new_transaction_id, last_updated=datetime.now())
+ .where(TeamSync.id == team_sync_info.id,
+ TeamSync.transaction_id == team_sync_info.transaction_id))
+ return query.execute() == 1
+
+
+def delete_members_not_present(team, member_id_set):
+ """ Deletes all members of the given team that are not found in the member ID set. """
+ with db_transaction():
+ user_ids = set([u.id for u in list_team_users(team)])
+ to_delete = list(user_ids - member_id_set)
+ if to_delete:
+ query = TeamMember.delete().where(TeamMember.team == team, TeamMember.user << to_delete)
+ return query.execute()
+
+ return 0
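+
+
+# Illustrative worker-loop sketch (editorial note, not part of this change) tying the syncing
+# helpers above together; `external_member_id_set` is an assumed input from the external group:
+#
+#   stale = get_stale_team(stale_timespan)
+#   if stale is not None and update_sync_status(stale):
+#       # We won the optimistic-concurrency check on transaction_id, so it is safe to
+#       # reconcile membership against the external group.
+#       delete_members_not_present(stale.team, external_member_id_set)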
diff --git a/data/model/test/__init__.py b/data/model/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/data/model/test/test_appspecifictoken.py b/data/model/test/test_appspecifictoken.py
new file mode 100644
index 000000000..96a7491f5
--- /dev/null
+++ b/data/model/test/test_appspecifictoken.py
@@ -0,0 +1,126 @@
+from datetime import datetime, timedelta
+from mock import patch
+
+import pytest
+
+from data.model import config as _config
+from data import model
+from data.model.appspecifictoken import create_token, revoke_token, access_valid_token
+from data.model.appspecifictoken import gc_expired_tokens, get_expiring_tokens
+from data.model.appspecifictoken import get_full_token_string
+from util.timedeltastring import convert_to_timedelta
+
+from test.fixtures import *
+
+@pytest.mark.parametrize('expiration', [
+ (None),
+ ('-1m'),
+ ('-1d'),
+ ('-1w'),
+ ('10m'),
+ ('10d'),
+ ('10w'),
+])
+def test_gc(expiration, initialized_db):
+ user = model.user.get_user('devtable')
+
+ expiration_date = None
+ is_expired = False
+ if expiration:
+ if expiration[0] == '-':
+ is_expired = True
+ expiration_date = datetime.now() - convert_to_timedelta(expiration[1:])
+ else:
+ expiration_date = datetime.now() + convert_to_timedelta(expiration)
+
+ # Create a token.
+ token = create_token(user, 'Some token', expiration=expiration_date)
+
+ # GC tokens.
+ gc_expired_tokens(timedelta(seconds=0))
+
+ # Ensure the token was GCed if expired and not if it wasn't.
+ assert (access_valid_token(get_full_token_string(token)) is None) == is_expired
+
+
+def test_access_token(initialized_db):
+ user = model.user.get_user('devtable')
+
+ # Create a token.
+ token = create_token(user, 'Some token')
+ assert token.last_accessed is None
+
+ # Lookup the token.
+ token = access_valid_token(get_full_token_string(token))
+ assert token.last_accessed is not None
+
+ # Revoke the token.
+ revoke_token(token)
+
+ # Ensure it cannot be accessed
+ assert access_valid_token(get_full_token_string(token)) is None
+
+
+def test_expiring_soon(initialized_db):
+ user = model.user.get_user('devtable')
+
+ # Create some tokens.
+ create_token(user, 'Some token')
+ exp_token = create_token(user, 'Some expiring token', datetime.now() + convert_to_timedelta('1d'))
+ create_token(user, 'Some other token', expiration=datetime.now() + convert_to_timedelta('2d'))
+
+ # Get the token expiring soon.
+ expiring_soon = get_expiring_tokens(user, convert_to_timedelta('25h'))
+ assert expiring_soon
+ assert len(expiring_soon) == 1
+ assert expiring_soon[0].id == exp_token.id
+
+ expiring_soon = get_expiring_tokens(user, convert_to_timedelta('49h'))
+ assert expiring_soon
+ assert len(expiring_soon) == 2
+
+
+@pytest.fixture(scope='function')
+def app_config():
+ with patch.dict(_config.app_config, {}, clear=True):
+ yield _config.app_config
+
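+# The fixture above hands each test an isolated, initially empty app config: tests may mutate it
+# freely (e.g. app_config['APP_SPECIFIC_TOKEN_EXPIRATION'] = '10d'), and patch.dict restores the
+# real data.model.config.app_config once the test finishes.
+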
+@pytest.mark.parametrize('expiration', [
+ (None),
+ ('10m'),
+ ('10d'),
+ ('10w'),
+])
+@pytest.mark.parametrize('default_expiration', [
+ (None),
+ ('10m'),
+ ('10d'),
+ ('10w'),
+])
+def test_create_access_token(expiration, default_expiration, initialized_db, app_config):
+ user = model.user.get_user('devtable')
+ expiration_date = datetime.now() + convert_to_timedelta(expiration) if expiration else None
+ with patch.dict(_config.app_config, {}, clear=True):
+ app_config['APP_SPECIFIC_TOKEN_EXPIRATION'] = default_expiration
+ if expiration:
+ exp_token = create_token(user, 'Some token', expiration=expiration_date)
+ assert exp_token.expiration == expiration_date
+ else:
+ exp_token = create_token(user, 'Some token')
+ assert (exp_token.expiration is None) == (default_expiration is None)
+
+
+@pytest.mark.parametrize('invalid_token', [
+ '',
+ 'foo',
+ 'a' * 40,
+ 'b' * 40,
+ '%s%s' % ('b' * 40, 'a' * 40),
+ '%s%s' % ('a' * 39, 'b' * 40),
+ '%s%s' % ('a' * 40, 'b' * 39),
+ '%s%s' % ('a' * 40, 'b' * 41),
+])
+def test_invalid_access_token(invalid_token, initialized_db):
+ user = model.user.get_user('devtable')
+ token = access_valid_token(invalid_token)
+ assert token is None
diff --git a/data/model/test/test_basequery.py b/data/model/test/test_basequery.py
new file mode 100644
index 000000000..84e248327
--- /dev/null
+++ b/data/model/test/test_basequery.py
@@ -0,0 +1,107 @@
+import pytest
+
+from peewee import JOIN
+from playhouse.test_utils import assert_query_count
+
+from data.database import Repository, RepositoryPermission, TeamMember, Namespace
+from data.model._basequery import filter_to_repos_for_user
+from data.model.organization import get_admin_users
+from data.model.user import get_namespace_user
+from util.names import parse_robot_username
+
+from test.fixtures import *
+
+def _is_team_member(team, user):
+ return user.id in [member.user_id for member in
+ TeamMember.select().where(TeamMember.team == team)]
+
+def _get_visible_repositories_for_user(user, repo_kind='image', include_public=False,
+ namespace=None):
+  """ Yields all repositories visible to the given user, via a direct repository permission, a
+      team permission, the user being an admin of the namespace, or (optionally) public
+      visibility.
+  """
+ for repo in Repository.select():
+ if repo_kind is not None and repo.kind.name != repo_kind:
+ continue
+
+ if namespace is not None and repo.namespace_user.username != namespace:
+ continue
+
+ if include_public and repo.visibility.name == 'public':
+ yield repo
+ continue
+
+ # Direct repo permission.
+ try:
+      RepositoryPermission.get(repository=repo, user=user)
+ yield repo
+ continue
+ except RepositoryPermission.DoesNotExist:
+ pass
+
+ # Team permission.
+ found_in_team = False
+ for perm in RepositoryPermission.select().where(RepositoryPermission.repository == repo):
+ if perm.team and _is_team_member(perm.team, user):
+ found_in_team = True
+ break
+
+ if found_in_team:
+ yield repo
+ continue
+
+ # Org namespace admin permission.
+ if user in get_admin_users(repo.namespace_user):
+ yield repo
+ continue
+
+
+@pytest.mark.parametrize('username', [
+ 'devtable',
+ 'devtable+dtrobot',
+ 'public',
+ 'reader',
+])
+@pytest.mark.parametrize('include_public', [
+ True,
+ False
+])
+@pytest.mark.parametrize('filter_to_namespace', [
+ True,
+ False
+])
+@pytest.mark.parametrize('repo_kind', [
+ None,
+ 'image',
+ 'application',
+])
+def test_filter_repositories(username, include_public, filter_to_namespace, repo_kind,
+ initialized_db):
+ namespace = username if filter_to_namespace else None
+ if '+' in username and filter_to_namespace:
+ namespace, _ = parse_robot_username(username)
+
+ user = get_namespace_user(username)
+ query = (Repository
+ .select()
+ .distinct()
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Repository)
+ .join(RepositoryPermission, JOIN.LEFT_OUTER))
+
+ # Prime the cache.
+ Repository.kind.get_id('image')
+
+ with assert_query_count(1):
+ found = list(filter_to_repos_for_user(query, user.id,
+ namespace=namespace,
+ include_public=include_public,
+ repo_kind=repo_kind))
+
+ expected = list(_get_visible_repositories_for_user(user,
+ repo_kind=repo_kind,
+ namespace=namespace,
+ include_public=include_public))
+
+ assert len(found) == len(expected)
+ assert {r.id for r in found} == {r.id for r in expected}
diff --git a/data/model/test/test_build.py b/data/model/test/test_build.py
new file mode 100644
index 000000000..c43d6e683
--- /dev/null
+++ b/data/model/test/test_build.py
@@ -0,0 +1,107 @@
+import pytest
+
+from mock import patch
+
+from data import model
+from data.database import BUILD_PHASE, RepositoryBuildTrigger, RepositoryBuild
+from data.model.build import (update_trigger_disable_status, create_repository_build,
+ get_repository_build, update_phase_then_close)
+from test.fixtures import *
+
+TEST_FAIL_THRESHOLD = 5
+TEST_INTERNAL_ERROR_THRESHOLD = 2
+
+@pytest.mark.parametrize('starting_failure_count, starting_error_count, status, expected_reason', [
+ (0, 0, BUILD_PHASE.COMPLETE, None),
+ (10, 10, BUILD_PHASE.COMPLETE, None),
+
+ (TEST_FAIL_THRESHOLD - 1, TEST_INTERNAL_ERROR_THRESHOLD - 1, BUILD_PHASE.COMPLETE, None),
+ (TEST_FAIL_THRESHOLD - 1, 0, BUILD_PHASE.ERROR, 'successive_build_failures'),
+ (0, TEST_INTERNAL_ERROR_THRESHOLD - 1, BUILD_PHASE.INTERNAL_ERROR,
+ 'successive_build_internal_errors'),
+])
+def test_update_trigger_disable_status(starting_failure_count, starting_error_count, status,
+ expected_reason, initialized_db):
+ test_config = {
+ 'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': TEST_FAIL_THRESHOLD,
+ 'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': TEST_INTERNAL_ERROR_THRESHOLD,
+ }
+
+ trigger = model.build.list_build_triggers('devtable', 'building')[0]
+ trigger.successive_failure_count = starting_failure_count
+ trigger.successive_internal_error_count = starting_error_count
+ trigger.enabled = True
+ trigger.save()
+
+ with patch('data.model.config.app_config', test_config):
+ update_trigger_disable_status(trigger, status)
+ updated_trigger = RepositoryBuildTrigger.get(uuid=trigger.uuid)
+
+ assert updated_trigger.enabled == (expected_reason is None)
+
+ if expected_reason is not None:
+ assert updated_trigger.disabled_reason.name == expected_reason
+ else:
+ assert updated_trigger.disabled_reason is None
+ assert updated_trigger.successive_failure_count == 0
+ assert updated_trigger.successive_internal_error_count == 0
+
+
+def test_archivable_build_logs(initialized_db):
+ # Make sure there are no archivable logs.
+ result = model.build.get_archivable_build()
+ assert result is None
+
+ # Add a build that cannot (yet) be archived.
+ repo = model.repository.get_repository('devtable', 'simple')
+ token = model.token.create_access_token(repo, 'write')
+ created = RepositoryBuild.create(repository=repo, access_token=token,
+ phase=model.build.BUILD_PHASE.WAITING,
+ logs_archived=False, job_config='{}',
+ display_name='')
+
+ # Make sure there are no archivable logs.
+ result = model.build.get_archivable_build()
+ assert result is None
+
+ # Change the build to being complete.
+ created.phase = model.build.BUILD_PHASE.COMPLETE
+ created.save()
+
+ # Make sure we now find an archivable build.
+ result = model.build.get_archivable_build()
+ assert result.id == created.id
+
+
+def test_update_build_phase(initialized_db):
+ build = create_build(model.repository.get_repository("devtable", "building"))
+
+ repo_build = get_repository_build(build.uuid)
+
+ assert repo_build.phase == BUILD_PHASE.WAITING
+ assert update_phase_then_close(build.uuid, BUILD_PHASE.COMPLETE)
+
+ repo_build = get_repository_build(build.uuid)
+ assert repo_build.phase == BUILD_PHASE.COMPLETE
+
+ repo_build.delete_instance()
+ assert not update_phase_then_close(repo_build.uuid, BUILD_PHASE.PULLING)
+
+
+def create_build(repository):
+ new_token = model.token.create_access_token(repository, 'write', 'build-worker')
+ repo = 'ci.devtable.com:5000/%s/%s' % (repository.namespace_user.username, repository.name)
+ job_config = {
+ 'repository': repo,
+ 'docker_tags': ['latest'],
+ 'build_subdir': '',
+ 'trigger_metadata': {
+ 'commit': '3482adc5822c498e8f7db2e361e8d57b3d77ddd9',
+ 'ref': 'refs/heads/master',
+ 'default_branch': 'master'
+ }
+ }
+ build = create_repository_build(repository, new_token, job_config,
+ '68daeebd-a5b9-457f-80a0-4363b882f8ea',
+ "build_name")
+ build.save()
+ return build
diff --git a/data/model/test/test_gc.py b/data/model/test/test_gc.py
new file mode 100644
index 000000000..79d13779e
--- /dev/null
+++ b/data/model/test/test_gc.py
@@ -0,0 +1,725 @@
+import hashlib
+import pytest
+
+from datetime import datetime, timedelta
+
+from mock import patch
+
+from app import storage, docker_v2_signing_key
+
+from contextlib import contextmanager
+from playhouse.test_utils import assert_query_count
+
+from freezegun import freeze_time
+
+from data import model, database
+from data.database import (Image, ImageStorage, DerivedStorageForImage, Label, TagManifestLabel,
+ ApprBlob, Manifest, TagManifestToManifest, ManifestBlob, Tag,
+ TagToRepositoryTag)
+from data.model.oci.test.test_oci_manifest import create_manifest_for_testing
+from image.docker.schema1 import DockerSchema1ManifestBuilder
+from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
+from image.docker.schemas import parse_manifest_from_bytes
+from util.bytes import Bytes
+
+from test.fixtures import *
+
+
+ADMIN_ACCESS_USER = 'devtable'
+PUBLIC_USER = 'public'
+
+REPO = 'somerepo'
+
+def _set_tag_expiration_policy(namespace, expiration_s):
+ namespace_user = model.user.get_user(namespace)
+ model.user.change_user_tag_expiration(namespace_user, expiration_s)
+
+
+@pytest.fixture()
+def default_tag_policy(initialized_db):
+ _set_tag_expiration_policy(ADMIN_ACCESS_USER, 0)
+ _set_tag_expiration_policy(PUBLIC_USER, 0)
+
+
+def create_image(docker_image_id, repository_obj, username):
+ preferred = storage.preferred_locations[0]
+ image = model.image.find_create_or_link_image(docker_image_id, repository_obj, username, {},
+ preferred)
+ image.storage.uploading = False
+ image.storage.save()
+
+ # Create derived images as well.
+ model.image.find_or_create_derived_storage(image, 'squash', preferred)
+ model.image.find_or_create_derived_storage(image, 'aci', preferred)
+
+ # Add some torrent info.
+ try:
+ database.TorrentInfo.get(storage=image.storage)
+ except database.TorrentInfo.DoesNotExist:
+ model.storage.save_torrent_info(image.storage, 1, 'helloworld')
+
+ # Add some additional placements to the image.
+ for location_name in ['local_eu']:
+ location = database.ImageStorageLocation.get(name=location_name)
+
+ try:
+ database.ImageStoragePlacement.get(location=location, storage=image.storage)
+ except:
+ continue
+
+ database.ImageStoragePlacement.create(location=location, storage=image.storage)
+
+ return image.storage
+
+
+def store_tag_manifest(namespace, repo_name, tag_name, image_id):
+ builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
+ storage_id_map = {}
+ try:
+ image_storage = ImageStorage.select().where(~(ImageStorage.content_checksum >> None)).get()
+ builder.add_layer(image_storage.content_checksum, '{"id": "foo"}')
+ storage_id_map[image_storage.content_checksum] = image_storage.id
+ except ImageStorage.DoesNotExist:
+ pass
+
+ manifest = builder.build(docker_v2_signing_key)
+ manifest_row, _ = model.tag.store_tag_manifest_for_testing(namespace, repo_name, tag_name,
+ manifest, image_id, storage_id_map)
+ return manifest_row
+
+
+def create_repository(namespace=ADMIN_ACCESS_USER, name=REPO, **kwargs):
+ user = model.user.get_user(namespace)
+ repo = model.repository.create_repository(namespace, name, user)
+
+ # Populate the repository with the tags.
+ image_map = {}
+ for tag_name in kwargs:
+ image_ids = kwargs[tag_name]
+ parent = None
+
+ for image_id in image_ids:
+ if not image_id in image_map:
+ image_map[image_id] = create_image(image_id, repo, namespace)
+
+ v1_metadata = {
+ 'id': image_id,
+ }
+ if parent is not None:
+ v1_metadata['parent'] = parent.docker_image_id
+
+ # Set the ancestors for the image.
+ parent = model.image.set_image_metadata(image_id, namespace, name, '', '', '', v1_metadata,
+ parent=parent)
+
+ # Set the tag for the image.
+ tag_manifest = store_tag_manifest(namespace, name, tag_name, image_ids[-1])
+
+ # Add some labels to the tag.
+ model.label.create_manifest_label(tag_manifest, 'foo', 'bar', 'manifest')
+ model.label.create_manifest_label(tag_manifest, 'meh', 'grah', 'manifest')
+
+ return repo
+
+
+def gc_now(repository):
+ assert model.gc.garbage_collect_repo(repository)
+
+
+def delete_tag(repository, tag, perform_gc=True, expect_gc=True):
+ model.tag.delete_tag(repository.namespace_user.username, repository.name, tag)
+ if perform_gc:
+ assert model.gc.garbage_collect_repo(repository) == expect_gc
+
+
+def move_tag(repository, tag, docker_image_id, expect_gc=True):
+ model.tag.create_or_update_tag(repository.namespace_user.username, repository.name, tag,
+ docker_image_id)
+ assert model.gc.garbage_collect_repo(repository) == expect_gc
+
+
+def assert_not_deleted(repository, *args):
+ for docker_image_id in args:
+ assert model.image.get_image_by_id(repository.namespace_user.username, repository.name,
+ docker_image_id)
+
+
+def assert_deleted(repository, *args):
+ for docker_image_id in args:
+ try:
+ # Verify the image is missing when accessed by the repository.
+ model.image.get_image_by_id(repository.namespace_user.username, repository.name,
+ docker_image_id)
+ except model.DataModelException:
+ return
+
+ assert False, 'Expected image %s to be deleted' % docker_image_id
+
+
+def _get_dangling_storage_count():
+ storage_ids = set([current.id for current in ImageStorage.select()])
+ referenced_by_image = set([image.storage_id for image in Image.select()])
+ referenced_by_manifest = set([blob.blob_id for blob in ManifestBlob.select()])
+ referenced_by_derived = set([derived.derivative_id
+ for derived in DerivedStorageForImage.select()])
+ return len(storage_ids - referenced_by_image - referenced_by_derived - referenced_by_manifest)
+
+
+def _get_dangling_label_count():
+ return len(_get_dangling_labels())
+
+
+def _get_dangling_labels():
+ label_ids = set([current.id for current in Label.select()])
+ referenced_by_manifest = set([mlabel.label_id for mlabel in TagManifestLabel.select()])
+ return label_ids - referenced_by_manifest
+
+
+def _get_dangling_manifest_count():
+ manifest_ids = set([current.id for current in Manifest.select()])
+ referenced_by_tag_manifest = set([tmt.manifest_id for tmt in TagManifestToManifest.select()])
+ return len(manifest_ids - referenced_by_tag_manifest)
+
+
+@contextmanager
+def assert_gc_integrity(expect_storage_removed=True, check_oci_tags=True):
+ """ Specialized assertion for ensuring that GC cleans up all dangling storages
+ and labels, invokes the callback for images removed and doesn't invoke the
+ callback for images *not* removed.
+ """
+ # Add a callback for when images are removed.
+ removed_image_storages = []
+ model.config.register_image_cleanup_callback(removed_image_storages.extend)
+
+ # Store the number of dangling storages and labels.
+ existing_storage_count = _get_dangling_storage_count()
+ existing_label_count = _get_dangling_label_count()
+ existing_manifest_count = _get_dangling_manifest_count()
+ yield
+
+ # Ensure the number of dangling storages, manifests and labels has not changed.
+ updated_storage_count = _get_dangling_storage_count()
+ assert updated_storage_count == existing_storage_count
+
+ updated_label_count = _get_dangling_label_count()
+ assert updated_label_count == existing_label_count, _get_dangling_labels()
+
+ updated_manifest_count = _get_dangling_manifest_count()
+ assert updated_manifest_count == existing_manifest_count
+
+ # Ensure that for each call to the image+storage cleanup callback, the image and its
+ # storage is not found *anywhere* in the database.
+ for removed_image_and_storage in removed_image_storages:
+ with pytest.raises(Image.DoesNotExist):
+ Image.get(id=removed_image_and_storage.id)
+
+ # Ensure that image storages are only removed if not shared.
+ shared = Image.select().where(Image.storage == removed_image_and_storage.storage_id).count()
+ if shared == 0:
+ shared = (ManifestBlob
+ .select()
+ .where(ManifestBlob.blob == removed_image_and_storage.storage_id)
+ .count())
+
+ if shared == 0:
+ with pytest.raises(ImageStorage.DoesNotExist):
+ ImageStorage.get(id=removed_image_and_storage.storage_id)
+
+ with pytest.raises(ImageStorage.DoesNotExist):
+ ImageStorage.get(uuid=removed_image_and_storage.storage.uuid)
+
+ # Ensure all CAS storage is in the storage engine.
+ preferred = storage.preferred_locations[0]
+ for storage_row in ImageStorage.select():
+ if storage_row.cas_path:
+ storage.get_content({preferred}, storage.blob_path(storage_row.content_checksum))
+
+ for blob_row in ApprBlob.select():
+ storage.get_content({preferred}, storage.blob_path(blob_row.digest))
+
+  # Ensure there are no dangling OCI tags.
+ if check_oci_tags:
+ oci_tags = {t.id for t in Tag.select()}
+ referenced_oci_tags = {t.tag_id for t in TagToRepositoryTag.select()}
+ assert not oci_tags - referenced_oci_tags
+
+ # Ensure all tags have valid manifests.
+ for manifest in {t.manifest for t in Tag.select()}:
+ # Ensure that the manifest's blobs all exist.
+ found_blobs = {b.blob.content_checksum
+ for b in ManifestBlob.select().where(ManifestBlob.manifest == manifest)}
+
+ parsed = parse_manifest_from_bytes(Bytes.for_string_or_unicode(manifest.manifest_bytes),
+ manifest.media_type.name)
+ assert set(parsed.local_blob_digests) == found_blobs
+
+
+def test_has_garbage(default_tag_policy, initialized_db):
+ """ Remove all existing repositories, then add one without garbage, check, then add one with
+ garbage, and check again.
+ """
+ # Delete all existing repos.
+ for repo in database.Repository.select().order_by(database.Repository.id):
+ assert model.gc.purge_repository(repo.namespace_user.username, repo.name)
+
+ # Change the time machine expiration on the namespace.
+ (database.User
+ .update(removed_tag_expiration_s=1000000000)
+ .where(database.User.username == ADMIN_ACCESS_USER)
+ .execute())
+
+ # Create a repository without any garbage.
+ repository = create_repository(latest=['i1', 'i2', 'i3'])
+
+ # Ensure that no repositories are returned by the has garbage check.
+ assert model.repository.find_repository_with_garbage(1000000000) is None
+
+ # Delete a tag.
+ delete_tag(repository, 'latest', perform_gc=False)
+
+ # There should still not be any repositories with garbage, due to time machine.
+ assert model.repository.find_repository_with_garbage(1000000000) is None
+
+ # Change the time machine expiration on the namespace.
+ (database.User
+ .update(removed_tag_expiration_s=0)
+ .where(database.User.username == ADMIN_ACCESS_USER)
+ .execute())
+
+ # Now we should find the repository for GC.
+ repository = model.repository.find_repository_with_garbage(0)
+ assert repository is not None
+ assert repository.name == REPO
+
+ # GC the repository.
+ assert model.gc.garbage_collect_repo(repository)
+
+ # There should now be no repositories with garbage.
+ assert model.repository.find_repository_with_garbage(0) is None
+
+
+def test_find_garbage_policy_functions(default_tag_policy, initialized_db):
+ with assert_query_count(1):
+ one_policy = model.repository.get_random_gc_policy()
+ all_policies = model.repository._get_gc_expiration_policies()
+ assert one_policy in all_policies
+
+
+def test_one_tag(default_tag_policy, initialized_db):
+ """ Create a repository with a single tag, then remove that tag and verify that the repository
+ is now empty. """
+ with assert_gc_integrity():
+ repository = create_repository(latest=['i1', 'i2', 'i3'])
+ delete_tag(repository, 'latest')
+ assert_deleted(repository, 'i1', 'i2', 'i3')
+
+
+def test_two_tags_unshared_images(default_tag_policy, initialized_db):
+ """ Repository has two tags with no shared images between them. """
+ with assert_gc_integrity():
+ repository = create_repository(latest=['i1', 'i2', 'i3'], other=['f1', 'f2'])
+ delete_tag(repository, 'latest')
+ assert_deleted(repository, 'i1', 'i2', 'i3')
+ assert_not_deleted(repository, 'f1', 'f2')
+
+
+def test_two_tags_shared_images(default_tag_policy, initialized_db):
+ """ Repository has two tags with shared images. Deleting the tag should only remove the
+ unshared images.
+ """
+ with assert_gc_integrity():
+ repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
+ delete_tag(repository, 'latest')
+ assert_deleted(repository, 'i2', 'i3')
+ assert_not_deleted(repository, 'i1', 'f1')
+
+
+def test_unrelated_repositories(default_tag_policy, initialized_db):
+ """ Two repositories with different images. Removing the tag from one leaves the other's
+ images intact.
+ """
+ with assert_gc_integrity():
+ repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1')
+ repository2 = create_repository(latest=['j1', 'j2', 'j3'], name='repo2')
+
+ delete_tag(repository1, 'latest')
+
+ assert_deleted(repository1, 'i1', 'i2', 'i3')
+ assert_not_deleted(repository2, 'j1', 'j2', 'j3')
+
+
+def test_related_repositories(default_tag_policy, initialized_db):
+ """ Two repositories with shared images. Removing the tag from one leaves the other's
+ images intact.
+ """
+ with assert_gc_integrity():
+ repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1')
+ repository2 = create_repository(latest=['i1', 'i2', 'j1'], name='repo2')
+
+ delete_tag(repository1, 'latest')
+
+ assert_deleted(repository1, 'i3')
+ assert_not_deleted(repository2, 'i1', 'i2', 'j1')
+
+
+def test_inaccessible_repositories(default_tag_policy, initialized_db):
+  """ Two repositories under different namespaces containing the same image IDs. Removing the
+      tag from one repository should delete its images there, without touching the images in
+      the other namespace.
+  """
+ with assert_gc_integrity():
+ repository1 = create_repository(namespace=ADMIN_ACCESS_USER, latest=['i1', 'i2', 'i3'])
+ repository2 = create_repository(namespace=PUBLIC_USER, latest=['i1', 'i2', 'i3'])
+
+ delete_tag(repository1, 'latest')
+ assert_deleted(repository1, 'i1', 'i2', 'i3')
+ assert_not_deleted(repository2, 'i1', 'i2', 'i3')
+
+
+def test_many_multiple_shared_images(default_tag_policy, initialized_db):
+ """ Repository has multiple tags with shared images. Delete all but one tag.
+ """
+ with assert_gc_integrity():
+ repository = create_repository(latest=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j0'],
+ master=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1'])
+
+ # Delete tag latest. Should only delete j0, since it is not shared.
+ delete_tag(repository, 'latest')
+
+ assert_deleted(repository, 'j0')
+ assert_not_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1')
+
+ # Delete tag master. Should delete the rest of the images.
+ delete_tag(repository, 'master')
+
+ assert_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1')
+
+
+def test_multiple_shared_images(default_tag_policy, initialized_db):
+  """ Repository has multiple tags with shared images. Selectively delete the tags, verifying
+      that the expected images are removed at each step.
+  """
+ with assert_gc_integrity():
+ repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'],
+ third=['t1', 't2', 't3'], fourth=['i1', 'f1'])
+
+ # Current state:
+ # latest -> i3->i2->i1
+ # other -> f2->f1->i1
+ # third -> t3->t2->t1
+ # fourth -> f1->i1
+
+ # Delete tag other. Should delete f2, since it is not shared.
+ delete_tag(repository, 'other')
+ assert_deleted(repository, 'f2')
+ assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1')
+
+ # Current state:
+ # latest -> i3->i2->i1
+ # third -> t3->t2->t1
+ # fourth -> f1->i1
+
+ # Move tag fourth to i3. This should remove f1 since it is no longer referenced.
+ move_tag(repository, 'fourth', 'i3')
+ assert_deleted(repository, 'f1')
+ assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3')
+
+ # Current state:
+ # latest -> i3->i2->i1
+ # third -> t3->t2->t1
+ # fourth -> i3->i2->i1
+
+ # Delete tag 'latest'. This should do nothing since fourth is on the same branch.
+ delete_tag(repository, 'latest')
+ assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3')
+
+ # Current state:
+ # third -> t3->t2->t1
+ # fourth -> i3->i2->i1
+
+ # Delete tag 'third'. This should remove t1->t3.
+ delete_tag(repository, 'third')
+ assert_deleted(repository, 't1', 't2', 't3')
+ assert_not_deleted(repository, 'i1', 'i2', 'i3')
+
+ # Current state:
+ # fourth -> i3->i2->i1
+
+ # Add tag to i1.
+ move_tag(repository, 'newtag', 'i1', expect_gc=False)
+ assert_not_deleted(repository, 'i1', 'i2', 'i3')
+
+ # Current state:
+ # fourth -> i3->i2->i1
+ # newtag -> i1
+
+ # Delete tag 'fourth'. This should remove i2 and i3.
+ delete_tag(repository, 'fourth')
+ assert_deleted(repository, 'i2', 'i3')
+ assert_not_deleted(repository, 'i1')
+
+ # Current state:
+ # newtag -> i1
+
+ # Delete tag 'newtag'. This should remove the remaining image.
+ delete_tag(repository, 'newtag')
+ assert_deleted(repository, 'i1')
+
+ # Current state:
+ # (Empty)
+
+
+def test_empty_gc(default_tag_policy, initialized_db):
+ with assert_gc_integrity(expect_storage_removed=False):
+ repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'],
+ third=['t1', 't2', 't3'], fourth=['i1', 'f1'])
+
+ assert not model.gc.garbage_collect_repo(repository)
+ assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1', 'f2')
+
+
+def test_time_machine_no_gc(default_tag_policy, initialized_db):
+  """ Repository has two tags with shared images. Deleting a tag should not remove any images,
+      due to the time machine expiration policy.
+  """
+ with assert_gc_integrity(expect_storage_removed=False):
+ repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
+ _set_tag_expiration_policy(repository.namespace_user.username, 60*60*24)
+
+ delete_tag(repository, 'latest', expect_gc=False)
+ assert_not_deleted(repository, 'i2', 'i3')
+ assert_not_deleted(repository, 'i1', 'f1')
+
+
+def test_time_machine_gc(default_tag_policy, initialized_db):
+  """ Repository has two tags with shared images. Deleting the second tag, once the time machine
+      window has passed, should cause the images for the first deleted tag to be GCed.
+  """
+ now = datetime.utcnow()
+
+ with assert_gc_integrity():
+ with freeze_time(now):
+ repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
+
+ _set_tag_expiration_policy(repository.namespace_user.username, 1)
+
+ delete_tag(repository, 'latest', expect_gc=False)
+ assert_not_deleted(repository, 'i2', 'i3')
+ assert_not_deleted(repository, 'i1', 'f1')
+
+ with freeze_time(now + timedelta(seconds=2)):
+ # This will cause the images associated with latest to gc
+ delete_tag(repository, 'other')
+ assert_deleted(repository, 'i2', 'i3')
+ assert_not_deleted(repository, 'i1', 'f1')
+
+
+def test_images_shared_storage(default_tag_policy, initialized_db):
+ """ Repository with two tags, both with the same shared storage. Deleting the first
+ tag should delete the first image, but *not* its storage.
+ """
+ with assert_gc_integrity(expect_storage_removed=False):
+ repository = create_repository()
+
+ # Add two tags, each with their own image, but with the same storage.
+ image_storage = model.storage.create_v1_storage(storage.preferred_locations[0])
+
+ first_image = Image.create(docker_image_id='i1',
+ repository=repository, storage=image_storage,
+ ancestors='/')
+
+ second_image = Image.create(docker_image_id='i2',
+ repository=repository, storage=image_storage,
+ ancestors='/')
+
+ store_tag_manifest(repository.namespace_user.username, repository.name,
+ 'first', first_image.docker_image_id)
+
+ store_tag_manifest(repository.namespace_user.username, repository.name,
+ 'second', second_image.docker_image_id)
+
+ # Delete the first tag.
+ delete_tag(repository, 'first')
+ assert_deleted(repository, 'i1')
+ assert_not_deleted(repository, 'i2')
+
+
+def test_image_with_cas(default_tag_policy, initialized_db):
+ """ A repository with a tag pointing to an image backed by CAS. Deleting and GCing the tag
+ should result in the storage and its CAS data being removed.
+ """
+ with assert_gc_integrity(expect_storage_removed=True):
+ repository = create_repository()
+
+ # Create an image storage record under CAS.
+ content = 'hello world'
+ digest = 'sha256:' + hashlib.sha256(content).hexdigest()
+ preferred = storage.preferred_locations[0]
+ storage.put_content({preferred}, storage.blob_path(digest), content)
+
+ image_storage = database.ImageStorage.create(content_checksum=digest, uploading=False)
+ location = database.ImageStorageLocation.get(name=preferred)
+ database.ImageStoragePlacement.create(location=location, storage=image_storage)
+
+ # Ensure the CAS path exists.
+ assert storage.exists({preferred}, storage.blob_path(digest))
+
+ # Create the image and the tag.
+ first_image = Image.create(docker_image_id='i1',
+ repository=repository, storage=image_storage,
+ ancestors='/')
+
+ store_tag_manifest(repository.namespace_user.username, repository.name,
+ 'first', first_image.docker_image_id)
+
+ assert_not_deleted(repository, 'i1')
+
+ # Delete the tag.
+ delete_tag(repository, 'first')
+ assert_deleted(repository, 'i1')
+
+ # Ensure the CAS path is gone.
+ assert not storage.exists({preferred}, storage.blob_path(digest))
+
+
+def test_images_shared_cas(default_tag_policy, initialized_db):
+  """ A repository with two tags, each pointing to its own image, whose image storages are
+      *distinct records* sharing the same *CAS path*. Deleting the first tag should delete the
+      first image and its storage record, but not the file in storage, as the CAS path is shared.
+  """
+ with assert_gc_integrity(expect_storage_removed=True):
+ repository = create_repository()
+
+ # Create two image storage records with the same content checksum.
+ content = 'hello world'
+ digest = 'sha256:' + hashlib.sha256(content).hexdigest()
+ preferred = storage.preferred_locations[0]
+ storage.put_content({preferred}, storage.blob_path(digest), content)
+
+ is1 = database.ImageStorage.create(content_checksum=digest, uploading=False)
+ is2 = database.ImageStorage.create(content_checksum=digest, uploading=False)
+
+ location = database.ImageStorageLocation.get(name=preferred)
+
+ database.ImageStoragePlacement.create(location=location, storage=is1)
+ database.ImageStoragePlacement.create(location=location, storage=is2)
+
+ # Ensure the CAS path exists.
+ assert storage.exists({preferred}, storage.blob_path(digest))
+
+ # Create two images in the repository, and two tags, each pointing to one of the storages.
+ first_image = Image.create(docker_image_id='i1',
+ repository=repository, storage=is1,
+ ancestors='/')
+
+ second_image = Image.create(docker_image_id='i2',
+ repository=repository, storage=is2,
+ ancestors='/')
+
+ store_tag_manifest(repository.namespace_user.username, repository.name,
+ 'first', first_image.docker_image_id)
+
+ store_tag_manifest(repository.namespace_user.username, repository.name,
+ 'second', second_image.docker_image_id)
+
+ assert_not_deleted(repository, 'i1', 'i2')
+
+ # Delete the first tag.
+ delete_tag(repository, 'first')
+ assert_deleted(repository, 'i1')
+ assert_not_deleted(repository, 'i2')
+
+ # Ensure the CAS path still exists.
+ assert storage.exists({preferred}, storage.blob_path(digest))
+
+
+def test_images_shared_cas_with_new_blob_table(default_tag_policy, initialized_db):
+ """ A repository with a tag and image that shares its CAS path with a record in the new Blob
+ table. Deleting the first tag should delete the first image, and its storage, but not the
+ file in storage, as it shares its CAS path with the blob row.
+ """
+ with assert_gc_integrity(expect_storage_removed=True):
+ repository = create_repository()
+
+ # Create two image storage records with the same content checksum.
+ content = 'hello world'
+ digest = 'sha256:' + hashlib.sha256(content).hexdigest()
+ preferred = storage.preferred_locations[0]
+ storage.put_content({preferred}, storage.blob_path(digest), content)
+
+ media_type = database.MediaType.get(name='text/plain')
+
+ is1 = database.ImageStorage.create(content_checksum=digest, uploading=False)
+ database.ApprBlob.create(digest=digest, size=0, media_type=media_type)
+
+ location = database.ImageStorageLocation.get(name=preferred)
+ database.ImageStoragePlacement.create(location=location, storage=is1)
+
+ # Ensure the CAS path exists.
+ assert storage.exists({preferred}, storage.blob_path(digest))
+
+ # Create the image in the repository, and the tag.
+ first_image = Image.create(docker_image_id='i1',
+ repository=repository, storage=is1,
+ ancestors='/')
+
+ store_tag_manifest(repository.namespace_user.username, repository.name,
+ 'first', first_image.docker_image_id)
+
+ assert_not_deleted(repository, 'i1')
+
+ # Delete the tag.
+ delete_tag(repository, 'first')
+ assert_deleted(repository, 'i1')
+
+ # Ensure the CAS path still exists, as it is referenced by the Blob table
+ assert storage.exists({preferred}, storage.blob_path(digest))
+
+
+def test_purge_repo(app):
+ """ Test that app registers delete_metadata function on repository deletions """
+ with assert_gc_integrity():
+ with patch('app.tuf_metadata_api') as mock_tuf:
+ model.gc.purge_repository("ns", "repo")
+ assert mock_tuf.delete_metadata.called_with("ns", "repo")
+
+
+def test_super_long_image_chain_gc(app, default_tag_policy):
+ """ Test that a super long chain of images all gets properly GCed. """
+ with assert_gc_integrity():
+ images = ['i%s' % i for i in range(0, 100)]
+ repository = create_repository(latest=images)
+ delete_tag(repository, 'latest')
+
+ # Ensure the repository is now empty.
+ assert_deleted(repository, *images)
+
+
+def test_manifest_v2_shared_config_and_blobs(app, default_tag_policy):
+ """ Test that GCing a tag that refers to a V2 manifest with the same config and some shared
+ blobs as another manifest ensures that the config blob and shared blob are NOT GCed.
+ """
+ repo = model.repository.create_repository('devtable', 'newrepo', None)
+ manifest1, built1 = create_manifest_for_testing(repo, differentiation_field='1',
+ include_shared_blob=True)
+ manifest2, built2 = create_manifest_for_testing(repo, differentiation_field='2',
+ include_shared_blob=True)
+
+ assert set(built1.local_blob_digests).intersection(built2.local_blob_digests)
+ assert built1.config.digest == built2.config.digest
+
+ # Create tags pointing to the manifests.
+ model.oci.tag.retarget_tag('tag1', manifest1)
+ model.oci.tag.retarget_tag('tag2', manifest2)
+
+ with assert_gc_integrity(expect_storage_removed=True, check_oci_tags=False):
+ # Delete tag2.
+ model.oci.tag.delete_tag(repo, 'tag2')
+ assert model.gc.garbage_collect_repo(repo)
+
+ # Ensure the blobs for manifest1 still all exist.
+ preferred = storage.preferred_locations[0]
+ for blob_digest in built1.local_blob_digests:
+ storage_row = ImageStorage.get(content_checksum=blob_digest)
+
+ assert storage_row.cas_path
+ storage.get_content({preferred}, storage.blob_path(storage_row.content_checksum))
diff --git a/data/model/test/test_image.py b/data/model/test/test_image.py
new file mode 100644
index 000000000..9442a23eb
--- /dev/null
+++ b/data/model/test/test_image.py
@@ -0,0 +1,104 @@
+import pytest
+
+from collections import defaultdict
+from data.model import image, repository
+from playhouse.test_utils import assert_query_count
+
+from test.fixtures import *
+
+@pytest.fixture()
+def images(initialized_db):
+ images = image.get_repository_images('devtable', 'simple')
+ assert len(images)
+ return images
+
+
+def test_get_image_with_storage(images, initialized_db):
+ for current in images:
+ storage_uuid = current.storage.uuid
+
+ with assert_query_count(1):
+ retrieved = image.get_image_with_storage(current.docker_image_id, storage_uuid)
+ assert retrieved.id == current.id
+ assert retrieved.storage.uuid == storage_uuid
+
+
+def test_get_parent_images(images, initialized_db):
+ for current in images:
+ if not len(current.ancestor_id_list()):
+ continue
+
+ with assert_query_count(1):
+ parent_images = list(image.get_parent_images('devtable', 'simple', current))
+
+ assert len(parent_images) == len(current.ancestor_id_list())
+ assert set(current.ancestor_id_list()) == {i.id for i in parent_images}
+
+ for parent in parent_images:
+ with assert_query_count(0):
+ assert parent.storage.id
+
+
+def test_get_image(images, initialized_db):
+ for current in images:
+ repo = current.repository
+
+ with assert_query_count(1):
+ found = image.get_image(repo, current.docker_image_id)
+
+ assert found.id == current.id
+
+
+def test_placements(images, initialized_db):
+ with assert_query_count(1):
+ placements_map = image.get_placements_for_images(images)
+
+ for current in images:
+ assert current.storage.id in placements_map
+
+ with assert_query_count(2):
+ expected_image, expected_placements = image.get_image_and_placements('devtable', 'simple',
+ current.docker_image_id)
+
+ assert expected_image.id == current.id
+ assert len(expected_placements) == len(placements_map.get(current.storage.id))
+ assert ({p.id for p in expected_placements} ==
+ {p.id for p in placements_map.get(current.storage.id)})
+
+
+def test_get_repo_image(images, initialized_db):
+ for current in images:
+ with assert_query_count(1):
+ found = image.get_repo_image('devtable', 'simple', current.docker_image_id)
+
+ assert found.id == current.id
+ with assert_query_count(1):
+ assert found.storage.id
+
+
+def test_get_repo_image_and_storage(images, initialized_db):
+ for current in images:
+ with assert_query_count(1):
+ found = image.get_repo_image_and_storage('devtable', 'simple', current.docker_image_id)
+
+ assert found.id == current.id
+ with assert_query_count(0):
+ assert found.storage.id
+
+
+def test_get_repository_images_without_placements(images, initialized_db):
+ ancestors_map = defaultdict(list)
+ for img in images:
+ current = img.parent
+ while current is not None:
+ ancestors_map[current.id].append(img.id)
+ current = current.parent
+
+ for current in images:
+ repo = current.repository
+
+ with assert_query_count(1):
+ found = list(image.get_repository_images_without_placements(repo, with_ancestor=current))
+
+ assert len(found) == len(ancestors_map[current.id]) + 1
+ assert {i.id for i in found} == set(ancestors_map[current.id] + [current.id])
diff --git a/data/model/test/test_image_sharing.py b/data/model/test/test_image_sharing.py
new file mode 100644
index 000000000..239500b10
--- /dev/null
+++ b/data/model/test/test_image_sharing.py
@@ -0,0 +1,215 @@
+import pytest
+
+from data import model
+
+from storage.distributedstorage import DistributedStorage
+from storage.fakestorage import FakeStorage
+from test.fixtures import *
+
+NO_ACCESS_USER = 'freshuser'
+READ_ACCESS_USER = 'reader'
+ADMIN_ACCESS_USER = 'devtable'
+PUBLIC_USER = 'public'
+RANDOM_USER = 'randomuser'
+OUTSIDE_ORG_USER = 'outsideorg'
+
+ADMIN_ROBOT_USER = 'devtable+dtrobot'
+
+ORGANIZATION = 'buynlarge'
+
+REPO = 'devtable/simple'
+PUBLIC_REPO = 'public/publicrepo'
+RANDOM_REPO = 'randomuser/randomrepo'
+
+OUTSIDE_ORG_REPO = 'outsideorg/coolrepo'
+
+ORG_REPO = 'buynlarge/orgrepo'
+ANOTHER_ORG_REPO = 'buynlarge/anotherorgrepo'
+
+# Note: The shared repo has devtable as admin, public as a writer and reader as a reader.
+SHARED_REPO = 'devtable/shared'
+
+
+@pytest.fixture()
+def storage(app):
+ return DistributedStorage({'local_us': FakeStorage(None)}, preferred_locations=['local_us'])
+
+
+def createStorage(storage, docker_image_id, repository=REPO, username=ADMIN_ACCESS_USER):
+ repository_obj = model.repository.get_repository(repository.split('/')[0],
+ repository.split('/')[1])
+ preferred = storage.preferred_locations[0]
+ image = model.image.find_create_or_link_image(docker_image_id, repository_obj, username, {},
+ preferred)
+ image.storage.uploading = False
+ image.storage.save()
+ return image.storage
+
+
+def assertSameStorage(storage, docker_image_id, existing_storage, repository=REPO,
+ username=ADMIN_ACCESS_USER):
+ new_storage = createStorage(storage, docker_image_id, repository, username)
+ assert existing_storage.id == new_storage.id
+
+
+def assertDifferentStorage(storage, docker_image_id, existing_storage, repository=REPO,
+ username=ADMIN_ACCESS_USER):
+ new_storage = createStorage(storage, docker_image_id, repository, username)
+ assert existing_storage.id != new_storage.id
+
+
+def test_same_user(storage, initialized_db):
+  """ The same user creates two images, each of which should be shared within the same repo.
+      This is a sanity check. """
+
+ # Create a reference to a new docker ID => new image.
+ first_storage_id = createStorage(storage, 'first-image')
+
+ # Create a reference to the same docker ID => same image.
+ assertSameStorage(storage, 'first-image', first_storage_id)
+
+ # Create a reference to another new docker ID => new image.
+ second_storage_id = createStorage(storage, 'second-image')
+
+ # Create a reference to that same docker ID => same image.
+ assertSameStorage(storage, 'second-image', second_storage_id)
+
+ # Make sure the images are different.
+ assert first_storage_id != second_storage_id
+
+
+def test_no_user_private_repo(storage, initialized_db):
+  """ If no user is specified (usually the token case), then no sharing can occur on a private repo. """
+ # Create a reference to a new docker ID => new image.
+ first_storage = createStorage(storage, 'the-image', username=None, repository=SHARED_REPO)
+
+  # Create a reference to the same docker ID, but since there is no username => new image.
+ assertDifferentStorage(storage, 'the-image', first_storage, username=None, repository=RANDOM_REPO)
+
+
+def test_no_user_public_repo(storage, initialized_db):
+  """ If no user is specified (usually the token case), sharing can still occur when the source
+      image lives in a public repo. """
+ # Create a reference to a new docker ID => new image.
+ first_storage = createStorage(storage, 'the-image', username=None, repository=PUBLIC_REPO)
+
+  # Create a reference to the same docker ID. With no username we'd expect a different image,
+  # but the first image is public, so => shared image.
+ assertSameStorage(storage, 'the-image', first_storage, username=None, repository=RANDOM_REPO)
+
+
+def test_different_user_same_repo(storage, initialized_db):
+ """ Two different users create the same image in the same repo. """
+
+ # Create a reference to a new docker ID under the first user => new image.
+ first_storage = createStorage(storage, 'the-image', username=PUBLIC_USER, repository=SHARED_REPO)
+
+ # Create a reference to the *same* docker ID under the second user => same image.
+ assertSameStorage(storage, 'the-image', first_storage, username=ADMIN_ACCESS_USER, repository=SHARED_REPO)
+
+
+def test_different_repo_no_shared_access(storage, initialized_db):
+ """ Neither user has access to the other user's repository. """
+
+ # Create a reference to a new docker ID under the first user => new image.
+ first_storage_id = createStorage(storage, 'the-image', username=RANDOM_USER, repository=RANDOM_REPO)
+
+ # Create a reference to the *same* docker ID under the second user => new image.
+ second_storage_id = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=REPO)
+
+ # Verify that the users do not share storage.
+ assert first_storage_id != second_storage_id
+
+
+def test_public_than_private(storage, initialized_db):
+ """ An image is created publicly then used privately, so it should be shared. """
+
+ # Create a reference to a new docker ID under the first user => new image.
+ first_storage = createStorage(storage, 'the-image', username=PUBLIC_USER, repository=PUBLIC_REPO)
+
+ # Create a reference to the *same* docker ID under the second user => same image, since the first was public.
+ assertSameStorage(storage, 'the-image', first_storage, username=ADMIN_ACCESS_USER, repository=REPO)
+
+
+def test_private_than_public(storage, initialized_db):
+ """ An image is created privately then used publicly, so it should *not* be shared. """
+
+ # Create a reference to a new docker ID under the first user => new image.
+ first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=REPO)
+
+ # Create a reference to the *same* docker ID under the second user => new image, since the first was private.
+ assertDifferentStorage(storage, 'the-image', first_storage, username=PUBLIC_USER, repository=PUBLIC_REPO)
+
+
+def test_different_repo_with_access(storage, initialized_db):
+ """ An image is created in one repo (SHARED_REPO) which the user (PUBLIC_USER) has access to. Later, the
+ image is created in another repo (PUBLIC_REPO) that the user also has access to. The image should
+ be shared since the user has access.
+ """
+ # Create the image in the shared repo => new image.
+ first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=SHARED_REPO)
+
+ # Create the image in the other user's repo, but since the user (PUBLIC) still has access to the shared
+ # repository, they should reuse the storage.
+ assertSameStorage(storage, 'the-image', first_storage, username=PUBLIC_USER, repository=PUBLIC_REPO)
+
+
+def test_org_access(storage, initialized_db):
+  """ An image is accessible to a user because they are a member of the organization. """
+
+ # Create the new image under the org's repo => new image.
+ first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
+
+ # Create an image under the user's repo, but since the user has access to the organization => shared image.
+ assertSameStorage(storage, 'the-image', first_storage, username=ADMIN_ACCESS_USER, repository=REPO)
+
+ # Ensure that the user's robot does not have access, since it is not on the permissions list for the repo.
+ assertDifferentStorage(storage, 'the-image', first_storage, username=ADMIN_ROBOT_USER, repository=SHARED_REPO)
+
+
+def test_org_access_different_user(storage, initialized_db):
+ """ An image is accessible by being a member of the organization. """
+
+ # Create the new image under the org's repo => new image.
+ first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
+
+ # Create an image under a user's repo, but since the user has access to the organization => shared image.
+ assertSameStorage(storage, 'the-image', first_storage, username=PUBLIC_USER, repository=PUBLIC_REPO)
+
+ # Also verify for reader.
+ assertSameStorage(storage, 'the-image', first_storage, username=READ_ACCESS_USER, repository=PUBLIC_REPO)
+
+
+def test_org_no_access(storage, initialized_db):
+ """ An image is not accessible if not a member of the organization. """
+
+ # Create the new image under the org's repo => new image.
+ first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
+
+ # Create an image under a user's repo. Since the user is not a member of the organization => new image.
+ assertDifferentStorage(storage, 'the-image', first_storage, username=RANDOM_USER, repository=RANDOM_REPO)
+
+
+def test_org_not_team_member_with_access(storage, initialized_db):
+ """ An image is accessible to a user specifically listed as having permission on the org repo. """
+
+ # Create the new image under the org's repo => new image.
+ first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ORG_REPO)
+
+ # Create an image under a user's repo. Since the user has read access on that repo, they can see the image => shared image.
+ assertSameStorage(storage, 'the-image', first_storage, username=OUTSIDE_ORG_USER, repository=OUTSIDE_ORG_REPO)
+
+
+def test_org_not_team_member_with_no_access(storage, initialized_db):
+ """ A user that has access to one org repo but not another and is not a team member. """
+
+ # Create the new image under the org's repo => new image.
+ first_storage = createStorage(storage, 'the-image', username=ADMIN_ACCESS_USER, repository=ANOTHER_ORG_REPO)
+
+ # Create an image under a user's repo. The user doesn't have access to the repo (ANOTHER_ORG_REPO) so => new image.
+ assertDifferentStorage(storage, 'the-image', first_storage, username=OUTSIDE_ORG_USER, repository=OUTSIDE_ORG_REPO)
+
+def test_no_link_to_uploading(storage, initialized_db):
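+  # Storage that is still marked as uploading must never be reused, since its contents are not yet complete.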
+ still_uploading = createStorage(storage, 'an-image', repository=PUBLIC_REPO)
+ still_uploading.uploading = True
+ still_uploading.save()
+
+ assertDifferentStorage(storage, 'an-image', still_uploading)
diff --git a/data/model/test/test_log.py b/data/model/test/test_log.py
new file mode 100644
index 000000000..7ced0bb91
--- /dev/null
+++ b/data/model/test/test_log.py
@@ -0,0 +1,80 @@
+import pytest
+
+from data.database import LogEntry3, User
+from data.model import config as _config
+from data.model.log import log_action
+
+from mock import patch, Mock, DEFAULT, sentinel
+from peewee import PeeweeException
+
+
+@pytest.fixture(scope='function')
+def app_config():
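+  # Give each test an isolated, empty app_config; patch.dict(..., clear=True) restores the original afterwards.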
+ with patch.dict(_config.app_config, {}, clear=True):
+ yield _config.app_config
+
+@pytest.fixture()
+def logentry_kind():
+ kinds = {'pull_repo': 'pull_repo_kind', 'push_repo': 'push_repo_kind'}
+ with patch('data.model.log.get_log_entry_kinds', return_value=kinds, spec=True):
+ yield kinds
+
+@pytest.fixture()
+def logentry(logentry_kind):
+ with patch('data.database.LogEntry3.create', spec=True):
+ yield LogEntry3
+
+@pytest.fixture()
+def user():
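+  # Stub out User lookups: `get` serves the lookup-by-name path (yielding 'mock_user_id') and
+  # `select` serves the fallback default-account path (yielding 'default_user_id').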
+ with patch.multiple('data.database.User', username=DEFAULT, get=DEFAULT, select=DEFAULT) as user:
+ user['get'].return_value = Mock(id='mock_user_id')
+ user['select'].return_value.tuples.return_value.get.return_value = ['default_user_id']
+ yield User
+
+@pytest.mark.parametrize('action_kind', [('pull'), ('oops')])
+def test_log_action_unknown_action(action_kind):
+ ''' test unknown action types throw an exception when logged '''
+ with pytest.raises(Exception):
+ log_action(action_kind, None)
+
+
+@pytest.mark.parametrize('user_or_org_name,account_id,account', [
+ ('my_test_org', 'N/A', 'mock_user_id' ),
+ (None, 'test_account_id', 'test_account_id'),
+ (None, None, 'default_user_id')
+])
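+# ALLOW_PULLS_WITHOUT_STRICT_LOGGING only suppresses database (Peewee) errors, and only for pull
+# events; push events and non-database exceptions are still expected to propagate.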
+@pytest.mark.parametrize('unlogged_pulls_ok,action_kind,db_exception,throws', [
+ (False, 'pull_repo', None, False),
+ (False, 'push_repo', None, False),
+ (False, 'pull_repo', PeeweeException, True ),
+ (False, 'push_repo', PeeweeException, True ),
+
+ (True, 'pull_repo', PeeweeException, False),
+ (True, 'push_repo', PeeweeException, True ),
+ (True, 'pull_repo', Exception, True ),
+ (True, 'push_repo', Exception, True )
+])
+def test_log_action(user_or_org_name, account_id, account, unlogged_pulls_ok, action_kind,
+ db_exception, throws, app_config, logentry, user):
+ log_args = {
+ 'performer' : Mock(id='TEST_PERFORMER_ID'),
+ 'repository' : Mock(id='TEST_REPO'),
+ 'ip' : 'TEST_IP',
+ 'metadata' : { 'test_key' : 'test_value' },
+ 'timestamp' : 'TEST_TIMESTAMP'
+ }
+ app_config['SERVICE_LOG_ACCOUNT_ID'] = account_id
+ app_config['ALLOW_PULLS_WITHOUT_STRICT_LOGGING'] = unlogged_pulls_ok
+
+ logentry.create.side_effect = db_exception
+
+ if throws:
+ with pytest.raises(db_exception):
+ log_action(action_kind, user_or_org_name, **log_args)
+ else:
+ log_action(action_kind, user_or_org_name, **log_args)
+
+ logentry.create.assert_called_once_with(kind=action_kind+'_kind', account=account,
+ performer='TEST_PERFORMER_ID', repository='TEST_REPO',
+ ip='TEST_IP', metadata_json='{"test_key": "test_value"}',
+ datetime='TEST_TIMESTAMP')
diff --git a/data/model/test/test_model_blob.py b/data/model/test/test_model_blob.py
new file mode 100644
index 000000000..b6053b353
--- /dev/null
+++ b/data/model/test/test_model_blob.py
@@ -0,0 +1,51 @@
+from app import storage
+from data import model, database
+
+from test.fixtures import *
+
+ADMIN_ACCESS_USER = 'devtable'
+REPO = 'simple'
+
+def test_store_blob(initialized_db):
+ location = database.ImageStorageLocation.select().get()
+
+ # Create a new blob at a unique digest.
+ digest = 'somecooldigest'
+ blob_storage = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, digest,
+ location, 1024, 0, 5000)
+ assert blob_storage.content_checksum == digest
+ assert blob_storage.image_size == 1024
+ assert blob_storage.uncompressed_size == 5000
+
+ # Link to the same digest.
+ blob_storage2 = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, digest,
+ location, 2048, 0, 6000)
+ assert blob_storage2.id == blob_storage.id
+
+ # The sizes should be unchanged.
+ assert blob_storage2.image_size == 1024
+ assert blob_storage2.uncompressed_size == 5000
+
+ # Add a new digest, ensure it has a new record.
+ otherdigest = 'anotherdigest'
+ blob_storage3 = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, otherdigest,
+ location, 1234, 0, 5678)
+ assert blob_storage3.id != blob_storage.id
+ assert blob_storage3.image_size == 1234
+ assert blob_storage3.uncompressed_size == 5678
+
+
+def test_get_or_create_shared_blob(initialized_db):
+ shared = model.blob.get_or_create_shared_blob('sha256:abcdef', 'somecontent', storage)
+ assert shared.content_checksum == 'sha256:abcdef'
+
+ again = model.blob.get_or_create_shared_blob('sha256:abcdef', 'somecontent', storage)
+ assert shared == again
+
+
+def test_lookup_repo_storages_by_content_checksum(initialized_db):
+ for image in database.Image.select():
+ found = model.storage.lookup_repo_storages_by_content_checksum(image.repository,
+ [image.storage.content_checksum])
+ assert len(found) == 1
+ assert found[0].content_checksum == image.storage.content_checksum
diff --git a/data/model/test/test_modelutil.py b/data/model/test/test_modelutil.py
new file mode 100644
index 000000000..5da72be4a
--- /dev/null
+++ b/data/model/test/test_modelutil.py
@@ -0,0 +1,50 @@
+import pytest
+
+from data.database import Role
+from data.model.modelutil import paginate
+from test.fixtures import *
+
+@pytest.mark.parametrize('page_size', [
+ 10,
+ 20,
+ 50,
+ 100,
+ 200,
+ 500,
+ 1000,
+])
+@pytest.mark.parametrize('descending', [
+ False,
+ True,
+])
+def test_paginate(page_size, descending, initialized_db):
+ # Add a bunch of rows into a test table (`Role`).
+ for i in range(0, 522):
+ Role.create(name='testrole%s' % i)
+
+ query = Role.select().where(Role.name ** 'testrole%')
+ all_matching_roles = list(query)
+ assert len(all_matching_roles) == 522
+
+ # Paginate a query to lookup roles.
+ collected = []
+ page_token = None
+ while True:
+ results, page_token = paginate(query, Role, limit=page_size, descending=descending,
+ page_token=page_token)
+ assert len(results) <= page_size
+ collected.extend(results)
+
+ if page_token is None:
+ break
+
+ assert len(results) == page_size
+
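+    # Rows within a page must be ordered by id in the requested direction.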
+ for index, result in enumerate(results[1:]):
+ if descending:
+ assert result.id < results[index].id
+ else:
+ assert result.id > results[index].id
+
+ assert len(collected) == len(all_matching_roles)
+ assert {c.id for c in collected} == {a.id for a in all_matching_roles}
diff --git a/data/model/test/test_organization.py b/data/model/test/test_organization.py
new file mode 100644
index 000000000..153814765
--- /dev/null
+++ b/data/model/test/test_organization.py
@@ -0,0 +1,22 @@
+import pytest
+
+from data.model.organization import get_organization, get_organizations
+from data.model.user import mark_namespace_for_deletion
+from data.queue import WorkQueue
+from test.fixtures import *
+
+@pytest.mark.parametrize('deleted', [
+ (True),
+ (False),
+])
+def test_get_organizations(deleted, initialized_db):
+ # Delete an org.
+ deleted_org = get_organization('sellnsmall')
+ queue = WorkQueue('testgcnamespace', lambda db: db.transaction())
+ mark_namespace_for_deletion(deleted_org, [], queue)
+
+ orgs = get_organizations(deleted=deleted)
+ assert orgs
+
+ deleted_found = [org for org in orgs if org.id == deleted_org.id]
+ assert bool(deleted_found) == deleted
diff --git a/data/model/test/test_repo_mirroring.py b/data/model/test/test_repo_mirroring.py
new file mode 100644
index 000000000..6a3f808e3
--- /dev/null
+++ b/data/model/test/test_repo_mirroring.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from datetime import datetime, timedelta
+
+import pytest
+
+from jsonschema import ValidationError
+
+from data.database import RepoMirrorConfig, RepoMirrorStatus, User
+from data import model
+from data.model.repo_mirror import (create_mirroring_rule, get_eligible_mirrors, update_sync_status_to_cancel,
+                                    MAX_SYNC_RETRIES, release_mirror, enable_mirroring_for_repository)
+from data.model.repository import create_repository
+from data.model.user import create_robot, create_user_noverify, lookup_robot
+
+from test.fixtures import *
+
+
+def create_mirror_repo_robot(rules, repo_name="repo"):
+ try:
+ user = User.get(User.username == "mirror")
+ except User.DoesNotExist:
+ user = create_user_noverify("mirror", "mirror@example.com", email_required=False)
+
+ try:
+ robot = lookup_robot("mirror+robot")
+ except model.InvalidRobotException:
+ robot, _ = create_robot("robot", user)
+
+ repo = create_repository("mirror", repo_name, None, repo_kind="image", visibility="public")
+ repo.save()
+
+ rule = model.repo_mirror.create_mirroring_rule(repo, rules)
+
+ mirror_kwargs = {
+ "repository": repo,
+ "root_rule": rule,
+ "internal_robot": robot,
+ "external_reference": "registry.example.com/namespace/repository",
+ "sync_interval": timedelta(days=1).total_seconds()
+ }
+ mirror = enable_mirroring_for_repository(**mirror_kwargs)
+ mirror.sync_status = RepoMirrorStatus.NEVER_RUN
+ mirror.sync_start_date = datetime.utcnow() - timedelta(days=1)
+ mirror.sync_retries_remaining = 3
+ mirror.save()
+
+ return (mirror, repo)
+
+
+def disable_existing_mirrors():
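+  # The test fixtures ship with pre-configured mirrors (e.g. devtable/mirrored); disable them so
+  # that only the mirrors created by each test are considered eligible.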
+ mirrors = RepoMirrorConfig.select().execute()
+ for mirror in mirrors:
+ mirror.is_enabled = False
+ mirror.save()
+
+
+def test_eligible_oldest_first(initialized_db):
+ """
+ Eligible mirror candidates should be returned with the oldest (earliest created) first.
+ """
+
+ disable_existing_mirrors()
+ mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first")
+ mirror_second, repo_second = create_mirror_repo_robot(["updated", "created"], repo_name="second")
+ mirror_third, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="third")
+
+ candidates = get_eligible_mirrors()
+
+ assert len(candidates) == 3
+ assert candidates[0] == mirror_first
+ assert candidates[1] == mirror_second
+ assert candidates[2] == mirror_third
+
+
+def test_eligible_includes_expired_syncing(initialized_db):
+ """
+  Mirrors whose sync expiration date is in the past are eligible even if their status still indicates SYNCING.
+ """
+
+ disable_existing_mirrors()
+ mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first")
+ mirror_second, repo_second = create_mirror_repo_robot(["updated", "created"], repo_name="second")
+ mirror_third, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="third")
+ mirror_fourth, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="fourth")
+
+ mirror_second.sync_expiration_date = datetime.utcnow() - timedelta(hours=1)
+ mirror_second.sync_status = RepoMirrorStatus.SYNCING
+ mirror_second.save()
+
+ mirror_fourth.sync_expiration_date = datetime.utcnow() + timedelta(hours=1)
+ mirror_fourth.sync_status = RepoMirrorStatus.SYNCING
+ mirror_fourth.save()
+
+ candidates = get_eligible_mirrors()
+
+ assert len(candidates) == 3
+ assert candidates[0] == mirror_first
+ assert candidates[1] == mirror_second
+ assert candidates[2] == mirror_third
+
+
+def test_eligible_includes_immediate(initialized_db):
+ """
+  Mirrors in the SYNC_NOW state are eligible regardless of their scheduled start time.
+ """
+
+ disable_existing_mirrors()
+ mirror_first, repo_first = create_mirror_repo_robot(["updated", "created"], repo_name="first")
+ mirror_second, repo_second = create_mirror_repo_robot(["updated", "created"], repo_name="second")
+ mirror_third, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="third")
+ mirror_fourth, repo_third = create_mirror_repo_robot(["updated", "created"], repo_name="fourth")
+ mirror_future, _ = create_mirror_repo_robot(["updated", "created"], repo_name="future")
+ mirror_past, _ = create_mirror_repo_robot(["updated", "created"], repo_name="past")
+
+ mirror_future.sync_start_date = datetime.utcnow() + timedelta(hours=6)
+ mirror_future.sync_status = RepoMirrorStatus.SYNC_NOW
+ mirror_future.save()
+
+ mirror_past.sync_start_date = datetime.utcnow() - timedelta(hours=6)
+ mirror_past.sync_status = RepoMirrorStatus.SYNC_NOW
+ mirror_past.save()
+
+ mirror_fourth.sync_expiration_date = datetime.utcnow() + timedelta(hours=1)
+ mirror_fourth.sync_status = RepoMirrorStatus.SYNCING
+ mirror_fourth.save()
+
+ candidates = get_eligible_mirrors()
+
+ assert len(candidates) == 5
+ assert candidates[0] == mirror_first
+ assert candidates[1] == mirror_second
+ assert candidates[2] == mirror_third
+ assert candidates[3] == mirror_past
+ assert candidates[4] == mirror_future
+
+
+def test_create_rule_validations(initialized_db):
+ mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="first")
+
+ with pytest.raises(ValidationError):
+ create_mirroring_rule(repo, None)
+
+ with pytest.raises(ValidationError):
+ create_mirroring_rule(repo, "['tag1', 'tag2']")
+
+ with pytest.raises(ValidationError):
+ create_mirroring_rule(repo, ['tag1', 'tag2'], rule_type=None)
+
+
+def test_long_registry_passwords(initialized_db):
+ """
+  Verify that long passwords, such as the Base64-encoded JWTs used by Red Hat's registry, work as expected.
+ """
+ MAX_PASSWORD_LENGTH = 1024
+
+ username = ''.join('a' for _ in range(MAX_PASSWORD_LENGTH))
+ password = ''.join('b' for _ in range(MAX_PASSWORD_LENGTH))
+ assert len(username) == MAX_PASSWORD_LENGTH
+ assert len(password) == MAX_PASSWORD_LENGTH
+
+ repo = model.repository.get_repository('devtable', 'mirrored')
+ assert repo
+
+ existing_mirror_conf = model.repo_mirror.get_mirror(repo)
+ assert existing_mirror_conf
+
+ assert model.repo_mirror.change_credentials(repo, username, password)
+
+ updated_mirror_conf = model.repo_mirror.get_mirror(repo)
+ assert updated_mirror_conf
+
+ assert updated_mirror_conf.external_registry_username.decrypt() == username
+ assert updated_mirror_conf.external_registry_password.decrypt() == password
+
+
+def test_sync_status_to_cancel(initialized_db):
+ """
+ SYNCING and SYNC_NOW mirrors may be canceled, ending in NEVER_RUN
+ """
+
+ disable_existing_mirrors()
+ mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="cancel")
+
+ mirror.sync_status = RepoMirrorStatus.SYNCING
+ mirror.save()
+ updated = update_sync_status_to_cancel(mirror)
+ assert updated is not None
+ assert updated.sync_status == RepoMirrorStatus.NEVER_RUN
+
+ mirror.sync_status = RepoMirrorStatus.SYNC_NOW
+ mirror.save()
+ updated = update_sync_status_to_cancel(mirror)
+ assert updated is not None
+ assert updated.sync_status == RepoMirrorStatus.NEVER_RUN
+
+ mirror.sync_status = RepoMirrorStatus.FAIL
+ mirror.save()
+ updated = update_sync_status_to_cancel(mirror)
+ assert updated is None
+
+ mirror.sync_status = RepoMirrorStatus.NEVER_RUN
+ mirror.save()
+ updated = update_sync_status_to_cancel(mirror)
+ assert updated is None
+
+ mirror.sync_status = RepoMirrorStatus.SUCCESS
+ mirror.save()
+ updated = update_sync_status_to_cancel(mirror)
+ assert updated is None
+
+
+def test_release_mirror(initialized_db):
+ """
+  Releasing a mirror after a failed sync decrements its retry count; once retries are exhausted,
+  the count resets and sync_start_date advances.
+ """
+
+ disable_existing_mirrors()
+ mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="first")
+
+  # MySQL rounds milliseconds on update, so force that rounding to happen now before capturing
+  # the original start date.
+ query = (RepoMirrorConfig
+ .update(sync_start_date=mirror.sync_start_date)
+ .where(RepoMirrorConfig.id == mirror.id))
+ query.execute()
+ mirror = RepoMirrorConfig.get_by_id(mirror.id)
+ original_sync_start_date = mirror.sync_start_date
+
+ assert mirror.sync_retries_remaining == 3
+
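+  # Each failed release decrements the retry counter without touching sync_start_date; once the
+  # retries are exhausted, the mirror is rescheduled (count reset, sync_start_date advanced).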
+ mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
+ assert mirror.sync_retries_remaining == 2
+ assert mirror.sync_start_date == original_sync_start_date
+
+ mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
+ assert mirror.sync_retries_remaining == 1
+ assert mirror.sync_start_date == original_sync_start_date
+
+ mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
+ assert mirror.sync_retries_remaining == 3
+ assert mirror.sync_start_date > original_sync_start_date
diff --git a/data/model/test/test_repository.py b/data/model/test/test_repository.py
new file mode 100644
index 000000000..25e8b7cf2
--- /dev/null
+++ b/data/model/test/test_repository.py
@@ -0,0 +1,49 @@
+from datetime import timedelta
+
+import os
+import pytest
+
+from peewee import IntegrityError
+
+from data.model.gc import purge_repository
+from data.model.repository import create_repository, is_empty
+from data.model.repository import get_filtered_matching_repositories
+from test.fixtures import *
+
+
+def test_duplicate_repository_different_kinds(initialized_db):
+ # Create an image repo.
+ create_repository('devtable', 'somenewrepo', None, repo_kind='image')
+
+ # Try to create an app repo with the same name, which should fail.
+ with pytest.raises(IntegrityError):
+ create_repository('devtable', 'somenewrepo', None, repo_kind='application')
+
+
+def test_is_empty(initialized_db):
+ create_repository('devtable', 'somenewrepo', None, repo_kind='image')
+
+ assert is_empty('devtable', 'somenewrepo')
+ assert not is_empty('devtable', 'simple')
+
+@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
+ reason='MySQL requires specialized indexing of newly created repos')
+@pytest.mark.parametrize('query', [
+ (''),
+ ('e'),
+])
+@pytest.mark.parametrize('authed_username', [
+ (None),
+ ('devtable'),
+])
+def test_search_pagination(query, authed_username, initialized_db):
+ # Create some public repos.
+ repo1 = create_repository('devtable', 'somenewrepo', None, repo_kind='image', visibility='public')
+ repo2 = create_repository('devtable', 'somenewrepo2', None, repo_kind='image', visibility='public')
+ repo3 = create_repository('devtable', 'somenewrepo3', None, repo_kind='image', visibility='public')
+
+ repositories = get_filtered_matching_repositories(query, filter_username=authed_username)
+ assert len(repositories) > 3
+
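+  # Offsetting by one should shift the visible window by exactly one repository.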
+ next_repos = get_filtered_matching_repositories(query, filter_username=authed_username, offset=1)
+ assert repositories[0].id != next_repos[0].id
+ assert repositories[1].id == next_repos[0].id
diff --git a/data/model/test/test_repositoryactioncount.py b/data/model/test/test_repositoryactioncount.py
new file mode 100644
index 000000000..bdad4e315
--- /dev/null
+++ b/data/model/test/test_repositoryactioncount.py
@@ -0,0 +1,38 @@
+from datetime import date, timedelta
+
+import pytest
+
+from data.database import RepositoryActionCount, RepositorySearchScore
+from data.model.repository import create_repository
+from data.model.repositoryactioncount import update_repository_score, SEARCH_BUCKETS
+from test.fixtures import *
+
+@pytest.mark.parametrize('bucket_sums,expected_score', [
+ ((0, 0, 0, 0), 0),
+
+ ((1, 6, 24, 152), 100),
+ ((2, 6, 24, 152), 101),
+ ((1, 6, 24, 304), 171),
+
+ ((100, 480, 24, 152), 703),
+ ((1, 6, 24, 15200), 7131),
+
+ ((300, 500, 1000, 0), 1733),
+ ((5000, 0, 0, 0), 5434),
+])
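+# Each tuple gives the total action count to spread evenly over the corresponding SEARCH_BUCKETS
+# window; expected_score is the score update_repository_score is expected to produce from those counts.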
+def test_update_repository_score(bucket_sums, expected_score, initialized_db):
+ # Create a new repository.
+ repo = create_repository('devtable', 'somenewrepo', None, repo_kind='image')
+
+ # Delete the RAC created in create_repository.
+ RepositoryActionCount.delete().where(RepositoryActionCount.repository == repo).execute()
+
+ # Add RAC rows for each of the buckets.
+ for index, bucket in enumerate(SEARCH_BUCKETS):
+ for day in range(0, bucket.days):
+ RepositoryActionCount.create(repository=repo,
+ count=(bucket_sums[index] / bucket.days * 1.0),
+ date=date.today() - bucket.delta + timedelta(days=day))
+
+ assert update_repository_score(repo)
+ assert RepositorySearchScore.get(repository=repo).score == expected_score
diff --git a/data/model/test/test_tag.py b/data/model/test/test_tag.py
new file mode 100644
index 000000000..2f5adf773
--- /dev/null
+++ b/data/model/test/test_tag.py
@@ -0,0 +1,356 @@
+import json
+
+from datetime import datetime
+from time import time
+
+import pytest
+
+from mock import patch
+
+from app import docker_v2_signing_key
+from data import model
+from data.database import (Image, RepositoryTag, ImageStorage, Repository, Manifest, ManifestBlob,
+ ManifestLegacyImage, TagManifestToManifest, Tag, TagToRepositoryTag)
+from data.model.repository import create_repository
+from data.model.tag import (list_active_repo_tags, create_or_update_tag, delete_tag,
+ get_matching_tags, _tag_alive, get_matching_tags_for_images,
+ change_tag_expiration, get_active_tag, store_tag_manifest_for_testing,
+ get_most_recent_tag, get_active_tag_for_repo,
+ create_or_update_tag_for_repo, set_tag_end_ts)
+from data.model.image import find_create_or_link_image
+from image.docker.schema1 import DockerSchema1ManifestBuilder
+from util.timedeltastring import convert_to_timedelta
+
+from test.fixtures import *
+
+
+def _get_expected_tags(image):
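+  # A tag is expected to match an image if it points at that image directly or at any of its
+  # descendants (the `ancestors` field stores the image's path as '/id1/id2/.../').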
+ expected_query = (RepositoryTag
+ .select()
+ .join(Image)
+ .where(RepositoryTag.hidden == False)
+ .where((Image.id == image.id) | (Image.ancestors ** ('%%/%s/%%' % image.id))))
+ return set([tag.id for tag in _tag_alive(expected_query)])
+
+
+@pytest.mark.parametrize('max_subqueries,max_image_lookup_count', [
+ (1, 1),
+ (10, 10),
+ (100, 500),
+])
+def test_get_matching_tags(max_subqueries, max_image_lookup_count, initialized_db):
+ with patch('data.model.tag._MAX_SUB_QUERIES', max_subqueries):
+ with patch('data.model.tag._MAX_IMAGE_LOOKUP_COUNT', max_image_lookup_count):
+ # Test for every image in the test database.
+ for image in Image.select(Image, ImageStorage).join(ImageStorage):
+ matching_query = get_matching_tags(image.docker_image_id, image.storage.uuid)
+ matching_tags = set([tag.id for tag in matching_query])
+ expected_tags = _get_expected_tags(image)
+ assert matching_tags == expected_tags, "mismatch for image %s" % image.id
+
+ oci_tags = list(Tag
+ .select()
+ .join(TagToRepositoryTag)
+ .where(TagToRepositoryTag.repository_tag << expected_tags))
+ assert len(oci_tags) == len(expected_tags)
+
+
+@pytest.mark.parametrize('max_subqueries,max_image_lookup_count', [
+ (1, 1),
+ (10, 10),
+ (100, 500),
+])
+def test_get_matching_tag_ids_for_images(max_subqueries, max_image_lookup_count, initialized_db):
+ with patch('data.model.tag._MAX_SUB_QUERIES', max_subqueries):
+ with patch('data.model.tag._MAX_IMAGE_LOOKUP_COUNT', max_image_lookup_count):
+ # Try for various sets of the first N images.
+ for count in [5, 10, 15]:
+ pairs = []
+ expected_tags_ids = set()
+ for image in Image.select(Image, ImageStorage).join(ImageStorage):
+ if len(pairs) >= count:
+ break
+
+ pairs.append((image.docker_image_id, image.storage.uuid))
+ expected_tags_ids.update(_get_expected_tags(image))
+
+ matching_tags_ids = set([tag.id for tag in get_matching_tags_for_images(pairs)])
+ assert matching_tags_ids == expected_tags_ids
+
+
+@pytest.mark.parametrize('max_subqueries,max_image_lookup_count', [
+ (1, 1),
+ (10, 10),
+ (100, 500),
+])
+def test_get_matching_tag_ids_for_all_images(max_subqueries, max_image_lookup_count, initialized_db):
+ with patch('data.model.tag._MAX_SUB_QUERIES', max_subqueries):
+ with patch('data.model.tag._MAX_IMAGE_LOOKUP_COUNT', max_image_lookup_count):
+ pairs = []
+ for image in Image.select(Image, ImageStorage).join(ImageStorage):
+ pairs.append((image.docker_image_id, image.storage.uuid))
+
+ expected_tags_ids = set([tag.id for tag in _tag_alive(RepositoryTag.select())])
+ matching_tags_ids = set([tag.id for tag in get_matching_tags_for_images(pairs)])
+
+ # Ensure every alive tag was found.
+ assert matching_tags_ids == expected_tags_ids
+
+
+def test_get_matching_tag_ids_images_filtered(initialized_db):
+ def filter_query(query):
+ return query.join(Repository).where(Repository.name == 'simple')
+
+ filtered_images = filter_query(Image
+ .select(Image, ImageStorage)
+ .join(RepositoryTag)
+ .switch(Image)
+ .join(ImageStorage)
+ .switch(Image))
+
+ expected_tags_query = _tag_alive(filter_query(RepositoryTag
+ .select()))
+
+ pairs = []
+ for image in filtered_images:
+ pairs.append((image.docker_image_id, image.storage.uuid))
+
+ matching_tags = get_matching_tags_for_images(pairs, filter_images=filter_query,
+ filter_tags=filter_query)
+
+ expected_tag_ids = set([tag.id for tag in expected_tags_query])
+ matching_tags_ids = set([tag.id for tag in matching_tags])
+
+ # Ensure every alive tag was found.
+ assert matching_tags_ids == expected_tag_ids
+
+
+def _get_oci_tag(tag):
+ return (Tag
+ .select()
+ .join(TagToRepositoryTag)
+ .where(TagToRepositoryTag.repository_tag == tag)).get()
+
+
+def assert_tags(repository, *args):
+ tags = list(list_active_repo_tags(repository))
+ assert len(tags) == len(args)
+
+ tags_dict = {}
+ for tag in tags:
+    assert tag.name not in tags_dict
+ assert not tag.hidden
+ assert not tag.lifetime_end_ts or tag.lifetime_end_ts > time()
+
+ tags_dict[tag.name] = tag
+
+ oci_tag = _get_oci_tag(tag)
+ assert oci_tag.name == tag.name
+ assert not oci_tag.hidden
+ assert oci_tag.reversion == tag.reversion
+
+ if tag.lifetime_end_ts:
+ assert oci_tag.lifetime_end_ms == (tag.lifetime_end_ts * 1000)
+ else:
+ assert oci_tag.lifetime_end_ms is None
+
+ for expected in args:
+ assert expected in tags_dict
+
+
+def test_create_reversion_tag(initialized_db):
+ repository = create_repository('devtable', 'somenewrepo', None)
+ manifest = Manifest.get()
+ image1 = find_create_or_link_image('foobarimage1', repository, None, {}, 'local_us')
+
+ footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
+ oci_manifest=manifest, reversion=True)
+ assert footag.reversion
+
+ oci_tag = _get_oci_tag(footag)
+ assert oci_tag.name == footag.name
+ assert not oci_tag.hidden
+ assert oci_tag.reversion == footag.reversion
+
+
+def test_list_active_tags(initialized_db):
+ # Create a new repository.
+ repository = create_repository('devtable', 'somenewrepo', None)
+ manifest = Manifest.get()
+
+ # Create some images.
+ image1 = find_create_or_link_image('foobarimage1', repository, None, {}, 'local_us')
+ image2 = find_create_or_link_image('foobarimage2', repository, None, {}, 'local_us')
+
+ # Make sure its tags list is empty.
+ assert_tags(repository)
+
+ # Add some new tags.
+ footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
+ oci_manifest=manifest)
+ bartag = create_or_update_tag_for_repo(repository, 'bar', image1.docker_image_id,
+ oci_manifest=manifest)
+
+  # Since timestamps are stored at second granularity, we need to make the tags "start"
+  # before "now", so that when we recreate them below, they don't conflict.
+ footag.lifetime_start_ts -= 5
+ footag.save()
+
+ bartag.lifetime_start_ts -= 5
+ bartag.save()
+
+ footag_oci = _get_oci_tag(footag)
+ footag_oci.lifetime_start_ms -= 5000
+ footag_oci.save()
+
+ bartag_oci = _get_oci_tag(bartag)
+ bartag_oci.lifetime_start_ms -= 5000
+ bartag_oci.save()
+
+ # Make sure they are returned.
+ assert_tags(repository, 'foo', 'bar')
+
+ # Set the expirations to be explicitly empty.
+ set_tag_end_ts(footag, None)
+ set_tag_end_ts(bartag, None)
+
+ # Make sure they are returned.
+ assert_tags(repository, 'foo', 'bar')
+
+  # Mark a tag as expiring in the far future, and make sure it is still returned.
+ set_tag_end_ts(footag, footag.lifetime_start_ts + 10000000)
+
+ # Make sure they are returned.
+ assert_tags(repository, 'foo', 'bar')
+
+ # Delete a tag and make sure it isn't returned.
+ footag = delete_tag('devtable', 'somenewrepo', 'foo')
+ set_tag_end_ts(footag, footag.lifetime_end_ts - 4)
+
+ assert_tags(repository, 'bar')
+
+ # Add a new foo again.
+ footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
+ oci_manifest=manifest)
+ footag.lifetime_start_ts -= 3
+ footag.save()
+
+ footag_oci = _get_oci_tag(footag)
+ footag_oci.lifetime_start_ms -= 3000
+ footag_oci.save()
+
+ assert_tags(repository, 'foo', 'bar')
+
+  # Mark a tag as expiring in the far future, and make sure it is still returned.
+ set_tag_end_ts(footag, footag.lifetime_start_ts + 10000000)
+
+ # Make sure they are returned.
+ assert_tags(repository, 'foo', 'bar')
+
+ # "Move" foo by updating it and make sure we don't get duplicates.
+ create_or_update_tag_for_repo(repository, 'foo', image2.docker_image_id, oci_manifest=manifest)
+ assert_tags(repository, 'foo', 'bar')
+
+
+@pytest.mark.parametrize('expiration_offset, expected_offset', [
+ (None, None),
+ ('0s', '1h'),
+ ('30m', '1h'),
+ ('2h', '2h'),
+ ('2w', '2w'),
+ ('200w', '104w'),
+])
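+# Out-of-range offsets are clamped: anything below 1h becomes 1h and anything above 104w becomes 104w.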
+def test_change_tag_expiration(expiration_offset, expected_offset, initialized_db):
+ repository = create_repository('devtable', 'somenewrepo', None)
+ image1 = find_create_or_link_image('foobarimage1', repository, None, {}, 'local_us')
+
+ manifest = Manifest.get()
+ footag = create_or_update_tag_for_repo(repository, 'foo', image1.docker_image_id,
+ oci_manifest=manifest)
+
+ expiration_date = None
+ if expiration_offset is not None:
+ expiration_date = datetime.utcnow() + convert_to_timedelta(expiration_offset)
+
+ assert change_tag_expiration(footag, expiration_date)
+
+ # Lookup the tag again.
+ footag_updated = get_active_tag('devtable', 'somenewrepo', 'foo')
+ oci_tag = _get_oci_tag(footag_updated)
+
+ if expected_offset is None:
+ assert footag_updated.lifetime_end_ts is None
+ assert oci_tag.lifetime_end_ms is None
+ else:
+ start_date = datetime.utcfromtimestamp(footag_updated.lifetime_start_ts)
+ end_date = datetime.utcfromtimestamp(footag_updated.lifetime_end_ts)
+ expected_end_date = start_date + convert_to_timedelta(expected_offset)
+ assert (expected_end_date - end_date).total_seconds() < 5 # variance in test
+
+ assert oci_tag.lifetime_end_ms == (footag_updated.lifetime_end_ts * 1000)
+
+
+def random_storages():
+ return list(ImageStorage.select().where(~(ImageStorage.content_checksum >> None)).limit(10))
+
+
+def repeated_storages():
+ storages = list(ImageStorage.select().where(~(ImageStorage.content_checksum >> None)).limit(5))
+ return storages + storages
+
+
+@pytest.mark.parametrize('get_storages', [
+ random_storages,
+ repeated_storages,
+])
+def test_store_tag_manifest(get_storages, initialized_db):
+ # Create a manifest with some layers.
+ builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'sometag')
+
+ storages = get_storages()
+ assert storages
+
+ repo = model.repository.get_repository('devtable', 'simple')
+ storage_id_map = {}
+ for index, storage in enumerate(storages):
+ image_id = 'someimage%s' % index
+ builder.add_layer(storage.content_checksum, json.dumps({'id': image_id}))
+ find_create_or_link_image(image_id, repo, 'devtable', {}, 'local_us')
+ storage_id_map[storage.content_checksum] = storage.id
+
+ manifest = builder.build(docker_v2_signing_key)
+ tag_manifest, _ = store_tag_manifest_for_testing('devtable', 'simple', 'sometag', manifest,
+ manifest.leaf_layer_v1_image_id, storage_id_map)
+
+ # Ensure we have the new-model expected rows.
+ mapping_row = TagManifestToManifest.get(tag_manifest=tag_manifest)
+
+ assert mapping_row.manifest is not None
+ assert mapping_row.manifest.manifest_bytes == manifest.bytes.as_encoded_str()
+ assert mapping_row.manifest.digest == str(manifest.digest)
+
+ blob_rows = {m.blob_id for m in
+ ManifestBlob.select().where(ManifestBlob.manifest == mapping_row.manifest)}
+ assert blob_rows == {s.id for s in storages}
+
+ assert ManifestLegacyImage.get(manifest=mapping_row.manifest).image == tag_manifest.tag.image
+
+
+def test_get_most_recent_tag(initialized_db):
+ # Create a hidden tag that is the most recent.
+ repo = model.repository.get_repository('devtable', 'simple')
+ image = model.tag.get_tag_image('devtable', 'simple', 'latest')
+ model.tag.create_temporary_hidden_tag(repo, image, 10000000)
+
+ # Ensure we find a non-hidden tag.
+ found = model.tag.get_most_recent_tag(repo)
+ assert not found.hidden
+
+
+def test_get_active_tag_for_repo(initialized_db):
+ repo = model.repository.get_repository('devtable', 'simple')
+ image = model.tag.get_tag_image('devtable', 'simple', 'latest')
+ hidden_tag = model.tag.create_temporary_hidden_tag(repo, image, 10000000)
+
+ # Ensure get active tag for repo cannot find it.
+ assert model.tag.get_active_tag_for_repo(repo, hidden_tag) is None
+ assert model.tag.get_active_tag_for_repo(repo, 'latest') is not None
diff --git a/data/model/test/test_team.py b/data/model/test/test_team.py
new file mode 100644
index 000000000..88b08855c
--- /dev/null
+++ b/data/model/test/test_team.py
@@ -0,0 +1,61 @@
+import pytest
+
+from data.model.team import (add_or_invite_to_team, create_team, confirm_team_invite,
+ list_team_users, validate_team_name)
+from data.model.organization import create_organization
+from data.model.user import get_user, create_user_noverify
+
+from test.fixtures import *
+
+
+@pytest.mark.parametrize('name, is_valid', [
+ ('', False),
+ ('f', False),
+ ('fo', True),
+ ('f' * 255, True),
+ ('f' * 256, False),
+ (' ', False),
+ ('helloworld', True),
+ ('hello_world', True),
+ ('hello-world', True),
+ ('hello world', False),
+ ('HelloWorld', False),
+])
+def test_validate_team_name(name, is_valid):
+ result, _ = validate_team_name(name)
+ assert result == is_valid
+
+
+def is_in_team(team, user):
+ return user.username in {u.username for u in list_team_users(team)}
+
+
+def test_invite_to_team(initialized_db):
+ first_user = get_user('devtable')
+ second_user = create_user_noverify('newuser', 'foo@example.com')
+
+ def run_invite_flow(orgname):
+ # Create an org owned by `devtable`.
+ org = create_organization(orgname, orgname + '@example.com', first_user)
+
+ # Create another team and add `devtable` to it. Since `devtable` is already
+ # in the org, it should be done directly.
+ other_team = create_team('otherteam', org, 'admin')
+ invite = add_or_invite_to_team(first_user, other_team, user_obj=first_user)
+ assert invite is None
+ assert is_in_team(other_team, first_user)
+
+ # Try to add `newuser` to the team, which should require an invite.
+ invite = add_or_invite_to_team(first_user, other_team, user_obj=second_user)
+ assert invite is not None
+ assert not is_in_team(other_team, second_user)
+
+ # Accept the invite.
+ confirm_team_invite(invite.invite_token, second_user)
+ assert is_in_team(other_team, second_user)
+
+ # Run for a new org.
+ run_invite_flow('firstorg')
+
+ # Create another org and repeat, ensuring the same operations perform the same way.
+ run_invite_flow('secondorg')
diff --git a/data/model/test/test_user.py b/data/model/test/test_user.py
new file mode 100644
index 000000000..4f124b7f3
--- /dev/null
+++ b/data/model/test/test_user.py
@@ -0,0 +1,205 @@
+from datetime import datetime
+
+import pytest
+
+from mock import patch
+
+from data.database import EmailConfirmation, User, DeletedNamespace
+from data import model
+from data.model.organization import get_organization
+from data.model.notification import create_notification
+from data.model.team import create_team, add_user_to_team
+from data.model.user import create_user_noverify, validate_reset_code, get_active_users
+from data.model.user import mark_namespace_for_deletion, delete_namespace_via_marker
+from data.model.user import create_robot, lookup_robot, list_namespace_robots
+from data.model.user import get_pull_credentials, retrieve_robot_token, verify_robot
+from data.model.user import InvalidRobotException, delete_robot, get_matching_users
+from data.model.repository import create_repository
+from data.fields import Credential
+from data.queue import WorkQueue
+from util.timedeltastring import convert_to_timedelta
+from util.security.token import encode_public_private_token
+from test.fixtures import *
+
+def test_create_user_with_expiration(initialized_db):
+ with patch('data.model.config.app_config', {'DEFAULT_TAG_EXPIRATION': '1h'}):
+ user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
+ assert user.removed_tag_expiration_s == 60 * 60
+
+@pytest.mark.parametrize('token_lifetime, time_since', [
+ ('1m', '2m'),
+ ('2m', '1m'),
+ ('1h', '1m'),
+])
+def test_validation_code(token_lifetime, time_since, initialized_db):
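+  # A reset code should validate only while its age is within USER_RECOVERY_TOKEN_LIFETIME.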
+ user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
+ created = datetime.now() - convert_to_timedelta(time_since)
+ verification_code, unhashed = Credential.generate()
+ confirmation = EmailConfirmation.create(user=user, pw_reset=True,
+ created=created, verification_code=verification_code)
+ encoded = encode_public_private_token(confirmation.code, unhashed)
+
+ with patch('data.model.config.app_config', {'USER_RECOVERY_TOKEN_LIFETIME': token_lifetime}):
+ result = validate_reset_code(encoded)
+ expect_success = convert_to_timedelta(token_lifetime) >= convert_to_timedelta(time_since)
+ assert expect_success == (result is not None)
+
+
+@pytest.mark.parametrize('disabled', [
+ (True),
+ (False),
+])
+@pytest.mark.parametrize('deleted', [
+ (True),
+ (False),
+])
+def test_get_active_users(disabled, deleted, initialized_db):
+ # Delete a user.
+ deleted_user = model.user.get_user('public')
+ queue = WorkQueue('testgcnamespace', lambda db: db.transaction())
+ mark_namespace_for_deletion(deleted_user, [], queue)
+
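+  # A namespace marked for deletion is disabled, so it should only appear when both disabled and
+  # deleted users are requested.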
+ users = get_active_users(disabled=disabled, deleted=deleted)
+ deleted_found = [user for user in users if user.id == deleted_user.id]
+ assert bool(deleted_found) == (deleted and disabled)
+
+ for user in users:
+ if not disabled:
+ assert user.enabled
+
+
+def test_mark_namespace_for_deletion(initialized_db):
+ def create_transaction(db):
+ return db.transaction()
+
+ # Create a user and then mark it for deletion.
+ user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
+
+ # Add some robots.
+ create_robot('foo', user)
+ create_robot('bar', user)
+
+ assert lookup_robot('foobar+foo') is not None
+ assert lookup_robot('foobar+bar') is not None
+ assert len(list(list_namespace_robots('foobar'))) == 2
+
+ # Mark the user for deletion.
+ queue = WorkQueue('testgcnamespace', create_transaction)
+ mark_namespace_for_deletion(user, [], queue)
+
+ # Ensure the older user is still in the DB.
+ older_user = User.get(id=user.id)
+ assert older_user.username != 'foobar'
+
+ # Ensure the robots are deleted.
+ with pytest.raises(InvalidRobotException):
+ assert lookup_robot('foobar+foo')
+
+ with pytest.raises(InvalidRobotException):
+ assert lookup_robot('foobar+bar')
+
+ assert len(list(list_namespace_robots(older_user.username))) == 0
+
+ # Ensure we can create a user with the same namespace again.
+ new_user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
+ assert new_user.id != user.id
+
+ # Ensure the older user is still in the DB.
+ assert User.get(id=user.id).username != 'foobar'
+
+
+def test_delete_namespace_via_marker(initialized_db):
+ def create_transaction(db):
+ return db.transaction()
+
+ # Create a user and then mark it for deletion.
+ user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
+
+ # Add some repositories.
+ create_repository('foobar', 'somerepo', user)
+ create_repository('foobar', 'anotherrepo', user)
+
+ # Mark the user for deletion.
+ queue = WorkQueue('testgcnamespace', create_transaction)
+ marker_id = mark_namespace_for_deletion(user, [], queue)
+
+ # Delete the user.
+ delete_namespace_via_marker(marker_id, [])
+
+ # Ensure the user was actually deleted.
+ with pytest.raises(User.DoesNotExist):
+ User.get(id=user.id)
+
+ with pytest.raises(DeletedNamespace.DoesNotExist):
+ DeletedNamespace.get(id=marker_id)
+
+
+def test_delete_robot(initialized_db):
+ # Create a robot account.
+ user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
+ robot, _ = create_robot('foo', user)
+
+ # Add some notifications and other rows pointing to the robot.
+ create_notification('repo_push', robot)
+
+ team = create_team('someteam', get_organization('buynlarge'), 'member')
+ add_user_to_team(robot, team)
+
+ # Ensure the robot exists.
+ assert lookup_robot(robot.username).id == robot.id
+
+ # Delete the robot.
+ delete_robot(robot.username)
+
+ # Ensure it is gone.
+ with pytest.raises(InvalidRobotException):
+ lookup_robot(robot.username)
+
+
+def test_get_matching_users(initialized_db):
+ # Exact match.
+ for user in User.select().where(User.organization == False, User.robot == False):
+ assert list(get_matching_users(user.username))[0].username == user.username
+
+ # Prefix matching.
+ for user in User.select().where(User.organization == False, User.robot == False):
+ assert user.username in [r.username for r in get_matching_users(user.username[:2])]
+
+
+def test_get_matching_users_with_same_prefix(initialized_db):
+ # Create a bunch of users with the same prefix.
+ for index in range(0, 20):
+ create_user_noverify('foo%s' % index, 'foo%s@example.com' % index, email_required=False)
+
+ # For each user, ensure that lookup of the exact name is found first.
+ for index in range(0, 20):
+ username = 'foo%s' % index
+ assert list(get_matching_users(username))[0].username == username
+
+ # Prefix matching.
+ found = list(get_matching_users('foo', limit=50))
+ assert len(found) == 20
+
+
+def test_robot(initialized_db):
+ # Create a robot account.
+ user = create_user_noverify('foobar', 'foo@example.com', email_required=False)
+ robot, token = create_robot('foo', user)
+ assert retrieve_robot_token(robot) == token
+
+ # Ensure we can retrieve its information.
+ found = lookup_robot('foobar+foo')
+ assert found == robot
+
+ creds = get_pull_credentials('foobar+foo')
+ assert creds is not None
+ assert creds['username'] == 'foobar+foo'
+ assert creds['password'] == token
+
+ assert verify_robot('foobar+foo', token) == robot
+
+ with pytest.raises(InvalidRobotException):
+ assert verify_robot('foobar+foo', 'someothertoken')
+
+ with pytest.raises(InvalidRobotException):
+ assert verify_robot('foobar+unknownbot', token)
diff --git a/data/model/test/test_visible_repos.py b/data/model/test/test_visible_repos.py
new file mode 100644
index 000000000..9e5e7cbf5
--- /dev/null
+++ b/data/model/test/test_visible_repos.py
@@ -0,0 +1,89 @@
+from data import model
+
+from test.fixtures import *
+
+
+NO_ACCESS_USER = 'freshuser'
+READ_ACCESS_USER = 'reader'
+ADMIN_ACCESS_USER = 'devtable'
+PUBLIC_USER = 'public'
+RANDOM_USER = 'randomuser'
+OUTSIDE_ORG_USER = 'outsideorg'
+
+ADMIN_ROBOT_USER = 'devtable+dtrobot'
+
+ORGANIZATION = 'buynlarge'
+
+SIMPLE_REPO = 'simple'
+PUBLIC_REPO = 'publicrepo'
+RANDOM_REPO = 'randomrepo'
+
+OUTSIDE_ORG_REPO = 'coolrepo'
+
+ORG_REPO = 'orgrepo'
+ANOTHER_ORG_REPO = 'anotherorgrepo'
+
+# Note: The shared repo has devtable as admin, public as a writer and reader as a reader.
+SHARED_REPO = 'shared'
+
+
+def assertDoesNotHaveRepo(username, name):
+ repos = list(model.repository.get_visible_repositories(username))
+ names = [repo.name for repo in repos]
+  assert name not in names
+
+
+def assertHasRepo(username, name):
+ repos = list(model.repository.get_visible_repositories(username))
+ names = [repo.name for repo in repos]
+ assert name in names
+
+
+def test_noaccess(initialized_db):
+ repos = list(model.repository.get_visible_repositories(NO_ACCESS_USER))
+ names = [repo.name for repo in repos]
+ assert not names
+
+ # Try retrieving public repos now.
+ repos = list(model.repository.get_visible_repositories(NO_ACCESS_USER, include_public=True))
+ names = [repo.name for repo in repos]
+ assert PUBLIC_REPO in names
+
+
+def test_public(initialized_db):
+ assertHasRepo(PUBLIC_USER, PUBLIC_REPO)
+ assertHasRepo(PUBLIC_USER, SHARED_REPO)
+
+ assertDoesNotHaveRepo(PUBLIC_USER, SIMPLE_REPO)
+ assertDoesNotHaveRepo(PUBLIC_USER, RANDOM_REPO)
+ assertDoesNotHaveRepo(PUBLIC_USER, OUTSIDE_ORG_REPO)
+
+
+def test_reader(initialized_db):
+ assertHasRepo(READ_ACCESS_USER, SHARED_REPO)
+ assertHasRepo(READ_ACCESS_USER, ORG_REPO)
+
+ assertDoesNotHaveRepo(READ_ACCESS_USER, SIMPLE_REPO)
+ assertDoesNotHaveRepo(READ_ACCESS_USER, RANDOM_REPO)
+ assertDoesNotHaveRepo(READ_ACCESS_USER, OUTSIDE_ORG_REPO)
+ assertDoesNotHaveRepo(READ_ACCESS_USER, PUBLIC_REPO)
+
+
+def test_random(initialized_db):
+ assertHasRepo(RANDOM_USER, RANDOM_REPO)
+
+ assertDoesNotHaveRepo(RANDOM_USER, SIMPLE_REPO)
+ assertDoesNotHaveRepo(RANDOM_USER, SHARED_REPO)
+ assertDoesNotHaveRepo(RANDOM_USER, ORG_REPO)
+ assertDoesNotHaveRepo(RANDOM_USER, ANOTHER_ORG_REPO)
+ assertDoesNotHaveRepo(RANDOM_USER, PUBLIC_REPO)
+
+
+def test_admin(initialized_db):
+ assertHasRepo(ADMIN_ACCESS_USER, SIMPLE_REPO)
+ assertHasRepo(ADMIN_ACCESS_USER, SHARED_REPO)
+
+ assertHasRepo(ADMIN_ACCESS_USER, ORG_REPO)
+ assertHasRepo(ADMIN_ACCESS_USER, ANOTHER_ORG_REPO)
+
+ assertDoesNotHaveRepo(ADMIN_ACCESS_USER, OUTSIDE_ORG_REPO)
diff --git a/data/model/token.py b/data/model/token.py
new file mode 100644
index 000000000..82661cdef
--- /dev/null
+++ b/data/model/token.py
@@ -0,0 +1,105 @@
+import logging
+
+from peewee import JOIN
+
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from data.database import (AccessToken, AccessTokenKind, Repository, Namespace, Role,
+ RepositoryBuildTrigger)
+from data.model import DataModelException, _basequery, InvalidTokenException
+
+
+logger = logging.getLogger(__name__)
+
+
+ACCESS_TOKEN_NAME_PREFIX_LENGTH = 32
+ACCESS_TOKEN_CODE_MINIMUM_LENGTH = 32
+
+
+def create_access_token(repo, role, kind=None, friendly_name=None):
+ role = Role.get(Role.name == role)
+ kind_ref = None
+ if kind is not None:
+ kind_ref = AccessTokenKind.get(AccessTokenKind.name == kind)
+
+ new_token = AccessToken.create(repository=repo, temporary=True, role=role, kind=kind_ref,
+ friendly_name=friendly_name)
+
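+  # While the migration away from the legacy combined `code` field is in progress, also write it
+  # so that older readers continue to work.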
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ new_token.code = new_token.token_name + new_token.token_code.decrypt()
+ new_token.save()
+
+ return new_token
+
+
+def create_delegate_token(namespace_name, repository_name, friendly_name,
+ role='read'):
+ read_only = Role.get(name=role)
+ repo = _basequery.get_existing_repository(namespace_name, repository_name)
+ new_token = AccessToken.create(repository=repo, role=read_only,
+ friendly_name=friendly_name, temporary=False)
+
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ new_token.code = new_token.token_name + new_token.token_code.decrypt()
+ new_token.save()
+
+ return new_token
+
+
+def load_token_data(code):
+ """ Load the permissions for any token by code. """
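+  # A code is the fixed-length `token_name` prefix (used for lookup) followed by the secret
+  # `token_code`, which is verified against the stored credential.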
+ token_name = code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
+ token_code = code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]
+
+ if not token_name or not token_code:
+ raise InvalidTokenException('Invalid delegate token code: %s' % code)
+
+ # Try loading by name and then comparing the code.
+ assert token_name
+ try:
+ found = (AccessToken
+ .select(AccessToken, Repository, Namespace, Role)
+ .join(Role)
+ .switch(AccessToken)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(AccessToken.token_name == token_name)
+ .get())
+
+ assert token_code
+ if found.token_code is None or not found.token_code.matches(token_code):
+ raise InvalidTokenException('Invalid delegate token code: %s' % code)
+
+ assert len(token_code) >= ACCESS_TOKEN_CODE_MINIMUM_LENGTH
+ return found
+ except AccessToken.DoesNotExist:
+ pass
+
+ # Legacy: Try loading the full code directly.
+ # TODO(remove-unenc): Remove this once migrated.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ try:
+ return (AccessToken
+ .select(AccessToken, Repository, Namespace, Role)
+ .join(Role)
+ .switch(AccessToken)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(AccessToken.code == code)
+ .get())
+ except AccessToken.DoesNotExist:
+ raise InvalidTokenException('Invalid delegate token code: %s' % code)
+
+ raise InvalidTokenException('Invalid delegate token code: %s' % code)
+
+
+def get_full_token_string(token):
+ """ Returns the full string to use for this token to login. """
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ if token.token_name is None:
+ return token.code
+
+ assert token.token_name
+ token_code = token.token_code.decrypt()
+ assert len(token.token_name) == ACCESS_TOKEN_NAME_PREFIX_LENGTH
+ assert len(token_code) >= ACCESS_TOKEN_CODE_MINIMUM_LENGTH
+ return '%s%s' % (token.token_name, token_code)
diff --git a/data/model/user.py b/data/model/user.py
new file mode 100644
index 000000000..7e9ed81b1
--- /dev/null
+++ b/data/model/user.py
@@ -0,0 +1,1217 @@
+import bcrypt
+import logging
+import json
+import uuid
+from flask_login import UserMixin
+
+from peewee import JOIN, IntegrityError, fn
+from uuid import uuid4
+from datetime import datetime, timedelta
+
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from data.database import (User, LoginService, FederatedLogin, RepositoryPermission, TeamMember,
+ Team, Repository, TupleSelector, TeamRole, Namespace, Visibility,
+ EmailConfirmation, Role, db_for_update, random_string_generator,
+ UserRegion, ImageStorageLocation,
+ ServiceKeyApproval, OAuthApplication, RepositoryBuildTrigger,
+ UserPromptKind, UserPrompt, UserPromptTypes, DeletedNamespace,
+ RobotAccountMetadata, NamespaceGeoRestriction, RepoMirrorConfig,
+ RobotAccountToken)
+from data.readreplica import ReadOnlyModeException
+from data.model import (DataModelException, InvalidPasswordException, InvalidRobotException,
+ InvalidUsernameException, InvalidEmailAddressException,
+ TooManyLoginAttemptsException, db_transaction,
+ notification, config, repository, _basequery, gc)
+from data.fields import Credential
+from data.text import prefix_search
+from util.names import format_robot_username, parse_robot_username
+from util.validation import (validate_username, validate_email, validate_password,
+ INVALID_PASSWORD_MESSAGE)
+from util.backoff import exponential_backoff
+from util.timedeltastring import convert_to_timedelta
+from util.unicode import remove_unicode
+from util.security.token import decode_public_private_token, encode_public_private_token
+
+
+logger = logging.getLogger(__name__)
+
+
+EXPONENTIAL_BACKOFF_SCALE = timedelta(seconds=1)
+
+def hash_password(password, salt=None):
+ salt = salt or bcrypt.gensalt()
+ return bcrypt.hashpw(password.encode('utf-8'), salt)
+
+def create_user(username, password, email, auto_verify=False, email_required=True, prompts=tuple(),
+ is_possible_abuser=False):
+ """ Creates a regular user, if allowed. """
+ if not validate_password(password):
+ raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE)
+
+ created = create_user_noverify(username, email, email_required=email_required, prompts=prompts,
+ is_possible_abuser=is_possible_abuser)
+ created.password_hash = hash_password(password)
+ created.verified = auto_verify
+ created.save()
+
+ return created
+
+
+def create_user_noverify(username, email, email_required=True, prompts=tuple(),
+ is_possible_abuser=False):
+ if email_required:
+ if not validate_email(email):
+ raise InvalidEmailAddressException('Invalid email address: %s' % email)
+ else:
+ # If email addresses are not required and none was specified, then we just use a unique
+ # ID to ensure that the database consistency check remains intact.
+ email = email or str(uuid.uuid4())
+
+ (username_valid, username_issue) = validate_username(username)
+ if not username_valid:
+ raise InvalidUsernameException('Invalid namespace %s: %s' % (username, username_issue))
+
+ try:
+ existing = User.get((User.username == username) | (User.email == email))
+ logger.info('Existing user with same username or email.')
+
+ # A user already exists with either the same username or email
+ if existing.username == username:
+ assert not existing.robot
+
+ msg = 'Username has already been taken by an organization and cannot be reused: %s' % username
+ if not existing.organization:
+        msg = 'Username has already been taken by a user and cannot be reused: %s' % username
+
+ raise InvalidUsernameException(msg)
+
+ raise InvalidEmailAddressException('Email has already been used: %s' % email)
+ except User.DoesNotExist:
+ # This is actually the happy path
+ logger.debug('Email and username are unique!')
+
+ # Create the user.
+ try:
+ default_expr_s = _convert_to_s(config.app_config['DEFAULT_TAG_EXPIRATION'])
+ default_max_builds = config.app_config.get('DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT')
+ threat_max_builds = config.app_config.get('THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT')
+
+ if is_possible_abuser and threat_max_builds is not None:
+ default_max_builds = threat_max_builds
+
+ new_user = User.create(username=username, email=email, removed_tag_expiration_s=default_expr_s,
+ maximum_queued_builds_count=default_max_builds)
+ for prompt in prompts:
+ create_user_prompt(new_user, prompt)
+
+ return new_user
+ except Exception as ex:
+ raise DataModelException(ex.message)
+
+def increase_maximum_build_count(user, maximum_queued_builds_count):
+ """ Increases the maximum number of allowed builds on the namespace, if greater than that
+ already present.
+ """
+ if (user.maximum_queued_builds_count is not None and
+ maximum_queued_builds_count > user.maximum_queued_builds_count):
+ user.maximum_queued_builds_count = maximum_queued_builds_count
+ user.save()
+
+def is_username_unique(test_username):
+ try:
+ User.get((User.username == test_username))
+ return False
+ except User.DoesNotExist:
+ return True
+
+
+def change_password(user, new_password):
+ if not validate_password(new_password):
+ raise InvalidPasswordException(INVALID_PASSWORD_MESSAGE)
+
+ pw_hash = hash_password(new_password)
+ user.invalid_login_attempts = 0
+ user.password_hash = pw_hash
+ invalidate_all_sessions(user)
+
+ # Remove any password required notifications for the user.
+ notification.delete_notifications_by_kind(user, 'password_required')
+
+
+def get_default_user_prompts(features):
+ prompts = set()
+ if features.USER_METADATA:
+ prompts.add(UserPromptTypes.ENTER_NAME)
+ prompts.add(UserPromptTypes.ENTER_COMPANY)
+
+ return prompts
+
+
+def has_user_prompts(user):
+ try:
+ UserPrompt.select().where(UserPrompt.user == user).get()
+ return True
+ except UserPrompt.DoesNotExist:
+ return False
+
+
+def has_user_prompt(user, prompt_name):
+ prompt_kind = UserPromptKind.get(name=prompt_name)
+
+ try:
+ UserPrompt.get(user=user, kind=prompt_kind)
+ return True
+ except UserPrompt.DoesNotExist:
+ return False
+
+
+def create_user_prompt(user, prompt_name):
+ prompt_kind = UserPromptKind.get(name=prompt_name)
+ return UserPrompt.create(user=user, kind=prompt_kind)
+
+
+def remove_user_prompt(user, prompt_name):
+ prompt_kind = UserPromptKind.get(name=prompt_name)
+ UserPrompt.delete().where(UserPrompt.user == user, UserPrompt.kind == prompt_kind).execute()
+
+
+def get_user_prompts(user):
+ query = UserPrompt.select().where(UserPrompt.user == user).join(UserPromptKind)
+ return [prompt.kind.name for prompt in query]
+
+
+def change_username(user_id, new_username):
+ (username_valid, username_issue) = validate_username(new_username)
+ if not username_valid:
+ raise InvalidUsernameException('Invalid username %s: %s' % (new_username, username_issue))
+
+ with db_transaction():
+ # Reload the user for update
+ user = db_for_update(User.select().where(User.id == user_id)).get()
+
+ # Rename the robots
+ for robot in db_for_update(_list_entity_robots(user.username, include_metadata=False,
+ include_token=False)):
+ _, robot_shortname = parse_robot_username(robot.username)
+ new_robot_name = format_robot_username(new_username, robot_shortname)
+ robot.username = new_robot_name
+ robot.save()
+
+ # Rename the user
+ user.username = new_username
+ user.save()
+
+ # Remove any prompts for username.
+ remove_user_prompt(user, 'confirm_username')
+
+ return user
+
+
+def change_invoice_email_address(user, invoice_email_address):
+ # Note: We null out the address if it is an empty string.
+ user.invoice_email_address = invoice_email_address or None
+ user.save()
+
+
+def change_send_invoice_email(user, invoice_email):
+ user.invoice_email = invoice_email
+ user.save()
+
+
+def _convert_to_s(timespan_string):
+ """ Returns the given timespan string (e.g. `2w` or `45s`) into seconds. """
+ return convert_to_timedelta(timespan_string).total_seconds()
+
+
+def change_user_tag_expiration(user, tag_expiration_s):
+ """ Changes the tag expiration on the given user/org. Note that the specified expiration must
+ be within the configured TAG_EXPIRATION_OPTIONS or this method will raise a
+ DataModelException.
+ """
+ allowed_options = [_convert_to_s(o) for o in config.app_config['TAG_EXPIRATION_OPTIONS']]
+ if tag_expiration_s not in allowed_options:
+ raise DataModelException('Invalid tag expiration option')
+
+ user.removed_tag_expiration_s = tag_expiration_s
+ user.save()
+
+
+def update_email(user, new_email, auto_verify=False):
+ try:
+ user.email = new_email
+ user.verified = auto_verify
+ user.save()
+ except IntegrityError:
+ raise DataModelException('E-mail address already used')
+
+
+def update_enabled(user, set_enabled):
+ user.enabled = set_enabled
+ user.save()
+
+
+def create_robot(robot_shortname, parent, description='', unstructured_metadata=None):
+ (username_valid, username_issue) = validate_username(robot_shortname)
+ if not username_valid:
+ raise InvalidRobotException('The name for the robot \'%s\' is invalid: %s' %
+ (robot_shortname, username_issue))
+
+ username = format_robot_username(parent.username, robot_shortname)
+
+ try:
+ User.get(User.username == username)
+
+ msg = 'Existing robot with name: %s' % username
+ logger.info(msg)
+ raise InvalidRobotException(msg)
+ except User.DoesNotExist:
+ pass
+
+ service = LoginService.get(name='quayrobot')
+ try:
+ with db_transaction():
+ created = User.create(username=username, email=str(uuid.uuid4()), robot=True)
+ token = random_string_generator(length=64)()
+ RobotAccountToken.create(robot_account=created, token=token)
+ FederatedLogin.create(user=created, service=service, service_ident='robot:%s' % created.id)
+ RobotAccountMetadata.create(robot_account=created, description=description[0:255],
+ unstructured_json=unstructured_metadata or {})
+ return created, token
+ except Exception as ex:
+ raise DataModelException(ex.message)
+
+
+def get_or_create_robot_metadata(robot):
+ defaults = dict(description='', unstructured_json={})
+ metadata, _ = RobotAccountMetadata.get_or_create(robot_account=robot, defaults=defaults)
+ return metadata
+
+
+def update_robot_metadata(robot, description='', unstructured_json=None):
+ """ Updates the description and user-specified unstructured metadata associated
+ with a robot account to that specified. """
+ metadata = get_or_create_robot_metadata(robot)
+ metadata.description = description
+ metadata.unstructured_json = unstructured_json or metadata.unstructured_json or {}
+ metadata.save()
+
+
+def retrieve_robot_token(robot):
+ """ Returns the decrypted token for the given robot. """
+ try:
+ token = RobotAccountToken.get(robot_account=robot).token.decrypt()
+ except RobotAccountToken.DoesNotExist:
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ # For legacy only.
+ token = robot.email
+ else:
+ raise
+
+ return token
+
+
+def get_robot_and_metadata(robot_shortname, parent):
+ """ Returns a tuple of the robot matching the given shortname, its token, and its metadata. """
+ robot_username = format_robot_username(parent.username, robot_shortname)
+ robot, metadata = lookup_robot_and_metadata(robot_username)
+ token = retrieve_robot_token(robot)
+ return robot, token, metadata
+
+
+def lookup_robot(robot_username):
+ try:
+ return User.get(username=robot_username, robot=True)
+ except User.DoesNotExist:
+ raise InvalidRobotException('Could not find robot with username: %s' % robot_username)
+
+
+def lookup_robot_and_metadata(robot_username):
+ robot = lookup_robot(robot_username)
+ return robot, get_or_create_robot_metadata(robot)
+
+
+def get_matching_robots(name_prefix, username, limit=10):
+ admined_orgs = (_basequery.get_user_organizations(username)
+ .switch(Team)
+ .join(TeamRole)
+ .where(TeamRole.name == 'admin'))
+
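+  # Build a single OR of username prefix filters: one per admined organization's robots, plus
+  # the calling user's own robots.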
+ prefix_checks = False
+
+ for org in admined_orgs:
+ org_search = prefix_search(User.username, org.username + '+' + name_prefix)
+ prefix_checks = prefix_checks | org_search
+
+ user_search = prefix_search(User.username, username + '+' + name_prefix)
+ prefix_checks = prefix_checks | user_search
+
+ return User.select().where(prefix_checks).limit(limit)
+
+
+def verify_robot(robot_username, password):
+ try:
+ password = remove_unicode(password)
+ except UnicodeEncodeError:
+ msg = ('Could not find robot with username: %s and supplied password.' %
+ robot_username)
+ raise InvalidRobotException(msg)
+
+ result = parse_robot_username(robot_username)
+ if result is None:
+ raise InvalidRobotException('%s is an invalid robot name' % robot_username)
+
+ robot = lookup_robot(robot_username)
+ assert robot.robot
+
+  # Lookup the token for the robot. The error message is defined up front so that it is also
+  # available in the legacy fallback path below.
+  msg = ('Could not find robot with username: %s and supplied password.' %
+         robot_username)
+  try:
+    token_data = RobotAccountToken.get(robot_account=robot)
+    if not token_data.token.matches(password):
+      raise InvalidRobotException(msg)
+ except RobotAccountToken.DoesNotExist:
+ # TODO(remove-unenc): Remove once migrated.
+ if not ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ raise InvalidRobotException(msg)
+
+ if password.find('robot:') >= 0:
+ # Just to be sure.
+ raise InvalidRobotException(msg)
+
+ query = (User
+ .select()
+ .join(FederatedLogin)
+ .join(LoginService)
+ .where(FederatedLogin.service_ident == password, LoginService.name == 'quayrobot',
+ User.username == robot_username))
+
+ try:
+ robot = query.get()
+ except User.DoesNotExist:
+ msg = ('Could not find robot with username: %s and supplied password.' %
+ robot_username)
+ raise InvalidRobotException(msg)
+
+ # Find the owner user and ensure it is not disabled.
+ try:
+ owner = User.get(User.username == result[0])
+ except User.DoesNotExist:
+ raise InvalidRobotException('Robot %s owner does not exist' % robot_username)
+
+ if not owner.enabled:
+ raise InvalidRobotException('This user has been disabled. Please contact your administrator.')
+
+ # Mark that the robot was accessed.
+ _basequery.update_last_accessed(robot)
+
+ return robot
+
+def regenerate_robot_token(robot_shortname, parent):
+ robot_username = format_robot_username(parent.username, robot_shortname)
+
+ robot, metadata = lookup_robot_and_metadata(robot_username)
+ password = random_string_generator(length=64)()
+ robot.email = str(uuid4())
+ robot.uuid = str(uuid4())
+
+ service = LoginService.get(name='quayrobot')
+ login = FederatedLogin.get(FederatedLogin.user == robot, FederatedLogin.service == service)
+ login.service_ident = 'robot:%s' % (robot.id)
+
+ try:
+ token_data = RobotAccountToken.get(robot_account=robot)
+ except RobotAccountToken.DoesNotExist:
+ token_data = RobotAccountToken.create(robot_account=robot)
+
+ token_data.token = password
+
+ with db_transaction():
+ token_data.save()
+ login.save()
+ robot.save()
+
+ return robot, password, metadata
+
+
+def delete_robot(robot_username):
+ try:
+ robot = User.get(username=robot_username, robot=True)
+ robot.delete_instance(recursive=True, delete_nullable=True)
+
+ except User.DoesNotExist:
+ raise InvalidRobotException('Could not find robot with username: %s' %
+ robot_username)
+
+
+def list_namespace_robots(namespace):
+ """ Returns all the robots found under the given namespace. """
+ return _list_entity_robots(namespace)
+
+
+def _list_entity_robots(entity_name, include_metadata=True, include_token=True):
+ """ Return the list of robots for the specified entity. This MUST return a query, not a
+ materialized list so that callers can use db_for_update.
+ """
+ # TODO(remove-unenc): Remove FederatedLogin and LEFT_OUTER on RobotAccountToken once migration
+ # is complete.
+ if include_metadata or include_token:
+ query = (User
+ .select(User, RobotAccountToken, FederatedLogin, RobotAccountMetadata)
+ .join(FederatedLogin)
+ .switch(User)
+ .join(RobotAccountMetadata, JOIN.LEFT_OUTER)
+ .switch(User)
+ .join(RobotAccountToken, JOIN.LEFT_OUTER)
+ .where(User.robot == True, User.username ** (entity_name + '+%')))
+ else:
+ query = (User
+ .select(User)
+ .where(User.robot == True, User.username ** (entity_name + '+%')))
+
+ return query
+
+
+def list_entity_robot_permission_teams(entity_name, limit=None, include_permissions=False):
+ query = (_list_entity_robots(entity_name))
+
+ # TODO(remove-unenc): Remove FederatedLogin once migration is complete.
+ fields = [User.username, User.creation_date, User.last_accessed, RobotAccountToken.token,
+ FederatedLogin.service_ident, RobotAccountMetadata.description,
+ RobotAccountMetadata.unstructured_json]
+ if include_permissions:
+ query = (query
+ .join(RepositoryPermission, JOIN.LEFT_OUTER,
+ on=(RepositoryPermission.user == FederatedLogin.user))
+ .join(Repository, JOIN.LEFT_OUTER)
+ .switch(User)
+ .join(TeamMember, JOIN.LEFT_OUTER)
+ .join(Team, JOIN.LEFT_OUTER))
+
+ fields.append(Repository.name)
+ fields.append(Team.name)
+
+ query = query.limit(limit).order_by(User.last_accessed.desc())
+ return TupleSelector(query, fields)
+
+
+def update_user_metadata(user, metadata=None):
+ """ Updates the metadata associated with the user, including his/her name and company. """
+ metadata = metadata if metadata is not None else {}
+
+ with db_transaction():
+ if 'given_name' in metadata:
+ user.given_name = metadata['given_name']
+
+ if 'family_name' in metadata:
+ user.family_name = metadata['family_name']
+
+ if 'company' in metadata:
+ user.company = metadata['company']
+
+ if 'location' in metadata:
+ user.location = metadata['location']
+
+ user.save()
+
+ # Remove any prompts associated with the user's metadata being needed.
+ remove_user_prompt(user, UserPromptTypes.ENTER_NAME)
+ remove_user_prompt(user, UserPromptTypes.ENTER_COMPANY)
+
+
+def _get_login_service(service_id):
+ try:
+ return LoginService.get(LoginService.name == service_id)
+ except LoginService.DoesNotExist:
+ return LoginService.create(name=service_id)
+
+
+def create_federated_user(username, email, service_id, service_ident,
+ set_password_notification, metadata={},
+ email_required=True, confirm_username=True,
+ prompts=tuple()):
+ prompts = set(prompts)
+
+ if confirm_username:
+ prompts.add(UserPromptTypes.CONFIRM_USERNAME)
+
+ new_user = create_user_noverify(username, email, email_required=email_required, prompts=prompts)
+ new_user.verified = True
+ new_user.save()
+
+ FederatedLogin.create(user=new_user, service=_get_login_service(service_id),
+ service_ident=service_ident,
+ metadata_json=json.dumps(metadata))
+
+ if set_password_notification:
+ notification.create_notification('password_required', new_user)
+
+ return new_user
+
+
+def attach_federated_login(user, service_id, service_ident, metadata=None):
+ service = _get_login_service(service_id)
+ FederatedLogin.create(user=user, service=service, service_ident=service_ident,
+ metadata_json=json.dumps(metadata or {}))
+ return user
+
+
+def verify_federated_login(service_id, service_ident):
+ try:
+ found = (FederatedLogin
+ .select(FederatedLogin, User)
+ .join(LoginService)
+ .switch(FederatedLogin).join(User)
+ .where(FederatedLogin.service_ident == service_ident, LoginService.name == service_id)
+ .get())
+
+ # Mark that the user was accessed.
+ _basequery.update_last_accessed(found.user)
+
+ return found.user
+ except FederatedLogin.DoesNotExist:
+ return None
+
+
+def list_federated_logins(user):
+ selected = FederatedLogin.select(FederatedLogin.service_ident,
+ LoginService.name, FederatedLogin.metadata_json)
+ joined = selected.join(LoginService)
+ return joined.where(LoginService.name != 'quayrobot',
+ FederatedLogin.user == user)
+
+
+def lookup_federated_login(user, service_name):
+ try:
+ return list_federated_logins(user).where(LoginService.name == service_name).get()
+ except FederatedLogin.DoesNotExist:
+ return None
+
+
+def create_confirm_email_code(user, new_email=None):
+ if new_email:
+ if not validate_email(new_email):
+ raise InvalidEmailAddressException('Invalid email address: %s' %
+ new_email)
+
+ verification_code, unhashed = Credential.generate()
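+  # Only the hashed verification_code is persisted; the unhashed credential is handed back to
+  # the caller inside the encoded token below, alongside the row's public code.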
+ code = EmailConfirmation.create(user=user,
+ email_confirm=True,
+ new_email=new_email,
+ verification_code=verification_code)
+ return encode_public_private_token(code.code, unhashed)
+
+
+def confirm_user_email(token):
+ # TODO(remove-unenc): Remove allow_public_only once migrated.
+ allow_public_only = ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS)
+ result = decode_public_private_token(token, allow_public_only=allow_public_only)
+ if not result:
+ raise DataModelException('Invalid email confirmation code')
+
+ try:
+ code = EmailConfirmation.get(EmailConfirmation.code == result.public_code,
+ EmailConfirmation.email_confirm == True)
+ except EmailConfirmation.DoesNotExist:
+ raise DataModelException('Invalid email confirmation code')
+
+ if result.private_token and not code.verification_code.matches(result.private_token):
+ raise DataModelException('Invalid email confirmation code')
+
+ user = code.user
+ user.verified = True
+
+ old_email = None
+ new_email = code.new_email
+ if new_email and new_email != old_email:
+ if find_user_by_email(new_email):
+ raise DataModelException('E-mail address already used')
+
+ old_email = user.email
+ user.email = new_email
+
+ with db_transaction():
+ user.save()
+ code.delete_instance()
+
+ return user, new_email, old_email
+
+
+def create_reset_password_email_code(email):
+ try:
+ user = User.get(User.email == email)
+ except User.DoesNotExist:
+ raise InvalidEmailAddressException('Email address was not found')
+
+ if user.organization:
+ raise InvalidEmailAddressException('Organizations can not have passwords')
+
+ verification_code, unhashed = Credential.generate()
+ code = EmailConfirmation.create(user=user, pw_reset=True, verification_code=verification_code)
+ return encode_public_private_token(code.code, unhashed)
+
+
+def validate_reset_code(token):
+ # TODO(remove-unenc): Remove allow_public_only once migrated.
+ allow_public_only = ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS)
+ result = decode_public_private_token(token, allow_public_only=allow_public_only)
+ if not result:
+ return None
+
+ # Find the reset code.
+ try:
+ code = EmailConfirmation.get(EmailConfirmation.code == result.public_code,
+ EmailConfirmation.pw_reset == True)
+ except EmailConfirmation.DoesNotExist:
+ return None
+
+ if result.private_token and not code.verification_code.matches(result.private_token):
+ return None
+
+ # Make sure the code is not expired.
+ max_lifetime_duration = convert_to_timedelta(config.app_config['USER_RECOVERY_TOKEN_LIFETIME'])
+ if code.created + max_lifetime_duration < datetime.now():
+ code.delete_instance()
+ return None
+
+ # Verify the user and return the code.
+ user = code.user
+
+ with db_transaction():
+ if not user.verified:
+ user.verified = True
+ user.save()
+
+ code.delete_instance()
+
+ return user
+
+
+def find_user_by_email(email):
+ try:
+ return User.get(User.email == email)
+ except User.DoesNotExist:
+ return None
+
+
+def get_nonrobot_user(username):
+ try:
+ return User.get(User.username == username, User.organization == False, User.robot == False)
+ except User.DoesNotExist:
+ return None
+
+
+def get_user(username):
+ try:
+ return User.get(User.username == username, User.organization == False)
+ except User.DoesNotExist:
+ return None
+
+
+def get_namespace_user(username):
+ try:
+ return User.get(User.username == username)
+ except User.DoesNotExist:
+ return None
+
+
+def get_user_or_org(username):
+ try:
+ return User.get(User.username == username, User.robot == False)
+ except User.DoesNotExist:
+ return None
+
+
+def get_user_by_id(user_db_id):
+ try:
+ return User.get(User.id == user_db_id, User.organization == False)
+ except User.DoesNotExist:
+ return None
+
+
+def get_user_map_by_ids(namespace_ids):
+ id_user = {namespace_id: None for namespace_id in namespace_ids}
+ users = User.select().where(User.id << namespace_ids, User.organization == False)
+ for user in users:
+ id_user[user.id] = user
+
+ return id_user
+
+def get_namespace_user_by_user_id(namespace_user_db_id):
+ try:
+ return User.get(User.id == namespace_user_db_id, User.robot == False)
+ except User.DoesNotExist:
+ raise InvalidUsernameException('User with id does not exist: %s' % namespace_user_db_id)
+
+
+def get_namespace_by_user_id(namespace_user_db_id):
+ try:
+ return User.get(User.id == namespace_user_db_id, User.robot == False).username
+ except User.DoesNotExist:
+ raise InvalidUsernameException('User with id does not exist: %s' % namespace_user_db_id)
+
+
+def get_user_by_uuid(user_uuid):
+ try:
+ return User.get(User.uuid == user_uuid, User.organization == False)
+ except User.DoesNotExist:
+ return None
+
+
+def get_user_or_org_by_customer_id(customer_id):
+ try:
+ return User.get(User.stripe_id == customer_id)
+ except User.DoesNotExist:
+ return None
+
+def invalidate_all_sessions(user):
+ """ Invalidates all existing user sessions by rotating the user's UUID. """
+ if not user:
+ return
+
+ user.uuid = str(uuid4())
+ user.save()
+
+def get_matching_user_namespaces(namespace_prefix, username, limit=10):
+ namespace_user = get_namespace_user(username)
+ namespace_user_id = namespace_user.id if namespace_user is not None else None
+
+ namespace_search = prefix_search(Namespace.username, namespace_prefix)
+ base_query = (Namespace
+ .select()
+ .distinct()
+ .join(Repository, on=(Repository.namespace_user == Namespace.id))
+ .join(RepositoryPermission, JOIN.LEFT_OUTER)
+ .where(namespace_search))
+
+ return _basequery.filter_to_repos_for_user(base_query, namespace_user_id).limit(limit)
+
+def get_matching_users(username_prefix, robot_namespace=None, organization=None, limit=20,
+ exact_matches_only=False):
+ # Lookup the exact match first. This ensures that the exact match is not cut off by the list
+ # limit.
+ updated_limit = limit
+ exact_match = list(_get_matching_users(username_prefix, robot_namespace, organization, limit=1,
+ exact_matches_only=True))
+ if exact_match:
+ updated_limit -= 1
+ yield exact_match[0]
+
+ # Perform the remainder of the lookup.
+ if updated_limit:
+ for result in _get_matching_users(username_prefix, robot_namespace, organization, updated_limit,
+ exact_matches_only):
+ if exact_match and result.username == exact_match[0].username:
+ continue
+
+ yield result
+
+def _get_matching_users(username_prefix, robot_namespace=None, organization=None, limit=20,
+ exact_matches_only=False):
+ user_search = prefix_search(User.username, username_prefix)
+ if exact_matches_only:
+ user_search = (User.username == username_prefix)
+
+ direct_user_query = (user_search & (User.organization == False) & (User.robot == False))
+
+ if robot_namespace:
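+    # Robot usernames have the form '<namespace>+<shortname>', so also prefix-match within the
+    # given robot namespace.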
+ robot_prefix = format_robot_username(robot_namespace, username_prefix)
+ robot_search = prefix_search(User.username, robot_prefix)
+ direct_user_query = ((robot_search & (User.robot == True)) | direct_user_query)
+
+ query = (User
+ .select(User.id, User.username, User.email, User.robot)
+ .group_by(User.id, User.username, User.email, User.robot)
+ .where(direct_user_query))
+
+ if organization:
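+    # Append fn.Sum(Team.id), which is NULL unless the user belongs to at least one team in the
+    # given organization; MatchingUserResult reads it below to compute is_org_member.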
+ query = (query
+ .select(User.id, User.username, User.email, User.robot, fn.Sum(Team.id))
+ .join(TeamMember, JOIN.LEFT_OUTER)
+ .join(Team, JOIN.LEFT_OUTER, on=((Team.id == TeamMember.team) &
+ (Team.organization == organization)))
+ .order_by(User.robot.desc()))
+
+ class MatchingUserResult(object):
+ def __init__(self, *args):
+ self.id = args[0]
+ self.username = args[1]
+ self.email = args[2]
+ self.robot = args[3]
+
+ if organization:
+        self.is_org_member = (args[4] is not None)
+ else:
+ self.is_org_member = None
+
+ return (MatchingUserResult(*args) for args in query.tuples().limit(limit))
+
+
+def verify_user(username_or_email, password):
+ """ Verifies that the given username/email + password pair is valid. If the username or e-mail
+ address is invalid, returns None. If the password specified does not match for the given user,
+ either returns None or raises TooManyLoginAttemptsException if there have been too many
+ invalid login attempts. Returns the user object if the login was valid.
+ """
+
+ # Make sure we didn't get any unicode for the username.
+ try:
+ str(username_or_email)
+ except ValueError:
+ return None
+
+ # Fetch the user with the matching username or e-mail address.
+ try:
+ fetched = User.get((User.username == username_or_email) | (User.email == username_or_email))
+ except User.DoesNotExist:
+ return None
+
+ # If the user has any invalid login attempts, check to see if we are within the exponential
+ # backoff window for the user. If so, we raise an exception indicating that the user cannot
+ # login.
+ now = datetime.utcnow()
+ if fetched.invalid_login_attempts > 0:
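+    # exponential_backoff returns the earliest time at which another attempt is allowed, based
+    # on the number of previous failures and the time of the last invalid login.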
+ can_retry_at = exponential_backoff(fetched.invalid_login_attempts, EXPONENTIAL_BACKOFF_SCALE,
+ fetched.last_invalid_login)
+
+ if can_retry_at > now:
+ retry_after = can_retry_at - now
+ raise TooManyLoginAttemptsException('Too many login attempts.', retry_after.total_seconds())
+
+ # Hash the given password and compare it to the specified password.
+ if (fetched.password_hash and
+ hash_password(password, fetched.password_hash) == fetched.password_hash):
+
+ # If the user previously had any invalid login attempts, clear them out now.
+ if fetched.invalid_login_attempts > 0:
+ try:
+ (User
+ .update(invalid_login_attempts=0)
+ .where(User.id == fetched.id)
+ .execute())
+
+ # Mark that the user was accessed.
+ _basequery.update_last_accessed(fetched)
+ except ReadOnlyModeException:
+ pass
+
+ # Return the valid user.
+ return fetched
+
+ # Otherwise, update the user's invalid login attempts.
+ try:
+ (User
+ .update(invalid_login_attempts=User.invalid_login_attempts+1, last_invalid_login=now)
+ .where(User.id == fetched.id)
+ .execute())
+ except ReadOnlyModeException:
+ pass
+
+ # We weren't able to authorize the user
+ return None
+
+
+def get_all_repo_users(namespace_name, repository_name):
+ return (RepositoryPermission
+ .select(User, Role, RepositoryPermission)
+ .join(User)
+ .switch(RepositoryPermission)
+ .join(Role)
+ .switch(RepositoryPermission)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
+
+
+def get_all_repo_users_transitive_via_teams(namespace_name, repository_name):
+ return (User
+ .select()
+ .distinct()
+ .join(TeamMember)
+ .join(Team)
+ .join(RepositoryPermission)
+ .join(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == namespace_name, Repository.name == repository_name))
+
+
+def get_all_repo_users_transitive(namespace_name, repository_name):
+ # Load the users found via teams and directly via permissions.
+ via_teams = get_all_repo_users_transitive_via_teams(namespace_name, repository_name)
+ directly = [perm.user for perm in get_all_repo_users(namespace_name, repository_name)]
+
+ # Filter duplicates.
+ user_set = set()
+
+ def check_add(u):
+ if u.username in user_set:
+ return False
+
+ user_set.add(u.username)
+ return True
+
+ return [user for user in list(directly) + list(via_teams) if check_add(user)]
+
+
+def get_private_repo_count(username):
+ return (Repository
+ .select()
+ .join(Visibility)
+ .switch(Repository)
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .where(Namespace.username == username, Visibility.name == 'private')
+ .count())
+
+
+def get_active_users(disabled=True, deleted=False):
+ query = (User
+ .select()
+ .where(User.organization == False, User.robot == False))
+
+ if not disabled:
+ query = query.where(User.enabled == True)
+
+ if not deleted:
+ query = query.where(User.id.not_in(DeletedNamespace.select(DeletedNamespace.namespace)))
+
+ return query
+
+
+def get_active_user_count():
+ return get_active_users().count()
+
+
+def get_robot_count():
+ return User.select().where(User.robot == True).count()
+
+
+def detach_external_login(user, service_name):
+ try:
+ service = LoginService.get(name=service_name)
+ except LoginService.DoesNotExist:
+ return
+
+ FederatedLogin.delete().where(FederatedLogin.user == user,
+ FederatedLogin.service == service).execute()
+
+
+def get_solely_admined_organizations(user_obj):
+ """ Returns the organizations admined solely by the given user. """
+ orgs = (User.select()
+ .where(User.organization == True)
+ .join(Team)
+ .join(TeamRole)
+ .where(TeamRole.name == 'admin')
+ .switch(Team)
+ .join(TeamMember)
+ .where(TeamMember.user == user_obj)
+ .distinct())
+
+ # Filter to organizations where the user is the sole admin.
+ solely_admined = []
+ for org in orgs:
+ admin_user_count = (TeamMember.select()
+ .join(Team)
+ .join(TeamRole)
+ .where(Team.organization == org, TeamRole.name == 'admin')
+ .switch(TeamMember)
+ .join(User)
+ .where(User.robot == False)
+ .distinct()
+ .count())
+
+ if admin_user_count == 1:
+ solely_admined.append(org)
+
+ return solely_admined
+
+
+def mark_namespace_for_deletion(user, queues, namespace_gc_queue, force=False):
+ """ Marks a namespace (as referenced by the given user) for deletion. A queue item will be added
+ to delete the namespace's repositories and storage, while the namespace itself will be
+ renamed, disabled, and delinked from other tables.
+ """
+ if not user.enabled:
+ return None
+
+ if not force and not user.organization:
+ # Ensure that the user is not the sole admin for any organizations. If so, then the user
+ # cannot be deleted before those organizations are deleted or reassigned.
+ organizations = get_solely_admined_organizations(user)
+ if len(organizations) > 0:
+ message = 'Cannot delete %s as you are the only admin for organizations: ' % user.username
+ for index, org in enumerate(organizations):
+ if index > 0:
+ message = message + ', '
+
+ message = message + org.username
+
+ raise DataModelException(message)
+
+ # Delete all queue items for the user.
+ for queue in queues:
+ queue.delete_namespaced_items(user.username)
+
+ # Delete non-repository related items. This operation is very quick, so we can do so here.
+ _delete_user_linked_data(user)
+
+ with db_transaction():
+ original_username = user.username
+ user = db_for_update(User.select().where(User.id == user.id)).get()
+
+ # Mark the namespace as deleted and ready for GC.
+ try:
+ marker = DeletedNamespace.create(namespace=user,
+ original_username=original_username,
+ original_email=user.email)
+ except IntegrityError:
+ return
+
+ # Disable the namespace itself, and replace its various unique fields with UUIDs.
+ user.enabled = False
+ user.username = str(uuid4())
+ user.email = str(uuid4())
+ user.save()
+
+ # Add a queueitem to delete the namespace.
+ marker.queue_id = namespace_gc_queue.put([str(user.id)], json.dumps({
+ 'marker_id': marker.id,
+ 'original_username': original_username,
+ }))
+ marker.save()
+ return marker.id
+
+
+def delete_namespace_via_marker(marker_id, queues):
+ """ Deletes a namespace referenced by the given DeletedNamespace marker ID. """
+ try:
+ marker = DeletedNamespace.get(id=marker_id)
+ except DeletedNamespace.DoesNotExist:
+ return
+
+ delete_user(marker.namespace, queues)
+
+
+def delete_user(user, queues):
+ """ Deletes a user/organization/robot. Should *not* be called by any user-facing API. Instead,
+ mark_namespace_for_deletion should be used, and the queue should call this method.
+ """
+ # Delete all queue items for the user.
+ for queue in queues:
+ queue.delete_namespaced_items(user.username)
+
+ # Delete any repositories under the user's namespace.
+ for repo in list(Repository.select().where(Repository.namespace_user == user)):
+ gc.purge_repository(user.username, repo.name)
+
+ # Delete non-repository related items.
+ _delete_user_linked_data(user)
+
+ # Delete the user itself.
+ user.delete_instance(recursive=True, delete_nullable=True)
+
+
+def _delete_user_linked_data(user):
+ if user.organization:
+ # Delete the organization's teams.
+ with db_transaction():
+ for team in Team.select().where(Team.organization == user):
+ team.delete_instance(recursive=True)
+
+ # Delete any OAuth approvals and tokens associated with the user.
+ with db_transaction():
+ for app in OAuthApplication.select().where(OAuthApplication.organization == user):
+ app.delete_instance(recursive=True)
+ else:
+ # Remove the user from any teams in which they are a member.
+ TeamMember.delete().where(TeamMember.user == user).execute()
+
+ # Delete any repository buildtriggers where the user is the connected user.
+ with db_transaction():
+ triggers = RepositoryBuildTrigger.select().where(RepositoryBuildTrigger.connected_user == user)
+ for trigger in triggers:
+ trigger.delete_instance(recursive=True, delete_nullable=False)
+
+ # Delete any mirrors with robots owned by this user.
+ with db_transaction():
+ robots = list(list_namespace_robots(user.username))
+ RepoMirrorConfig.delete().where(RepoMirrorConfig.internal_robot << robots).execute()
+
+ # Delete any robots owned by this user.
+ with db_transaction():
+ robots = list(list_namespace_robots(user.username))
+ for robot in robots:
+ robot.delete_instance(recursive=True, delete_nullable=True)
+
+  # Null out any service key approvals. We technically lose information here, but it's better
+  # than failing, and this only occurs if a superuser is being deleted.
+ ServiceKeyApproval.update(approver=None).where(ServiceKeyApproval.approver == user).execute()
+
+
+def get_pull_credentials(robotname):
+ """ Returns the pull credentials for a robot with the given name. """
+ try:
+ robot = lookup_robot(robotname)
+ except InvalidRobotException:
+ return None
+
+ token = retrieve_robot_token(robot)
+
+ return {
+ 'username': robot.username,
+ 'password': token,
+ 'registry': '%s://%s/v1/' % (config.app_config['PREFERRED_URL_SCHEME'],
+ config.app_config['SERVER_HOSTNAME']),
+ }
+
+def get_region_locations(user):
+ """ Returns the locations defined as preferred storage for the given user. """
+ query = UserRegion.select().join(ImageStorageLocation).where(UserRegion.user == user)
+ return set([region.location.name for region in query])
+
+def get_federated_logins(user_ids, service_name):
+ """ Returns all federated logins for the given user ids under the given external service. """
+ if not user_ids:
+ return []
+
+ return (FederatedLogin
+ .select()
+ .join(User)
+ .switch(FederatedLogin)
+ .join(LoginService)
+ .where(FederatedLogin.user << user_ids,
+ LoginService.name == service_name))
+
+
+def list_namespace_geo_restrictions(namespace_name):
+ """ Returns all of the defined geographic restrictions for the given namespace. """
+ return (NamespaceGeoRestriction
+ .select()
+ .join(User)
+ .where(User.username == namespace_name))
+
+
+def get_minimum_user_id():
+ return User.select(fn.Min(User.id)).tuples().get()[0]
+
+
+class LoginWrappedDBUser(UserMixin):
+ def __init__(self, user_uuid, db_user=None):
+ self._uuid = user_uuid
+ self._db_user = db_user
+
+ def db_user(self):
+ if not self._db_user:
+ self._db_user = get_user_by_uuid(self._uuid)
+ return self._db_user
+
+ @property
+ def is_authenticated(self):
+ return self.db_user() is not None
+
+ @property
+ def is_active(self):
+ return self.db_user() and self.db_user().verified
+
+ def get_id(self):
+ return unicode(self._uuid)
diff --git a/data/queue.py b/data/queue.py
new file mode 100644
index 000000000..289f4ad64
--- /dev/null
+++ b/data/queue.py
@@ -0,0 +1,378 @@
+import uuid
+
+from datetime import datetime, timedelta
+from contextlib import contextmanager
+
+from data.database import QueueItem, db, db_for_update, db_random_func
+from util.morecollections import AttrDict
+
+
+MINIMUM_EXTENSION = timedelta(seconds=20)
+DEFAULT_BATCH_SIZE = 1000
+
+
+class BuildMetricQueueReporter(object):
+ """ Metric queue reporter for the build system. """
+ def __init__(self, metric_queue):
+ self._metric_queue = metric_queue
+
+ def __call__(self, currently_processing, running_count, total_count):
+ need_capacity_count = total_count - running_count
+ self._metric_queue.put_deprecated('BuildCapacityShortage', need_capacity_count, unit='Count')
+ self._metric_queue.build_capacity_shortage.Set(need_capacity_count)
+
+ building_percent = 100 if currently_processing else 0
+ self._metric_queue.percent_building.Set(building_percent)
+
+
+class WorkQueue(object):
+ """ Work queue defines methods for interacting with a queue backed by the database. """
+ def __init__(self, queue_name, transaction_factory,
+ canonical_name_match_list=None, reporter=None, metric_queue=None,
+ has_namespace=False):
+ self._queue_name = queue_name
+ self._reporter = reporter
+ self._metric_queue = metric_queue
+ self._transaction_factory = transaction_factory
+ self._currently_processing = False
+ self._has_namespaced_items = has_namespace
+
+ if canonical_name_match_list is None:
+ self._canonical_name_match_list = []
+ else:
+ self._canonical_name_match_list = canonical_name_match_list
+
+ @staticmethod
+ def _canonical_name(name_list):
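+    # e.g. ['myqueue', 'mynamespace'] -> 'myqueue/mynamespace/'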
+ return '/'.join(name_list) + '/'
+
+ @classmethod
+ def _running_jobs(cls, now, name_match_query):
+ return (cls
+ ._running_jobs_where(QueueItem.select(QueueItem.queue_name), now)
+ .where(QueueItem.queue_name ** name_match_query))
+
+ @classmethod
+ def _available_jobs(cls, now, name_match_query):
+ return (cls
+ ._available_jobs_where(QueueItem.select(), now)
+ .where(QueueItem.queue_name ** name_match_query))
+
+ @staticmethod
+ def _running_jobs_where(query, now):
+ return query.where(QueueItem.available == False, QueueItem.processing_expires > now)
+
+ @staticmethod
+ def _available_jobs_where(query, now):
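+    # An item is available once its available_after time has passed, it is either explicitly
+    # marked available or its processing lease has expired, and it still has retries remaining.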
+ return query.where(QueueItem.available_after <= now,
+ ((QueueItem.available == True) | (QueueItem.processing_expires <= now)),
+ QueueItem.retries_remaining > 0)
+
+ @classmethod
+ def _available_jobs_not_running(cls, now, name_match_query, running_query):
+ return (cls
+ ._available_jobs(now, name_match_query)
+ .where(~(QueueItem.queue_name << running_query)))
+
+ def num_alive_jobs(self, canonical_name_list):
+ """
+ Returns the number of alive queue items with a given prefix.
+ """
+ def strip_slash(name):
+ return name.lstrip('/')
+ canonical_name_list = map(strip_slash, canonical_name_list)
+ canonical_name_query = '/'.join([self._queue_name] + canonical_name_list) + '%'
+
+ return (QueueItem
+ .select()
+ .where(QueueItem.queue_name ** canonical_name_query)
+ .where(QueueItem.retries_remaining > 0)
+ .count())
+
+ def num_available_jobs_between(self, available_min_time, available_max_time, canonical_name_list):
+ """
+ Returns the number of available queue items with a given prefix, between the two provided times.
+ """
+ def strip_slash(name):
+ return name.lstrip('/')
+ canonical_name_list = map(strip_slash, canonical_name_list)
+
+ available = self._available_jobs(available_max_time,
+ '/'.join([self._queue_name] + canonical_name_list) + '%')
+
+ return available.where(QueueItem.available_after >= available_min_time).count()
+
+ def _name_match_query(self):
+ return '%s%%' % self._canonical_name([self._queue_name] + self._canonical_name_match_list)
+
+ @staticmethod
+ def _item_by_id_for_update(queue_id):
+ return db_for_update(QueueItem.select().where(QueueItem.id == queue_id)).get()
+
+ def get_metrics(self):
+ now = datetime.utcnow()
+ name_match_query = self._name_match_query()
+
+ running_query = self._running_jobs(now, name_match_query)
+ running_count = running_query.distinct().count()
+
+ available_query = self._available_jobs(now, name_match_query)
+ available_count = available_query.select(QueueItem.queue_name).distinct().count()
+
+ available_not_running_query = self._available_jobs_not_running(now, name_match_query,
+ running_query)
+ available_not_running_count = (available_not_running_query
+ .select(QueueItem.queue_name)
+ .distinct()
+ .count())
+
+ return (running_count, available_not_running_count, available_count)
+
+ def update_metrics(self):
+ if self._reporter is None and self._metric_queue is None:
+ return
+
+ (running_count, available_not_running_count, available_count) = self.get_metrics()
+
+ if self._metric_queue:
+ self._metric_queue.work_queue_running.Set(running_count, labelvalues=[self._queue_name])
+ self._metric_queue.work_queue_available.Set(available_count, labelvalues=[self._queue_name])
+ self._metric_queue.work_queue_available_not_running.Set(available_not_running_count,
+ labelvalues=[self._queue_name])
+
+
+ if self._reporter:
+ self._reporter(self._currently_processing, running_count,
+ running_count + available_not_running_count)
+
+ def has_retries_remaining(self, item_id):
+ """ Returns whether the queue item with the given id has any retries remaining. If the
+ queue item does not exist, returns False. """
+ with self._transaction_factory(db):
+ try:
+ return QueueItem.get(id=item_id).retries_remaining > 0
+ except QueueItem.DoesNotExist:
+ return False
+
+ def delete_namespaced_items(self, namespace, subpath=None):
+ """ Deletes all items in this queue that exist under the given namespace. """
+ if not self._has_namespaced_items:
+ return False
+
+ subpath_query = '%s/' % subpath if subpath else ''
+ queue_prefix = '%s/%s/%s%%' % (self._queue_name, namespace, subpath_query)
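+    # e.g. 'myqueue/mynamespace/%' or, with a subpath, 'myqueue/mynamespace/subpath/%'.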
+ return QueueItem.delete().where(QueueItem.queue_name ** queue_prefix).execute()
+
+ def alive(self, canonical_name_list):
+ """
+ Returns True if a job matching the canonical name list is currently processing
+ or available.
+ """
+ canonical_name = self._canonical_name([self._queue_name] + canonical_name_list)
+ try:
+ select_query = QueueItem.select().where(QueueItem.queue_name == canonical_name)
+ now = datetime.utcnow()
+
+ overall_query = (self._available_jobs_where(select_query.clone(), now) |
+ self._running_jobs_where(select_query.clone(), now))
+ overall_query.get()
+ return True
+ except QueueItem.DoesNotExist:
+ return False
+
+ def _queue_dict(self, canonical_name_list, message, available_after, retries_remaining):
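+    # available_after is persisted as an absolute timestamp: now plus the requested delay in seconds.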
+ return dict(
+ queue_name=self._canonical_name([self._queue_name] + canonical_name_list),
+ body=message,
+ retries_remaining=retries_remaining,
+ available_after=datetime.utcnow() + timedelta(seconds=available_after or 0),
+ )
+
+ @contextmanager
+ def batch_insert(self, batch_size=DEFAULT_BATCH_SIZE):
+ items_to_insert = []
+ def batch_put(canonical_name_list, message, available_after=0, retries_remaining=5):
+ """
+ Put an item, if it shouldn't be processed for some number of seconds,
+ specify that amount as available_after. Returns the ID of the queue item added.
+ """
+ items_to_insert.append(self._queue_dict(canonical_name_list, message, available_after,
+ retries_remaining))
+
+ yield batch_put
+
+ # Chunk the inserted items into batch_size chunks and insert_many
+ remaining = list(items_to_insert)
+ while remaining:
+ QueueItem.insert_many(remaining[0:batch_size]).execute()
+ remaining = remaining[batch_size:]
+
+ def put(self, canonical_name_list, message, available_after=0, retries_remaining=5):
+ """
+ Put an item, if it shouldn't be processed for some number of seconds,
+ specify that amount as available_after. Returns the ID of the queue item added.
+ """
+ item = QueueItem.create(**self._queue_dict(canonical_name_list, message, available_after,
+ retries_remaining))
+ return str(item.id)
+
+ def _select_available_item(self, ordering_required, now):
+ """ Selects an available queue item from the queue table and returns it, if any. If none,
+ return None.
+ """
+ name_match_query = self._name_match_query()
+
+ try:
+ if ordering_required:
+        # The previous solution to this used a select for update in a
+        # transaction to prevent multiple instances from processing the
+        # same queue item. This suffered from performance problems. Instead,
+        # instances attempt to update the potential queue item to be
+        # unavailable, with the update restricted by the item's current
+        # state_id (see _attempt_to_claim_item). Only one instance's update
+        # will succeed with a changed row count of 1; instances that see 0
+        # changed rows know that another instance is already handling that item.
+ running = self._running_jobs(now, name_match_query)
+ avail = self._available_jobs_not_running(now, name_match_query, running)
+ return avail.order_by(QueueItem.id).get()
+ else:
+ # If we don't require ordering, we grab a random item from any of the first 50 available.
+ subquery = self._available_jobs(now, name_match_query).limit(50).alias('j1')
+ return (QueueItem
+ .select()
+ .join(subquery, on=QueueItem.id == subquery.c.id)
+ .order_by(db_random_func())
+ .get())
+
+ except QueueItem.DoesNotExist:
+ # No available queue item was found.
+ return None
+
+ def _attempt_to_claim_item(self, db_item, now, processing_time):
+ """ Attempts to claim the specified queue item for this instance. Returns True on success and
+ False on failure.
+
+ Note that the underlying QueueItem row in the database will be changed on success, but
+ the db_item object given as a parameter will *not* have its fields updated.
+ """
+
+ # Try to claim the item. We do so by updating the item's information only if its current
+ # state ID matches that returned in the previous query. Since all updates to the QueueItem
+    # must change the state ID, this is guaranteed to only succeed if the item has not yet been
+ # claimed by another caller.
+ #
+ # Note that we use this method because InnoDB takes locks on *every* clause in the WHERE when
+ # performing the update. Previously, we would check all these columns, resulting in a bunch
+ # of lock contention. This change mitigates the problem significantly by only checking two
+ # columns (id and state_id), both of which should be absolutely unique at all times.
+ set_unavailable_query = (QueueItem
+ .update(available=False,
+ processing_expires=now + timedelta(seconds=processing_time),
+ retries_remaining=QueueItem.retries_remaining - 1,
+ state_id=str(uuid.uuid4()))
+ .where(QueueItem.id == db_item.id,
+ QueueItem.state_id == db_item.state_id))
+
+ changed = set_unavailable_query.execute()
+ return changed == 1
+
+
+ def get(self, processing_time=300, ordering_required=False):
+ """
+ Get an available item and mark it as unavailable for the default of five
+ minutes. The result of this method must always be composed of simple
+ python objects which are JSON serializable for network portability reasons.
+ """
+ now = datetime.utcnow()
+
+ # Select an available queue item.
+ db_item = self._select_available_item(ordering_required, now)
+ if db_item is None:
+ self._currently_processing = False
+ return None
+
+ # Attempt to claim the item for this instance.
+ was_claimed = self._attempt_to_claim_item(db_item, now, processing_time)
+ if not was_claimed:
+ self._currently_processing = False
+ return None
+
+ self._currently_processing = True
+
+ # Return a view of the queue item rather than an active db object
+ return AttrDict({
+ 'id': db_item.id,
+ 'body': db_item.body,
+ 'retries_remaining': db_item.retries_remaining - 1,
+ })
+
+ def cancel(self, item_id):
+ """ Attempts to cancel the queue item with the given ID from the queue. Returns true on success
+ and false if the queue item could not be canceled.
+ """
+ count_removed = QueueItem.delete().where(QueueItem.id == item_id).execute()
+ return count_removed > 0
+
+ def complete(self, completed_item):
+ self._currently_processing = not self.cancel(completed_item.id)
+
+ def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
+ with self._transaction_factory(db):
+ retry_date = datetime.utcnow() + timedelta(seconds=retry_after)
+
+ try:
+ incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
+ incomplete_item_obj.available_after = retry_date
+ incomplete_item_obj.available = True
+
+ if restore_retry:
+ incomplete_item_obj.retries_remaining += 1
+
+ incomplete_item_obj.save()
+ self._currently_processing = False
+ return incomplete_item_obj.retries_remaining > 0
+ except QueueItem.DoesNotExist:
+ return False
+
+ def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION,
+ updated_data=None):
+ with self._transaction_factory(db):
+ try:
+ queue_item = self._item_by_id_for_update(item.id)
+ new_expiration = datetime.utcnow() + timedelta(seconds=seconds_from_now)
+ has_change = False
+
+ # Only actually write the new expiration to the db if it moves the expiration some minimum
+ if new_expiration - queue_item.processing_expires > minimum_extension:
+ queue_item.processing_expires = new_expiration
+ has_change = True
+
+ if updated_data is not None and queue_item.body != updated_data:
+ queue_item.body = updated_data
+ has_change = True
+
+ if has_change:
+ queue_item.save()
+
+ return has_change
+ except QueueItem.DoesNotExist:
+ return False
+
+
+def delete_expired(expiration_threshold, deletion_threshold, batch_size):
+ """
+ Deletes all queue items that are older than the provided expiration threshold in batches of the
+  provided size. If there are fewer items than the deletion threshold, this method does nothing.
+
+ Returns the number of items deleted.
+ """
+ to_delete = list(QueueItem
+ .select()
+ .where(QueueItem.processing_expires <= expiration_threshold)
+ .limit(batch_size))
+
+ if len(to_delete) < deletion_threshold:
+ return 0
+
+ QueueItem.delete().where(QueueItem.id << to_delete).execute()
+ return len(to_delete)
diff --git a/data/readreplica.py b/data/readreplica.py
new file mode 100644
index 000000000..33abff2ed
--- /dev/null
+++ b/data/readreplica.py
@@ -0,0 +1,129 @@
+import random
+
+from collections import namedtuple
+
+from peewee import Model, SENTINEL, OperationalError, Proxy
+
+ReadOnlyConfig = namedtuple('ReadOnlyConfig', ['is_readonly', 'read_replicas'])
+
+class ReadOnlyModeException(Exception):
+ """ Exception raised if a write operation was attempted when in read only mode.
+ """
+
+
+class AutomaticFailoverWrapper(object):
+ """ Class which wraps a peewee database driver and (optionally) a second driver.
+ When executing SQL, if an OperationalError occurs, if a second driver is given,
+ the query is attempted again on the fallback DB. Otherwise, the exception is raised.
+ """
+ def __init__(self, primary_db, fallback_db=None):
+ self._primary_db = primary_db
+ self._fallback_db = fallback_db
+
+  def __getattr__(self, attribute):
+    # Delegate unknown attributes to the primary driver; raising here avoids infinite recursion
+    # back into __getattr__ for attributes that exist on neither object.
+    if attribute != 'execute_sql' and hasattr(self._primary_db, attribute):
+      return getattr(self._primary_db, attribute)
+
+    raise AttributeError(attribute)
+
+ def execute(self, query, commit=SENTINEL, **context_options):
+ ctx = self.get_sql_context(**context_options)
+ sql, params = ctx.sql(query).query()
+ return self.execute_sql(sql, params, commit=commit)
+
+  def execute_sql(self, sql, params=None, commit=SENTINEL):
+    try:
+      return self._primary_db.execute_sql(sql, params, commit)
+    except OperationalError:
+      # Re-raise if there is no fallback database; otherwise retry the query on the fallback.
+      if self._fallback_db is None:
+        raise
+
+      return self._fallback_db.execute_sql(sql, params, commit)
+
+
+class ReadReplicaSupportedModel(Model):
+ """ Base model for peewee data models that support using a read replica for SELECT
+ requests not under transactions, and automatic failover to the master if the
+ read replica fails.
+
+ Read-only queries are initially attempted on one of the read replica databases
+ being used; if an OperationalError occurs when attempting to invoke the query,
+ then the failure is logged and the query is retried on the database master.
+
+ Queries that are non-SELECTs (or under transactions) are always tried on the
+ master.
+
+ If the system is configured into read only mode, then all non-read-only queries
+ will raise a ReadOnlyModeException.
+ """
+ @classmethod
+ def _read_only_config(cls):
+ read_only_config = getattr(cls._meta, 'read_only_config', None)
+ if read_only_config is None:
+ return ReadOnlyConfig(False, [])
+
+ if isinstance(read_only_config, Proxy) and read_only_config.obj is None:
+ return ReadOnlyConfig(False, [])
+
+ return read_only_config.obj or ReadOnlyConfig(False, [])
+
+ @classmethod
+ def _in_readonly_mode(cls):
+ return cls._read_only_config().is_readonly
+
+ @classmethod
+ def _select_database(cls):
+ """ Selects a read replica database if we're configured to support read replicas.
+ Otherwise, selects the master database.
+ """
+ # Select the master DB if read replica support is not enabled.
+ read_only_config = cls._read_only_config()
+ if not read_only_config.read_replicas:
+ return cls._meta.database
+
+ # Select the master DB if we're ever under a transaction.
+ if cls._meta.database.transaction_depth() > 0:
+ return cls._meta.database
+
+ # Otherwise, return a read replica database with auto-retry onto the main database.
+ replicas = read_only_config.read_replicas
+ selected_read_replica = replicas[random.randrange(len(replicas))]
+ return AutomaticFailoverWrapper(selected_read_replica, cls._meta.database)
+
+ @classmethod
+ def select(cls, *args, **kwargs):
+ query = super(ReadReplicaSupportedModel, cls).select(*args, **kwargs)
+ query._database = cls._select_database()
+ return query
+
+ @classmethod
+ def insert(cls, *args, **kwargs):
+ query = super(ReadReplicaSupportedModel, cls).insert(*args, **kwargs)
+ if cls._in_readonly_mode():
+ raise ReadOnlyModeException()
+ return query
+
+ @classmethod
+ def update(cls, *args, **kwargs):
+ query = super(ReadReplicaSupportedModel, cls).update(*args, **kwargs)
+ if cls._in_readonly_mode():
+ raise ReadOnlyModeException()
+ return query
+
+ @classmethod
+ def delete(cls, *args, **kwargs):
+ query = super(ReadReplicaSupportedModel, cls).delete(*args, **kwargs)
+ if cls._in_readonly_mode():
+ raise ReadOnlyModeException()
+ return query
+
+ @classmethod
+ def raw(cls, *args, **kwargs):
+ query = super(ReadReplicaSupportedModel, cls).raw(*args, **kwargs)
+ if query._sql.lower().startswith('select '):
+ query._database = cls._select_database()
+ elif cls._in_readonly_mode():
+ raise ReadOnlyModeException()
+
+ return query
diff --git a/data/registry_model/__init__.py b/data/registry_model/__init__.py
new file mode 100644
index 000000000..ffac9dd59
--- /dev/null
+++ b/data/registry_model/__init__.py
@@ -0,0 +1,43 @@
+import os
+import logging
+
+from data.registry_model.registry_pre_oci_model import pre_oci_model
+from data.registry_model.registry_oci_model import oci_model
+from data.registry_model.modelsplitter import SplitModel
+
+logger = logging.getLogger(__name__)
+
+
+class RegistryModelProxy(object):
+ def __init__(self):
+ self._model = oci_model if os.getenv('OCI_DATA_MODEL') == 'true' else pre_oci_model
+
+ def setup_split(self, oci_model_proportion, oci_whitelist, v22_whitelist, upgrade_mode):
+ if os.getenv('OCI_DATA_MODEL') == 'true':
+ return
+
+ if upgrade_mode == 'complete':
+ logger.info('===============================')
+ logger.info('Full V2_2 + OCI model is enabled')
+ logger.info('===============================')
+ self._model = oci_model
+ return
+
+ logger.info('===============================')
+ logger.info('Split registry model: OCI %s proportion and whitelist `%s` and V22 whitelist `%s`',
+ oci_model_proportion, oci_whitelist, v22_whitelist)
+ logger.info('===============================')
+ self._model = SplitModel(oci_model_proportion, oci_whitelist, v22_whitelist,
+ upgrade_mode == 'post-oci-rollout')
+
+ def set_for_testing(self, use_oci_model):
+ self._model = oci_model if use_oci_model else pre_oci_model
+ logger.debug('Changed registry model to `%s` for testing', self._model)
+
+ def __getattr__(self, attr):
+ return getattr(self._model, attr)
+
+registry_model = RegistryModelProxy()
+logger.info('===============================')
+logger.info('Using registry model `%s`', registry_model._model)
+logger.info('===============================')
diff --git a/data/registry_model/blobuploader.py b/data/registry_model/blobuploader.py
new file mode 100644
index 000000000..5f99d3ec8
--- /dev/null
+++ b/data/registry_model/blobuploader.py
@@ -0,0 +1,335 @@
+import logging
+import time
+
+from contextlib import contextmanager
+from collections import namedtuple
+
+import bitmath
+import resumablehashlib
+
+from data.registry_model import registry_model
+from data.database import CloseForLongOperation, db_transaction
+from digest import digest_tools
+from util.registry.filelike import wrap_with_handler, StreamSlice
+from util.registry.gzipstream import calculate_size_handler
+from util.registry.torrent import PieceHasher
+
+
+logger = logging.getLogger(__name__)
+
+
+BLOB_CONTENT_TYPE = 'application/octet-stream'
+
+
+class BlobUploadException(Exception):
+ """ Base for all exceptions raised when uploading blobs. """
+
+class BlobRangeMismatchException(BlobUploadException):
+ """ Exception raised if the range to be uploaded does not match. """
+
+class BlobDigestMismatchException(BlobUploadException):
+ """ Exception raised if the digest requested does not match that of the contents uploaded. """
+
+class BlobTooLargeException(BlobUploadException):
+ """ Exception raised if the data uploaded exceeds the maximum_blob_size. """
+ def __init__(self, uploaded, max_allowed):
+ super(BlobTooLargeException, self).__init__()
+ self.uploaded = uploaded
+ self.max_allowed = max_allowed
+
+
+BlobUploadSettings = namedtuple('BlobUploadSettings', ['maximum_blob_size', 'bittorrent_piece_size',
+ 'committed_blob_expiration'])
+
+
+def create_blob_upload(repository_ref, storage, settings, extra_blob_stream_handlers=None):
+ """ Creates a new blob upload in the specified repository and returns a manager for interacting
+ with that upload. Returns None if a new blob upload could not be started.
+ """
+ location_name = storage.preferred_locations[0]
+ new_upload_uuid, upload_metadata = storage.initiate_chunked_upload(location_name)
+ blob_upload = registry_model.create_blob_upload(repository_ref, new_upload_uuid, location_name,
+ upload_metadata)
+ if blob_upload is None:
+ return None
+
+ return _BlobUploadManager(repository_ref, blob_upload, settings, storage,
+ extra_blob_stream_handlers)
+
+
+def retrieve_blob_upload_manager(repository_ref, blob_upload_id, storage, settings):
+ """ Retrieves the manager for an in-progress blob upload with the specified ID under the given
+ repository or None if none.
+ """
+ blob_upload = registry_model.lookup_blob_upload(repository_ref, blob_upload_id)
+ if blob_upload is None:
+ return None
+
+ return _BlobUploadManager(repository_ref, blob_upload, settings, storage)
+
+@contextmanager
+def complete_when_uploaded(blob_upload):
+ """ Wraps the given blob upload in a context manager that completes the upload when the context
+ closes.
+ """
+ try:
+ yield blob_upload
+ except Exception as ex:
+ logger.exception('Exception when uploading blob `%s`', blob_upload.blob_upload_id)
+ raise ex
+ finally:
+    # Cancel the upload if something went wrong or it was not committed to a blob.
+ if blob_upload.committed_blob is None:
+ blob_upload.cancel_upload()
+
+@contextmanager
+def upload_blob(repository_ref, storage, settings, extra_blob_stream_handlers=None):
+ """ Starts a new blob upload in the specified repository and yields a manager for interacting
+ with that upload. When the context manager completes, the blob upload is deleted, whether
+ committed to a blob or not. Yields None if a blob upload could not be started.
+ """
+ created = create_blob_upload(repository_ref, storage, settings, extra_blob_stream_handlers)
+ if not created:
+ yield None
+ return
+
+ try:
+ yield created
+ except Exception as ex:
+ logger.exception('Exception when uploading blob `%s`', created.blob_upload_id)
+ raise ex
+ finally:
+    # Cancel the upload if something went wrong or it was not committed to a blob.
+ if created.committed_blob is None:
+ created.cancel_upload()
+
+
+class _BlobUploadManager(object):
+ """ Defines a helper class for easily interacting with blob uploads in progress, including
+ handling of database and storage calls.
+ """
+ def __init__(self, repository_ref, blob_upload, settings, storage,
+ extra_blob_stream_handlers=None):
+ assert repository_ref is not None
+ assert blob_upload is not None
+
+ self.repository_ref = repository_ref
+ self.blob_upload = blob_upload
+ self.settings = settings
+ self.storage = storage
+ self.extra_blob_stream_handlers = extra_blob_stream_handlers
+ self.committed_blob = None
+
+ @property
+ def blob_upload_id(self):
+ """ Returns the unique ID for the blob upload. """
+ return self.blob_upload.upload_id
+
+ def upload_chunk(self, app_config, input_fp, start_offset=0, length=-1, metric_queue=None):
+ """ Uploads a chunk of data found in the given input file-like interface. start_offset and
+ length are optional and should match a range header if any was given.
+
+ If metric_queue is given, the upload time and chunk size are written into the metrics in
+ the queue.
+
+ Returns the total number of bytes uploaded after this upload has completed. Raises
+ a BlobUploadException if the upload failed.
+ """
+ assert start_offset is not None
+ assert length is not None
+
+ if start_offset > 0 and start_offset > self.blob_upload.byte_count:
+ logger.error('start_offset provided greater than blob_upload.byte_count')
+ raise BlobRangeMismatchException()
+
+ # Ensure that we won't go over the allowed maximum size for blobs.
+ max_blob_size = bitmath.parse_string_unsafe(self.settings.maximum_blob_size)
+ uploaded = bitmath.Byte(length + start_offset)
+ if length > -1 and uploaded > max_blob_size:
+ raise BlobTooLargeException(uploaded=uploaded.bytes, max_allowed=max_blob_size.bytes)
+
+ location_set = {self.blob_upload.location_name}
+ upload_error = None
+ with CloseForLongOperation(app_config):
+ if start_offset > 0 and start_offset < self.blob_upload.byte_count:
+ # Skip the bytes which were received on a previous push, which are already stored and
+ # included in the sha calculation
+ overlap_size = self.blob_upload.byte_count - start_offset
+ input_fp = StreamSlice(input_fp, overlap_size)
+
+ # Update our upload bounds to reflect the skipped portion of the overlap
+ start_offset = self.blob_upload.byte_count
+ length = max(length - overlap_size, 0)
+
+ # We use this to escape early in case we have already processed all of the bytes the user
+ # wants to upload.
+ if length == 0:
+ return self.blob_upload.byte_count
+
+ input_fp = wrap_with_handler(input_fp, self.blob_upload.sha_state.update)
+
+ if self.extra_blob_stream_handlers:
+ for handler in self.extra_blob_stream_handlers:
+ input_fp = wrap_with_handler(input_fp, handler)
+
+ # Add a hasher for calculating SHA1s for torrents if this is the first chunk and/or we have
+ # already calculated hash data for the previous chunk(s).
+ piece_hasher = None
+ if self.blob_upload.chunk_count == 0 or self.blob_upload.piece_sha_state:
+ initial_sha1_value = self.blob_upload.piece_sha_state or resumablehashlib.sha1()
+ initial_sha1_pieces_value = self.blob_upload.piece_hashes or ''
+
+ piece_hasher = PieceHasher(self.settings.bittorrent_piece_size, start_offset,
+ initial_sha1_pieces_value, initial_sha1_value)
+ input_fp = wrap_with_handler(input_fp, piece_hasher.update)
+
+ # If this is the first chunk and we're starting at the 0 offset, add a handler to gunzip the
+ # stream so we can determine the uncompressed size. We'll throw out this data if another chunk
+ # comes in, but in the common case the docker client only sends one chunk.
+ size_info = None
+ if start_offset == 0 and self.blob_upload.chunk_count == 0:
+ size_info, fn = calculate_size_handler()
+ input_fp = wrap_with_handler(input_fp, fn)
+
+ start_time = time.time()
+ length_written, new_metadata, upload_error = self.storage.stream_upload_chunk(
+ location_set,
+ self.blob_upload.upload_id,
+ start_offset,
+ length,
+ input_fp,
+ self.blob_upload.storage_metadata,
+ content_type=BLOB_CONTENT_TYPE,
+ )
+
+ if upload_error is not None:
+ logger.error('storage.stream_upload_chunk returned error %s', upload_error)
+ raise BlobUploadException(upload_error)
+
+ # Update the chunk upload time and push bytes metrics.
+ if metric_queue is not None:
+ metric_queue.chunk_upload_time.Observe(time.time() - start_time, labelvalues=[
+ length_written, list(location_set)[0]])
+
+ metric_queue.push_byte_count.Inc(length_written)
+
+ # Ensure we have not gone beyond the max layer size.
+ new_blob_bytes = self.blob_upload.byte_count + length_written
+ new_blob_size = bitmath.Byte(new_blob_bytes)
+ if new_blob_size > max_blob_size:
+        raise BlobTooLargeException(uploaded=new_blob_size.bytes, max_allowed=max_blob_size.bytes)
+
+ # If we determined an uncompressed size and this is the first chunk, add it to the blob.
+ # Otherwise, we clear the size from the blob as it was uploaded in multiple chunks.
+ uncompressed_byte_count = self.blob_upload.uncompressed_byte_count
+ if size_info is not None and self.blob_upload.chunk_count == 0 and size_info.is_valid:
+ uncompressed_byte_count = size_info.uncompressed_size
+ elif length_written > 0:
+ # Otherwise, if we wrote some bytes and the above conditions were not met, then we don't
+ # know the uncompressed size.
+ uncompressed_byte_count = None
+
+ piece_hashes = None
+ piece_sha_state = None
+ if piece_hasher is not None:
+ piece_hashes = piece_hasher.piece_hashes
+ piece_sha_state = piece_hasher.hash_fragment
+
+ self.blob_upload = registry_model.update_blob_upload(self.blob_upload,
+ uncompressed_byte_count,
+ piece_hashes,
+ piece_sha_state,
+ new_metadata,
+ new_blob_bytes,
+ self.blob_upload.chunk_count + 1,
+ self.blob_upload.sha_state)
+ if self.blob_upload is None:
+ raise BlobUploadException('Could not complete upload of chunk')
+
+ return new_blob_bytes
+
+ def cancel_upload(self):
+ """ Cancels the blob upload, deleting any data uploaded and removing the upload itself. """
+ if self.blob_upload is None:
+ return
+
+ # Tell storage to cancel the chunked upload, deleting its contents.
+ self.storage.cancel_chunked_upload({self.blob_upload.location_name},
+ self.blob_upload.upload_id,
+ self.blob_upload.storage_metadata)
+
+ # Remove the blob upload record itself.
+ registry_model.delete_blob_upload(self.blob_upload)
+
+ def commit_to_blob(self, app_config, expected_digest=None):
+ """ Commits the blob upload to a blob under the repository. The resulting blob will be marked
+ to not be GCed for some period of time (as configured by `committed_blob_expiration`).
+
+ If expected_digest is specified, the content digest of the data uploaded for the blob is
+ compared to that given and, if it does not match, a BlobDigestMismatchException is
+ raised. The digest given must be of type `Digest` and not a string.
+ """
+ # Compare the content digest.
+ if expected_digest is not None:
+ self._validate_digest(expected_digest)
+
+ # Finalize the storage.
+ storage_already_existed = self._finalize_blob_storage(app_config)
+
+ # Convert the upload to a blob.
+ computed_digest_str = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state)
+
+ with db_transaction():
+ blob = registry_model.commit_blob_upload(self.blob_upload, computed_digest_str,
+ self.settings.committed_blob_expiration)
+ if blob is None:
+ return None
+
+ # Save torrent hash information (if available).
+ if self.blob_upload.piece_sha_state is not None and not storage_already_existed:
+ piece_bytes = self.blob_upload.piece_hashes + self.blob_upload.piece_sha_state.digest()
+ registry_model.set_torrent_info(blob, self.settings.bittorrent_piece_size, piece_bytes)
+
+ self.committed_blob = blob
+ return blob
+
+ def _validate_digest(self, expected_digest):
+ """
+ Verifies that the digest's SHA matches that of the uploaded data.
+ """
+ computed_digest = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state)
+ if not digest_tools.digests_equal(computed_digest, expected_digest):
+ logger.error('Digest mismatch for upload %s: Expected digest %s, found digest %s',
+ self.blob_upload.upload_id, expected_digest, computed_digest)
+ raise BlobDigestMismatchException()
+
+ def _finalize_blob_storage(self, app_config):
+ """
+ When an upload is successful, this ends the uploading process from the
+ storage's perspective.
+
+ Returns True if the blob already existed.
+ """
+ computed_digest = digest_tools.sha256_digest_from_hashlib(self.blob_upload.sha_state)
+ final_blob_location = digest_tools.content_path(computed_digest)
+
+ # Close the database connection before we perform this operation, as it can take a while
+ # and we shouldn't hold the connection during that time.
+ with CloseForLongOperation(app_config):
+ # Move the storage into place, or if this was a re-upload, cancel it
+ already_existed = self.storage.exists({self.blob_upload.location_name}, final_blob_location)
+ if already_existed:
+ # It already existed, clean up our upload which served as proof that the
+ # uploader had the blob.
+ self.storage.cancel_chunked_upload({self.blob_upload.location_name},
+ self.blob_upload.upload_id,
+ self.blob_upload.storage_metadata)
+ else:
+ # We were the first ones to upload this image (at least to this location)
+ # Let's copy it into place
+ self.storage.complete_chunked_upload({self.blob_upload.location_name},
+ self.blob_upload.upload_id,
+ final_blob_location,
+ self.blob_upload.storage_metadata)
+
+ return already_existed
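+
+
+# Illustrative sketch of a caller using the helpers above; `repository_ref`, `storage`,
+# `app_config` and `input_fp` are assumed to be supplied by the calling endpoint, and
+# `expected_digest` (if given) must already be a parsed Digest per commit_to_blob's contract.
+def _example_push_single_chunk_blob(repository_ref, storage, app_config, input_fp,
+                                    expected_digest=None):
+  settings = BlobUploadSettings(maximum_blob_size='5G',
+                                bittorrent_piece_size=512 * 1024,
+                                committed_blob_expiration=60 * 60)
+  with upload_blob(repository_ref, storage, settings) as manager:
+    if manager is None:
+      return None  # The upload could not be started.
+
+    # Stream the entire payload as one chunk, then commit it into a content-addressed blob.
+    manager.upload_chunk(app_config, input_fp)
+    return manager.commit_to_blob(app_config, expected_digest=expected_digest)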
diff --git a/data/registry_model/datatype.py b/data/registry_model/datatype.py
new file mode 100644
index 000000000..091776bb1
--- /dev/null
+++ b/data/registry_model/datatype.py
@@ -0,0 +1,86 @@
+# pylint: disable=protected-access
+
+from functools import wraps, total_ordering
+
+class FromDictionaryException(Exception):
+ """ Exception raised if constructing a data type from a dictionary fails due to
+ missing data.
+ """
+
+def datatype(name, static_fields):
+ """ Defines a base class for a datatype that will represent a row from the database,
+ in an abstracted form.
+ """
+ @total_ordering
+ class DataType(object):
+ __name__ = name
+
+ def __init__(self, **kwargs):
+ self._db_id = kwargs.pop('db_id', None)
+ self._inputs = kwargs.pop('inputs', None)
+ self._fields = kwargs
+
+ for name in static_fields:
+ assert name in self._fields, 'Missing field %s' % name
+
+ def __eq__(self, other):
+ return self._db_id == other._db_id
+
+ def __lt__(self, other):
+ return self._db_id < other._db_id
+
+ def __getattr__(self, name):
+ if name in static_fields:
+ return self._fields[name]
+
+ raise AttributeError('Unknown field `%s`' % name)
+
+ def __repr__(self):
+ return '<%s> #%s' % (name, self._db_id)
+
+ @classmethod
+ def from_dict(cls, dict_data):
+ try:
+ return cls(**dict_data)
+      except Exception:
+ raise FromDictionaryException()
+
+ def asdict(self):
+ dictionary_rep = dict(self._fields)
+ assert ('db_id' not in dictionary_rep and
+ 'inputs' not in dictionary_rep)
+
+ dictionary_rep['db_id'] = self._db_id
+ dictionary_rep['inputs'] = self._inputs
+ return dictionary_rep
+
+ return DataType
+
+
+def requiresinput(input_name):
+ """ Marks a property on the data type as requiring an input to be invoked. """
+ def inner(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ if self._inputs.get(input_name) is None:
+ raise Exception('Cannot invoke function with missing input `%s`' % input_name)
+
+ kwargs[input_name] = self._inputs[input_name]
+ result = func(self, *args, **kwargs)
+ return result
+
+ return wrapper
+ return inner
+
+
+def optionalinput(input_name):
+  """ Marks a property on the data type as accepting an optional input when invoked. """
+ def inner(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ kwargs[input_name] = self._inputs.get(input_name)
+ result = func(self, *args, **kwargs)
+ return result
+
+ return wrapper
+ return inner
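+
+
+# Illustrative sketch of how datatype() and the input decorators compose; the `Widget` type
+# below is hypothetical and exists only to show the pattern.
+class _ExampleWidget(datatype('Widget', ['name', 'size'])):
+  @classmethod
+  def for_row(cls, db_id, name, size, owner=None):
+    return _ExampleWidget(db_id=db_id, name=name, size=size, inputs=dict(owner=owner))
+
+  @property
+  @optionalinput('owner')
+  def owner(self, owner):
+    """ Returns the owner preloaded with the row, or None if it was not supplied. """
+    return owner
+
+
+def _example_widget_usage():
+  widget = _ExampleWidget.for_row(db_id=1, name='example', size=42)
+  assert widget.name == 'example' and widget.size == 42  # Static fields via __getattr__.
+  assert widget.owner is None                            # Optional input was not preloaded.
+  assert _ExampleWidget.from_dict(widget.asdict()) == widget  # Equality compares db_id.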
diff --git a/data/registry_model/datatypes.py b/data/registry_model/datatypes.py
new file mode 100644
index 000000000..b732fbefc
--- /dev/null
+++ b/data/registry_model/datatypes.py
@@ -0,0 +1,504 @@
+import hashlib
+
+from collections import namedtuple
+from enum import Enum, unique
+
+from cachetools.func import lru_cache
+
+from data import model
+from data.database import Manifest as ManifestTable
+from data.registry_model.datatype import datatype, requiresinput, optionalinput
+from image.docker import ManifestException
+from image.docker.schemas import parse_manifest_from_bytes
+from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
+from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
+from util.bytes import Bytes
+
+
+class RepositoryReference(datatype('Repository', [])):
+ """ RepositoryReference is a reference to a repository, passed to registry interface methods. """
+ @classmethod
+ def for_repo_obj(cls, repo_obj, namespace_name=None, repo_name=None, is_free_namespace=None,
+ state=None):
+ if repo_obj is None:
+ return None
+
+ return RepositoryReference(db_id=repo_obj.id,
+ inputs=dict(
+ kind=model.repository.get_repo_kind_name(repo_obj),
+ is_public=model.repository.is_repository_public(repo_obj),
+ namespace_name=namespace_name,
+ repo_name=repo_name,
+ is_free_namespace=is_free_namespace,
+ state=state
+ ))
+
+ @classmethod
+ def for_id(cls, repo_id, namespace_name=None, repo_name=None, is_free_namespace=None, state=None):
+ return RepositoryReference(db_id=repo_id,
+ inputs=dict(
+ kind=None,
+ is_public=None,
+ namespace_name=namespace_name,
+ repo_name=repo_name,
+ is_free_namespace=is_free_namespace,
+ state=state
+ ))
+
+ @property
+ @lru_cache(maxsize=1)
+ def _repository_obj(self):
+ return model.repository.lookup_repository(self._db_id)
+
+ @property
+ @optionalinput('kind')
+ def kind(self, kind):
+ """ Returns the kind of the repository. """
+    return kind or model.repository.get_repo_kind_name(self._repository_obj)
+
+ @property
+ @optionalinput('is_public')
+ def is_public(self, is_public):
+ """ Returns whether the repository is public. """
+ if is_public is not None:
+ return is_public
+
+ return model.repository.is_repository_public(self._repository_obj)
+
+ @property
+ def trust_enabled(self):
+ """ Returns whether trust is enabled in this repository. """
+ repository = self._repository_obj
+ if repository is None:
+ return None
+
+ return repository.trust_enabled
+
+ @property
+ def id(self):
+ """ Returns the database ID of the repository. """
+ return self._db_id
+
+ @property
+ @optionalinput('namespace_name')
+ def namespace_name(self, namespace_name=None):
+ """ Returns the namespace name of this repository.
+ """
+ if namespace_name is not None:
+ return namespace_name
+
+ repository = self._repository_obj
+ if repository is None:
+ return None
+
+ return repository.namespace_user.username
+
+ @property
+ @optionalinput('is_free_namespace')
+ def is_free_namespace(self, is_free_namespace=None):
+ """ Returns whether the namespace of the repository is on a free plan.
+ """
+ if is_free_namespace is not None:
+ return is_free_namespace
+
+ repository = self._repository_obj
+ if repository is None:
+ return None
+
+ return repository.namespace_user.stripe_id is None
+
+ @property
+ @optionalinput('repo_name')
+ def name(self, repo_name=None):
+ """ Returns the name of this repository.
+ """
+ if repo_name is not None:
+ return repo_name
+
+ repository = self._repository_obj
+ if repository is None:
+ return None
+
+ return repository.name
+
+ @property
+ @optionalinput('state')
+ def state(self, state=None):
+ """ Return the state of the Repository. """
+ if state is not None:
+ return state
+
+ repository = self._repository_obj
+ if repository is None:
+ return None
+
+ return repository.state
+
+
+class Label(datatype('Label', ['key', 'value', 'uuid', 'source_type_name', 'media_type_name'])):
+ """ Label represents a label on a manifest. """
+ @classmethod
+ def for_label(cls, label):
+ if label is None:
+ return None
+
+ return Label(db_id=label.id, key=label.key, value=label.value,
+ uuid=label.uuid, media_type_name=label.media_type.name,
+ source_type_name=label.source_type.name)
+
+
+class ShallowTag(datatype('ShallowTag', ['name'])):
+ """ ShallowTag represents a tag in a repository, but only contains basic information. """
+ @classmethod
+ def for_tag(cls, tag):
+ if tag is None:
+ return None
+
+ return ShallowTag(db_id=tag.id, name=tag.name)
+
+ @classmethod
+ def for_repository_tag(cls, repository_tag):
+ if repository_tag is None:
+ return None
+
+ return ShallowTag(db_id=repository_tag.id, name=repository_tag.name)
+
+ @property
+ def id(self):
+ """ The ID of this tag for pagination purposes only. """
+ return self._db_id
+
+
+class Tag(datatype('Tag', ['name', 'reversion', 'manifest_digest', 'lifetime_start_ts',
+ 'lifetime_end_ts', 'lifetime_start_ms', 'lifetime_end_ms'])):
+ """ Tag represents a tag in a repository, which points to a manifest or image. """
+ @classmethod
+ def for_tag(cls, tag, legacy_image=None):
+ if tag is None:
+ return None
+
+ return Tag(db_id=tag.id,
+ name=tag.name,
+ reversion=tag.reversion,
+ lifetime_start_ms=tag.lifetime_start_ms,
+ lifetime_end_ms=tag.lifetime_end_ms,
+ lifetime_start_ts=tag.lifetime_start_ms / 1000,
+ lifetime_end_ts=tag.lifetime_end_ms / 1000 if tag.lifetime_end_ms else None,
+ manifest_digest=tag.manifest.digest,
+ inputs=dict(legacy_image=legacy_image,
+ manifest=tag.manifest,
+ repository=RepositoryReference.for_id(tag.repository_id)))
+
+ @classmethod
+ def for_repository_tag(cls, repository_tag, manifest_digest=None, legacy_image=None):
+ if repository_tag is None:
+ return None
+
+ return Tag(db_id=repository_tag.id,
+ name=repository_tag.name,
+ reversion=repository_tag.reversion,
+ lifetime_start_ts=repository_tag.lifetime_start_ts,
+ lifetime_end_ts=repository_tag.lifetime_end_ts,
+ lifetime_start_ms=repository_tag.lifetime_start_ts * 1000,
+ lifetime_end_ms=(repository_tag.lifetime_end_ts * 1000
+ if repository_tag.lifetime_end_ts else None),
+ manifest_digest=manifest_digest,
+ inputs=dict(legacy_image=legacy_image,
+ repository=RepositoryReference.for_id(repository_tag.repository_id)))
+
+ @property
+ @requiresinput('manifest')
+ def _manifest(self, manifest):
+ """ Returns the manifest for this tag. Will only apply to new-style OCI tags. """
+ return manifest
+
+ @property
+ @optionalinput('manifest')
+ def manifest(self, manifest):
+ """ Returns the manifest for this tag or None if none. Will only apply to new-style OCI tags.
+ """
+ return Manifest.for_manifest(manifest, self.legacy_image_if_present)
+
+ @property
+ @requiresinput('repository')
+ def repository(self, repository):
+ """ Returns the repository under which this tag lives.
+ """
+ return repository
+
+ @property
+ @requiresinput('legacy_image')
+ def legacy_image(self, legacy_image):
+ """ Returns the legacy Docker V1-style image for this tag. Note that this
+ will be None for tags whose manifests point to other manifests instead of images.
+ """
+ return legacy_image
+
+ @property
+ @optionalinput('legacy_image')
+ def legacy_image_if_present(self, legacy_image):
+ """ Returns the legacy Docker V1-style image for this tag. Note that this
+ will be None for tags whose manifests point to other manifests instead of images.
+ """
+ return legacy_image
+
+ @property
+ def id(self):
+ """ The ID of this tag for pagination purposes only. """
+ return self._db_id
+
+
+class Manifest(datatype('Manifest', ['digest', 'media_type', 'internal_manifest_bytes'])):
+ """ Manifest represents a manifest in a repository. """
+ @classmethod
+ def for_tag_manifest(cls, tag_manifest, legacy_image=None):
+ if tag_manifest is None:
+ return None
+
+ return Manifest(db_id=tag_manifest.id, digest=tag_manifest.digest,
+ internal_manifest_bytes=Bytes.for_string_or_unicode(tag_manifest.json_data),
+ media_type=DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE, # Always in legacy.
+ inputs=dict(legacy_image=legacy_image, tag_manifest=True))
+
+ @classmethod
+ def for_manifest(cls, manifest, legacy_image):
+ if manifest is None:
+ return None
+
+ # NOTE: `manifest_bytes` will be None if not selected by certain join queries.
+ manifest_bytes = (Bytes.for_string_or_unicode(manifest.manifest_bytes)
+ if manifest.manifest_bytes is not None else None)
+ return Manifest(db_id=manifest.id,
+ digest=manifest.digest,
+ internal_manifest_bytes=manifest_bytes,
+ media_type=ManifestTable.media_type.get_name(manifest.media_type_id),
+ inputs=dict(legacy_image=legacy_image, tag_manifest=False))
+
+ @property
+ @requiresinput('tag_manifest')
+ def _is_tag_manifest(self, tag_manifest):
+ return tag_manifest
+
+ @property
+ @requiresinput('legacy_image')
+ def legacy_image(self, legacy_image):
+ """ Returns the legacy Docker V1-style image for this manifest.
+ """
+ return legacy_image
+
+ @property
+ @optionalinput('legacy_image')
+ def legacy_image_if_present(self, legacy_image):
+ """ Returns the legacy Docker V1-style image for this manifest. Note that this
+ will be None for manifests that point to other manifests instead of images.
+ """
+ return legacy_image
+
+ def get_parsed_manifest(self, validate=True):
+ """ Returns the parsed manifest for this manifest. """
+ assert self.internal_manifest_bytes
+ return parse_manifest_from_bytes(self.internal_manifest_bytes, self.media_type,
+ validate=validate)
+
+ @property
+ def layers_compressed_size(self):
+ """ Returns the total compressed size of the layers in the manifest or None if this could not
+ be computed.
+ """
+ try:
+ return self.get_parsed_manifest().layers_compressed_size
+ except ManifestException:
+ return None
+
+ @property
+ def is_manifest_list(self):
+ """ Returns True if this manifest points to a list (instead of an image). """
+ return self.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
+
+
+class LegacyImage(datatype('LegacyImage', ['docker_image_id', 'created', 'comment', 'command',
+ 'image_size', 'aggregate_size', 'uploading',
+ 'v1_metadata_string'])):
+ """ LegacyImage represents a Docker V1-style image found in a repository. """
+ @classmethod
+ def for_image(cls, image, images_map=None, tags_map=None, blob=None):
+ if image is None:
+ return None
+
+ return LegacyImage(db_id=image.id,
+ inputs=dict(images_map=images_map, tags_map=tags_map,
+ ancestor_id_list=image.ancestor_id_list(),
+ blob=blob),
+ docker_image_id=image.docker_image_id,
+ created=image.created,
+ comment=image.comment,
+ command=image.command,
+ v1_metadata_string=image.v1_json_metadata,
+ image_size=image.storage.image_size,
+ aggregate_size=image.aggregate_size,
+ uploading=image.storage.uploading)
+
+ @property
+ def id(self):
+ """ Returns the database ID of the legacy image. """
+ return self._db_id
+
+ @property
+ @requiresinput('images_map')
+ @requiresinput('ancestor_id_list')
+ def parents(self, images_map, ancestor_id_list):
+ """ Returns the parent images for this image. Raises an exception if the parents have
+ not been loaded before this property is invoked. Parents are returned starting at the
+ leaf image.
+ """
+ return [LegacyImage.for_image(images_map[ancestor_id], images_map=images_map)
+ for ancestor_id in reversed(ancestor_id_list)
+ if images_map.get(ancestor_id)]
+
+ @property
+ @requiresinput('blob')
+ def blob(self, blob):
+ """ Returns the blob for this image. Raises an exception if the blob has
+ not been loaded before this property is invoked.
+ """
+ return blob
+
+ @property
+ @requiresinput('tags_map')
+ def tags(self, tags_map):
+ """ Returns the tags pointing to this image. Raises an exception if the tags have
+ not been loaded before this property is invoked.
+ """
+ tags = tags_map.get(self._db_id)
+ if not tags:
+ return []
+
+ return [Tag.for_repository_tag(tag) for tag in tags]
+
+
+@unique
+class SecurityScanStatus(Enum):
+ """ Security scan status enum """
+ SCANNED = 'scanned'
+ FAILED = 'failed'
+ QUEUED = 'queued'
+ UNSUPPORTED = 'unsupported'
+
+
+class ManifestLayer(namedtuple('ManifestLayer', ['layer_info', 'blob'])):
+ """ Represents a single layer in a manifest. The `layer_info` data will be manifest-type specific,
+ but will have a few expected fields (such as `digest`). The `blob` represents the associated
+ blob for this layer, optionally with placements. If the layer is a remote layer, the blob will
+ be None.
+ """
+
+ def estimated_size(self, estimate_multiplier):
+    """ Returns the estimated size of this layer. If the layer's blob has an uncompressed size,
+ it is used. Otherwise, the compressed_size field in the layer is multiplied by the
+ multiplier.
+ """
+ if self.blob.uncompressed_size:
+ return self.blob.uncompressed_size
+
+ return (self.layer_info.compressed_size or 0) * estimate_multiplier
+
+
+class Blob(datatype('Blob', ['uuid', 'digest', 'compressed_size', 'uncompressed_size',
+ 'uploading'])):
+ """ Blob represents a content-addressable piece of storage. """
+ @classmethod
+ def for_image_storage(cls, image_storage, storage_path, placements=None):
+ if image_storage is None:
+ return None
+
+ return Blob(db_id=image_storage.id,
+ uuid=image_storage.uuid,
+ inputs=dict(placements=placements, storage_path=storage_path),
+ digest=image_storage.content_checksum,
+ compressed_size=image_storage.image_size,
+ uncompressed_size=image_storage.uncompressed_size,
+ uploading=image_storage.uploading)
+
+ @property
+ @requiresinput('storage_path')
+ def storage_path(self, storage_path):
+ """ Returns the path of this blob in storage. """
+ # TODO: change this to take in the storage engine?
+ return storage_path
+
+ @property
+ @requiresinput('placements')
+ def placements(self, placements):
+ """ Returns all the storage placements at which the Blob can be found. """
+ return placements
+
+
+class DerivedImage(datatype('DerivedImage', ['verb', 'varying_metadata', 'blob'])):
+ """ DerivedImage represents an image derived from a manifest via some form of verb. """
+ @classmethod
+ def for_derived_storage(cls, derived, verb, varying_metadata, blob):
+ return DerivedImage(db_id=derived.id,
+ verb=verb,
+ varying_metadata=varying_metadata,
+ blob=blob)
+
+ @property
+ def unique_id(self):
+ """ Returns a unique ID for this derived image. This call will consistently produce the same
+ unique ID across calls in the same code base.
+ """
+ return hashlib.sha256('%s:%s' % (self.verb, self._db_id)).hexdigest()
+
+
+class TorrentInfo(datatype('TorrentInfo', ['pieces', 'piece_length'])):
+ """ TorrentInfo represents information to pull a blob via torrent. """
+ @classmethod
+ def for_torrent_info(cls, torrent_info):
+ return TorrentInfo(db_id=torrent_info.id,
+ pieces=torrent_info.pieces,
+ piece_length=torrent_info.piece_length)
+
+
+class BlobUpload(datatype('BlobUpload', ['upload_id', 'byte_count', 'uncompressed_byte_count',
+ 'chunk_count', 'sha_state', 'location_name',
+ 'storage_metadata', 'piece_sha_state', 'piece_hashes'])):
+ """ BlobUpload represents information about an in-progress upload to create a blob. """
+ @classmethod
+ def for_upload(cls, blob_upload, location_name=None):
+ return BlobUpload(db_id=blob_upload.id,
+ upload_id=blob_upload.uuid,
+ byte_count=blob_upload.byte_count,
+ uncompressed_byte_count=blob_upload.uncompressed_byte_count,
+ chunk_count=blob_upload.chunk_count,
+ sha_state=blob_upload.sha_state,
+ location_name=location_name or blob_upload.location.name,
+ storage_metadata=blob_upload.storage_metadata,
+ piece_sha_state=blob_upload.piece_sha_state,
+ piece_hashes=blob_upload.piece_hashes)
+
+
+class LikelyVulnerableTag(datatype('LikelyVulnerableTag', ['layer_id', 'name'])):
+ """ LikelyVulnerableTag represents a tag in a repository that is likely vulnerable to a notified
+ vulnerability.
+ """
+ # TODO: Remove all of this once we're on the new security model exclusively.
+ @classmethod
+ def for_tag(cls, tag, repository, docker_image_id, storage_uuid):
+ layer_id = '%s.%s' % (docker_image_id, storage_uuid)
+ return LikelyVulnerableTag(db_id=tag.id,
+ name=tag.name,
+ layer_id=layer_id,
+ inputs=dict(repository=repository))
+
+ @classmethod
+ def for_repository_tag(cls, tag, repository):
+ tag_layer_id = '%s.%s' % (tag.image.docker_image_id, tag.image.storage.uuid)
+ return LikelyVulnerableTag(db_id=tag.id,
+ name=tag.name,
+ layer_id=tag_layer_id,
+ inputs=dict(repository=repository))
+
+ @property
+ @requiresinput('repository')
+ def repository(self, repository):
+ return RepositoryReference.for_repo_obj(repository)
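+
+
+# Illustrative sketch: with its optional inputs preloaded, a RepositoryReference can answer
+# basic questions without any database lookups. The ID and names below are hypothetical.
+def _example_repository_reference():
+  repo_ref = RepositoryReference.for_id(1234, namespace_name='some-namespace',
+                                        repo_name='some-repo', is_free_namespace=True)
+  assert repo_ref.id == 1234
+  assert repo_ref.namespace_name == 'some-namespace'
+  assert repo_ref.name == 'some-repo'
+  assert repo_ref.is_free_namespace  # Served from the preloaded input, not the database.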
diff --git a/data/registry_model/interface.py b/data/registry_model/interface.py
new file mode 100644
index 000000000..8862f88bc
--- /dev/null
+++ b/data/registry_model/interface.py
@@ -0,0 +1,384 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+@add_metaclass(ABCMeta)
+class RegistryDataInterface(object):
+ """ Interface for code to work with the registry data model. The registry data model consists
+ of all tables that store registry-specific information, such as Manifests, Blobs, Images,
+ and Labels.
+ """
+ @abstractmethod
+ def supports_schema2(self, namespace_name):
+ """ Returns whether the implementation of the data interface supports schema 2 format
+ manifests. """
+
+ @abstractmethod
+ def get_tag_legacy_image_id(self, repository_ref, tag_name, storage):
+    """ Returns the legacy image ID for the tag, if the tag has a legacy image in
+        the repository. Returns None if none.
+ """
+
+ @abstractmethod
+ def get_legacy_tags_map(self, repository_ref, storage):
+ """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in
+ the repository. Note that this can be a *very* heavy operation.
+ """
+
+ @abstractmethod
+ def find_matching_tag(self, repository_ref, tag_names):
+ """ Finds an alive tag in the repository matching one of the given tag names and returns it
+ or None if none.
+ """
+
+ @abstractmethod
+ def get_most_recent_tag(self, repository_ref):
+ """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
+ None.
+ """
+
+ @abstractmethod
+ def lookup_repository(self, namespace_name, repo_name, kind_filter=None):
+ """ Looks up and returns a reference to the repository with the given namespace and name,
+ or None if none. """
+
+ @abstractmethod
+ def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):
+ """ Returns the manifest associated with the given tag. """
+
+ @abstractmethod
+ def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False,
+ include_legacy_image=False, require_available=False):
+ """ Looks up the manifest with the given digest under the given repository and returns it
+ or None if none. If allow_dead is True, manifests pointed to by dead tags will also
+ be returned. If require_available is True, a temporary tag will be added onto the
+ returned manifest (before it is returned) to ensure it is available until another
+ tagging or manifest operation is taken.
+ """
+
+ @abstractmethod
+ def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
+ storage, raise_on_error=False):
+ """ Creates a manifest in a repository, adding all of the necessary data in the model.
+
+ The `manifest_interface_instance` parameter must be an instance of the manifest
+ interface as returned by the image/docker package.
+
+ Note that all blobs referenced by the manifest must exist under the repository or this
+ method will fail and return None.
+
+ Returns a reference to the (created manifest, tag) or (None, None) on error.
+ """
+
+ @abstractmethod
+ def get_legacy_images(self, repository_ref):
+ """
+    Returns an iterator over all the LegacyImages defined in the matching repository.
+ """
+
+ @abstractmethod
+ def get_legacy_image(self, repository_ref, docker_image_id, include_parents=False,
+ include_blob=False):
+ """
+    Returns the matching LegacyImage under the given repository, if any. If none,
+ returns None.
+ """
+
+ @abstractmethod
+ def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
+ """ Creates a label on the manifest with the given key and value.
+
+ Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
+ on the validation errors.
+ """
+
+ @abstractmethod
+ def batch_create_manifest_labels(self, manifest):
+ """ Returns a context manager for batch creation of labels on a manifest.
+
+ Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
+ on the validation errors.
+ """
+
+ @abstractmethod
+ def list_manifest_labels(self, manifest, key_prefix=None):
+ """ Returns all labels found on the manifest. If specified, the key_prefix will filter the
+ labels returned to those keys that start with the given prefix.
+ """
+
+ @abstractmethod
+ def get_manifest_label(self, manifest, label_uuid):
+ """ Returns the label with the specified UUID on the manifest or None if none. """
+
+ @abstractmethod
+ def delete_manifest_label(self, manifest, label_uuid):
+ """ Delete the label with the specified UUID on the manifest. Returns the label deleted
+ or None if none.
+ """
+
+ @abstractmethod
+ def lookup_cached_active_repository_tags(self, model_cache, repository_ref, start_pagination_id,
+ limit):
+ """
+ Returns a page of active tags in a repository. Note that the tags returned by this method
+ are ShallowTag objects, which only contain the tag name. This method will automatically cache
+ the result and check the cache before making a call.
+ """
+
+ @abstractmethod
+ def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
+ """
+ Returns a page of active tags in a repository. Note that the tags returned by this method
+ are ShallowTag objects, which only contain the tag name.
+ """
+
+ @abstractmethod
+ def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False):
+ """
+ Returns a list of all the active tags in the repository. Note that this is a *HEAVY*
+ operation on repositories with a lot of tags, and should only be used for testing or
+ where other more specific operations are not possible.
+ """
+
+ @abstractmethod
+ def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None,
+ active_tags_only=False, since_time_ms=None):
+ """
+ Returns the history of all tags in the repository (unless filtered). This includes tags that
+    have been made inactive due to newer versions of those tags coming into service.
+ """
+
+ @abstractmethod
+ def get_most_recent_tag_lifetime_start(self, repository_refs):
+ """
+    Returns a map from repository ID to the last modified time (in seconds since the epoch, UTC)
+ for each repository in the given repository reference list.
+ """
+
+ @abstractmethod
+ def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False):
+ """
+ Returns the latest, *active* tag found in the repository, with the matching name
+ or None if none.
+ """
+
+ @abstractmethod
+ def has_expired_tag(self, repository_ref, tag_name):
+ """
+ Returns true if and only if the repository contains a tag with the given name that is expired.
+ """
+
+ @abstractmethod
+ def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image,
+ storage, legacy_manifest_key, is_reversion=False):
+ """
+ Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
+ legacy image specified. If is_reversion is set to True, this operation is considered a
+ reversion over a previous tag move operation. Returns the updated Tag or None on error.
+ """
+
+ @abstractmethod
+ def delete_tag(self, repository_ref, tag_name):
+ """
+ Deletes the latest, *active* tag with the given name in the repository.
+ """
+
+ @abstractmethod
+ def delete_tags_for_manifest(self, manifest):
+ """
+ Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling.
+ Returns the tags deleted, if any. Returns None on error.
+ """
+
+ @abstractmethod
+ def change_repository_tag_expiration(self, tag, expiration_date):
+ """ Sets the expiration date of the tag under the matching repository to that given. If the
+ expiration date is None, then the tag will not expire. Returns a tuple of the previous
+ expiration timestamp in seconds (if any), and whether the operation succeeded.
+ """
+
+ @abstractmethod
+ def get_legacy_images_owned_by_tag(self, tag):
+ """ Returns all legacy images *solely owned and used* by the given tag. """
+
+ @abstractmethod
+ def get_security_status(self, manifest_or_legacy_image):
+ """ Returns the security status for the given manifest or legacy image or None if none. """
+
+ @abstractmethod
+ def reset_security_status(self, manifest_or_legacy_image):
+ """ Resets the security status for the given manifest or legacy image, ensuring that it will
+ get re-indexed.
+ """
+
+ @abstractmethod
+ def backfill_manifest_for_tag(self, tag):
+ """ Backfills a manifest for the V1 tag specified.
+ If a manifest already exists for the tag, returns that manifest.
+
+ NOTE: This method will only be necessary until we've completed the backfill, at which point
+ it should be removed.
+ """
+
+ @abstractmethod
+ def is_existing_disabled_namespace(self, namespace_name):
+ """ Returns whether the given namespace exists and is disabled. """
+
+ @abstractmethod
+ def is_namespace_enabled(self, namespace_name):
+ """ Returns whether the given namespace exists and is enabled. """
+
+ @abstractmethod
+ def get_manifest_local_blobs(self, manifest, include_placements=False):
+ """ Returns the set of local blobs for the given manifest or None if none. """
+
+ @abstractmethod
+ def list_manifest_layers(self, manifest, storage, include_placements=False):
+ """ Returns an *ordered list* of the layers found in the manifest, starting at the base
+ and working towards the leaf, including the associated Blob and its placements
+ (if specified). The layer information in `layer_info` will be of type
+ `image.docker.types.ManifestImageLayer`. Should not be called for a manifest list.
+ """
+
+ @abstractmethod
+ def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
+ include_placements=False):
+ """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
+ and working towards the leaf, including the associated Blob and its placements
+ (if specified). The layer information in `layer_info` will be of type
+ `image.docker.types.ManifestImageLayer`. Should not be called for a manifest list.
+ """
+
+ @abstractmethod
+ def lookup_derived_image(self, manifest, verb, storage, varying_metadata=None,
+ include_placements=False):
+ """
+ Looks up the derived image for the given manifest, verb and optional varying metadata and
+ returns it or None if none.
+ """
+
+ @abstractmethod
+ def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
+ varying_metadata=None, include_placements=False):
+ """
+    Looks up the derived image for the given manifest, verb and optional varying metadata
+ and returns it. If none exists, a new derived image is created.
+ """
+
+ @abstractmethod
+ def get_derived_image_signature(self, derived_image, signer_name):
+ """
+ Returns the signature associated with the derived image and a specific signer or None if none.
+ """
+
+ @abstractmethod
+ def set_derived_image_signature(self, derived_image, signer_name, signature):
+ """
+ Sets the calculated signature for the given derived image and signer to that specified.
+ """
+
+ @abstractmethod
+ def delete_derived_image(self, derived_image):
+ """
+ Deletes a derived image and all of its storage.
+ """
+
+ @abstractmethod
+ def set_derived_image_size(self, derived_image, compressed_size):
+ """
+ Sets the compressed size on the given derived image.
+ """
+
+ @abstractmethod
+ def get_torrent_info(self, blob):
+ """
+ Returns the torrent information associated with the given blob or None if none.
+ """
+
+ @abstractmethod
+ def set_torrent_info(self, blob, piece_length, pieces):
+ """
+    Sets the torrent information associated with the given blob to that specified.
+ """
+
+ @abstractmethod
+ def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
+ """
+ Returns the blob in the repository with the given digest, if any or None if none. Note that
+ there may be multiple records in the same repository for the same blob digest, so the return
+ value of this function may change.
+ """
+
+ @abstractmethod
+ def create_blob_upload(self, repository_ref, upload_id, location_name, storage_metadata):
+ """ Creates a new blob upload and returns a reference. If the blob upload could not be
+ created, returns None. """
+
+ @abstractmethod
+ def lookup_blob_upload(self, repository_ref, blob_upload_id):
+ """ Looks up the blob upload with the given ID under the specified repository and returns it
+ or None if none.
+ """
+
+ @abstractmethod
+ def update_blob_upload(self, blob_upload, uncompressed_byte_count, piece_hashes, piece_sha_state,
+ storage_metadata, byte_count, chunk_count, sha_state):
+ """ Updates the fields of the blob upload to match those given. Returns the updated blob upload
+        or None if the record does not exist.
+ """
+
+ @abstractmethod
+ def delete_blob_upload(self, blob_upload):
+ """ Deletes a blob upload record. """
+
+ @abstractmethod
+ def commit_blob_upload(self, blob_upload, blob_digest_str, blob_expiration_seconds):
+ """ Commits the blob upload into a blob and sets an expiration before that blob will be GCed.
+ """
+
+ @abstractmethod
+ def mount_blob_into_repository(self, blob, target_repository_ref, expiration_sec):
+ """
+ Mounts the blob from another repository into the specified target repository, and adds an
+ expiration before that blob is automatically GCed. This function is useful during push
+ operations if an existing blob from another repository is being pushed. Returns False if
+ the mounting fails. Note that this function does *not* check security for mounting the blob
+ and the caller is responsible for doing this check (an example can be found in
+ endpoints/v2/blob.py).
+ """
+
+ @abstractmethod
+ def set_tags_expiration_for_manifest(self, manifest, expiration_sec):
+ """
+ Sets the expiration on all tags that point to the given manifest to that specified.
+ """
+
+ @abstractmethod
+ def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
+ """ Returns the schema 1 version of this manifest, or None if none. """
+
+ @abstractmethod
+ def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
+ expiration_sec, storage):
+ """ Creates a manifest under the repository and sets a temporary tag to point to it.
+ Returns the manifest object created or None on error.
+ """
+
+ @abstractmethod
+ def get_cached_namespace_region_blacklist(self, model_cache, namespace_name):
+ """ Returns a cached set of ISO country codes blacklisted for pulls for the namespace
+ or None if the list could not be loaded.
+ """
+
+ @abstractmethod
+ def convert_manifest(self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes,
+ storage):
+    """ Attempts to convert the specified manifest into a parsed manifest with a media type
+ in the allowed_mediatypes set. If not possible, or an error occurs, returns None.
+ """
+
+ @abstractmethod
+ def yield_tags_for_vulnerability_notification(self, layer_id_pairs):
+ """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories
+ which have been registered for vulnerability_found notifications. Returns an iterator
+ of LikelyVulnerableTag instances.
+ """
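+
+
+# Illustrative sketch of caller-side usage: application code goes through the `registry_model`
+# proxy rather than instantiating this interface directly. The import is local to avoid a
+# circular import, and the default names below are hypothetical.
+def _example_resolve_manifest(namespace_name='some-namespace', repo_name='some-repo',
+                              tag_name='latest'):
+  from data.registry_model import registry_model
+
+  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
+  if repository_ref is None:
+    return None
+
+  tag = registry_model.get_repo_tag(repository_ref, tag_name, include_legacy_image=True)
+  if tag is None:
+    return None
+
+  return registry_model.get_manifest_for_tag(tag)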
diff --git a/data/registry_model/label_handlers.py b/data/registry_model/label_handlers.py
new file mode 100644
index 000000000..96afe0d94
--- /dev/null
+++ b/data/registry_model/label_handlers.py
@@ -0,0 +1,28 @@
+import logging
+
+from util.timedeltastring import convert_to_timedelta
+
+logger = logging.getLogger(__name__)
+
+def _expires_after(label_dict, manifest, model):
+  """ Sets the expiration of a manifest based on the quay.expires-after label. """
+ try:
+ timedelta = convert_to_timedelta(label_dict['value'])
+ except ValueError:
+ logger.exception('Could not convert %s to timedeltastring', label_dict['value'])
+ return
+
+ total_seconds = timedelta.total_seconds()
+ logger.debug('Labeling manifest %s with expiration of %s', manifest, total_seconds)
+ model.set_tags_expiration_for_manifest(manifest, total_seconds)
+
+
+_LABEL_HANDLERS = {
+ 'quay.expires-after': _expires_after,
+}
+
+def apply_label_to_manifest(label_dict, manifest, model):
+ """ Runs the handler defined, if any, for the given label. """
+ handler = _LABEL_HANDLERS.get(label_dict['key'])
+ if handler is not None:
+ handler(label_dict, manifest, model)
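+
+
+# Illustrative sketch: a `quay.expires-after` label is dispatched to _expires_after, which
+# converts the timedelta string (e.g. two weeks) into seconds and calls
+# set_tags_expiration_for_manifest on the model. The manifest and model arguments are assumed
+# to be supplied by the caller.
+def _example_apply_expiration_label(manifest, model, value='2w'):
+  apply_label_to_manifest({'key': 'quay.expires-after', 'value': value}, manifest, model)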
diff --git a/data/registry_model/manifestbuilder.py b/data/registry_model/manifestbuilder.py
new file mode 100644
index 000000000..384ecb604
--- /dev/null
+++ b/data/registry_model/manifestbuilder.py
@@ -0,0 +1,220 @@
+import logging
+import json
+import uuid
+
+from collections import namedtuple
+
+from flask import session
+
+from data import model
+from data.database import db_transaction, ImageStorage, ImageStoragePlacement
+from data.registry_model import registry_model
+from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST
+
+logger = logging.getLogger(__name__)
+
+ManifestLayer = namedtuple('ManifestLayer', ['layer_id', 'v1_metadata_string', 'db_id'])
+_BuilderState = namedtuple('_BuilderState', ['builder_id', 'images', 'tags', 'checksums',
+ 'temp_storages'])
+
+_SESSION_KEY = '__manifestbuilder'
+
+
+def create_manifest_builder(repository_ref, storage, legacy_signing_key):
+ """ Creates a new manifest builder for populating manifests under the specified repository
+ and returns it. Returns None if the builder could not be constructed.
+ """
+ builder_id = str(uuid.uuid4())
+ builder = _ManifestBuilder(repository_ref, _BuilderState(builder_id, {}, {}, {}, []), storage,
+ legacy_signing_key)
+ builder._save_to_session()
+ return builder
+
+
+def lookup_manifest_builder(repository_ref, builder_id, storage, legacy_signing_key):
+ """ Looks up the manifest builder with the given ID under the specified repository and returns
+ it or None if none.
+ """
+ builder_state_tuple = session.get(_SESSION_KEY)
+ if builder_state_tuple is None:
+ return None
+
+ builder_state = _BuilderState(*builder_state_tuple)
+ if builder_state.builder_id != builder_id:
+ return None
+
+ return _ManifestBuilder(repository_ref, builder_state, storage, legacy_signing_key)
+
+
+class _ManifestBuilder(object):
+ """ Helper class which provides an interface for bookkeeping the layers and configuration of
+ manifests being constructed.
+ """
+ def __init__(self, repository_ref, builder_state, storage, legacy_signing_key):
+ self._repository_ref = repository_ref
+ self._builder_state = builder_state
+ self._storage = storage
+ self._legacy_signing_key = legacy_signing_key
+
+ @property
+ def builder_id(self):
+ """ Returns the unique ID for this builder. """
+ return self._builder_state.builder_id
+
+ @property
+ def committed_tags(self):
+ """ Returns the tags committed by this builder, if any. """
+ return [registry_model.get_repo_tag(self._repository_ref, tag_name, include_legacy_image=True)
+ for tag_name in self._builder_state.tags.keys()]
+
+ def start_layer(self, layer_id, v1_metadata_string, location_name, calling_user,
+ temp_tag_expiration):
+ """ Starts a new layer with the given ID to be placed into a manifest. Returns the layer
+ started or None if an error occurred.
+ """
+ # Ensure the repository still exists.
+ repository = model.repository.lookup_repository(self._repository_ref._db_id)
+ if repository is None:
+ return None
+
+ namespace_name = repository.namespace_user.username
+ repo_name = repository.name
+
+ try:
+ v1_metadata = json.loads(v1_metadata_string)
+    except (ValueError, TypeError):
+      logger.exception('Exception when trying to parse V1 metadata JSON for layer %s', layer_id)
+      return None
+
+ # Sanity check that the ID matches the v1 metadata.
+ if layer_id != v1_metadata['id']:
+ return None
+
+ # Ensure the parent already exists in the repository.
+ parent_id = v1_metadata.get('parent', None)
+ parent_image = None
+
+ if parent_id is not None:
+ parent_image = model.image.get_repo_image(namespace_name, repo_name, parent_id)
+ if parent_image is None:
+ return None
+
+ # Check to see if this layer already exists in the repository. If so, we can skip the creation.
+ existing_image = registry_model.get_legacy_image(self._repository_ref, layer_id)
+ if existing_image is not None:
+ self._builder_state.images[layer_id] = existing_image.id
+ self._save_to_session()
+ return ManifestLayer(layer_id, v1_metadata_string, existing_image.id)
+
+ with db_transaction():
+ # Otherwise, create a new legacy image and point a temporary tag at it.
+ created = model.image.find_create_or_link_image(layer_id, repository, calling_user, {},
+ location_name)
+ model.tag.create_temporary_hidden_tag(repository, created, temp_tag_expiration)
+
+ # Save its V1 metadata.
+ command_list = v1_metadata.get('container_config', {}).get('Cmd', None)
+ command = json.dumps(command_list) if command_list else None
+
+ model.image.set_image_metadata(layer_id, namespace_name, repo_name,
+ v1_metadata.get('created'),
+ v1_metadata.get('comment'),
+ command, v1_metadata_string,
+ parent=parent_image)
+
+ # Save the changes to the builder.
+ self._builder_state.images[layer_id] = created.id
+ self._save_to_session()
+
+ return ManifestLayer(layer_id, v1_metadata_string, created.id)
+
+ def lookup_layer(self, layer_id):
+ """ Returns a layer with the given ID under this builder. If none exists, returns None. """
+ if layer_id not in self._builder_state.images:
+ return None
+
+ image = model.image.get_image_by_db_id(self._builder_state.images[layer_id])
+ if image is None:
+ return None
+
+ return ManifestLayer(layer_id, image.v1_json_metadata, image.id)
+
+ def assign_layer_blob(self, layer, blob, computed_checksums):
+ """ Assigns a blob to a layer. """
+ assert blob
+ assert not blob.uploading
+
+ repo_image = model.image.get_image_by_db_id(layer.db_id)
+ if repo_image is None:
+ return None
+
+ with db_transaction():
+ existing_storage = repo_image.storage
+ repo_image.storage = blob._db_id
+ repo_image.save()
+
+ if existing_storage.uploading:
+ self._builder_state.temp_storages.append(existing_storage.id)
+
+ self._builder_state.checksums[layer.layer_id] = computed_checksums
+ self._save_to_session()
+ return True
+
+ def validate_layer_checksum(self, layer, checksum):
+ """ Returns whether the checksum for a layer matches that specified.
+ """
+ return checksum in self.get_layer_checksums(layer)
+
+ def get_layer_checksums(self, layer):
+    """ Returns the registered checksums defined for the layer, if any. """
+ return self._builder_state.checksums.get(layer.layer_id) or []
+
+ def save_precomputed_checksum(self, layer, checksum):
+ """ Saves a precomputed checksum for a layer. """
+ checksums = self._builder_state.checksums.get(layer.layer_id) or []
+ checksums.append(checksum)
+ self._builder_state.checksums[layer.layer_id] = checksums
+ self._save_to_session()
+
+ def commit_tag_and_manifest(self, tag_name, layer):
+ """ Commits a new tag + manifest for that tag to the repository with the given name,
+ pointing to the given layer.
+ """
+ legacy_image = registry_model.get_legacy_image(self._repository_ref, layer.layer_id)
+ if legacy_image is None:
+ return None
+
+ tag = registry_model.retarget_tag(self._repository_ref, tag_name, legacy_image, self._storage,
+ self._legacy_signing_key)
+ if tag is None:
+ return None
+
+ self._builder_state.tags[tag_name] = tag._db_id
+ self._save_to_session()
+ return tag
+
+ def done(self):
+ """ Marks the manifest builder as complete and disposes of any state. This call is optional
+ and it is expected manifest builders will eventually time out if unused for an
+ extended period of time.
+ """
+ temp_storages = self._builder_state.temp_storages
+ for storage_id in temp_storages:
+ try:
+ storage = ImageStorage.get(id=storage_id)
+ if storage.uploading and storage.content_checksum != EMPTY_LAYER_BLOB_DIGEST:
+ # Delete all the placements pointing to the storage.
+ ImageStoragePlacement.delete().where(ImageStoragePlacement.storage == storage).execute()
+
+ # Delete the storage.
+ storage.delete_instance()
+ except ImageStorage.DoesNotExist:
+ pass
+
+ session.pop(_SESSION_KEY, None)
+
+ def _save_to_session(self):
+ session[_SESSION_KEY] = self._builder_state
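+
+
+# Illustrative sketch of the V1 push flow this builder supports; every argument below
+# (repository_ref, storage engine, signing key, user, blob, metadata, checksum) is assumed to be
+# provided by the calling endpoint, and 'local_us' is a hypothetical storage location name.
+def _example_v1_push(repository_ref, storage, legacy_signing_key, calling_user, blob, layer_id,
+                     v1_metadata_string, checksum, tag_name='latest', location_name='local_us',
+                     temp_tag_expiration=300):
+  builder = create_manifest_builder(repository_ref, storage, legacy_signing_key)
+  if builder is None:
+    return None
+
+  layer = builder.start_layer(layer_id, v1_metadata_string, location_name, calling_user,
+                              temp_tag_expiration)
+  if layer is None:
+    return None
+
+  builder.assign_layer_blob(layer, blob, [checksum])
+  tag = builder.commit_tag_and_manifest(tag_name, layer)
+  builder.done()
+  return tag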
diff --git a/data/registry_model/modelsplitter.py b/data/registry_model/modelsplitter.py
new file mode 100644
index 000000000..675a66928
--- /dev/null
+++ b/data/registry_model/modelsplitter.py
@@ -0,0 +1,112 @@
+import inspect
+import logging
+import hashlib
+
+from data.database import DerivedStorageForImage, TagManifest, Manifest, Image
+from data.registry_model.registry_oci_model import back_compat_oci_model, oci_model
+from data.registry_model.registry_pre_oci_model import pre_oci_model
+from data.registry_model.datatypes import LegacyImage, Manifest as ManifestDataType
+
+
+logger = logging.getLogger(__name__)
+
+
+class SplitModel(object):
+ def __init__(self, oci_model_proportion, oci_namespace_whitelist, v22_namespace_whitelist,
+ oci_only_mode):
+ self.v22_namespace_whitelist = set(v22_namespace_whitelist)
+
+ self.oci_namespace_whitelist = set(oci_namespace_whitelist)
+ self.oci_namespace_whitelist.update(v22_namespace_whitelist)
+
+ self.oci_model_proportion = oci_model_proportion
+ self.oci_only_mode = oci_only_mode
+
+ def supports_schema2(self, namespace_name):
+ """ Returns whether the implementation of the data interface supports schema 2 format
+ manifests. """
+ return namespace_name in self.v22_namespace_whitelist
+
+ def _namespace_from_kwargs(self, args_dict):
+ if 'namespace_name' in args_dict:
+ return args_dict['namespace_name']
+
+ if 'repository_ref' in args_dict:
+ return args_dict['repository_ref'].namespace_name
+
+ if 'tag' in args_dict:
+ return args_dict['tag'].repository.namespace_name
+
+ if 'manifest' in args_dict:
+ manifest = args_dict['manifest']
+ if manifest._is_tag_manifest:
+ return TagManifest.get(id=manifest._db_id).tag.repository.namespace_user.username
+ else:
+ return Manifest.get(id=manifest._db_id).repository.namespace_user.username
+
+ if 'manifest_or_legacy_image' in args_dict:
+ manifest_or_legacy_image = args_dict['manifest_or_legacy_image']
+ if isinstance(manifest_or_legacy_image, LegacyImage):
+ return Image.get(id=manifest_or_legacy_image._db_id).repository.namespace_user.username
+ else:
+ manifest = manifest_or_legacy_image
+ if manifest._is_tag_manifest:
+ return TagManifest.get(id=manifest._db_id).tag.repository.namespace_user.username
+ else:
+ return Manifest.get(id=manifest._db_id).repository.namespace_user.username
+
+ if 'derived_image' in args_dict:
+ return (DerivedStorageForImage
+ .get(id=args_dict['derived_image']._db_id)
+ .source_image
+ .repository
+ .namespace_user
+ .username)
+
+ if 'blob' in args_dict:
+ return '' # Blob functions are shared, so no need to do anything.
+
+ if 'blob_upload' in args_dict:
+ return '' # Blob functions are shared, so no need to do anything.
+
+ raise Exception('Unknown namespace for dict `%s`' % args_dict)
+
+ def __getattr__(self, attr):
+ def method(*args, **kwargs):
+ if self.oci_model_proportion >= 1.0:
+ if self.oci_only_mode:
+ logger.debug('Calling method `%s` under full OCI data model for all namespaces', attr)
+ return getattr(oci_model, attr)(*args, **kwargs)
+ else:
+ logger.debug('Calling method `%s` under compat OCI data model for all namespaces', attr)
+ return getattr(back_compat_oci_model, attr)(*args, **kwargs)
+
+ argnames = inspect.getargspec(getattr(back_compat_oci_model, attr))[0]
+ if not argnames and isinstance(args[0], ManifestDataType):
+ args_dict = dict(manifest=args[0])
+ else:
+ args_dict = {argnames[index + 1]: value for index, value in enumerate(args)}
+
+ if attr in ['yield_tags_for_vulnerability_notification', 'get_most_recent_tag_lifetime_start']:
+ use_oci = self.oci_model_proportion >= 1.0
+ namespace_name = '(implicit for ' + attr + ')'
+ else:
+ namespace_name = self._namespace_from_kwargs(args_dict)
+ use_oci = namespace_name in self.oci_namespace_whitelist
+
+ if not use_oci and self.oci_model_proportion:
+ # Hash the namespace name and see if it falls into the proportion bucket.
+ bucket = (int(hashlib.md5(namespace_name).hexdigest(), 16) % 100)
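+        # For illustration (hypothetical values): with oci_model_proportion = 0.25 the md5
+        # hash gives each namespace a stable bucket in 0-99, and any namespace whose bucket
+        # is <= int(0.25 * 100) == 25 falls through to the OCI model below.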
+ if bucket <= int(self.oci_model_proportion * 100):
+ logger.debug('Enabling OCI for namespace `%s` in proportional bucket',
+ namespace_name)
+ use_oci = True
+
+ if use_oci:
+ logger.debug('Calling method `%s` under OCI data model for namespace `%s`',
+ attr, namespace_name)
+ return getattr(back_compat_oci_model, attr)(*args, **kwargs)
+ else:
+ return getattr(pre_oci_model, attr)(*args, **kwargs)
+
+ return method
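+
+
+# Illustrative sketch only (the namespace names below are hypothetical, not part of this
+# change): SplitModel dispatches each registry call per-namespace, e.g.
+#
+#   registry_model = SplitModel(oci_model_proportion=0.25,
+#                               oci_namespace_whitelist=['oci-early-adopters'],
+#                               v22_namespace_whitelist=['schema2-beta'],
+#                               oci_only_mode=False)
+#   registry_model.get_repo_tag(repository_ref, 'latest')  # routed by namespace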
diff --git a/data/registry_model/registry_oci_model.py b/data/registry_model/registry_oci_model.py
new file mode 100644
index 000000000..8821a747b
--- /dev/null
+++ b/data/registry_model/registry_oci_model.py
@@ -0,0 +1,668 @@
+# pylint: disable=protected-access
+import logging
+
+from contextlib import contextmanager
+from peewee import fn
+
+from data import database
+from data import model
+from data.model import oci, DataModelException
+from data.model.oci.retriever import RepositoryContentRetriever
+from data.database import db_transaction, Image, IMAGE_NOT_SCANNED_ENGINE_VERSION
+from data.registry_model.interface import RegistryDataInterface
+from data.registry_model.datatypes import (Tag, Manifest, LegacyImage, Label, SecurityScanStatus,
+ Blob, ShallowTag, LikelyVulnerableTag)
+from data.registry_model.shared import SharedModel
+from data.registry_model.label_handlers import apply_label_to_manifest
+from image.docker import ManifestException
+from image.docker.schema1 import DOCKER_SCHEMA1_CONTENT_TYPES
+from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
+
+
+logger = logging.getLogger(__name__)
+
+
+class OCIModel(SharedModel, RegistryDataInterface):
+ """
+ OCIModel implements the data model for the registry API using a database schema
+ after it was changed to support the OCI specification.
+ """
+ def __init__(self, oci_model_only=True):
+ self.oci_model_only = oci_model_only
+
+ def supports_schema2(self, namespace_name):
+ """ Returns whether the implementation of the data interface supports schema 2 format
+ manifests. """
+ return True
+
+ def get_tag_legacy_image_id(self, repository_ref, tag_name, storage):
+ """ Returns the legacy image ID for the tag with a legacy images in
+ the repository. Returns None if None.
+ """
+ tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True)
+ if tag is None:
+ return None
+
+ if tag.legacy_image_if_present is not None:
+ return tag.legacy_image_if_present.docker_image_id
+
+ if tag.manifest.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
+ # See if we can lookup a schema1 legacy image.
+ v1_compatible = self.get_schema1_parsed_manifest(tag.manifest, '', '', '', storage)
+ if v1_compatible is not None:
+ return v1_compatible.leaf_layer_v1_image_id
+
+ return None
+
+ def get_legacy_tags_map(self, repository_ref, storage):
+ """ Returns a map from tag name to its legacy image ID, for all tags with legacy images in
+ the repository. Note that this can be a *very* heavy operation.
+ """
+ tags = oci.tag.list_alive_tags(repository_ref._db_id)
+ legacy_images_map = oci.tag.get_legacy_images_for_tags(tags)
+
+ tags_map = {}
+ for tag in tags:
+ legacy_image = legacy_images_map.get(tag.id)
+ if legacy_image is not None:
+ tags_map[tag.name] = legacy_image.docker_image_id
+ else:
+ manifest = Manifest.for_manifest(tag.manifest, None)
+ if legacy_image is None and manifest.media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
+ # See if we can lookup a schema1 legacy image.
+ v1_compatible = self.get_schema1_parsed_manifest(manifest, '', '', '', storage)
+ if v1_compatible is not None:
+ v1_id = v1_compatible.leaf_layer_v1_image_id
+ if v1_id is not None:
+ tags_map[tag.name] = v1_id
+
+ return tags_map
+
+ def _get_legacy_compatible_image_for_manifest(self, manifest, storage):
+ # Check for a legacy image directly on the manifest.
+ if manifest.media_type != DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
+ return oci.shared.get_legacy_image_for_manifest(manifest._db_id)
+
+ # Otherwise, lookup a legacy image associated with the v1-compatible manifest
+ # in the list.
+ try:
+ manifest_obj = database.Manifest.get(id=manifest._db_id)
+ except database.Manifest.DoesNotExist:
+ logger.exception('Could not find manifest for manifest `%s`', manifest._db_id)
+ return None
+
+ # See if we can lookup a schema1 legacy image.
+ v1_compatible = self.get_schema1_parsed_manifest(manifest, '', '', '', storage)
+ if v1_compatible is None:
+ return None
+
+ v1_id = v1_compatible.leaf_layer_v1_image_id
+ if v1_id is None:
+ return None
+
+ return model.image.get_image(manifest_obj.repository_id, v1_id)
+
+ def find_matching_tag(self, repository_ref, tag_names):
+ """ Finds an alive tag in the repository matching one of the given tag names and returns it
+ or None if none.
+ """
+ found_tag = oci.tag.find_matching_tag(repository_ref._db_id, tag_names)
+ assert found_tag is None or not found_tag.hidden
+ return Tag.for_tag(found_tag)
+
+ def get_most_recent_tag(self, repository_ref):
+ """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
+ None.
+ """
+ found_tag = oci.tag.get_most_recent_tag(repository_ref._db_id)
+ assert found_tag is None or not found_tag.hidden
+ return Tag.for_tag(found_tag)
+
+ def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):
+ """ Returns the manifest associated with the given tag. """
+ legacy_image = None
+ if include_legacy_image:
+ legacy_image = oci.shared.get_legacy_image_for_manifest(tag._manifest)
+
+ return Manifest.for_manifest(tag._manifest, LegacyImage.for_image(legacy_image))
+
+ def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False,
+ include_legacy_image=False, require_available=False):
+ """ Looks up the manifest with the given digest under the given repository and returns it
+ or None if none. """
+ manifest = oci.manifest.lookup_manifest(repository_ref._db_id, manifest_digest,
+ allow_dead=allow_dead,
+ require_available=require_available)
+ if manifest is None:
+ return None
+
+ legacy_image = None
+ if include_legacy_image:
+ try:
+ legacy_image_id = database.ManifestLegacyImage.get(manifest=manifest).image.docker_image_id
+ legacy_image = self.get_legacy_image(repository_ref, legacy_image_id, include_parents=True)
+ except database.ManifestLegacyImage.DoesNotExist:
+ pass
+
+ return Manifest.for_manifest(manifest, legacy_image)
+
+ def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
+ """ Creates a label on the manifest with the given key and value. """
+ label_data = dict(key=key, value=value, source_type_name=source_type_name,
+ media_type_name=media_type_name)
+
+ # Create the label itself.
+ label = oci.label.create_manifest_label(manifest._db_id, key, value, source_type_name,
+ media_type_name,
+ adjust_old_model=not self.oci_model_only)
+ if label is None:
+ return None
+
+ # Apply any changes to the manifest that the label prescribes.
+ apply_label_to_manifest(label_data, manifest, self)
+
+ return Label.for_label(label)
+
+ @contextmanager
+ def batch_create_manifest_labels(self, manifest):
+ """ Returns a context manager for batch creation of labels on a manifest.
+
+ Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
+ on the validation errors.
+ """
+ labels_to_add = []
+ def add_label(key, value, source_type_name, media_type_name=None):
+ labels_to_add.append(dict(key=key, value=value, source_type_name=source_type_name,
+ media_type_name=media_type_name))
+
+ yield add_label
+
+ # TODO: make this truly batch once we've fully transitioned to V2_2 and no longer need
+ # the mapping tables.
+ for label_data in labels_to_add:
+ with db_transaction():
+ # Create the label itself.
+ oci.label.create_manifest_label(manifest._db_id, **label_data)
+
+ # Apply any changes to the manifest that the label prescribes.
+ apply_label_to_manifest(label_data, manifest, self)
+
+ def list_manifest_labels(self, manifest, key_prefix=None):
+ """ Returns all labels found on the manifest. If specified, the key_prefix will filter the
+ labels returned to those keys that start with the given prefix.
+ """
+ labels = oci.label.list_manifest_labels(manifest._db_id, prefix_filter=key_prefix)
+ return [Label.for_label(l) for l in labels]
+
+ def get_manifest_label(self, manifest, label_uuid):
+ """ Returns the label with the specified UUID on the manifest or None if none. """
+ return Label.for_label(oci.label.get_manifest_label(label_uuid, manifest._db_id))
+
+ def delete_manifest_label(self, manifest, label_uuid):
+ """ Delete the label with the specified UUID on the manifest. Returns the label deleted
+ or None if none.
+ """
+ return Label.for_label(oci.label.delete_manifest_label(label_uuid, manifest._db_id))
+
+ def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
+ """
+    Returns a page of active tags in a repository. Note that the tags returned by this method
+ are ShallowTag objects, which only contain the tag name.
+ """
+ tags = oci.tag.lookup_alive_tags_shallow(repository_ref._db_id, start_pagination_id, limit)
+ return [ShallowTag.for_tag(tag) for tag in tags]
+
+ def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False):
+ """
+ Returns a list of all the active tags in the repository. Note that this is a *HEAVY*
+ operation on repositories with a lot of tags, and should only be used for testing or
+ where other more specific operations are not possible.
+ """
+ tags = list(oci.tag.list_alive_tags(repository_ref._db_id))
+ legacy_images_map = {}
+ if include_legacy_images:
+ legacy_images_map = oci.tag.get_legacy_images_for_tags(tags)
+
+ return [Tag.for_tag(tag, legacy_image=LegacyImage.for_image(legacy_images_map.get(tag.id)))
+ for tag in tags]
+
+ def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None,
+ active_tags_only=False, since_time_ms=None):
+ """
+ Returns the history of all tags in the repository (unless filtered). This includes tags that
+    have been made inactive due to newer versions of those tags coming into service.
+ """
+ tags, has_more = oci.tag.list_repository_tag_history(repository_ref._db_id,
+ page, size,
+ specific_tag_name,
+ active_tags_only,
+ since_time_ms)
+
+ # TODO: do we need legacy images here?
+ legacy_images_map = oci.tag.get_legacy_images_for_tags(tags)
+ return [Tag.for_tag(tag, LegacyImage.for_image(legacy_images_map.get(tag.id))) for tag in tags], has_more
+
+ def has_expired_tag(self, repository_ref, tag_name):
+ """
+ Returns true if and only if the repository contains a tag with the given name that is expired.
+ """
+ return bool(oci.tag.get_expired_tag(repository_ref._db_id, tag_name))
+
+ def get_most_recent_tag_lifetime_start(self, repository_refs):
+ """
+    Returns a map from repository ID to the last modified time (in seconds) for each repository in the
+ given repository reference list.
+ """
+ if not repository_refs:
+ return {}
+
+ toSeconds = lambda ms: ms / 1000 if ms is not None else None
+ last_modified = oci.tag.get_most_recent_tag_lifetime_start([r.id for r in repository_refs])
+
+ return {repo_id: toSeconds(ms) for repo_id, ms in last_modified.items()}
+
+ def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False):
+ """
+ Returns the latest, *active* tag found in the repository, with the matching name
+ or None if none.
+ """
+ assert isinstance(tag_name, basestring)
+
+ tag = oci.tag.get_tag(repository_ref._db_id, tag_name)
+ if tag is None:
+ return None
+
+ legacy_image = None
+ if include_legacy_image:
+ legacy_images = oci.tag.get_legacy_images_for_tags([tag])
+ legacy_image = legacy_images.get(tag.id)
+
+ return Tag.for_tag(tag, legacy_image=LegacyImage.for_image(legacy_image))
+
+ def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
+ storage, raise_on_error=False):
+ """ Creates a manifest in a repository, adding all of the necessary data in the model.
+
+ The `manifest_interface_instance` parameter must be an instance of the manifest
+ interface as returned by the image/docker package.
+
+ Note that all blobs referenced by the manifest must exist under the repository or this
+ method will fail and return None.
+
+ Returns a reference to the (created manifest, tag) or (None, None) on error, unless
+ raise_on_error is set to True, in which case a CreateManifestException may also be
+ raised.
+ """
+ # Get or create the manifest itself.
+ created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id,
+ manifest_interface_instance,
+ storage,
+ for_tagging=True,
+ raise_on_error=raise_on_error)
+ if created_manifest is None:
+ return (None, None)
+
+ # Re-target the tag to it.
+ tag = oci.tag.retarget_tag(tag_name, created_manifest.manifest,
+ adjust_old_model=not self.oci_model_only)
+ if tag is None:
+ return (None, None)
+
+ legacy_image = oci.shared.get_legacy_image_for_manifest(created_manifest.manifest)
+ li = LegacyImage.for_image(legacy_image)
+ wrapped_manifest = Manifest.for_manifest(created_manifest.manifest, li)
+
+ # Apply any labels that should modify the created tag.
+ if created_manifest.labels_to_apply:
+ for key, value in created_manifest.labels_to_apply.iteritems():
+ apply_label_to_manifest(dict(key=key, value=value), wrapped_manifest, self)
+
+ # Reload the tag in case any updates were applied.
+ tag = database.Tag.get(id=tag.id)
+
+ return (wrapped_manifest, Tag.for_tag(tag, li))
+
+ def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, storage,
+ legacy_manifest_key, is_reversion=False):
+ """
+ Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
+ legacy image specified. If is_reversion is set to True, this operation is considered a
+ reversion over a previous tag move operation. Returns the updated Tag or None on error.
+ """
+ assert legacy_manifest_key is not None
+ manifest_id = manifest_or_legacy_image._db_id
+ if isinstance(manifest_or_legacy_image, LegacyImage):
+ # If a legacy image was required, build a new manifest for it and move the tag to that.
+ try:
+ image_row = database.Image.get(id=manifest_or_legacy_image._db_id)
+ except database.Image.DoesNotExist:
+ return None
+
+ manifest_instance = self._build_manifest_for_legacy_image(tag_name, image_row)
+ if manifest_instance is None:
+ return None
+
+ created = oci.manifest.get_or_create_manifest(repository_ref._db_id, manifest_instance,
+ storage)
+ if created is None:
+ return None
+
+ manifest_id = created.manifest.id
+ else:
+ # If the manifest is a schema 1 manifest and its tag name does not match that
+ # specified, then we need to create a new manifest, but with that tag name.
+ if manifest_or_legacy_image.media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
+ try:
+ parsed = manifest_or_legacy_image.get_parsed_manifest()
+ except ManifestException:
+ logger.exception('Could not parse manifest `%s` in retarget_tag',
+ manifest_or_legacy_image._db_id)
+ return None
+
+ if parsed.tag != tag_name:
+ logger.debug('Rewriting manifest `%s` for tag named `%s`',
+ manifest_or_legacy_image._db_id, tag_name)
+
+ repository_id = repository_ref._db_id
+ updated = parsed.with_tag_name(tag_name, legacy_manifest_key)
+ assert updated.is_signed
+
+ created = oci.manifest.get_or_create_manifest(repository_id, updated, storage)
+ if created is None:
+ return None
+
+ manifest_id = created.manifest.id
+
+ tag = oci.tag.retarget_tag(tag_name, manifest_id, is_reversion=is_reversion)
+ legacy_image = LegacyImage.for_image(oci.shared.get_legacy_image_for_manifest(manifest_id))
+ return Tag.for_tag(tag, legacy_image)
+
+ def delete_tag(self, repository_ref, tag_name):
+ """
+ Deletes the latest, *active* tag with the given name in the repository.
+ """
+ deleted_tag = oci.tag.delete_tag(repository_ref._db_id, tag_name)
+ if deleted_tag is None:
+ # TODO: This is only needed because preoci raises an exception. Remove and fix
+ # expected status codes once PreOCIModel is gone.
+ msg = ('Invalid repository tag \'%s\' on repository' % tag_name)
+ raise DataModelException(msg)
+
+ return Tag.for_tag(deleted_tag)
+
+ def delete_tags_for_manifest(self, manifest):
+ """
+ Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling.
+ Returns the tags deleted, if any. Returns None on error.
+ """
+ deleted_tags = oci.tag.delete_tags_for_manifest(manifest._db_id)
+ return [Tag.for_tag(tag) for tag in deleted_tags]
+
+ def change_repository_tag_expiration(self, tag, expiration_date):
+ """ Sets the expiration date of the tag under the matching repository to that given. If the
+ expiration date is None, then the tag will not expire. Returns a tuple of the previous
+ expiration timestamp in seconds (if any), and whether the operation succeeded.
+ """
+ return oci.tag.change_tag_expiration(tag._db_id, expiration_date)
+
+ def get_legacy_images_owned_by_tag(self, tag):
+ """ Returns all legacy images *solely owned and used* by the given tag. """
+ tag_obj = oci.tag.get_tag_by_id(tag._db_id)
+ if tag_obj is None:
+ return None
+
+ tags = oci.tag.list_alive_tags(tag_obj.repository_id)
+ legacy_images = oci.tag.get_legacy_images_for_tags(tags)
+
+ tag_legacy_image = legacy_images.get(tag._db_id)
+ if tag_legacy_image is None:
+ return None
+
+ assert isinstance(tag_legacy_image, Image)
+
+ # Collect the IDs of all images that the tag uses.
+ tag_image_ids = set()
+ tag_image_ids.add(tag_legacy_image.id)
+ tag_image_ids.update(tag_legacy_image.ancestor_id_list())
+
+ # Remove any images shared by other tags.
+ for current in tags:
+ if current == tag_obj:
+ continue
+
+ current_image = legacy_images.get(current.id)
+ if current_image is None:
+ continue
+
+ tag_image_ids.discard(current_image.id)
+ tag_image_ids = tag_image_ids.difference(current_image.ancestor_id_list())
+ if not tag_image_ids:
+ return []
+
+ if not tag_image_ids:
+ return []
+
+ # Load the images we need to return.
+ images = database.Image.select().where(database.Image.id << list(tag_image_ids))
+ all_image_ids = set()
+ for image in images:
+ all_image_ids.add(image.id)
+ all_image_ids.update(image.ancestor_id_list())
+
+ # Build a map of all the images and their parents.
+ images_map = {}
+ all_images = database.Image.select().where(database.Image.id << list(all_image_ids))
+ for image in all_images:
+ images_map[image.id] = image
+
+ return [LegacyImage.for_image(image, images_map=images_map) for image in images]
+
+ def get_security_status(self, manifest_or_legacy_image):
+ """ Returns the security status for the given manifest or legacy image or None if none. """
+ image = None
+
+ if isinstance(manifest_or_legacy_image, Manifest):
+ image = oci.shared.get_legacy_image_for_manifest(manifest_or_legacy_image._db_id)
+ if image is None:
+ return SecurityScanStatus.UNSUPPORTED
+ else:
+ try:
+ image = database.Image.get(id=manifest_or_legacy_image._db_id)
+ except database.Image.DoesNotExist:
+ return None
+
+ if image.security_indexed_engine is not None and image.security_indexed_engine >= 0:
+ return SecurityScanStatus.SCANNED if image.security_indexed else SecurityScanStatus.FAILED
+
+ return SecurityScanStatus.QUEUED
+
+ def reset_security_status(self, manifest_or_legacy_image):
+ """ Resets the security status for the given manifest or legacy image, ensuring that it will
+ get re-indexed.
+ """
+ image = None
+
+ if isinstance(manifest_or_legacy_image, Manifest):
+ image = oci.shared.get_legacy_image_for_manifest(manifest_or_legacy_image._db_id)
+ if image is None:
+ return None
+ else:
+ try:
+ image = database.Image.get(id=manifest_or_legacy_image._db_id)
+ except database.Image.DoesNotExist:
+ return None
+
+ assert image
+ image.security_indexed = False
+ image.security_indexed_engine = IMAGE_NOT_SCANNED_ENGINE_VERSION
+ image.save()
+
+ def backfill_manifest_for_tag(self, tag):
+ """ Backfills a manifest for the V1 tag specified.
+ If a manifest already exists for the tag, returns that manifest.
+
+ NOTE: This method will only be necessary until we've completed the backfill, at which point
+ it should be removed.
+ """
+ # Nothing to do for OCI tags.
+ manifest = tag.manifest
+ if manifest is None:
+ return None
+
+ legacy_image = oci.shared.get_legacy_image_for_manifest(manifest)
+ return Manifest.for_manifest(manifest, LegacyImage.for_image(legacy_image))
+
+ def list_manifest_layers(self, manifest, storage, include_placements=False):
+ try:
+ manifest_obj = database.Manifest.get(id=manifest._db_id)
+ except database.Manifest.DoesNotExist:
+ logger.exception('Could not find manifest for manifest `%s`', manifest._db_id)
+ return None
+
+ try:
+ parsed = manifest.get_parsed_manifest()
+ except ManifestException:
+ logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
+ return None
+
+ return self._list_manifest_layers(manifest_obj.repository_id, parsed, storage,
+ include_placements, by_manifest=True)
+
+ def lookup_derived_image(self, manifest, verb, storage, varying_metadata=None,
+ include_placements=False):
+ """
+ Looks up the derived image for the given manifest, verb and optional varying metadata and
+ returns it or None if none.
+ """
+ legacy_image = self._get_legacy_compatible_image_for_manifest(manifest, storage)
+ if legacy_image is None:
+ return None
+
+ derived = model.image.find_derived_storage_for_image(legacy_image, verb, varying_metadata)
+ return self._build_derived(derived, verb, varying_metadata, include_placements)
+
+ def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
+ varying_metadata=None,
+ include_placements=False):
+ """
+    Looks up the derived image for the given manifest, verb and optional varying metadata
+ and returns it. If none exists, a new derived image is created.
+ """
+ legacy_image = self._get_legacy_compatible_image_for_manifest(manifest, storage)
+ if legacy_image is None:
+ return None
+
+ derived = model.image.find_or_create_derived_storage(legacy_image, verb, storage_location,
+ varying_metadata)
+ return self._build_derived(derived, verb, varying_metadata, include_placements)
+
+ def set_tags_expiration_for_manifest(self, manifest, expiration_sec):
+ """
+ Sets the expiration on all tags that point to the given manifest to that specified.
+ """
+ oci.tag.set_tag_expiration_sec_for_manifest(manifest._db_id, expiration_sec)
+
+ def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
+ """ Returns the schema 1 manifest for this manifest, or None if none. """
+ try:
+ parsed = manifest.get_parsed_manifest()
+ except ManifestException:
+ return None
+
+ try:
+ manifest_row = database.Manifest.get(id=manifest._db_id)
+ except database.Manifest.DoesNotExist:
+ return None
+
+ retriever = RepositoryContentRetriever(manifest_row.repository_id, storage)
+ return parsed.get_schema1_manifest(namespace_name, repo_name, tag_name, retriever)
+
+ def convert_manifest(self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes,
+ storage):
+ try:
+ parsed = manifest.get_parsed_manifest()
+ except ManifestException:
+ return None
+
+ try:
+ manifest_row = database.Manifest.get(id=manifest._db_id)
+ except database.Manifest.DoesNotExist:
+ return None
+
+ retriever = RepositoryContentRetriever(manifest_row.repository_id, storage)
+ return parsed.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name,
+ retriever)
+
+ def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
+ expiration_sec, storage):
+ """ Creates a manifest under the repository and sets a temporary tag to point to it.
+ Returns the manifest object created or None on error.
+ """
+ # Get or create the manifest itself. get_or_create_manifest will take care of the
+ # temporary tag work.
+ created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id,
+ manifest_interface_instance,
+ storage,
+ temp_tag_expiration_sec=expiration_sec)
+ if created_manifest is None:
+ return None
+
+ legacy_image = oci.shared.get_legacy_image_for_manifest(created_manifest.manifest)
+ li = LegacyImage.for_image(legacy_image)
+ return Manifest.for_manifest(created_manifest.manifest, li)
+
+ def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
+ """
+    Returns the blob in the repository with the given digest, if any, or None if none. Note that
+ there may be multiple records in the same repository for the same blob digest, so the return
+ value of this function may change.
+ """
+ image_storage = self._get_shared_storage(blob_digest)
+ if image_storage is None:
+ image_storage = oci.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest)
+ if image_storage is None:
+ return None
+
+ assert image_storage.cas_path is not None
+
+ placements = None
+ if include_placements:
+ placements = list(model.storage.get_storage_locations(image_storage.uuid))
+
+ return Blob.for_image_storage(image_storage,
+ storage_path=model.storage.get_layer_path(image_storage),
+ placements=placements)
+
+ def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
+ include_placements=False):
+ """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
+ and working towards the leaf, including the associated Blob and its placements
+ (if specified).
+ """
+ return self._list_manifest_layers(repository_ref._db_id, parsed_manifest, storage,
+ include_placements=include_placements,
+ by_manifest=True)
+
+ def get_manifest_local_blobs(self, manifest, include_placements=False):
+ """ Returns the set of local blobs for the given manifest or None if none. """
+ try:
+ manifest_row = database.Manifest.get(id=manifest._db_id)
+ except database.Manifest.DoesNotExist:
+ return None
+
+ return self._get_manifest_local_blobs(manifest, manifest_row.repository_id, include_placements,
+ by_manifest=True)
+
+ def yield_tags_for_vulnerability_notification(self, layer_id_pairs):
+ """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories
+ which have been registered for vulnerability_found notifications. Returns an iterator
+ of LikelyVulnerableTag instances.
+ """
+ for docker_image_id, storage_uuid in layer_id_pairs:
+ tags = oci.tag.lookup_notifiable_tags_for_legacy_image(docker_image_id, storage_uuid,
+ 'vulnerability_found')
+ for tag in tags:
+ yield LikelyVulnerableTag.for_tag(tag, tag.repository, docker_image_id, storage_uuid)
+
+oci_model = OCIModel()
+back_compat_oci_model = OCIModel(oci_model_only=False)
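+
+# For reference: `oci_model` treats the OCI tables as the sole source of truth, while
+# `back_compat_oci_model` passes adjust_old_model=True through its writes so the legacy
+# tag/manifest rows can be kept in sync. SplitModel (modelsplitter.py) decides which of
+# the two, if either, handles a given namespace.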
diff --git a/data/registry_model/registry_pre_oci_model.py b/data/registry_model/registry_pre_oci_model.py
new file mode 100644
index 000000000..ec69328d5
--- /dev/null
+++ b/data/registry_model/registry_pre_oci_model.py
@@ -0,0 +1,694 @@
+# pylint: disable=protected-access
+import logging
+
+from contextlib import contextmanager
+
+from peewee import IntegrityError, fn
+
+from data import database
+from data import model
+from data.database import db_transaction, IMAGE_NOT_SCANNED_ENGINE_VERSION
+from data.registry_model.interface import RegistryDataInterface
+from data.registry_model.datatypes import (Tag, Manifest, LegacyImage, Label, SecurityScanStatus,
+ Blob, RepositoryReference, ShallowTag,
+ LikelyVulnerableTag)
+from data.registry_model.shared import SharedModel
+from data.registry_model.label_handlers import apply_label_to_manifest
+from image.docker.schema1 import ManifestException, DockerSchema1Manifest
+from util.validation import is_json
+
+
+logger = logging.getLogger(__name__)
+
+
+class PreOCIModel(SharedModel, RegistryDataInterface):
+ """
+ PreOCIModel implements the data model for the registry API using a database schema
+ before it was changed to support the OCI specification.
+ """
+ def supports_schema2(self, namespace_name):
+ """ Returns whether the implementation of the data interface supports schema 2 format
+ manifests. """
+ return False
+
+ def get_tag_legacy_image_id(self, repository_ref, tag_name, storage):
+ """ Returns the legacy image ID for the tag with a legacy images in
+ the repository. Returns None if None.
+ """
+ tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True)
+ if tag is None:
+ return None
+
+ return tag.legacy_image.docker_image_id
+
+ def get_legacy_tags_map(self, repository_ref, storage):
+ """ Returns a map from tag name to its legacy image, for all tags with legacy images in
+ the repository.
+ """
+ tags = self.list_all_active_repository_tags(repository_ref, include_legacy_images=True)
+ return {tag.name: tag.legacy_image.docker_image_id for tag in tags}
+
+ def find_matching_tag(self, repository_ref, tag_names):
+ """ Finds an alive tag in the repository matching one of the given tag names and returns it
+ or None if none.
+ """
+ found_tag = model.tag.find_matching_tag(repository_ref._db_id, tag_names)
+ assert found_tag is None or not found_tag.hidden
+ return Tag.for_repository_tag(found_tag)
+
+ def get_most_recent_tag(self, repository_ref):
+ """ Returns the most recently pushed alive tag in the repository, if any. If none, returns
+ None.
+ """
+ found_tag = model.tag.get_most_recent_tag(repository_ref._db_id)
+ assert found_tag is None or not found_tag.hidden
+ return Tag.for_repository_tag(found_tag)
+
+ def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):
+ """ Returns the manifest associated with the given tag. """
+ try:
+ tag_manifest = database.TagManifest.get(tag_id=tag._db_id)
+ except database.TagManifest.DoesNotExist:
+ if backfill_if_necessary:
+ return self.backfill_manifest_for_tag(tag)
+
+ return None
+
+ return Manifest.for_tag_manifest(tag_manifest)
+
+ def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False,
+ include_legacy_image=False, require_available=False):
+ """ Looks up the manifest with the given digest under the given repository and returns it
+ or None if none. """
+ repo = model.repository.lookup_repository(repository_ref._db_id)
+ if repo is None:
+ return None
+
+ try:
+ tag_manifest = model.tag.load_manifest_by_digest(repo.namespace_user.username,
+ repo.name,
+ manifest_digest,
+ allow_dead=allow_dead)
+ except model.tag.InvalidManifestException:
+ return None
+
+ legacy_image = None
+ if include_legacy_image:
+ legacy_image = self.get_legacy_image(repository_ref, tag_manifest.tag.image.docker_image_id,
+ include_parents=True)
+
+ return Manifest.for_tag_manifest(tag_manifest, legacy_image)
+
+ def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name,
+ storage, raise_on_error=False):
+ """ Creates a manifest in a repository, adding all of the necessary data in the model.
+
+ The `manifest_interface_instance` parameter must be an instance of the manifest
+ interface as returned by the image/docker package.
+
+ Note that all blobs referenced by the manifest must exist under the repository or this
+ method will fail and return None.
+
+ Returns a reference to the (created manifest, tag) or (None, None) on error.
+ """
+ # NOTE: Only Schema1 is supported by the pre_oci_model.
+ assert isinstance(manifest_interface_instance, DockerSchema1Manifest)
+ if not manifest_interface_instance.layers:
+ return None, None
+
+ # Ensure all the blobs in the manifest exist.
+ digests = manifest_interface_instance.checksums
+ query = self._lookup_repo_storages_by_content_checksum(repository_ref._db_id, digests)
+ blob_map = {s.content_checksum: s for s in query}
+ for layer in manifest_interface_instance.layers:
+ digest_str = str(layer.digest)
+ if digest_str not in blob_map:
+ return None, None
+
+ # Lookup all the images and their parent images (if any) inside the manifest.
+ # This will let us know which v1 images we need to synthesize and which ones are invalid.
+ docker_image_ids = list(manifest_interface_instance.legacy_image_ids)
+ images_query = model.image.lookup_repository_images(repository_ref._db_id, docker_image_ids)
+ image_storage_map = {i.docker_image_id: i.storage for i in images_query}
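+    # Shape (illustrative): {'<v1 docker image id>': <ImageStorage row>}. Any rewritten image
+    # ID missing from this map is synthesized as a new V1 image below.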
+
+ # Rewrite any v1 image IDs that do not match the checksum in the database.
+ try:
+ rewritten_images = manifest_interface_instance.rewrite_invalid_image_ids(image_storage_map)
+ rewritten_images = list(rewritten_images)
+ parent_image_map = {}
+
+ for rewritten_image in rewritten_images:
+        if rewritten_image.image_id not in image_storage_map:
+ parent_image = None
+ if rewritten_image.parent_image_id:
+ parent_image = parent_image_map.get(rewritten_image.parent_image_id)
+ if parent_image is None:
+ parent_image = model.image.get_image(repository_ref._db_id,
+ rewritten_image.parent_image_id)
+ if parent_image is None:
+ return None, None
+
+ synthesized = model.image.synthesize_v1_image(
+ repository_ref._db_id,
+ blob_map[rewritten_image.content_checksum].id,
+ blob_map[rewritten_image.content_checksum].image_size,
+ rewritten_image.image_id,
+ rewritten_image.created,
+ rewritten_image.comment,
+ rewritten_image.command,
+ rewritten_image.compat_json,
+ parent_image,
+ )
+
+ parent_image_map[rewritten_image.image_id] = synthesized
+ except ManifestException:
+ logger.exception("exception when rewriting v1 metadata")
+ return None, None
+
+ # Store the manifest pointing to the tag.
+ leaf_layer_id = rewritten_images[-1].image_id
+ tag_manifest, newly_created = model.tag.store_tag_manifest_for_repo(repository_ref._db_id,
+ tag_name,
+ manifest_interface_instance,
+ leaf_layer_id,
+ blob_map)
+
+ manifest = Manifest.for_tag_manifest(tag_manifest)
+
+ # Save the labels on the manifest.
+ repo_tag = tag_manifest.tag
+ if newly_created:
+ has_labels = False
+ with self.batch_create_manifest_labels(manifest) as add_label:
+ if add_label is None:
+ return None, None
+
+ for key, value in manifest_interface_instance.layers[-1].v1_metadata.labels.iteritems():
+ media_type = 'application/json' if is_json(value) else 'text/plain'
+ add_label(key, value, 'manifest', media_type)
+ has_labels = True
+
+ # Reload the tag in case any updates were applied.
+ if has_labels:
+ repo_tag = database.RepositoryTag.get(id=repo_tag.id)
+
+ return manifest, Tag.for_repository_tag(repo_tag)
+
+ def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None):
+ """ Creates a label on the manifest with the given key and value. """
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ return None
+
+ label_data = dict(key=key, value=value, source_type_name=source_type_name,
+ media_type_name=media_type_name)
+
+ with db_transaction():
+ # Create the label itself.
+ label = model.label.create_manifest_label(tag_manifest, key, value, source_type_name,
+ media_type_name)
+
+ # Apply any changes to the manifest that the label prescribes.
+ apply_label_to_manifest(label_data, manifest, self)
+
+ return Label.for_label(label)
+
+ @contextmanager
+ def batch_create_manifest_labels(self, manifest):
+ """ Returns a context manager for batch creation of labels on a manifest.
+
+ Can raise InvalidLabelKeyException or InvalidMediaTypeException depending
+ on the validation errors.
+ """
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ yield None
+ return
+
+ labels_to_add = []
+ def add_label(key, value, source_type_name, media_type_name=None):
+ labels_to_add.append(dict(key=key, value=value, source_type_name=source_type_name,
+ media_type_name=media_type_name))
+
+ yield add_label
+
+ # TODO: make this truly batch once we've fully transitioned to V2_2 and no longer need
+ # the mapping tables.
+ for label in labels_to_add:
+ with db_transaction():
+ # Create the label itself.
+ model.label.create_manifest_label(tag_manifest, **label)
+
+ # Apply any changes to the manifest that the label prescribes.
+ apply_label_to_manifest(label, manifest, self)
+
+ def list_manifest_labels(self, manifest, key_prefix=None):
+ """ Returns all labels found on the manifest. If specified, the key_prefix will filter the
+ labels returned to those keys that start with the given prefix.
+ """
+ labels = model.label.list_manifest_labels(manifest._db_id, prefix_filter=key_prefix)
+ return [Label.for_label(l) for l in labels]
+
+ def get_manifest_label(self, manifest, label_uuid):
+ """ Returns the label with the specified UUID on the manifest or None if none. """
+ return Label.for_label(model.label.get_manifest_label(label_uuid, manifest._db_id))
+
+ def delete_manifest_label(self, manifest, label_uuid):
+ """ Delete the label with the specified UUID on the manifest. Returns the label deleted
+ or None if none.
+ """
+ return Label.for_label(model.label.delete_manifest_label(label_uuid, manifest._db_id))
+
+ def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
+ """
+    Returns a page of active tags in a repository. Note that the tags returned by this method
+ are ShallowTag objects, which only contain the tag name.
+ """
+ tags = model.tag.list_active_repo_tags(repository_ref._db_id, include_images=False,
+ start_id=start_pagination_id, limit=limit)
+ return [ShallowTag.for_repository_tag(tag) for tag in tags]
+
+ def list_all_active_repository_tags(self, repository_ref, include_legacy_images=False):
+ """
+ Returns a list of all the active tags in the repository. Note that this is a *HEAVY*
+ operation on repositories with a lot of tags, and should only be used for testing or
+ where other more specific operations are not possible.
+ """
+ if not include_legacy_images:
+ tags = model.tag.list_active_repo_tags(repository_ref._db_id, include_images=False)
+ return [Tag.for_repository_tag(tag) for tag in tags]
+
+ tags = model.tag.list_active_repo_tags(repository_ref._db_id)
+ return [Tag.for_repository_tag(tag,
+ legacy_image=LegacyImage.for_image(tag.image),
+ manifest_digest=(tag.tagmanifest.digest
+ if hasattr(tag, 'tagmanifest')
+ else None))
+ for tag in tags]
+
+ def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None,
+ active_tags_only=False, since_time_ms=None):
+ """
+ Returns the history of all tags in the repository (unless filtered). This includes tags that
+    have been made inactive due to newer versions of those tags coming into service.
+ """
+
+ # Only available on OCI model
+ if since_time_ms is not None:
+ raise NotImplementedError
+
+ tags, manifest_map, has_more = model.tag.list_repository_tag_history(repository_ref._db_id,
+ page, size,
+ specific_tag_name,
+ active_tags_only)
+ return [Tag.for_repository_tag(tag, manifest_map.get(tag.id),
+ legacy_image=LegacyImage.for_image(tag.image))
+ for tag in tags], has_more
+
+ def has_expired_tag(self, repository_ref, tag_name):
+ """
+ Returns true if and only if the repository contains a tag with the given name that is expired.
+ """
+ try:
+ model.tag.get_expired_tag_in_repo(repository_ref._db_id, tag_name)
+ return True
+ except database.RepositoryTag.DoesNotExist:
+ return False
+
+ def get_most_recent_tag_lifetime_start(self, repository_refs):
+ """
+    Returns a map from repository ID to the last modified time (in seconds) for each repository in the
+ given repository reference list.
+ """
+ if not repository_refs:
+ return {}
+
+ tuples = (database.RepositoryTag.select(database.RepositoryTag.repository,
+ fn.Max(database.RepositoryTag.lifetime_start_ts))
+ .where(database.RepositoryTag.repository << [r.id for r in repository_refs])
+ .group_by(database.RepositoryTag.repository)
+ .tuples())
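+    # Each tuple is (repository_id, max(lifetime_start_ts)); the values here are already in
+    # seconds, unlike the millisecond-based OCI implementation.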
+
+ return {repo_id: seconds for repo_id, seconds in tuples}
+
+ def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False):
+ """
+ Returns the latest, *active* tag found in the repository, with the matching name
+ or None if none.
+ """
+ assert isinstance(tag_name, basestring)
+ tag = model.tag.get_active_tag_for_repo(repository_ref._db_id, tag_name)
+ if tag is None:
+ return None
+
+ legacy_image = LegacyImage.for_image(tag.image) if include_legacy_image else None
+ tag_manifest = model.tag.get_tag_manifest(tag)
+ manifest_digest = tag_manifest.digest if tag_manifest else None
+ return Tag.for_repository_tag(tag, legacy_image=legacy_image, manifest_digest=manifest_digest)
+
+ def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, storage,
+ legacy_manifest_key, is_reversion=False):
+ """
+ Creates, updates or moves a tag to a new entry in history, pointing to the manifest or
+ legacy image specified. If is_reversion is set to True, this operation is considered a
+ reversion over a previous tag move operation. Returns the updated Tag or None on error.
+ """
+ # TODO: unify this.
+ assert legacy_manifest_key is not None
+ if not is_reversion:
+ if isinstance(manifest_or_legacy_image, Manifest):
+ raise NotImplementedError('Not yet implemented')
+ else:
+ model.tag.create_or_update_tag_for_repo(repository_ref._db_id, tag_name,
+ manifest_or_legacy_image.docker_image_id)
+ else:
+ if isinstance(manifest_or_legacy_image, Manifest):
+ model.tag.restore_tag_to_manifest(repository_ref._db_id, tag_name,
+ manifest_or_legacy_image.digest)
+ else:
+ model.tag.restore_tag_to_image(repository_ref._db_id, tag_name,
+ manifest_or_legacy_image.docker_image_id)
+
+ # Generate a manifest for the tag, if necessary.
+ tag = self.get_repo_tag(repository_ref, tag_name, include_legacy_image=True)
+ if tag is None:
+ return None
+
+ self.backfill_manifest_for_tag(tag)
+ return tag
+
+ def delete_tag(self, repository_ref, tag_name):
+ """
+ Deletes the latest, *active* tag with the given name in the repository.
+ """
+ repo = model.repository.lookup_repository(repository_ref._db_id)
+ if repo is None:
+ return None
+
+ deleted_tag = model.tag.delete_tag(repo.namespace_user.username, repo.name, tag_name)
+ return Tag.for_repository_tag(deleted_tag)
+
+ def delete_tags_for_manifest(self, manifest):
+ """
+ Deletes all tags pointing to the given manifest, making the manifest inaccessible for pulling.
+ Returns the tags deleted, if any. Returns None on error.
+ """
+ try:
+ tagmanifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ return None
+
+ namespace_name = tagmanifest.tag.repository.namespace_user.username
+ repo_name = tagmanifest.tag.repository.name
+ tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, manifest.digest)
+ return [Tag.for_repository_tag(tag) for tag in tags]
+
+ def change_repository_tag_expiration(self, tag, expiration_date):
+ """ Sets the expiration date of the tag under the matching repository to that given. If the
+ expiration date is None, then the tag will not expire. Returns a tuple of the previous
+ expiration timestamp in seconds (if any), and whether the operation succeeded.
+ """
+ try:
+ tag_obj = database.RepositoryTag.get(id=tag._db_id)
+ except database.RepositoryTag.DoesNotExist:
+ return (None, False)
+
+ return model.tag.change_tag_expiration(tag_obj, expiration_date)
+
+ def get_legacy_images_owned_by_tag(self, tag):
+ """ Returns all legacy images *solely owned and used* by the given tag. """
+ try:
+ tag_obj = database.RepositoryTag.get(id=tag._db_id)
+ except database.RepositoryTag.DoesNotExist:
+ return None
+
+ # Collect the IDs of all images that the tag uses.
+ tag_image_ids = set()
+ tag_image_ids.add(tag_obj.image.id)
+ tag_image_ids.update(tag_obj.image.ancestor_id_list())
+
+ # Remove any images shared by other tags.
+ for current_tag in model.tag.list_active_repo_tags(tag_obj.repository_id):
+ if current_tag == tag_obj:
+ continue
+
+ tag_image_ids.discard(current_tag.image.id)
+ tag_image_ids = tag_image_ids.difference(current_tag.image.ancestor_id_list())
+ if not tag_image_ids:
+ return []
+
+ if not tag_image_ids:
+ return []
+
+ # Load the images we need to return.
+ images = database.Image.select().where(database.Image.id << list(tag_image_ids))
+ all_image_ids = set()
+ for image in images:
+ all_image_ids.add(image.id)
+ all_image_ids.update(image.ancestor_id_list())
+
+ # Build a map of all the images and their parents.
+ images_map = {}
+ all_images = database.Image.select().where(database.Image.id << list(all_image_ids))
+ for image in all_images:
+ images_map[image.id] = image
+
+ return [LegacyImage.for_image(image, images_map=images_map) for image in images]
+
+ def get_security_status(self, manifest_or_legacy_image):
+ """ Returns the security status for the given manifest or legacy image or None if none. """
+ image = None
+
+ if isinstance(manifest_or_legacy_image, Manifest):
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest_or_legacy_image._db_id)
+ image = tag_manifest.tag.image
+ except database.TagManifest.DoesNotExist:
+ return None
+ else:
+ try:
+ image = database.Image.get(id=manifest_or_legacy_image._db_id)
+ except database.Image.DoesNotExist:
+ return None
+
+ if image.security_indexed_engine is not None and image.security_indexed_engine >= 0:
+ return SecurityScanStatus.SCANNED if image.security_indexed else SecurityScanStatus.FAILED
+
+ return SecurityScanStatus.QUEUED
+
+ def reset_security_status(self, manifest_or_legacy_image):
+ """ Resets the security status for the given manifest or legacy image, ensuring that it will
+ get re-indexed.
+ """
+ image = None
+
+ if isinstance(manifest_or_legacy_image, Manifest):
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest_or_legacy_image._db_id)
+ image = tag_manifest.tag.image
+ except database.TagManifest.DoesNotExist:
+ return None
+ else:
+ try:
+ image = database.Image.get(id=manifest_or_legacy_image._db_id)
+ except database.Image.DoesNotExist:
+ return None
+
+ assert image
+ image.security_indexed = False
+ image.security_indexed_engine = IMAGE_NOT_SCANNED_ENGINE_VERSION
+ image.save()
+
+ def backfill_manifest_for_tag(self, tag):
+ """ Backfills a manifest for the V1 tag specified.
+ If a manifest already exists for the tag, returns that manifest.
+
+ NOTE: This method will only be necessary until we've completed the backfill, at which point
+ it should be removed.
+ """
+ # Ensure that there isn't already a manifest for the tag.
+ tag_manifest = model.tag.get_tag_manifest(tag._db_id)
+ if tag_manifest is not None:
+ return Manifest.for_tag_manifest(tag_manifest)
+
+ # Create the manifest.
+ try:
+ tag_obj = database.RepositoryTag.get(id=tag._db_id)
+ except database.RepositoryTag.DoesNotExist:
+ return None
+
+ assert not tag_obj.hidden
+
+ repo = tag_obj.repository
+
+ # Write the manifest to the DB.
+ manifest = self._build_manifest_for_legacy_image(tag_obj.name, tag_obj.image)
+ if manifest is None:
+ return None
+
+ blob_query = self._lookup_repo_storages_by_content_checksum(repo, manifest.checksums)
+ storage_map = {blob.content_checksum: blob.id for blob in blob_query}
+ try:
+ tag_manifest = model.tag.associate_generated_tag_manifest_with_tag(tag_obj, manifest,
+ storage_map)
+ assert tag_manifest
+ except IntegrityError:
+ tag_manifest = model.tag.get_tag_manifest(tag_obj)
+
+ return Manifest.for_tag_manifest(tag_manifest)
+
+ def list_manifest_layers(self, manifest, storage, include_placements=False):
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
+ return None
+
+ try:
+ parsed = manifest.get_parsed_manifest()
+ except ManifestException:
+ logger.exception('Could not parse and validate manifest `%s`', manifest._db_id)
+ return None
+
+ repo_ref = RepositoryReference.for_id(tag_manifest.tag.repository_id)
+ return self.list_parsed_manifest_layers(repo_ref, parsed, storage, include_placements)
+
+ def lookup_derived_image(self, manifest, verb, storage, varying_metadata=None,
+ include_placements=False):
+ """
+ Looks up the derived image for the given manifest, verb and optional varying metadata and
+ returns it or None if none.
+ """
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
+ return None
+
+ repo_image = tag_manifest.tag.image
+ derived = model.image.find_derived_storage_for_image(repo_image, verb, varying_metadata)
+ return self._build_derived(derived, verb, varying_metadata, include_placements)
+
+ def lookup_or_create_derived_image(self, manifest, verb, storage_location, storage,
+ varying_metadata=None, include_placements=False):
+ """
+    Looks up the derived image for the given manifest, verb and optional varying metadata
+ and returns it. If none exists, a new derived image is created.
+ """
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id)
+ return None
+
+ repo_image = tag_manifest.tag.image
+ derived = model.image.find_or_create_derived_storage(repo_image, verb, storage_location,
+ varying_metadata)
+ return self._build_derived(derived, verb, varying_metadata, include_placements)
+
+ def set_tags_expiration_for_manifest(self, manifest, expiration_sec):
+ """
+ Sets the expiration on all tags that point to the given manifest to that specified.
+ """
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ return
+
+ model.tag.set_tag_expiration_for_manifest(tag_manifest, expiration_sec)
+
+ def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):
+ """ Returns the schema 1 version of this manifest, or None if none. """
+ try:
+ return manifest.get_parsed_manifest()
+ except ManifestException:
+ return None
+
+ def convert_manifest(self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes,
+ storage):
+ try:
+ parsed = manifest.get_parsed_manifest()
+ except ManifestException:
+ return None
+
+ try:
+ return parsed.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name, None)
+ except ManifestException:
+ return None
+
+ def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance,
+ expiration_sec, storage):
+ """ Creates a manifest under the repository and sets a temporary tag to point to it.
+ Returns the manifest object created or None on error.
+ """
+ raise NotImplementedError('Unsupported in pre OCI model')
+
+ def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
+ """
+    Returns the blob in the repository with the given digest, if any, or None if none. Note that
+ there may be multiple records in the same repository for the same blob digest, so the return
+ value of this function may change.
+ """
+ image_storage = self._get_shared_storage(blob_digest)
+ if image_storage is None:
+ try:
+ image_storage = model.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest)
+ except model.BlobDoesNotExist:
+ return None
+
+ assert image_storage.cas_path is not None
+
+ placements = None
+ if include_placements:
+ placements = list(model.storage.get_storage_locations(image_storage.uuid))
+
+ return Blob.for_image_storage(image_storage,
+ storage_path=model.storage.get_layer_path(image_storage),
+ placements=placements)
+
+ def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage,
+ include_placements=False):
+ """ Returns an *ordered list* of the layers found in the parsed manifest, starting at the base
+ and working towards the leaf, including the associated Blob and its placements
+ (if specified).
+ """
+ return self._list_manifest_layers(repository_ref._db_id, parsed_manifest, storage,
+ include_placements=include_placements)
+
+ def get_manifest_local_blobs(self, manifest, include_placements=False):
+ """ Returns the set of local blobs for the given manifest or None if none. """
+ try:
+ tag_manifest = database.TagManifest.get(id=manifest._db_id)
+ except database.TagManifest.DoesNotExist:
+ return None
+
+ return self._get_manifest_local_blobs(manifest, tag_manifest.tag.repository_id,
+ include_placements)
+
+ def yield_tags_for_vulnerability_notification(self, layer_id_pairs):
+ """ Yields tags that contain one (or more) of the given layer ID pairs, in repositories
+ which have been registered for vulnerability_found notifications. Returns an iterator
+ of LikelyVulnerableTag instances.
+ """
+ event = database.ExternalNotificationEvent.get(name='vulnerability_found')
+
+ def filter_notifying_repos(query):
+ return model.tag.filter_has_repository_event(query, event)
+
+ def filter_and_order(query):
+ return model.tag.filter_tags_have_repository_event(query, event)
+
+ # Find the matching tags.
+ tags = model.tag.get_matching_tags_for_images(layer_id_pairs,
+ selections=[database.RepositoryTag,
+ database.Image,
+ database.ImageStorage],
+ filter_images=filter_notifying_repos,
+ filter_tags=filter_and_order)
+ for tag in tags:
+ yield LikelyVulnerableTag.for_repository_tag(tag, tag.repository)
+
+
+pre_oci_model = PreOCIModel()
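+
+# For reference: `pre_oci_model` is the schema1-only implementation that SplitModel
+# (modelsplitter.py) falls back to for namespaces not routed to the OCI data model.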
diff --git a/data/registry_model/shared.py b/data/registry_model/shared.py
new file mode 100644
index 000000000..82a01aa67
--- /dev/null
+++ b/data/registry_model/shared.py
@@ -0,0 +1,509 @@
+# pylint: disable=protected-access
+import logging
+
+from abc import abstractmethod
+from collections import defaultdict
+
+from data import database
+from data import model
+from data.cache import cache_key
+from data.model.oci.retriever import RepositoryContentRetriever
+from data.model.blob import get_shared_blob
+from data.registry_model.datatype import FromDictionaryException
+from data.registry_model.datatypes import (RepositoryReference, Blob, TorrentInfo, BlobUpload,
+ LegacyImage, ManifestLayer, DerivedImage, ShallowTag)
+from image.docker.schema1 import ManifestException, DockerSchema1ManifestBuilder
+from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST
+
+logger = logging.getLogger(__name__)
+
+# The maximum size for a generated manifest, after which we remove extra metadata.
+MAXIMUM_GENERATED_MANIFEST_SIZE = 3 * 1024 * 1024 # 3 MB
+
+class SharedModel:
+ """
+ SharedModel implements those data model operations for the registry API that are unchanged
+ between the old and new data models.
+ """
+ def lookup_repository(self, namespace_name, repo_name, kind_filter=None):
+ """ Looks up and returns a reference to the repository with the given namespace and name,
+ or None if none. """
+ repo = model.repository.get_repository(namespace_name, repo_name, kind_filter=kind_filter)
+ state = repo.state if repo is not None else None
+ return RepositoryReference.for_repo_obj(repo, namespace_name, repo_name,
+ repo.namespace_user.stripe_id is None if repo else None,
+ state=state)
+
+ def is_existing_disabled_namespace(self, namespace_name):
+ """ Returns whether the given namespace exists and is disabled. """
+ namespace = model.user.get_namespace_user(namespace_name)
+ return namespace is not None and not namespace.enabled
+
+ def is_namespace_enabled(self, namespace_name):
+ """ Returns whether the given namespace exists and is enabled. """
+ namespace = model.user.get_namespace_user(namespace_name)
+ return namespace is not None and namespace.enabled
+
+ def get_derived_image_signature(self, derived_image, signer_name):
+ """
+ Returns the signature associated with the derived image and a specific signer or None if none.
+ """
+ try:
+ derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
+ except database.DerivedStorageForImage.DoesNotExist:
+ return None
+
+ storage = derived_storage.derivative
+ signature_entry = model.storage.lookup_storage_signature(storage, signer_name)
+ if signature_entry is None:
+ return None
+
+ return signature_entry.signature
+
+ def set_derived_image_signature(self, derived_image, signer_name, signature):
+ """
+ Sets the calculated signature for the given derived image and signer to that specified.
+ """
+ try:
+ derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
+ except database.DerivedStorageForImage.DoesNotExist:
+ return None
+
+ storage = derived_storage.derivative
+ signature_entry = model.storage.find_or_create_storage_signature(storage, signer_name)
+ signature_entry.signature = signature
+ signature_entry.uploading = False
+ signature_entry.save()
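+
+ # Illustrative usage sketch (not part of this change): callers holding a DerivedImage
+ # reference can round-trip a signature for a given signer (signature_bytes below is a
+ # hypothetical caller-side value):
+ #
+ #   registry_model.set_derived_image_signature(derived, 'gpg2', signature_bytes)
+ #   assert registry_model.get_derived_image_signature(derived, 'gpg2') == signature_bytes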
+
+ def delete_derived_image(self, derived_image):
+ """
+ Deletes a derived image and all of its storage.
+ """
+ try:
+ derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
+ except database.DerivedStorageForImage.DoesNotExist:
+ return None
+
+ model.image.delete_derived_storage(derived_storage)
+
+ def set_derived_image_size(self, derived_image, compressed_size):
+ """
+ Sets the compressed size on the given derived image.
+ """
+ try:
+ derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id)
+ except database.DerivedStorageForImage.DoesNotExist:
+ return None
+
+ storage_entry = derived_storage.derivative
+ storage_entry.image_size = compressed_size
+ storage_entry.uploading = False
+ storage_entry.save()
+
+ def get_torrent_info(self, blob):
+ """
+ Returns the torrent information associated with the given blob or None if none.
+ """
+ try:
+ image_storage = database.ImageStorage.get(id=blob._db_id)
+ except database.ImageStorage.DoesNotExist:
+ return None
+
+ try:
+ torrent_info = model.storage.get_torrent_info(image_storage)
+ except model.TorrentInfoDoesNotExist:
+ return None
+
+ return TorrentInfo.for_torrent_info(torrent_info)
+
+ def set_torrent_info(self, blob, piece_length, pieces):
+ """
+ Sets the torrent information associated with the given blob to that specified.
+ """
+ try:
+ image_storage = database.ImageStorage.get(id=blob._db_id)
+ except database.ImageStorage.DoesNotExist:
+ return None
+
+ torrent_info = model.storage.save_torrent_info(image_storage, piece_length, pieces)
+ return TorrentInfo.for_torrent_info(torrent_info)
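+
+ # Illustrative usage sketch (not part of this change): torrent info is set once per blob
+ # and read back later; setting it again for the same blob is a no-op (see the interface
+ # tests):
+ #
+ #   registry_model.set_torrent_info(blob, piece_length, pieces)
+ #   info = registry_model.get_torrent_info(blob)
+ #   assert info.piece_length == piece_length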
+
+ @abstractmethod
+ def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit):
+ pass
+
+ def lookup_cached_active_repository_tags(self, model_cache, repository_ref, start_pagination_id,
+ limit):
+ """
+ Returns a page of active tags in a repository. Note that the tags returned by this method
+ are ShallowTag objects, which only contain the tag name. This method will automatically cache
+ the result and check the cache before making a call.
+ """
+ def load_tags():
+ tags = self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit)
+ return [tag.asdict() for tag in tags]
+
+ tags_cache_key = cache_key.for_active_repo_tags(repository_ref._db_id, start_pagination_id,
+ limit)
+ result = model_cache.retrieve(tags_cache_key, load_tags)
+
+ try:
+ return [ShallowTag.from_dict(tag_dict) for tag_dict in result]
+ except FromDictionaryException:
+ return self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit)
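+
+ # The caching pattern above (shared by the other get_cached_* helpers) is: build a cache
+ # key, hand model_cache.retrieve() a loader that serializes results via asdict(), and fall
+ # back to a direct lookup if the cached dictionaries turn out to be stale. Illustrative
+ # caller sketch (not part of this change; model_cache, repo_ref and start_id are caller
+ # locals):
+ #
+ #   tags = registry_model.lookup_cached_active_repository_tags(model_cache, repo_ref,
+ #                                                               start_id, 100)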
+
+ def get_cached_namespace_region_blacklist(self, model_cache, namespace_name):
+ """ Returns a cached set of ISO country codes blacklisted for pulls for the namespace
+ or None if the list could not be loaded.
+ """
+
+ def load_blacklist():
+ restrictions = model.user.list_namespace_geo_restrictions(namespace_name)
+ if restrictions is None:
+ return None
+
+ return [restriction.restricted_region_iso_code for restriction in restrictions]
+
+ blacklist_cache_key = cache_key.for_namespace_geo_restrictions(namespace_name)
+ result = model_cache.retrieve(blacklist_cache_key, load_blacklist)
+ if result is None:
+ return None
+
+ return set(result)
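+
+ # Illustrative usage sketch (not part of this change): a pull handler could use the cached
+ # blacklist to reject requests from restricted regions (request_country_code is a
+ # hypothetical caller-side value):
+ #
+ #   blacklist = registry_model.get_cached_namespace_region_blacklist(model_cache, namespace)
+ #   if blacklist and request_country_code in blacklist:
+ #       ... reject the pull with a 403 ...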
+
+ def get_cached_repo_blob(self, model_cache, namespace_name, repo_name, blob_digest):
+ """
+ Returns the blob in the repository with the given digest if any or None if none.
+ Caches the result in the caching system.
+ """
+ def load_blob():
+ repository_ref = self.lookup_repository(namespace_name, repo_name)
+ if repository_ref is None:
+ return None
+
+ blob_found = self.get_repo_blob_by_digest(repository_ref, blob_digest,
+ include_placements=True)
+ if blob_found is None:
+ return None
+
+ return blob_found.asdict()
+
+ blob_cache_key = cache_key.for_repository_blob(namespace_name, repo_name, blob_digest, 2)
+ blob_dict = model_cache.retrieve(blob_cache_key, load_blob)
+
+ try:
+ return Blob.from_dict(blob_dict) if blob_dict is not None else None
+ except FromDictionaryException:
+ # The data was stale in some way. Simply reload.
+ repository_ref = self.lookup_repository(namespace_name, repo_name)
+ if repository_ref is None:
+ return None
+
+ return self.get_repo_blob_by_digest(repository_ref, blob_digest, include_placements=True)
+
+ @abstractmethod
+ def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False):
+ pass
+
+ def create_blob_upload(self, repository_ref, new_upload_id, location_name, storage_metadata):
+ """ Creates a new blob upload and returns a reference. If the blob upload could not be
+ created, returns None. """
+ repo = model.repository.lookup_repository(repository_ref._db_id)
+ if repo is None:
+ return None
+
+ try:
+ upload_record = model.blob.initiate_upload_for_repo(repo, new_upload_id, location_name,
+ storage_metadata)
+ return BlobUpload.for_upload(upload_record, location_name=location_name)
+ except database.Repository.DoesNotExist:
+ return None
+
+ def lookup_blob_upload(self, repository_ref, blob_upload_id):
+ """ Looks up the blob upload withn the given ID under the specified repository and returns it
+ or None if none.
+ """
+ upload_record = model.blob.get_blob_upload_by_uuid(blob_upload_id)
+ if upload_record is None:
+ return None
+
+ return BlobUpload.for_upload(upload_record)
+
+ def update_blob_upload(self, blob_upload, uncompressed_byte_count, piece_hashes, piece_sha_state,
+ storage_metadata, byte_count, chunk_count, sha_state):
+ """ Updates the fields of the blob upload to match those given. Returns the updated blob upload
+ or None if the record does not exist.
+ """
+ upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
+ if upload_record is None:
+ return None
+
+ upload_record.uncompressed_byte_count = uncompressed_byte_count
+ upload_record.piece_hashes = piece_hashes
+ upload_record.piece_sha_state = piece_sha_state
+ upload_record.storage_metadata = storage_metadata
+ upload_record.byte_count = byte_count
+ upload_record.chunk_count = chunk_count
+ upload_record.sha_state = sha_state
+ upload_record.save()
+ return BlobUpload.for_upload(upload_record)
+
+ def delete_blob_upload(self, blob_upload):
+ """ Deletes a blob upload record. """
+ upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
+ if upload_record is not None:
+ upload_record.delete_instance()
+
+ def commit_blob_upload(self, blob_upload, blob_digest_str, blob_expiration_seconds):
+ """ Commits the blob upload into a blob and sets an expiration before that blob will be GCed.
+ """
+ upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id)
+ if upload_record is None:
+ return None
+
+ repository_id = upload_record.repository_id
+
+ # Create the blob and temporarily tag it.
+ location_obj = model.storage.get_image_location_for_name(blob_upload.location_name)
+ blob_record = model.blob.store_blob_record_and_temp_link_in_repo(
+ repository_id, blob_digest_str, location_obj.id, blob_upload.byte_count,
+ blob_expiration_seconds, blob_upload.uncompressed_byte_count)
+
+ # Delete the blob upload.
+ upload_record.delete_instance()
+ return Blob.for_image_storage(blob_record,
+ storage_path=model.storage.get_layer_path(blob_record))
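+
+ # Illustrative lifecycle sketch (not part of this change), mirroring the blob upload tests:
+ # create an upload, stream chunks while updating its state, then commit it into a
+ # temporarily-linked blob:
+ #
+ #   upload = registry_model.create_blob_upload(repo_ref, str(uuid.uuid4()), 'local_us', {})
+ #   ... stream and track chunks, calling update_blob_upload as state changes ...
+ #   blob = registry_model.commit_blob_upload(upload, digest, blob_expiration_seconds=60)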
+
+ def mount_blob_into_repository(self, blob, target_repository_ref, expiration_sec):
+ """
+ Mounts the blob from another repository into the specified target repository, and adds an
+ expiration before that blob is automatically GCed. This function is useful during push
+ operations if an existing blob from another repository is being pushed. Returns False if
+ the mounting fails.
+ """
+ storage = model.blob.temp_link_blob(target_repository_ref._db_id, blob.digest, expiration_sec)
+ return bool(storage)
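+
+ # Illustrative usage sketch (not part of this change): during a cross-repository push, a
+ # blob already known in another repository can be mounted instead of re-uploaded:
+ #
+ #   if registry_model.mount_blob_into_repository(blob, target_repo_ref, expiration_sec=60):
+ #       ... skip uploading this digest ...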
+
+ def get_legacy_images(self, repository_ref):
+ """
+ Returns a list of all the LegacyImages defined in the matching repository.
+ """
+ repo = model.repository.lookup_repository(repository_ref._db_id)
+ if repo is None:
+ return None
+
+ all_images = model.image.get_repository_images_without_placements(repo)
+ all_images_map = {image.id: image for image in all_images}
+
+ all_tags = model.tag.list_repository_tags(repo.namespace_user.username, repo.name)
+ tags_by_image_id = defaultdict(list)
+ for tag in all_tags:
+ tags_by_image_id[tag.image_id].append(tag)
+
+ return [LegacyImage.for_image(image, images_map=all_images_map, tags_map=tags_by_image_id)
+ for image in all_images]
+
+ def get_legacy_image(self, repository_ref, docker_image_id, include_parents=False,
+ include_blob=False):
+ """
+ Returns the matching LegacyImage under the matching repository, if any. If none,
+ returns None.
+ """
+ repo = model.repository.lookup_repository(repository_ref._db_id)
+ if repo is None:
+ return None
+
+ image = model.image.get_image(repository_ref._db_id, docker_image_id)
+ if image is None:
+ return None
+
+ parent_images_map = None
+ if include_parents:
+ parent_images = model.image.get_parent_images(repo.namespace_user.username, repo.name, image)
+ parent_images_map = {image.id: image for image in parent_images}
+
+ blob = None
+ if include_blob:
+ placements = list(model.storage.get_storage_locations(image.storage.uuid))
+ blob = Blob.for_image_storage(image.storage,
+ storage_path=model.storage.get_layer_path(image.storage),
+ placements=placements)
+
+ return LegacyImage.for_image(image, images_map=parent_images_map, blob=blob)
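+
+ # Illustrative usage sketch (not part of this change):
+ #
+ #   image = registry_model.get_legacy_image(repo_ref, docker_image_id,
+ #                                           include_parents=True, include_blob=True)
+ #   if image is not None:
+ #       ancestry = [parent.docker_image_id for parent in image.parents]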
+
+ def _get_manifest_local_blobs(self, manifest, repo_id, include_placements=False,
+ by_manifest=False):
+ parsed = manifest.get_parsed_manifest()
+ if parsed is None:
+ return None
+
+ local_blob_digests = list(set(parsed.local_blob_digests))
+ if not local_blob_digests:
+ return []
+
+ blob_query = self._lookup_repo_storages_by_content_checksum(repo_id, local_blob_digests,
+ by_manifest=by_manifest)
+ blobs = []
+ for image_storage in blob_query:
+ placements = None
+ if include_placements:
+ placements = list(model.storage.get_storage_locations(image_storage.uuid))
+
+ blob = Blob.for_image_storage(image_storage,
+ storage_path=model.storage.get_layer_path(image_storage),
+ placements=placements)
+ blobs.append(blob)
+
+ return blobs
+
+ def _list_manifest_layers(self, repo_id, parsed, storage, include_placements=False,
+ by_manifest=False):
+ """ Returns an *ordered list* of the layers found in the manifest, starting at the base and
+ working towards the leaf, including the associated Blob and its placements (if specified).
+ Returns None if the manifest could not be parsed and validated.
+ """
+ assert not parsed.is_manifest_list
+
+ retriever = RepositoryContentRetriever(repo_id, storage)
+ requires_empty_blob = parsed.get_requires_empty_layer_blob(retriever)
+
+ storage_map = {}
+ blob_digests = list(parsed.local_blob_digests)
+ if requires_empty_blob:
+ blob_digests.append(EMPTY_LAYER_BLOB_DIGEST)
+
+ if blob_digests:
+ blob_query = self._lookup_repo_storages_by_content_checksum(repo_id, blob_digests,
+ by_manifest=by_manifest)
+ storage_map = {blob.content_checksum: blob for blob in blob_query}
+
+ layers = parsed.get_layers(retriever)
+ if layers is None:
+ logger.error('Could not load layers for manifest `%s`', parsed.digest)
+ return None
+
+ manifest_layers = []
+ for layer in layers:
+ if layer.is_remote:
+ manifest_layers.append(ManifestLayer(layer, None))
+ continue
+
+ digest_str = str(layer.blob_digest)
+ if digest_str not in storage_map:
+ logger.error('Missing digest `%s` for manifest `%s`', layer.blob_digest, parsed.digest)
+ return None
+
+ image_storage = storage_map[digest_str]
+ assert image_storage.cas_path is not None
+ assert image_storage.image_size is not None
+
+ placements = None
+ if include_placements:
+ placements = list(model.storage.get_storage_locations(image_storage.uuid))
+
+ blob = Blob.for_image_storage(image_storage,
+ storage_path=model.storage.get_layer_path(image_storage),
+ placements=placements)
+ manifest_layers.append(ManifestLayer(layer, blob))
+
+ return manifest_layers
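+
+ # Illustrative note (not part of this change): callers receive ManifestLayer entries in
+ # base-to-leaf order, and remote layers carry a None blob, so consumers typically branch
+ # on that (stream_blob below is a hypothetical consumer):
+ #
+ #   for manifest_layer in manifest_layers:
+ #       if manifest_layer.blob is None:  # remote layer
+ #           continue
+ #       stream_blob(manifest_layer.blob.storage_path)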
+
+ def _build_derived(self, derived, verb, varying_metadata, include_placements):
+ if derived is None:
+ return None
+
+ derived_storage = derived.derivative
+ placements = None
+ if include_placements:
+ placements = list(model.storage.get_storage_locations(derived_storage.uuid))
+
+ blob = Blob.for_image_storage(derived_storage,
+ storage_path=model.storage.get_layer_path(derived_storage),
+ placements=placements)
+
+ return DerivedImage.for_derived_storage(derived, verb, varying_metadata, blob)
+
+ def _build_manifest_for_legacy_image(self, tag_name, legacy_image_row):
+ import features
+
+ from app import app, docker_v2_signing_key
+
+ repo = legacy_image_row.repository
+ namespace_name = repo.namespace_user.username
+ repo_name = repo.name
+
+ # Find the v1 metadata for this image and its parents.
+ try:
+ parents = model.image.get_parent_images(namespace_name, repo_name, legacy_image_row)
+ except model.DataModelException:
+ logger.exception('Could not load parent images for legacy image %s', legacy_image_row.id)
+ return None
+
+ # If the manifest is being generated under the library namespace, then we make its namespace
+ # empty.
+ manifest_namespace = namespace_name
+ if features.LIBRARY_SUPPORT and namespace_name == app.config['LIBRARY_NAMESPACE']:
+ manifest_namespace = ''
+
+ # Create and populate the manifest builder
+ builder = DockerSchema1ManifestBuilder(manifest_namespace, repo_name, tag_name)
+
+ # Add the leaf layer
+ builder.add_layer(legacy_image_row.storage.content_checksum, legacy_image_row.v1_json_metadata)
+ if legacy_image_row.storage.uploading:
+ logger.error('Cannot add an uploading storage row: %s', legacy_image_row.storage.id)
+ return None
+
+ for parent_image in parents:
+ if parent_image.storage.uploading:
+ logger.error('Cannot add an uploading storage row: %s', parent_image.storage.id)
+ return None
+
+ builder.add_layer(parent_image.storage.content_checksum, parent_image.v1_json_metadata)
+
+ try:
+ built_manifest = builder.build(docker_v2_signing_key)
+
+ # If the generated manifest is greater than the maximum size, regenerate it with
+ # intermediate metadata layers stripped down to their bare essentials.
+ if len(built_manifest.bytes.as_encoded_str()) > MAXIMUM_GENERATED_MANIFEST_SIZE:
+ built_manifest = builder.with_metadata_removed().build(docker_v2_signing_key)
+
+ if len(built_manifest.bytes.as_encoded_str()) > MAXIMUM_GENERATED_MANIFEST_SIZE:
+ logger.error('Legacy image is too large to generate manifest')
+ return None
+
+ return built_manifest
+ except ManifestException:
+ logger.exception('Got exception when trying to build manifest for legacy image %s',
+ legacy_image_row)
+ return None
+
+ def _get_shared_storage(self, blob_digest):
+ """ Returns an ImageStorage row for the blob digest if it is a globally shared storage. """
+ # If the EMPTY_LAYER_BLOB_DIGEST is in the checksums, look it up directly. Since we have
+ # so many duplicate copies in the database currently, looking it up bound to a repository
+ # can be incredibly slow, and, since it is defined as a globally shared layer, this is extra
+ # work we don't need to do.
+ if blob_digest == EMPTY_LAYER_BLOB_DIGEST:
+ return get_shared_blob(EMPTY_LAYER_BLOB_DIGEST)
+
+ return None
+
+ def _lookup_repo_storages_by_content_checksum(self, repo, checksums, by_manifest=False):
+ checksums = set(checksums)
+
+ # Load any shared storages first.
+ extra_storages = []
+ for checksum in list(checksums):
+ shared_storage = self._get_shared_storage(checksum)
+ if shared_storage is not None:
+ extra_storages.append(shared_storage)
+ checksums.remove(checksum)
+
+ found = []
+ if checksums:
+ found = list(model.storage.lookup_repo_storages_by_content_checksum(repo, checksums,
+ by_manifest=by_manifest))
+ return found + extra_storages
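+
+ # Illustrative note (not part of this change): the helper above peels globally shared blobs
+ # (currently only the shared empty layer) out of the checksum set before the per-repository
+ # lookup, so callers can build a single checksum -> storage map, e.g.:
+ #
+ #   storages = self._lookup_repo_storages_by_content_checksum(repo_id, checksums)
+ #   storage_map = {s.content_checksum: s for s in storages}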
diff --git a/data/registry_model/test/test_blobuploader.py b/data/registry_model/test/test_blobuploader.py
new file mode 100644
index 000000000..8b539c617
--- /dev/null
+++ b/data/registry_model/test/test_blobuploader.py
@@ -0,0 +1,145 @@
+import hashlib
+import os
+import tarfile
+
+from io import BytesIO
+from contextlib import closing
+
+import pytest
+
+from data.registry_model.blobuploader import (retrieve_blob_upload_manager,
+ upload_blob, BlobUploadException,
+ BlobDigestMismatchException, BlobTooLargeException,
+ BlobUploadSettings)
+from data.registry_model.registry_pre_oci_model import PreOCIModel
+
+from storage.distributedstorage import DistributedStorage
+from storage.fakestorage import FakeStorage
+from test.fixtures import *
+
+@pytest.fixture()
+def pre_oci_model(initialized_db):
+ return PreOCIModel()
+
+@pytest.mark.parametrize('chunk_count', [
+ 0,
+ 1,
+ 2,
+ 10,
+])
+@pytest.mark.parametrize('subchunk', [
+ True,
+ False,
+])
+def test_basic_upload_blob(chunk_count, subchunk, pre_oci_model):
+ repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
+ storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+ settings = BlobUploadSettings('2M', 512 * 1024, 3600)
+ app_config = {'TESTING': True}
+
+ data = ''
+ with upload_blob(repository_ref, storage, settings) as manager:
+ assert manager
+ assert manager.blob_upload_id
+
+ for index in range(0, chunk_count):
+ chunk_data = os.urandom(100)
+ data += chunk_data
+
+ if subchunk:
+ manager.upload_chunk(app_config, BytesIO(chunk_data))
+ manager.upload_chunk(app_config, BytesIO(chunk_data), (index * 100) + 50)
+ else:
+ manager.upload_chunk(app_config, BytesIO(chunk_data))
+
+ blob = manager.commit_to_blob(app_config)
+
+ # Check the blob.
+ assert blob.compressed_size == len(data)
+ assert not blob.uploading
+ assert blob.digest == 'sha256:' + hashlib.sha256(data).hexdigest()
+
+ # Ensure the blob exists in storage and has the expected data.
+ assert storage.get_content(['local_us'], blob.storage_path) == data
+
+
+def test_cancel_upload(pre_oci_model):
+ repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
+ storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+ settings = BlobUploadSettings('2M', 512 * 1024, 3600)
+ app_config = {'TESTING': True}
+
+ blob_upload_id = None
+ with upload_blob(repository_ref, storage, settings) as manager:
+ blob_upload_id = manager.blob_upload_id
+ assert pre_oci_model.lookup_blob_upload(repository_ref, blob_upload_id) is not None
+
+ manager.upload_chunk(app_config, BytesIO('hello world'))
+
+ # Since the blob was not committed, the upload should be deleted.
+ assert blob_upload_id
+ assert pre_oci_model.lookup_blob_upload(repository_ref, blob_upload_id) is None
+
+
+def test_too_large(pre_oci_model):
+ repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
+ storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+ settings = BlobUploadSettings('1K', 512 * 1024, 3600)
+ app_config = {'TESTING': True}
+
+ with upload_blob(repository_ref, storage, settings) as manager:
+ with pytest.raises(BlobTooLargeException):
+ manager.upload_chunk(app_config, BytesIO(os.urandom(1024 * 1024 * 2)))
+
+
+def test_extra_blob_stream_handlers(pre_oci_model):
+ handler1_result = []
+ handler2_result = []
+
+ def handler1(bytes):
+ handler1_result.append(bytes)
+
+ def handler2(bytes):
+ handler2_result.append(bytes)
+
+ repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
+ storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+ settings = BlobUploadSettings('1K', 512 * 1024, 3600)
+ app_config = {'TESTING': True}
+
+ with upload_blob(repository_ref, storage, settings,
+ extra_blob_stream_handlers=[handler1, handler2]) as manager:
+ manager.upload_chunk(app_config, BytesIO('hello '))
+ manager.upload_chunk(app_config, BytesIO('world'))
+
+ assert ''.join(handler1_result) == 'hello world'
+ assert ''.join(handler2_result) == 'hello world'
+
+
+def valid_tar_gz(contents):
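+ """ Test helper: builds an in-memory gzipped tarball containing a single file with the
+ given contents, so the uploader's uncompressed-size tracking can be exercised. """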
+ with closing(BytesIO()) as layer_data:
+ with closing(tarfile.open(fileobj=layer_data, mode='w|gz')) as tar_file:
+ tar_file_info = tarfile.TarInfo(name='somefile')
+ tar_file_info.type = tarfile.REGTYPE
+ tar_file_info.size = len(contents)
+ tar_file_info.mtime = 1
+ tar_file.addfile(tar_file_info, BytesIO(contents))
+
+ layer_bytes = layer_data.getvalue()
+ return layer_bytes
+
+
+def test_uncompressed_size(pre_oci_model):
+ repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
+ storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+ settings = BlobUploadSettings('1K', 512 * 1024, 3600)
+ app_config = {'TESTING': True}
+
+ with upload_blob(repository_ref, storage, settings) as manager:
+ manager.upload_chunk(app_config, BytesIO(valid_tar_gz('hello world')))
+
+ blob = manager.commit_to_blob(app_config)
+
+ assert blob.compressed_size is not None
+ assert blob.uncompressed_size is not None
+
diff --git a/data/registry_model/test/test_interface.py b/data/registry_model/test/test_interface.py
new file mode 100644
index 000000000..8255ade6d
--- /dev/null
+++ b/data/registry_model/test/test_interface.py
@@ -0,0 +1,1095 @@
+# -*- coding: utf-8 -*-
+
+import hashlib
+import json
+import os
+import uuid
+
+from datetime import datetime, timedelta
+from io import BytesIO
+
+import pytest
+
+from mock import patch
+from playhouse.test_utils import assert_query_count
+
+from app import docker_v2_signing_key, storage
+from data import model
+from data.database import (TagManifestLabelMap, TagManifestToManifest, Manifest, ManifestBlob,
+ ManifestLegacyImage, ManifestLabel,
+ TagManifest, TagManifestLabel, DerivedStorageForImage,
+ TorrentInfo, Tag, TagToRepositoryTag, ImageStorageLocation)
+from data.cache.impl import InMemoryDataModelCache
+from data.registry_model.registry_pre_oci_model import PreOCIModel
+from data.registry_model.registry_oci_model import OCIModel
+from data.registry_model.datatypes import RepositoryReference
+from data.registry_model.blobuploader import upload_blob, BlobUploadSettings
+from data.registry_model.modelsplitter import SplitModel
+from data.model.blob import store_blob_record_and_temp_link
+from image.docker.types import ManifestImageLayer
+from image.docker.schema1 import (DockerSchema1ManifestBuilder, DOCKER_SCHEMA1_CONTENT_TYPES,
+ DockerSchema1Manifest)
+from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
+from image.docker.schema2.list import DockerSchema2ManifestListBuilder
+from util.bytes import Bytes
+
+from test.fixtures import *
+
+
+@pytest.fixture(params=[PreOCIModel(), OCIModel(), OCIModel(oci_model_only=False),
+ SplitModel(0, {'devtable'}, {'buynlarge'}, False),
+ SplitModel(1.0, {'devtable'}, {'buynlarge'}, False),
+ SplitModel(1.0, {'devtable'}, {'buynlarge'}, True)])
+def registry_model(request, initialized_db):
+ return request.param
+
+@pytest.fixture()
+def pre_oci_model(initialized_db):
+ return PreOCIModel()
+
+@pytest.fixture()
+def oci_model(initialized_db):
+ return OCIModel()
+
+
+@pytest.mark.parametrize('names, expected', [
+ (['unknown'], None),
+ (['latest'], {'latest'}),
+ (['latest', 'prod'], {'latest', 'prod'}),
+ (['latest', 'prod', 'another'], {'latest', 'prod'}),
+ (['foo', 'prod'], {'prod'}),
+])
+def test_find_matching_tag(names, expected, registry_model):
+ repo = model.repository.get_repository('devtable', 'simple')
+ repository_ref = RepositoryReference.for_repo_obj(repo)
+ found = registry_model.find_matching_tag(repository_ref, names)
+ if expected is None:
+ assert found is None
+ else:
+ assert found.name in expected
+ assert found.repository.namespace_name == 'devtable'
+ assert found.repository.name == 'simple'
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name, expected', [
+ ('devtable', 'simple', {'latest', 'prod'}),
+ ('buynlarge', 'orgrepo', {'latest', 'prod'}),
+])
+def test_get_most_recent_tag(repo_namespace, repo_name, expected, registry_model):
+ repo = model.repository.get_repository(repo_namespace, repo_name)
+ repository_ref = RepositoryReference.for_repo_obj(repo)
+ found = registry_model.get_most_recent_tag(repository_ref)
+ if expected is None:
+ assert found is None
+ else:
+ assert found.name in expected
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name, expected', [
+ ('devtable', 'simple', True),
+ ('buynlarge', 'orgrepo', True),
+ ('buynlarge', 'unknownrepo', False),
+])
+def test_lookup_repository(repo_namespace, repo_name, expected, registry_model):
+ repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
+ if expected:
+ assert repo_ref
+ else:
+ assert repo_ref is None
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name', [
+ ('devtable', 'simple'),
+ ('buynlarge', 'orgrepo'),
+])
+def test_lookup_manifests(repo_namespace, repo_name, registry_model):
+ repo = model.repository.get_repository(repo_namespace, repo_name)
+ repository_ref = RepositoryReference.for_repo_obj(repo)
+ found_tag = registry_model.find_matching_tag(repository_ref, ['latest'])
+ found_manifest = registry_model.get_manifest_for_tag(found_tag)
+ found = registry_model.lookup_manifest_by_digest(repository_ref, found_manifest.digest,
+ include_legacy_image=True)
+ assert found._db_id == found_manifest._db_id
+ assert found.digest == found_manifest.digest
+ assert found.legacy_image
+ assert found.legacy_image.parents
+
+ schema1_parsed = registry_model.get_schema1_parsed_manifest(found, 'foo', 'bar', 'baz', storage)
+ assert schema1_parsed is not None
+
+
+def test_lookup_unknown_manifest(registry_model):
+ repo = model.repository.get_repository('devtable', 'simple')
+ repository_ref = RepositoryReference.for_repo_obj(repo)
+ found = registry_model.lookup_manifest_by_digest(repository_ref, 'sha256:deadbeef')
+ assert found is None
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name', [
+ ('devtable', 'simple'),
+ ('devtable', 'complex'),
+ ('devtable', 'history'),
+ ('buynlarge', 'orgrepo'),
+])
+def test_legacy_images(repo_namespace, repo_name, registry_model):
+ repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
+ legacy_images = registry_model.get_legacy_images(repository_ref)
+ assert len(legacy_images)
+
+ found_tags = set()
+ for image in legacy_images:
+ found_image = registry_model.get_legacy_image(repository_ref, image.docker_image_id,
+ include_parents=True)
+
+ with assert_query_count(5 if found_image.parents else 4):
+ found_image = registry_model.get_legacy_image(repository_ref, image.docker_image_id,
+ include_parents=True, include_blob=True)
+ assert found_image.docker_image_id == image.docker_image_id
+ assert found_image.parents == image.parents
+ assert found_image.blob
+ assert found_image.blob.placements
+
+ # Check that the tags list can be retrieved.
+ assert image.tags is not None
+ found_tags.update({tag.name for tag in image.tags})
+
+ # Check against the actual DB row.
+ model_image = model.image.get_image(repository_ref._db_id, found_image.docker_image_id)
+ assert model_image.id == found_image._db_id
+ assert ([pid for pid in reversed(model_image.ancestor_id_list())] ==
+ [p._db_id for p in found_image.parents])
+
+ # Try without parents and ensure it raises an exception.
+ found_image = registry_model.get_legacy_image(repository_ref, image.docker_image_id,
+ include_parents=False)
+ with pytest.raises(Exception):
+ assert not found_image.parents
+
+ assert found_tags
+
+ unknown = registry_model.get_legacy_image(repository_ref, 'unknown', include_parents=True)
+ assert unknown is None
+
+
+def test_manifest_labels(registry_model):
+ repo = model.repository.get_repository('devtable', 'simple')
+ repository_ref = RepositoryReference.for_repo_obj(repo)
+ found_tag = registry_model.find_matching_tag(repository_ref, ['latest'])
+ found_manifest = registry_model.get_manifest_for_tag(found_tag)
+
+ # Create a new label.
+ created = registry_model.create_manifest_label(found_manifest, 'foo', 'bar', 'api')
+ assert created.key == 'foo'
+ assert created.value == 'bar'
+ assert created.source_type_name == 'api'
+ assert created.media_type_name == 'text/plain'
+
+ # Ensure we can look it up.
+ assert registry_model.get_manifest_label(found_manifest, created.uuid) == created
+
+ # Ensure it is in our list of labels.
+ assert created in registry_model.list_manifest_labels(found_manifest)
+ assert created in registry_model.list_manifest_labels(found_manifest, key_prefix='fo')
+
+ # Ensure it is *not* in our filtered list.
+ assert created not in registry_model.list_manifest_labels(found_manifest, key_prefix='ba')
+
+ # Delete the label and ensure it is gone.
+ assert registry_model.delete_manifest_label(found_manifest, created.uuid)
+ assert registry_model.get_manifest_label(found_manifest, created.uuid) is None
+ assert created not in registry_model.list_manifest_labels(found_manifest)
+
+
+def test_manifest_label_handlers(registry_model):
+ repo = model.repository.get_repository('devtable', 'simple')
+ repository_ref = RepositoryReference.for_repo_obj(repo)
+ found_tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ found_manifest = registry_model.get_manifest_for_tag(found_tag)
+
+ # Ensure the tag has no expiration.
+ assert found_tag.lifetime_end_ts is None
+
+ # Create a new label with an expires-after.
+ registry_model.create_manifest_label(found_manifest, 'quay.expires-after', '2h', 'api')
+
+ # Ensure the tag now has an expiration.
+ updated_tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ assert updated_tag.lifetime_end_ts == (updated_tag.lifetime_start_ts + (60 * 60 * 2))
+
+
+def test_batch_labels(registry_model):
+ repo = model.repository.get_repository('devtable', 'history')
+ repository_ref = RepositoryReference.for_repo_obj(repo)
+ found_tag = registry_model.find_matching_tag(repository_ref, ['latest'])
+ found_manifest = registry_model.get_manifest_for_tag(found_tag)
+
+ with registry_model.batch_create_manifest_labels(found_manifest) as add_label:
+ add_label('foo', '1', 'api')
+ add_label('bar', '2', 'api')
+ add_label('baz', '3', 'api')
+
+ # Ensure we can look them up.
+ assert len(registry_model.list_manifest_labels(found_manifest)) == 3
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name', [
+ ('devtable', 'simple'),
+ ('devtable', 'complex'),
+ ('devtable', 'history'),
+ ('buynlarge', 'orgrepo'),
+])
+def test_repository_tags(repo_namespace, repo_name, registry_model):
+ repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
+ tags = registry_model.list_all_active_repository_tags(repository_ref, include_legacy_images=True)
+ assert len(tags)
+
+ tags_map = registry_model.get_legacy_tags_map(repository_ref, storage)
+
+ for tag in tags:
+ found_tag = registry_model.get_repo_tag(repository_ref, tag.name, include_legacy_image=True)
+ assert found_tag == tag
+
+ if found_tag.legacy_image is None:
+ continue
+
+ found_image = registry_model.get_legacy_image(repository_ref,
+ found_tag.legacy_image.docker_image_id)
+ assert found_image == found_tag.legacy_image
+ assert tag.name in tags_map
+ assert tags_map[tag.name] == found_image.docker_image_id
+
+
+@pytest.mark.parametrize('namespace, name, expected_tag_count, has_expired', [
+ ('devtable', 'simple', 2, False),
+ ('devtable', 'history', 2, True),
+ ('devtable', 'gargantuan', 8, False),
+ ('public', 'publicrepo', 1, False),
+])
+def test_repository_tag_history(namespace, name, expected_tag_count, has_expired, registry_model):
+ # Pre-cache media type loads to ensure consistent query count.
+ Manifest.media_type.get_name(1)
+
+ repository_ref = registry_model.lookup_repository(namespace, name)
+ with assert_query_count(2):
+ history, has_more = registry_model.list_repository_tag_history(repository_ref)
+ assert not has_more
+ assert len(history) == expected_tag_count
+
+ for tag in history:
+ # Retrieve the manifest to ensure it doesn't issue extra queries.
+ tag.manifest
+
+ if has_expired:
+ # Ensure the latest tag is marked expired, since there is an expired one.
+ with assert_query_count(1):
+ assert registry_model.has_expired_tag(repository_ref, 'latest')
+
+
+@pytest.mark.parametrize('repositories, expected_tag_count', [
+ ([], 0),
+ ([('devtable', 'simple'), ('devtable', 'building')], 1),
+])
+def test_get_most_recent_tag_lifetime_start(repositories, expected_tag_count, registry_model):
+ last_modified_map = registry_model.get_most_recent_tag_lifetime_start(
+ [registry_model.lookup_repository(namespace, name) for namespace, name in repositories]
+ )
+
+ assert len(last_modified_map) == expected_tag_count
+ for repo_id, last_modified in last_modified_map.items():
+ tag = registry_model.get_most_recent_tag(RepositoryReference.for_id(repo_id))
+ assert last_modified == tag.lifetime_start_ms / 1000
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name', [
+ ('devtable', 'simple'),
+ ('devtable', 'complex'),
+ ('devtable', 'history'),
+ ('buynlarge', 'orgrepo'),
+])
+@pytest.mark.parametrize('via_manifest', [
+ False,
+ True,
+])
+def test_delete_tags(repo_namespace, repo_name, via_manifest, registry_model):
+ repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
+ tags = registry_model.list_all_active_repository_tags(repository_ref)
+ assert len(tags)
+
+ # Save history before the deletions.
+ previous_history, _ = registry_model.list_repository_tag_history(repository_ref, size=1000)
+ assert len(previous_history) >= len(tags)
+
+ # Delete every tag in the repository.
+ for tag in tags:
+ if via_manifest:
+ assert registry_model.delete_tag(repository_ref, tag.name)
+ else:
+ manifest = registry_model.get_manifest_for_tag(tag)
+ if manifest is not None:
+ assert registry_model.delete_tags_for_manifest(manifest)
+
+ # Make sure the tag is no longer found.
+ # TODO: Uncomment once we're done with the SplitModel.
+ #with assert_query_count(1):
+ found_tag = registry_model.get_repo_tag(repository_ref, tag.name, include_legacy_image=True)
+ assert found_tag is None
+
+ # Ensure all tags have been deleted.
+ tags = registry_model.list_all_active_repository_tags(repository_ref)
+ assert not len(tags)
+
+ # Ensure that the tags all live in history.
+ history, _ = registry_model.list_repository_tag_history(repository_ref, size=1000)
+ assert len(history) == len(previous_history)
+
+
+@pytest.mark.parametrize('use_manifest', [
+ True,
+ False,
+])
+def test_retarget_tag_history(use_manifest, registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'history')
+ history, _ = registry_model.list_repository_tag_history(repository_ref)
+
+ if use_manifest:
+ manifest_or_legacy_image = registry_model.lookup_manifest_by_digest(repository_ref,
+ history[0].manifest_digest,
+ allow_dead=True)
+ else:
+ manifest_or_legacy_image = history[0].legacy_image
+
+ # Retarget the tag.
+ assert manifest_or_legacy_image
+ updated_tag = registry_model.retarget_tag(repository_ref, 'latest', manifest_or_legacy_image,
+ storage, docker_v2_signing_key, is_reversion=True)
+
+ # Ensure the tag has changed targets.
+ if use_manifest:
+ assert updated_tag.manifest_digest == manifest_or_legacy_image.digest
+ else:
+ assert updated_tag.legacy_image == manifest_or_legacy_image
+
+ # Ensure history has been updated.
+ new_history, _ = registry_model.list_repository_tag_history(repository_ref)
+ assert len(new_history) == len(history) + 1
+
+
+def test_retarget_tag_schema1(oci_model):
+ repository_ref = oci_model.lookup_repository('devtable', 'simple')
+ latest_tag = oci_model.get_repo_tag(repository_ref, 'latest')
+ manifest = oci_model.get_manifest_for_tag(latest_tag)
+
+ existing_parsed = manifest.get_parsed_manifest()
+
+ # Retarget a new tag to the manifest.
+ updated_tag = oci_model.retarget_tag(repository_ref, 'somenewtag', manifest, storage,
+ docker_v2_signing_key)
+ assert updated_tag
+ assert updated_tag.name == 'somenewtag'
+
+ updated_manifest = oci_model.get_manifest_for_tag(updated_tag)
+ parsed = updated_manifest.get_parsed_manifest()
+ assert parsed.namespace == 'devtable'
+ assert parsed.repo_name == 'simple'
+ assert parsed.tag == 'somenewtag'
+
+ assert parsed.layers == existing_parsed.layers
+
+ # Ensure the tag has changed targets.
+ assert oci_model.get_repo_tag(repository_ref, 'somenewtag') == updated_tag
+
+
+def test_change_repository_tag_expiration(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ assert tag.lifetime_end_ts is None
+
+ new_datetime = datetime.utcnow() + timedelta(days=2)
+ previous, okay = registry_model.change_repository_tag_expiration(tag, new_datetime)
+
+ assert okay
+ assert previous is None
+
+ tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ assert tag.lifetime_end_ts is not None
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name, expected_non_empty', [
+ ('devtable', 'simple', []),
+ ('devtable', 'complex', ['prod', 'v2.0']),
+ ('devtable', 'history', ['latest']),
+ ('buynlarge', 'orgrepo', []),
+ ('devtable', 'gargantuan', ['v2.0', 'v3.0', 'v4.0', 'v5.0', 'v6.0']),
+])
+def test_get_legacy_images_owned_by_tag(repo_namespace, repo_name, expected_non_empty,
+ registry_model):
+ repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
+ tags = registry_model.list_all_active_repository_tags(repository_ref)
+ assert len(tags)
+
+ non_empty = set()
+ for tag in tags:
+ if registry_model.get_legacy_images_owned_by_tag(tag):
+ non_empty.add(tag.name)
+
+ assert non_empty == set(expected_non_empty)
+
+
+def test_get_security_status(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ tags = registry_model.list_all_active_repository_tags(repository_ref, include_legacy_images=True)
+ assert len(tags)
+
+ for tag in tags:
+ assert registry_model.get_security_status(tag.legacy_image)
+ registry_model.reset_security_status(tag.legacy_image)
+ assert registry_model.get_security_status(tag.legacy_image)
+
+
+@pytest.fixture()
+def clear_rows(initialized_db):
+ # Remove all new-style rows so we can backfill.
+ TagToRepositoryTag.delete().execute()
+ Tag.delete().execute()
+ TagManifestLabelMap.delete().execute()
+ ManifestLabel.delete().execute()
+ ManifestBlob.delete().execute()
+ ManifestLegacyImage.delete().execute()
+ TagManifestToManifest.delete().execute()
+ Manifest.delete().execute()
+ TagManifestLabel.delete().execute()
+ TagManifest.delete().execute()
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name', [
+ ('devtable', 'simple'),
+ ('devtable', 'complex'),
+ ('devtable', 'history'),
+ ('buynlarge', 'orgrepo'),
+])
+def test_backfill_manifest_for_tag(repo_namespace, repo_name, clear_rows, pre_oci_model):
+ repository_ref = pre_oci_model.lookup_repository(repo_namespace, repo_name)
+ tags, has_more = pre_oci_model.list_repository_tag_history(repository_ref, size=2500)
+ assert tags
+ assert not has_more
+
+ for tag in tags:
+ assert not tag.manifest_digest
+ assert pre_oci_model.backfill_manifest_for_tag(tag)
+
+ tags, _ = pre_oci_model.list_repository_tag_history(repository_ref)
+ assert tags
+ for tag in tags:
+ assert tag.manifest_digest
+
+ manifest = pre_oci_model.get_manifest_for_tag(tag)
+ assert manifest
+
+ legacy_image = pre_oci_model.get_legacy_image(repository_ref, tag.legacy_image.docker_image_id,
+ include_parents=True)
+
+ parsed_manifest = manifest.get_parsed_manifest()
+ assert parsed_manifest.leaf_layer_v1_image_id == legacy_image.docker_image_id
+ assert parsed_manifest.parent_image_ids == {p.docker_image_id for p in legacy_image.parents}
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name', [
+ ('devtable', 'simple'),
+ ('devtable', 'complex'),
+ ('devtable', 'history'),
+ ('buynlarge', 'orgrepo'),
+])
+def test_backfill_manifest_on_lookup(repo_namespace, repo_name, clear_rows, pre_oci_model):
+ repository_ref = pre_oci_model.lookup_repository(repo_namespace, repo_name)
+ tags = pre_oci_model.list_all_active_repository_tags(repository_ref)
+ assert tags
+
+ for tag in tags:
+ assert not tag.manifest_digest
+ assert not pre_oci_model.get_manifest_for_tag(tag)
+
+ manifest = pre_oci_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
+ assert manifest
+
+ updated_tag = pre_oci_model.get_repo_tag(repository_ref, tag.name)
+ assert updated_tag.manifest_digest == manifest.digest
+
+
+@pytest.mark.parametrize('namespace, expect_enabled', [
+ ('devtable', True),
+ ('buynlarge', True),
+
+ ('disabled', False),
+])
+def test_is_namespace_enabled(namespace, expect_enabled, registry_model):
+ assert registry_model.is_namespace_enabled(namespace) == expect_enabled
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name', [
+ ('devtable', 'simple'),
+ ('devtable', 'complex'),
+ ('devtable', 'history'),
+ ('buynlarge', 'orgrepo'),
+])
+def test_layers_and_blobs(repo_namespace, repo_name, registry_model):
+ repository_ref = registry_model.lookup_repository(repo_namespace, repo_name)
+ tags = registry_model.list_all_active_repository_tags(repository_ref)
+ assert tags
+
+ for tag in tags:
+ manifest = registry_model.get_manifest_for_tag(tag)
+ assert manifest
+
+ parsed = manifest.get_parsed_manifest()
+ assert parsed
+
+ layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage)
+ assert layers
+
+ layers = registry_model.list_parsed_manifest_layers(repository_ref, parsed, storage,
+ include_placements=True)
+ assert layers
+
+ for index, manifest_layer in enumerate(layers):
+ assert manifest_layer.blob.storage_path
+ assert manifest_layer.blob.placements
+
+ repo_blob = registry_model.get_repo_blob_by_digest(repository_ref, manifest_layer.blob.digest)
+ assert repo_blob.digest == manifest_layer.blob.digest
+
+ assert manifest_layer.estimated_size(1) is not None
+ assert isinstance(manifest_layer.layer_info, ManifestImageLayer)
+
+ blobs = registry_model.get_manifest_local_blobs(manifest, include_placements=True)
+ assert {b.digest for b in blobs} == set(parsed.local_blob_digests)
+
+
+def test_manifest_remote_layers(oci_model):
+ # Create a config blob for testing.
+ config_json = json.dumps({
+ 'config': {},
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ ],
+ })
+
+ app_config = {'TESTING': True}
+ repository_ref = oci_model.lookup_repository('devtable', 'simple')
+ with upload_blob(repository_ref, storage, BlobUploadSettings(500, 500, 500)) as upload:
+ upload.upload_chunk(app_config, BytesIO(config_json))
+ blob = upload.commit_to_blob(app_config)
+
+ # Create the manifest in the repo.
+ builder = DockerSchema2ManifestBuilder()
+ builder.set_config_digest(blob.digest, blob.compressed_size)
+ builder.add_layer('sha256:abcd', 1234, urls=['http://hello/world'])
+ manifest = builder.build()
+
+ created_manifest, _ = oci_model.create_manifest_and_retarget_tag(repository_ref, manifest,
+ 'sometag', storage)
+ assert created_manifest
+
+ layers = oci_model.list_parsed_manifest_layers(repository_ref,
+ created_manifest.get_parsed_manifest(),
+ storage)
+ assert len(layers) == 1
+ assert layers[0].layer_info.is_remote
+ assert layers[0].layer_info.urls == ['http://hello/world']
+ assert layers[0].blob is None
+
+
+def test_derived_image(registry_model):
+ # Clear all existing derived storage.
+ DerivedStorageForImage.delete().execute()
+
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ manifest = registry_model.get_manifest_for_tag(tag)
+
+ # Ensure the squashed image doesn't exist.
+ assert registry_model.lookup_derived_image(manifest, 'squash', storage, {}) is None
+
+ # Create a new one.
+ squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash',
+ 'local_us', storage, {})
+ assert registry_model.lookup_or_create_derived_image(manifest, 'squash',
+ 'local_us', storage, {}) == squashed
+ assert squashed.unique_id
+
+ # Check and set the size.
+ assert squashed.blob.compressed_size is None
+ registry_model.set_derived_image_size(squashed, 1234)
+
+ found = registry_model.lookup_derived_image(manifest, 'squash', storage, {})
+ assert found.blob.compressed_size == 1234
+ assert found.unique_id == squashed.unique_id
+
+ # Ensure it's returned now.
+ assert found == squashed
+
+ # Ensure different metadata results in a different derived image.
+ found = registry_model.lookup_derived_image(manifest, 'squash', storage, {'foo': 'bar'})
+ assert found is None
+
+ squashed_foo = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us',
+ storage, {'foo': 'bar'})
+ assert squashed_foo != squashed
+
+ found = registry_model.lookup_derived_image(manifest, 'squash', storage, {'foo': 'bar'})
+ assert found == squashed_foo
+
+ assert squashed.unique_id != squashed_foo.unique_id
+
+ # Lookup with placements.
+ squashed = registry_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us',
+ storage, {}, include_placements=True)
+ assert squashed.blob.placements
+
+ # Delete the derived image.
+ registry_model.delete_derived_image(squashed)
+ assert registry_model.lookup_derived_image(manifest, 'squash', storage, {}) is None
+
+
+def test_derived_image_signatures(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ manifest = registry_model.get_manifest_for_tag(tag)
+
+ derived = registry_model.lookup_derived_image(manifest, 'squash', storage, {})
+ assert derived
+
+ signature = registry_model.get_derived_image_signature(derived, 'gpg2')
+ assert signature is None
+
+ registry_model.set_derived_image_signature(derived, 'gpg2', 'foo')
+ assert registry_model.get_derived_image_signature(derived, 'gpg2') == 'foo'
+
+
+def test_derived_image_for_manifest_list(oci_model):
+ # Clear all existing derived storage.
+ DerivedStorageForImage.delete().execute()
+
+ # Create a config blob for testing.
+ config_json = json.dumps({
+ 'config': {},
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": []
+ },
+ "history": [
+ {
+ "created": "2018-04-03T18:37:09.284840891Z",
+ "created_by": "do something",
+ },
+ ],
+ })
+
+ app_config = {'TESTING': True}
+ repository_ref = oci_model.lookup_repository('devtable', 'simple')
+ with upload_blob(repository_ref, storage, BlobUploadSettings(500, 500, 500)) as upload:
+ upload.upload_chunk(app_config, BytesIO(config_json))
+ blob = upload.commit_to_blob(app_config)
+
+ # Create the manifest in the repo.
+ builder = DockerSchema2ManifestBuilder()
+ builder.set_config_digest(blob.digest, blob.compressed_size)
+ builder.add_layer(blob.digest, blob.compressed_size)
+ amd64_manifest = builder.build()
+
+ oci_model.create_manifest_and_retarget_tag(repository_ref, amd64_manifest, 'submanifest', storage)
+
+ # Create a manifest list, pointing to at least one amd64+linux manifest.
+ builder = DockerSchema2ManifestListBuilder()
+ builder.add_manifest(amd64_manifest, 'amd64', 'linux')
+ manifestlist = builder.build()
+
+ oci_model.create_manifest_and_retarget_tag(repository_ref, manifestlist, 'listtag', storage)
+ manifest = oci_model.get_manifest_for_tag(oci_model.get_repo_tag(repository_ref, 'listtag'))
+ assert manifest
+ assert manifest.get_parsed_manifest().is_manifest_list
+
+ # Ensure the squashed image doesn't exist.
+ assert oci_model.lookup_derived_image(manifest, 'squash', storage, {}) is None
+
+ # Create a new one.
+ squashed = oci_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', storage, {})
+ assert squashed.unique_id
+ assert oci_model.lookup_or_create_derived_image(manifest, 'squash',
+ 'local_us', storage, {}) == squashed
+
+ # Perform lookup.
+ assert oci_model.lookup_derived_image(manifest, 'squash', storage, {}) == squashed
+
+
+def test_torrent_info(registry_model):
+ # Remove all existing info.
+ TorrentInfo.delete().execute()
+
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ manifest = registry_model.get_manifest_for_tag(tag)
+
+ blobs = registry_model.get_manifest_local_blobs(manifest)
+ assert blobs
+
+ assert registry_model.get_torrent_info(blobs[0]) is None
+ registry_model.set_torrent_info(blobs[0], 2, 'foo')
+
+ # Set it again exactly, which should be a no-op.
+ registry_model.set_torrent_info(blobs[0], 2, 'foo')
+
+ # Check the information we've set.
+ torrent_info = registry_model.get_torrent_info(blobs[0])
+ assert torrent_info is not None
+ assert torrent_info.piece_length == 2
+ assert torrent_info.pieces == 'foo'
+
+ # Try setting it again. Nothing should happen.
+ registry_model.set_torrent_info(blobs[0], 3, 'bar')
+
+ torrent_info = registry_model.get_torrent_info(blobs[0])
+ assert torrent_info is not None
+ assert torrent_info.piece_length == 2
+ assert torrent_info.pieces == 'foo'
+
+
+def test_blob_uploads(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+
+ blob_upload = registry_model.create_blob_upload(repository_ref, str(uuid.uuid4()),
+ 'local_us', {'some': 'metadata'})
+ assert blob_upload
+ assert blob_upload.storage_metadata == {'some': 'metadata'}
+ assert blob_upload.location_name == 'local_us'
+
+ # Ensure we can find the blob upload.
+ assert registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id) == blob_upload
+
+ # Update and ensure the changes are saved.
+ assert registry_model.update_blob_upload(blob_upload, 1, 'the-pieces_hash',
+ blob_upload.piece_sha_state,
+ {'new': 'metadata'}, 2, 3,
+ blob_upload.sha_state)
+
+ updated = registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
+ assert updated
+ assert updated.uncompressed_byte_count == 1
+ assert updated.piece_hashes == 'the-pieces_hash'
+ assert updated.storage_metadata == {'new': 'metadata'}
+ assert updated.byte_count == 2
+ assert updated.chunk_count == 3
+
+ # Delete the upload.
+ registry_model.delete_blob_upload(blob_upload)
+
+ # Ensure it can no longer be found.
+ assert not registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
+
+
+def test_commit_blob_upload(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ blob_upload = registry_model.create_blob_upload(repository_ref, str(uuid.uuid4()),
+ 'local_us', {'some': 'metadata'})
+
+ # Commit the blob upload and make sure it is written as a blob.
+ digest = 'sha256:' + hashlib.sha256('hello').hexdigest()
+ blob = registry_model.commit_blob_upload(blob_upload, digest, 60)
+ assert blob.digest == digest
+
+ # Ensure the upload can no longer be found.
+ assert not registry_model.lookup_blob_upload(repository_ref, blob_upload.upload_id)
+
+
+# TODO: Re-enable for OCI model once we have a new table for temporary blobs.
+def test_mount_blob_into_repository(pre_oci_model):
+ repository_ref = pre_oci_model.lookup_repository('devtable', 'simple')
+ latest_tag = pre_oci_model.get_repo_tag(repository_ref, 'latest')
+ manifest = pre_oci_model.get_manifest_for_tag(latest_tag)
+
+ target_repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
+
+ blobs = pre_oci_model.get_manifest_local_blobs(manifest, include_placements=True)
+ assert blobs
+
+ for blob in blobs:
+ # Ensure the blob doesn't exist under the repository.
+ assert not pre_oci_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)
+
+ # Mount the blob into the repository.
+ assert pre_oci_model.mount_blob_into_repository(blob, target_repository_ref, 60)
+
+ # Ensure it now exists.
+ found = pre_oci_model.get_repo_blob_by_digest(target_repository_ref, blob.digest)
+ assert found == blob
+
+
+class SomeException(Exception):
+ pass
+
+
+def test_get_cached_repo_blob(registry_model):
+ model_cache = InMemoryDataModelCache()
+
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ latest_tag = registry_model.get_repo_tag(repository_ref, 'latest')
+ manifest = registry_model.get_manifest_for_tag(latest_tag)
+
+ blobs = registry_model.get_manifest_local_blobs(manifest, include_placements=True)
+ assert blobs
+
+ blob = blobs[0]
+
+ # Load a blob to add it to the cache.
+ found = registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', blob.digest)
+ assert found.digest == blob.digest
+ assert found.uuid == blob.uuid
+ assert found.compressed_size == blob.compressed_size
+ assert found.uncompressed_size == blob.uncompressed_size
+ assert found.uploading == blob.uploading
+ assert found.placements == blob.placements
+
+ # Disconnect from the database by overwriting the connection.
+ def fail(x, y):
+ raise SomeException('Not connected!')
+
+ with patch('data.registry_model.registry_pre_oci_model.model.blob.get_repository_blob_by_digest',
+ fail):
+ with patch('data.registry_model.registry_oci_model.model.oci.blob.get_repository_blob_by_digest',
+ fail):
+ # Make sure we can load again, which should hit the cache.
+ cached = registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', blob.digest)
+ assert cached.digest == blob.digest
+ assert cached.uuid == blob.uuid
+ assert cached.compressed_size == blob.compressed_size
+ assert cached.uncompressed_size == blob.uncompressed_size
+ assert cached.uploading == blob.uploading
+ assert cached.placements == blob.placements
+
+ # Try another blob, which should fail since the DB is not connected and the cache
+ # does not contain the blob.
+ with pytest.raises(SomeException):
+ registry_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', 'some other digest')
+
+
+def test_create_manifest_and_retarget_tag(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
+ manifest = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()
+
+ builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
+ builder.add_layer(manifest.blob_digests[0],
+ '{"id": "%s"}' % latest_tag.legacy_image.docker_image_id)
+ sample_manifest = builder.build(docker_v2_signing_key)
+ assert sample_manifest is not None
+
+ another_manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref,
+ sample_manifest,
+ 'anothertag',
+ storage)
+ assert another_manifest is not None
+ assert tag is not None
+
+ assert tag.name == 'anothertag'
+ assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
+
+
+def test_get_schema1_parsed_manifest(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
+ manifest = registry_model.get_manifest_for_tag(latest_tag)
+ assert registry_model.get_schema1_parsed_manifest(manifest, '', '', '', storage)
+
+
+def test_convert_manifest(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
+ manifest = registry_model.get_manifest_for_tag(latest_tag)
+
+ mediatypes = DOCKER_SCHEMA1_CONTENT_TYPES
+ assert registry_model.convert_manifest(manifest, '', '', '', mediatypes, storage)
+
+ mediatypes = []
+ assert registry_model.convert_manifest(manifest, '', '', '', mediatypes, storage) is None
+
+
+def test_create_manifest_and_retarget_tag_with_labels(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ latest_tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
+ manifest = registry_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()
+
+ json_metadata = {
+ 'id': latest_tag.legacy_image.docker_image_id,
+ 'config': {
+ 'Labels': {
+ 'quay.expires-after': '2w',
+ },
+ },
+ }
+
+ builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
+ builder.add_layer(manifest.blob_digests[0], json.dumps(json_metadata))
+ sample_manifest = builder.build(docker_v2_signing_key)
+ assert sample_manifest is not None
+
+ another_manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref,
+ sample_manifest,
+ 'anothertag',
+ storage)
+ assert another_manifest is not None
+ assert tag is not None
+
+ assert tag.name == 'anothertag'
+ assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict
+
+ # Ensure the labels were applied.
+ assert tag.lifetime_end_ms is not None
+
+
+def _populate_blob(digest):
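+ """ Test helper: stores a blob record under devtable/simple (with a short temporary link)
+ so that manifests referencing the given digest can be created. """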
+ location = ImageStorageLocation.get(name='local_us')
+ store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1, 120)
+
+
+def test_known_issue_schema1(registry_model):
+ test_dir = os.path.dirname(os.path.abspath(__file__))
+ path = os.path.join(test_dir, '../../../image/docker/test/validate_manifest_known_issue.json')
+ with open(path, 'r') as f:
+ manifest_bytes = f.read()
+
+ manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes))
+
+ for blob_digest in manifest.local_blob_digests:
+ _populate_blob(blob_digest)
+
+ digest = manifest.digest
+ assert digest == 'sha256:44518f5a4d1cb5b7a6347763116fb6e10f6a8563b6c40bb389a0a982f0a9f47a'
+
+ # Create the manifest in the database.
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ created_manifest, _ = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest,
+ 'latest', storage)
+ assert created_manifest
+ assert created_manifest.digest == manifest.digest
+ assert (created_manifest.internal_manifest_bytes.as_encoded_str() ==
+ manifest.bytes.as_encoded_str())
+
+ # Look it up again and validate.
+ found = registry_model.lookup_manifest_by_digest(repository_ref, manifest.digest, allow_dead=True)
+ assert found
+ assert found.digest == digest
+ assert found.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
+ assert found.get_parsed_manifest().digest == digest
+
+
+def test_unicode_emoji(registry_model):
+ builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'latest')
+ builder.add_layer('sha256:abcde', json.dumps({
+ 'id': 'someid',
+    'author': u'😱',
+ }, ensure_ascii=False))
+
+ manifest = builder.build(ensure_ascii=False)
+ manifest._validate()
+
+ for blob_digest in manifest.local_blob_digests:
+ _populate_blob(blob_digest)
+
+ # Create the manifest in the database.
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ created_manifest, _ = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest,
+ 'latest', storage)
+ assert created_manifest
+ assert created_manifest.digest == manifest.digest
+ assert (created_manifest.internal_manifest_bytes.as_encoded_str() ==
+ manifest.bytes.as_encoded_str())
+
+ # Look it up again and validate.
+ found = registry_model.lookup_manifest_by_digest(repository_ref, manifest.digest, allow_dead=True)
+ assert found
+ assert found.digest == manifest.digest
+ assert found.internal_manifest_bytes.as_encoded_str() == manifest.bytes.as_encoded_str()
+ assert found.get_parsed_manifest().digest == manifest.digest
+
+
+def test_lookup_active_repository_tags(oci_model):
+ repository_ref = oci_model.lookup_repository('devtable', 'simple')
+ latest_tag = oci_model.get_repo_tag(repository_ref, 'latest')
+ manifest = oci_model.get_manifest_for_tag(latest_tag)
+
+ tag_count = 500
+
+ # Create a bunch of tags.
+ tags_expected = set()
+ for index in range(0, tag_count):
+ tags_expected.add('somenewtag%s' % index)
+ oci_model.retarget_tag(repository_ref, 'somenewtag%s' % index, manifest, storage,
+ docker_v2_signing_key)
+
+ assert tags_expected
+
+ # List the tags.
+ tags_found = set()
+ tag_id = None
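+  # Page through the tags with a page size of 10 plus one look-ahead item; the
+  # extra item, when present, becomes the cursor for the next page.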
+ while True:
+ tags = oci_model.lookup_active_repository_tags(repository_ref, tag_id, 11)
+ assert len(tags) <= 11
+ for tag in tags[0:10]:
+ assert tag.name not in tags_found
+ if tag.name in tags_expected:
+ tags_found.add(tag.name)
+ tags_expected.remove(tag.name)
+
+ if len(tags) < 11:
+ break
+
+ tag_id = tags[10].id
+
+ # Make sure we've found all the tags.
+ assert tags_found
+ assert not tags_expected
+
+
+def test_yield_tags_for_vulnerability_notification(registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'complex')
+
+  # Check all legacy images under the tags and ensure nothing is yielded, since no
+  # notification has been registered yet.
+ for tag in registry_model.list_all_active_repository_tags(repository_ref,
+ include_legacy_images=True):
+ image = registry_model.get_legacy_image(repository_ref, tag.legacy_image.docker_image_id,
+ include_blob=True)
+ pairs = [(image.docker_image_id, image.blob.uuid)]
+ results = list(registry_model.yield_tags_for_vulnerability_notification(pairs))
+ assert not len(results)
+
+ # Register a notification.
+ model.notification.create_repo_notification(repository_ref.id, 'vulnerability_found', 'email',
+ {}, {})
+
+ # Check again.
+ for tag in registry_model.list_all_active_repository_tags(repository_ref,
+ include_legacy_images=True):
+ image = registry_model.get_legacy_image(repository_ref, tag.legacy_image.docker_image_id,
+ include_blob=True, include_parents=True)
+
+ # Check for every parent of the image.
+ for current in image.parents:
+ img = registry_model.get_legacy_image(repository_ref, current.docker_image_id,
+ include_blob=True)
+ pairs = [(img.docker_image_id, img.blob.uuid)]
+ results = list(registry_model.yield_tags_for_vulnerability_notification(pairs))
+ assert len(results) > 0
+ assert tag.name in {t.name for t in results}
+
+ # Check for the image itself.
+ pairs = [(image.docker_image_id, image.blob.uuid)]
+ results = list(registry_model.yield_tags_for_vulnerability_notification(pairs))
+ assert len(results) > 0
+ assert tag.name in {t.name for t in results}
diff --git a/data/registry_model/test/test_manifestbuilder.py b/data/registry_model/test/test_manifestbuilder.py
new file mode 100644
index 000000000..538731b8d
--- /dev/null
+++ b/data/registry_model/test/test_manifestbuilder.py
@@ -0,0 +1,104 @@
+import hashlib
+import json
+
+from io import BytesIO
+
+import pytest
+
+from mock import patch
+
+from app import docker_v2_signing_key
+
+from data.registry_model.blobuploader import BlobUploadSettings, upload_blob
+from data.registry_model.manifestbuilder import create_manifest_builder, lookup_manifest_builder
+from data.registry_model.registry_pre_oci_model import PreOCIModel
+from data.registry_model.registry_oci_model import OCIModel
+
+from storage.distributedstorage import DistributedStorage
+from storage.fakestorage import FakeStorage
+from test.fixtures import *
+
+
+@pytest.fixture(params=[PreOCIModel, OCIModel])
+def registry_model(request, initialized_db):
+ return request.param()
+
+
+@pytest.fixture()
+def fake_session():
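+  # The manifest builder stores its in-progress state via the module's `session`
+  # object; patching it with a plain dict keeps these tests independent of a Flask
+  # request context.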
+ with patch('data.registry_model.manifestbuilder.session', {}):
+ yield
+
+
+@pytest.mark.parametrize('layers', [
+ pytest.param([('someid', None, 'some data')], id='Single layer'),
+ pytest.param([('parentid', None, 'some parent data'),
+ ('someid', 'parentid', 'some data')],
+ id='Multi layer'),
+])
+def test_build_manifest(layers, fake_session, registry_model):
+ repository_ref = registry_model.lookup_repository('devtable', 'complex')
+ storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+ settings = BlobUploadSettings('2M', 512 * 1024, 3600)
+ app_config = {'TESTING': True}
+
+ builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
+ assert lookup_manifest_builder(repository_ref, 'anotherid', storage,
+ docker_v2_signing_key) is None
+ assert lookup_manifest_builder(repository_ref, builder.builder_id, storage,
+ docker_v2_signing_key) is not None
+
+ blobs_by_layer = {}
+ for layer_id, parent_id, layer_bytes in layers:
+ # Start a new layer.
+ assert builder.start_layer(layer_id, json.dumps({'id': layer_id, 'parent': parent_id}),
+ 'local_us', None, 60)
+
+ checksum = hashlib.sha1(layer_bytes).hexdigest()
+
+ # Assign it a blob.
+ with upload_blob(repository_ref, storage, settings) as uploader:
+ uploader.upload_chunk(app_config, BytesIO(layer_bytes))
+ blob = uploader.commit_to_blob(app_config)
+ blobs_by_layer[layer_id] = blob
+ builder.assign_layer_blob(builder.lookup_layer(layer_id), blob, [checksum])
+
+ # Validate the checksum.
+ assert builder.validate_layer_checksum(builder.lookup_layer(layer_id), checksum)
+
+ # Commit the manifest to a tag.
+ tag = builder.commit_tag_and_manifest('somenewtag', builder.lookup_layer(layers[-1][0]))
+ assert tag
+ assert tag in builder.committed_tags
+
+ # Mark the builder as done.
+ builder.done()
+
+ # Verify the legacy image for the tag.
+ found = registry_model.get_repo_tag(repository_ref, 'somenewtag', include_legacy_image=True)
+ assert found
+ assert found.name == 'somenewtag'
+ assert found.legacy_image.docker_image_id == layers[-1][0]
+
+ # Verify the blob and manifest.
+ manifest = registry_model.get_manifest_for_tag(found)
+ assert manifest
+
+ parsed = manifest.get_parsed_manifest()
+ assert len(list(parsed.layers)) == len(layers)
+
+ for index, (layer_id, parent_id, layer_bytes) in enumerate(layers):
+ assert list(parsed.blob_digests)[index] == blobs_by_layer[layer_id].digest
+ assert list(parsed.layers)[index].v1_metadata.image_id == layer_id
+ assert list(parsed.layers)[index].v1_metadata.parent_image_id == parent_id
+
+ assert parsed.leaf_layer_v1_image_id == layers[-1][0]
+
+
+def test_build_manifest_missing_parent(fake_session, registry_model):
+ storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
+ repository_ref = registry_model.lookup_repository('devtable', 'complex')
+ builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
+
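+  # Starting a layer whose parent has never been uploaded should be rejected
+  # (start_layer returns None).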
+ assert builder.start_layer('somelayer', json.dumps({'id': 'somelayer', 'parent': 'someparent'}),
+ 'local_us', None, 60) is None
diff --git a/data/runmigration.py b/data/runmigration.py
new file mode 100644
index 000000000..f4126aba1
--- /dev/null
+++ b/data/runmigration.py
@@ -0,0 +1,27 @@
+import logging
+
+from alembic.config import Config
+from alembic.script import ScriptDirectory
+from alembic.environment import EnvironmentContext
+from alembic.migration import __name__ as migration_name
+
+def run_alembic_migration(db_uri, log_handler=None, setup_app=True):
+ if log_handler:
+ logging.getLogger(migration_name).addHandler(log_handler)
+
+ config = Config()
+ config.set_main_option("script_location", "data:migrations")
+ config.set_main_option("db_uri", db_uri)
+
+ if setup_app:
+ config.set_main_option('alembic_setup_app', 'True')
+ else:
+ config.set_main_option('alembic_setup_app', '')
+
+ script = ScriptDirectory.from_config(config)
+
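+  # Alembic invokes this callback with the current revision; return the list of
+  # upgrade steps needed to reach 'head'.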
+ def fn(rev, context):
+ return script._upgrade_revs('head', rev)
+
+ with EnvironmentContext(config, script, fn=fn, destination_rev='head'):
+ script.run_env()
\ No newline at end of file
diff --git a/data/test/test_encryption.py b/data/test/test_encryption.py
new file mode 100644
index 000000000..f6ec8a94b
--- /dev/null
+++ b/data/test/test_encryption.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+from data.encryption import FieldEncrypter, _VERSIONS, DecryptionFailureException
+
+@pytest.mark.parametrize('test_data', [
+ '',
+ 'hello world',
+ 'wassup?!',
+ 'IGZ2Y8KUN3EFWAZZXR3D7U4V5NXDVYZI5VGU6STPB6KM83PAB8WRGM32RD9FW0C0',
+ 'JLRFBYS1EHKUE73S99HWOQWNPGLUZTBRF5HQEFUJS5BK3XVB54RNXYV4AUMJXCMC',
+ 'a' * 3,
+ 'a' * 4,
+ 'a' * 5,
+ 'a' * 31,
+ 'a' * 32,
+ 'a' * 33,
+ 'a' * 150,
+  u'😇',
+])
+@pytest.mark.parametrize('version', _VERSIONS.keys())
+@pytest.mark.parametrize('secret_key', [
+ u'test1234',
+ 'test1234',
+ 'thisisanothercoolsecretkeyhere',
+ '107383705745765174750346070528443780244192102846031525796571939503548634055845',
+])
+@pytest.mark.parametrize('use_valid_key', [
+ True,
+ False,
+])
+def test_encryption(test_data, version, secret_key, use_valid_key):
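+  # Round-trip the value through the encrypter; decryption must only succeed with
+  # the same secret key, and garbage ciphertext must always be rejected.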
+ encrypter = FieldEncrypter(secret_key, version)
+ encrypted = encrypter.encrypt_value(test_data, field_max_length=255)
+ assert encrypted != test_data
+
+ if use_valid_key:
+ decrypted = encrypter.decrypt_value(encrypted)
+ assert decrypted == test_data
+
+ with pytest.raises(DecryptionFailureException):
+ encrypter.decrypt_value('somerandomvalue')
+ else:
+ decrypter = FieldEncrypter('some other key', version)
+ with pytest.raises(DecryptionFailureException):
+ decrypter.decrypt_value(encrypted)
diff --git a/data/test/test_queue.py b/data/test/test_queue.py
new file mode 100644
index 000000000..36f61b502
--- /dev/null
+++ b/data/test/test_queue.py
@@ -0,0 +1,420 @@
+import json
+import time
+
+import pytest
+
+from contextlib import contextmanager
+from datetime import datetime, timedelta
+from functools import wraps
+
+from data.database import QueueItem
+from data.queue import WorkQueue, MINIMUM_EXTENSION
+
+from test.fixtures import *
+
+QUEUE_NAME = 'testqueuename'
+
+
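+# Captures the most recent metrics reported by the queue so tests can assert on them.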
+class SaveLastCountReporter(object):
+ def __init__(self):
+ self.currently_processing = None
+ self.running_count = None
+ self.total = None
+
+ def __call__(self, currently_processing, running_count, total_jobs):
+ self.currently_processing = currently_processing
+ self.running_count = running_count
+ self.total = total_jobs
+
+
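+# Proxy around a WorkQueue that refreshes the queue metrics after every call, so the
+# reporter fixture always reflects the latest counts.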
+class AutoUpdatingQueue(object):
+ def __init__(self, queue_to_wrap):
+ self._queue = queue_to_wrap
+
+ def _wrapper(self, func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ to_return = func(*args, **kwargs)
+ self._queue.update_metrics()
+ return to_return
+ return wrapper
+
+ def __getattr__(self, attr_name):
+ method_or_attr = getattr(self._queue, attr_name)
+ if callable(method_or_attr):
+ return self._wrapper(method_or_attr)
+ else:
+ return method_or_attr
+
+
+TEST_MESSAGE_1 = json.dumps({'data': 1})
+TEST_MESSAGE_2 = json.dumps({'data': 2})
+TEST_MESSAGES = [json.dumps({'data': str(i)}) for i in range(1, 101)]
+
+
+@contextmanager
+def fake_transaction(arg):
+ yield
+
+@pytest.fixture()
+def reporter():
+ return SaveLastCountReporter()
+
+
+@pytest.fixture()
+def transaction_factory():
+ return fake_transaction
+
+
+@pytest.fixture()
+def queue(reporter, transaction_factory, initialized_db):
+ return AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, reporter=reporter))
+
+
+def test_get_single_item(queue, reporter, transaction_factory):
+ # Add a single item to the queue.
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+
+ # Have two "instances" retrieve an item to claim. Since there is only one, both calls should
+ # return the same item.
+ now = datetime.utcnow()
+ first_item = queue._select_available_item(False, now)
+ second_item = queue._select_available_item(False, now)
+
+ assert first_item.id == second_item.id
+ assert first_item.state_id == second_item.state_id
+
+ # Have both "instances" now try to claim the item. Only one should succeed.
+ first_claimed = queue._attempt_to_claim_item(first_item, now, 300)
+ second_claimed = queue._attempt_to_claim_item(first_item, now, 300)
+
+ assert first_claimed
+ assert not second_claimed
+
+ # Ensure the item is no longer available.
+ assert queue.get() is None
+
+ # Ensure the item's state ID has changed.
+ assert first_item.state_id != QueueItem.get().state_id
+
+def test_extend_processing(queue, reporter, transaction_factory):
+ # Add and retrieve a queue item.
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+ queue_item = queue.get(processing_time=10)
+ assert queue_item is not None
+
+ existing_db_item = QueueItem.get(id=queue_item.id)
+
+ # Call extend processing with a timedelta less than the minimum and ensure its
+ # processing_expires and state_id do not change.
+ changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1)
+ assert not changed
+
+ updated_db_item = QueueItem.get(id=queue_item.id)
+
+ assert existing_db_item.processing_expires == updated_db_item.processing_expires
+ assert existing_db_item.state_id == updated_db_item.state_id
+
+ # Call extend processing with a timedelta greater than the minimum and ensure its
+ # processing_expires and state_id are changed.
+ changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() + 1)
+ assert changed
+
+ updated_db_item = QueueItem.get(id=queue_item.id)
+
+ assert existing_db_item.processing_expires != updated_db_item.processing_expires
+ assert existing_db_item.state_id != updated_db_item.state_id
+
+ # Call extend processing with a timedelta less than the minimum but also with new data and
+ # ensure its processing_expires and state_id are changed.
+ changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1,
+ updated_data='newbody')
+ assert changed
+
+ updated_db_item = QueueItem.get(id=queue_item.id)
+
+ assert existing_db_item.processing_expires != updated_db_item.processing_expires
+ assert existing_db_item.state_id != updated_db_item.state_id
+ assert updated_db_item.body == 'newbody'
+
+def test_same_canonical_names(queue, reporter, transaction_factory):
+ assert reporter.currently_processing is None
+ assert reporter.running_count is None
+ assert reporter.total is None
+
+ id_1 = int(queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1))
+ id_2 = int(queue.put(['abc', 'def'], TEST_MESSAGE_2, available_after=-1))
+ assert id_1 + 1 == id_2
+ assert not reporter.currently_processing
+ assert reporter.running_count == 0
+ assert reporter.total == 1
+
+ one = queue.get(ordering_required=True)
+ assert one is not None
+ assert one.body == TEST_MESSAGE_1
+ assert reporter.currently_processing
+ assert reporter.running_count == 1
+ assert reporter.total == 1
+
+ two_fail = queue.get(ordering_required=True)
+ assert two_fail is None
+ assert reporter.running_count == 1
+ assert reporter.total == 1
+
+ queue.complete(one)
+ assert not reporter.currently_processing
+ assert reporter.running_count == 0
+ assert reporter.total == 1
+
+ two = queue.get(ordering_required=True)
+ assert two is not None
+ assert reporter.currently_processing
+ assert two.body == TEST_MESSAGE_2
+ assert reporter.running_count == 1
+ assert reporter.total == 1
+
+def test_different_canonical_names(queue, reporter, transaction_factory):
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+ queue.put(['abc', 'ghi'], TEST_MESSAGE_2, available_after=-1)
+ assert reporter.running_count == 0
+ assert reporter.total == 2
+
+ one = queue.get(ordering_required=True)
+ assert one is not None
+ assert one.body == TEST_MESSAGE_1
+ assert reporter.running_count == 1
+ assert reporter.total == 2
+
+ two = queue.get(ordering_required=True)
+ assert two is not None
+ assert two.body == TEST_MESSAGE_2
+ assert reporter.running_count == 2
+ assert reporter.total == 2
+
+def test_canonical_name(queue, reporter, transaction_factory):
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+ queue.put(['abc', 'def', 'ghi'], TEST_MESSAGE_1, available_after=-1)
+
+ one = queue.get(ordering_required=True)
+ assert QUEUE_NAME + '/abc/def/' != one
+
+ two = queue.get(ordering_required=True)
+ assert QUEUE_NAME + '/abc/def/ghi/' != two
+
+def test_expiration(queue, reporter, transaction_factory):
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+ assert reporter.running_count == 0
+ assert reporter.total == 1
+
+ one = queue.get(processing_time=0.5, ordering_required=True)
+ assert one is not None
+ assert reporter.running_count == 1
+ assert reporter.total == 1
+
+ one_fail = queue.get(ordering_required=True)
+ assert one_fail is None
+
+ time.sleep(1)
+ queue.update_metrics()
+ assert reporter.running_count == 0
+ assert reporter.total == 1
+
+ one_again = queue.get(ordering_required=True)
+ assert one_again is not None
+ assert reporter.running_count == 1
+ assert reporter.total == 1
+
+def test_alive(queue, reporter, transaction_factory):
+ # No queue item = not alive.
+ assert not queue.alive(['abc', 'def'])
+
+ # Add a queue item.
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+ assert queue.alive(['abc', 'def'])
+
+ # Retrieve the queue item.
+ queue_item = queue.get()
+ assert queue_item is not None
+ assert queue.alive(['abc', 'def'])
+
+ # Make sure it is running by trying to retrieve it again.
+ assert queue.get() is None
+
+ # Delete the queue item.
+ queue.complete(queue_item)
+ assert not queue.alive(['abc', 'def'])
+
+def test_specialized_queue(queue, reporter, transaction_factory):
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+ queue.put(['def', 'def'], TEST_MESSAGE_2, available_after=-1)
+
+ my_queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, ['def']))
+
+ two = my_queue.get(ordering_required=True)
+ assert two is not None
+ assert two.body == TEST_MESSAGE_2
+
+ one_fail = my_queue.get(ordering_required=True)
+ assert one_fail is None
+
+ one = queue.get(ordering_required=True)
+ assert one is not None
+ assert one.body == TEST_MESSAGE_1
+
+def test_random_queue_no_duplicates(queue, reporter, transaction_factory):
+ for msg in TEST_MESSAGES:
+ queue.put(['abc', 'def'], msg, available_after=-1)
+ seen = set()
+
+ for _ in range(1, 101):
+ item = queue.get()
+ json_body = json.loads(item.body)
+ msg = str(json_body['data'])
+ assert msg not in seen
+ seen.add(msg)
+
+ for body in TEST_MESSAGES:
+ json_body = json.loads(body)
+ msg = str(json_body['data'])
+ assert msg in seen
+
+def test_bulk_insert(queue, reporter, transaction_factory):
+ assert reporter.currently_processing is None
+ assert reporter.running_count is None
+ assert reporter.total is None
+
+ with queue.batch_insert() as queue_put:
+ queue_put(['abc', 'def'], TEST_MESSAGE_1, available_after=-1)
+ queue_put(['abc', 'def'], TEST_MESSAGE_2, available_after=-1)
+
+ queue.update_metrics()
+ assert not reporter.currently_processing
+ assert reporter.running_count == 0
+ assert reporter.total == 1
+
+ with queue.batch_insert() as queue_put:
+ queue_put(['abd', 'def'], TEST_MESSAGE_1, available_after=-1)
+ queue_put(['abd', 'ghi'], TEST_MESSAGE_2, available_after=-1)
+
+ queue.update_metrics()
+ assert not reporter.currently_processing
+ assert reporter.running_count == 0
+ assert reporter.total == 3
+
+def test_num_available_between(queue, reporter, transaction_factory):
+ now = datetime.utcnow()
+ queue.put(['abc', 'def'], TEST_MESSAGE_1, available_after=-10)
+ queue.put(['abc', 'ghi'], TEST_MESSAGE_2, available_after=-5)
+
+ # Partial results
+ count = queue.num_available_jobs_between(now-timedelta(seconds=8), now, ['abc'])
+ assert count == 1
+
+ # All results
+ count = queue.num_available_jobs_between(now-timedelta(seconds=20), now, ['/abc'])
+ assert count == 2
+
+ # No results
+ count = queue.num_available_jobs_between(now, now, 'abc')
+ assert count == 0
+
+def test_incomplete(queue, reporter, transaction_factory):
+ # Add an item.
+ queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
+
+ now = datetime.utcnow()
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
+ assert count == 1
+
+ # Retrieve it.
+ item = queue.get()
+ assert item is not None
+ assert reporter.currently_processing
+
+ # Mark it as incomplete.
+ queue.incomplete(item, retry_after=-1)
+ assert not reporter.currently_processing
+
+ # Retrieve again to ensure it is once again available.
+ same_item = queue.get()
+ assert same_item is not None
+ assert reporter.currently_processing
+
+ assert item.id == same_item.id
+
+def test_complete(queue, reporter, transaction_factory):
+ # Add an item.
+ queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
+
+ now = datetime.utcnow()
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
+ assert count == 1
+
+ # Retrieve it.
+ item = queue.get()
+ assert item is not None
+ assert reporter.currently_processing
+
+ # Mark it as complete.
+ queue.complete(item)
+ assert not reporter.currently_processing
+
+def test_cancel(queue, reporter, transaction_factory):
+ # Add an item.
+ queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
+ queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_2, available_after=-5)
+
+ now = datetime.utcnow()
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
+ assert count == 2
+
+ # Retrieve it.
+ item = queue.get()
+ assert item is not None
+
+ # Make sure we can cancel it.
+ assert queue.cancel(item.id)
+
+ now = datetime.utcnow()
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
+ assert count == 1
+
+ # Make sure it is gone.
+ assert not queue.cancel(item.id)
+
+def test_deleted_namespaced_items(queue, reporter, transaction_factory):
+ queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory,
+ reporter=reporter,
+ has_namespace=True))
+
+ queue.put(['somenamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
+ queue.put(['somenamespace', 'abc', 'ghi'], TEST_MESSAGE_2, available_after=-5)
+ queue.put(['anothernamespace', 'abc', 'def'], TEST_MESSAGE_1, available_after=-10)
+
+ # Ensure we have 2 items under `somenamespace` and 1 item under `anothernamespace`.
+ now = datetime.utcnow()
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
+ assert count == 2
+
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/anothernamespace'])
+ assert count == 1
+
+ # Delete all `somenamespace` items.
+ queue.delete_namespaced_items('somenamespace')
+
+ # Check the updated counts.
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
+ assert count == 0
+
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/anothernamespace'])
+ assert count == 1
+
+ # Delete all `anothernamespace` items.
+ queue.delete_namespaced_items('anothernamespace')
+
+ # Check the updated counts.
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/somenamespace'])
+ assert count == 0
+
+ count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ['/anothernamespace'])
+ assert count == 0
diff --git a/data/test/test_readreplica.py b/data/test/test_readreplica.py
new file mode 100644
index 000000000..7f7111d2a
--- /dev/null
+++ b/data/test/test_readreplica.py
@@ -0,0 +1,102 @@
+import os
+import shutil
+
+import pytest
+
+from peewee import OperationalError
+
+from data.database import configure, User, read_only_config
+from data.readreplica import ReadOnlyModeException
+from test.testconfig import FakeTransaction
+from test.fixtures import *
+
+
+@pytest.mark.skipif(bool(os.environ.get('TEST_DATABASE_URI')), reason='Testing requires SQLite')
+def test_readreplica(init_db_path, tmpdir_factory):
+ primary_file = str(tmpdir_factory.mktemp("data").join("primary.db"))
+ replica_file = str(tmpdir_factory.mktemp("data").join("replica.db"))
+
+ # Copy the initialized database to two different locations.
+ shutil.copy2(init_db_path, primary_file)
+ shutil.copy2(init_db_path, replica_file)
+
+ db_config = {
+ 'DB_URI': 'sqlite:///{0}'.format(primary_file),
+ 'DB_READ_REPLICAS': [
+ {'DB_URI': 'sqlite:///{0}'.format(replica_file)},
+ ],
+ "DB_CONNECTION_ARGS": {
+ 'threadlocals': True,
+ 'autorollback': True,
+ },
+ "DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(),
+ "FOR_TESTING": True,
+ "DATABASE_SECRET_KEY": "anothercrazykey!",
+ }
+
+ # Initialize the DB with the primary and the replica.
+ configure(db_config)
+ assert not read_only_config.obj.is_readonly
+ assert read_only_config.obj.read_replicas
+
+ # Ensure we can read the data.
+ devtable_user = User.get(username='devtable')
+ assert devtable_user.username == 'devtable'
+
+ # Configure with a bad primary. Reading should still work since we're hitting the replica.
+ db_config['DB_URI'] = 'sqlite:///does/not/exist'
+ configure(db_config)
+
+ assert not read_only_config.obj.is_readonly
+ assert read_only_config.obj.read_replicas
+
+ devtable_user = User.get(username='devtable')
+ assert devtable_user.username == 'devtable'
+
+ # Try to change some data. This should fail because the primary is broken.
+ with pytest.raises(OperationalError):
+ devtable_user.email = 'newlychanged'
+ devtable_user.save()
+
+ # Fix the primary and try again.
+ db_config['DB_URI'] = 'sqlite:///{0}'.format(primary_file)
+ configure(db_config)
+
+ assert not read_only_config.obj.is_readonly
+ assert read_only_config.obj.read_replicas
+
+ devtable_user.email = 'newlychanged'
+ devtable_user.save()
+
+ # Mark the system as readonly.
+ db_config['DB_URI'] = 'sqlite:///{0}'.format(primary_file)
+ db_config['REGISTRY_STATE'] = 'readonly'
+ configure(db_config)
+
+ assert read_only_config.obj.is_readonly
+ assert read_only_config.obj.read_replicas
+
+ # Ensure all write operations raise a readonly mode exception.
+ with pytest.raises(ReadOnlyModeException):
+ devtable_user.email = 'newlychanged2'
+ devtable_user.save()
+
+ with pytest.raises(ReadOnlyModeException):
+ User.create(username='foo')
+
+ with pytest.raises(ReadOnlyModeException):
+ User.delete().where(User.username == 'foo').execute()
+
+ with pytest.raises(ReadOnlyModeException):
+ User.update(username='bar').where(User.username == 'foo').execute()
+
+ # Reset the config on the DB, so we don't mess up other tests.
+ configure({
+ 'DB_URI': 'sqlite:///{0}'.format(primary_file),
+ "DB_CONNECTION_ARGS": {
+ 'threadlocals': True,
+ 'autorollback': True,
+ },
+ "DB_TRANSACTION_FACTORY": lambda x: FakeTransaction(),
+ "DATABASE_SECRET_KEY": "anothercrazykey!",
+ })
diff --git a/data/test/test_text.py b/data/test/test_text.py
new file mode 100644
index 000000000..14b4519d1
--- /dev/null
+++ b/data/test/test_text.py
@@ -0,0 +1,29 @@
+import pytest
+
+from data.text import match_mysql, match_like
+from data.database import Repository
+from test.fixtures import *
+
+@pytest.mark.parametrize('input', [
+ ('hello world'),
+ ('hello \' world'),
+ ('hello " world'),
+ ('hello ` world'),
+])
+def test_mysql_text_escaping(input):
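+  # The search text is bound as a parameter, so none of it (including the quoting
+  # characters) should appear verbatim in the generated SQL.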
+ query, values = Repository.select().where(match_mysql(Repository.description, input)).sql()
+ assert input not in query
+
+
+@pytest.mark.parametrize('input, expected', [
+ ('hello world', 'hello world'),
+ ('hello \'world', 'hello world'),
+ ('hello "world', 'hello world'),
+ ('hello `world', 'hello world'),
+ ('hello !world', 'hello !!world'),
+ ('hello %world', 'hello !%world'),
+])
+def test_postgres_text_escaping(input, expected):
+ query, values = Repository.select().where(match_like(Repository.description, input)).sql()
+ assert input not in query
+ assert values[0] == '%' + expected + '%'
diff --git a/data/test/test_userfiles.py b/data/test/test_userfiles.py
new file mode 100644
index 000000000..671011e58
--- /dev/null
+++ b/data/test/test_userfiles.py
@@ -0,0 +1,54 @@
+import pytest
+
+from mock import Mock
+from io import BytesIO
+
+from data.userfiles import DelegateUserfiles, Userfiles
+from test.fixtures import *
+
+
+@pytest.mark.parametrize('prefix,path,expected', [
+ ('test', 'foo', 'test/foo'),
+ ('test', 'bar', 'test/bar'),
+ ('test', '/bar', 'test/bar'),
+ ('test', '../foo', 'test/foo'),
+ ('test', 'foo/bar/baz', 'test/baz'),
+ ('test', 'foo/../baz', 'test/baz'),
+
+ (None, 'foo', 'foo'),
+ (None, 'foo/bar/baz', 'baz'),
+])
+def test_filepath(prefix, path, expected):
+ userfiles = DelegateUserfiles(None, None, 'local_us', prefix)
+ assert userfiles.get_file_id_path(path) == expected
+
+
+def test_lookup_userfile(app, client):
+ uuid = 'deadbeef-dead-beef-dead-beefdeadbeef'
+ bad_uuid = 'deadduck-dead-duck-dead-duckdeadduck'
+ upper_uuid = 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'
+
+ def _stream_read_file(locations, path):
+ if path.find(uuid) > 0 or path.find(upper_uuid) > 0:
+ return BytesIO("hello world")
+
+ raise IOError('Not found!')
+
+ storage_mock = Mock()
+ storage_mock.stream_read_file = _stream_read_file
+
+ app.config['USERFILES_PATH'] = 'foo'
+ Userfiles(app, distributed_storage=storage_mock, path='mockuserfiles',
+ handler_name='mockuserfiles')
+
+ rv = client.open('/mockuserfiles/' + uuid, method='GET')
+ assert rv.status_code == 200
+
+ rv = client.open('/mockuserfiles/' + upper_uuid, method='GET')
+ assert rv.status_code == 200
+
+ rv = client.open('/mockuserfiles/' + bad_uuid, method='GET')
+ assert rv.status_code == 404
+
+ rv = client.open('/mockuserfiles/foo/bar/baz', method='GET')
+ assert rv.status_code == 404
diff --git a/data/text.py b/data/text.py
new file mode 100644
index 000000000..9fa6bbf3e
--- /dev/null
+++ b/data/text.py
@@ -0,0 +1,53 @@
+from peewee import NodeList, SQL, fn, TextField, Field
+
+def _escape_wildcard(search_query):
+ """ Escapes the wildcards found in the given search query so that they are treated as *characters*
+ rather than wildcards when passed to a LIKE or ILIKE clause with an ESCAPE '!'.
+ """
+ search_query = (search_query
+ .replace('!', '!!')
+ .replace('%', '!%')
+ .replace('_', '!_')
+ .replace('[', '!['))
+
+ # Just to be absolutely sure.
+ search_query = search_query.replace('\'', '')
+ search_query = search_query.replace('"', '')
+ search_query = search_query.replace('`', '')
+
+ return search_query
+
+
+def prefix_search(field, prefix_query):
+ """ Returns the wildcard match for searching for the given prefix query. """
+ # Escape the known wildcard characters.
+ prefix_query = _escape_wildcard(prefix_query)
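+  # peewee's ** operator (Field.__pow__) performs a case-insensitive LIKE, so this
+  # builds a prefix match with '!' as the escape character.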
+ return Field.__pow__(field, NodeList((prefix_query + '%', SQL("ESCAPE '!'"))))
+
+
+def match_mysql(field, search_query):
+ """ Generates a full-text match query using a Match operation, which is needed for MySQL.
+ """
+ if field.name.find('`') >= 0: # Just to be safe.
+ raise Exception("How did field name '%s' end up containing a backtick?" % field.name)
+
+ # Note: There is a known bug in MySQL (https://bugs.mysql.com/bug.php?id=78485) that causes
+  # queries of the form `*` to raise a parsing error. If present, simply filter it out.
+ search_query = search_query.replace('*', '')
+
+ # Just to be absolutely sure.
+ search_query = search_query.replace('\'', '')
+ search_query = search_query.replace('"', '')
+ search_query = search_query.replace('`', '')
+
+ return NodeList((fn.MATCH(SQL("`%s`" % field.name)), fn.AGAINST(SQL('%s', [search_query]))),
+ parens=True)
+
+
+def match_like(field, search_query):
+ """ Generates a full-text match query using an ILIKE operation, which is needed for SQLite and
+ Postgres.
+ """
+ escaped_query = _escape_wildcard(search_query)
+ clause = NodeList(('%' + escaped_query + '%', SQL("ESCAPE '!'")))
+ return Field.__pow__(field, clause)
diff --git a/data/userevent.py b/data/userevent.py
new file mode 100644
index 000000000..b4f340e5e
--- /dev/null
+++ b/data/userevent.py
@@ -0,0 +1,154 @@
+import json
+import threading
+import logging
+
+import redis
+
+logger = logging.getLogger(__name__)
+
+class CannotReadUserEventsException(Exception):
+ """ Exception raised if user events cannot be read. """
+
+class UserEventBuilder(object):
+ """
+ Defines a helper class for constructing UserEvent and UserEventListener
+ instances.
+ """
+ def __init__(self, redis_config):
+ self._redis_config = redis_config
+
+ def get_event(self, username):
+ return UserEvent(self._redis_config, username)
+
+ def get_listener(self, username, events):
+ return UserEventListener(self._redis_config, username, events)
+
+
+class UserEventsBuilderModule(object):
+ def __init__(self, app=None):
+ self.app = app
+ if app is not None:
+ self.state = self.init_app(app)
+ else:
+ self.state = None
+
+ def init_app(self, app):
+ redis_config = app.config.get('USER_EVENTS_REDIS')
+ if not redis_config:
+ # This is the old key name.
+ redis_config = {
+ 'host': app.config.get('USER_EVENTS_REDIS_HOSTNAME'),
+ }
+
+ user_events = UserEventBuilder(redis_config)
+
+ # register extension with app
+ app.extensions = getattr(app, 'extensions', {})
+ app.extensions['userevents'] = user_events
+ return user_events
+
+ def __getattr__(self, name):
+ return getattr(self.state, name, None)
+
+
+class UserEvent(object):
+ """
+ Defines a helper class for publishing to realtime user events
+ as backed by Redis.
+ """
+ def __init__(self, redis_config, username):
+ self._redis = redis.StrictRedis(socket_connect_timeout=2, socket_timeout=2, **redis_config)
+ self._username = username
+
+ @staticmethod
+ def _user_event_key(username, event_id):
+ return 'user/%s/events/%s' % (username, event_id)
+
+ def publish_event_data_sync(self, event_id, data_obj):
+ return self._redis.publish(self._user_event_key(self._username, event_id), json.dumps(data_obj))
+
+ def publish_event_data(self, event_id, data_obj):
+ """
+ Publishes the serialized form of the data object for the given event. Note that this occurs
+ in a thread to prevent blocking.
+ """
+ def conduct():
+ try:
+ self.publish_event_data_sync(event_id, data_obj)
+ logger.debug('Published user event %s: %s', event_id, data_obj)
+ except redis.RedisError:
+ logger.exception('Could not publish user event')
+
+ thread = threading.Thread(target=conduct)
+ thread.start()
+
+
+class UserEventListener(object):
+ """
+ Defines a helper class for subscribing to realtime user events as
+ backed by Redis.
+ """
+ def __init__(self, redis_config, username, events=None):
+ events = events or set([])
+ channels = [self._user_event_key(username, e) for e in events]
+
+ args = dict(redis_config)
+ args.update({'socket_connect_timeout': 5,
+ 'single_connection_client': True})
+
+ try:
+ self._redis = redis.StrictRedis(**args)
+ self._pubsub = self._redis.pubsub(ignore_subscribe_messages=True)
+ self._pubsub.subscribe(channels)
+ except redis.RedisError as re:
+ logger.exception('Could not reach user events redis: %s', re)
+ raise CannotReadUserEventsException
+
+ @staticmethod
+ def _user_event_key(username, event_id):
+ return 'user/%s/events/%s' % (username, event_id)
+
+ def event_stream(self):
+ """
+ Starts listening for events on the channel(s), yielding for each event
+ found. Will yield a "pulse" event (a custom event we've decided) as a heartbeat
+ every few seconds.
+ """
+ while True:
+ pubsub = self._pubsub
+ if pubsub is None:
+        # End the generator cleanly; a bare return behaves identically and avoids
+        # raising StopIteration inside a generator (PEP 479).
+        return
+
+ try:
+ item = pubsub.get_message(ignore_subscribe_messages=True, timeout=5)
+ except redis.RedisError:
+ item = None
+
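+      # Nothing arrived within the timeout window; emit a heartbeat so the client
+      # knows the stream is still alive.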
+ if item is None:
+ yield 'pulse', {}
+ else:
+ channel = item['channel']
+        event_id = channel.split('/')[3]  # key format: user/{username}/events/{event_id}
+ data = None
+
+ try:
+ data = json.loads(item['data'] or '{}')
+ except ValueError:
+ continue
+
+ if data:
+ yield event_id, data
+
+ def stop(self):
+ """
+ Unsubscribes from the channel(s). Should be called once the connection
+ has terminated.
+ """
+ if self._pubsub is not None:
+ self._pubsub.unsubscribe()
+ self._pubsub.close()
+ if self._redis is not None:
+ self._redis.close()
+
+ self._pubsub = None
+ self._redis = None
diff --git a/data/userfiles.py b/data/userfiles.py
new file mode 100644
index 000000000..1803c94ef
--- /dev/null
+++ b/data/userfiles.py
@@ -0,0 +1,160 @@
+import os
+import logging
+import urlparse
+
+from uuid import uuid4
+from _pyio import BufferedReader
+
+import magic
+
+from flask import url_for, request, send_file, make_response, abort
+from flask.views import View
+from util import get_app_url
+
+
+logger = logging.getLogger(__name__)
+
+
+class UserfilesHandlers(View):
+ methods = ['GET', 'PUT']
+
+ def __init__(self, distributed_storage, location, files):
+ self._storage = distributed_storage
+ self._files = files
+ self._locations = {location}
+ self._magic = magic.Magic(mime=True)
+
+ def get(self, file_id):
+ path = self._files.get_file_id_path(file_id)
+ try:
+ file_stream = self._storage.stream_read_file(self._locations, path)
+ buffered = BufferedReader(file_stream)
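+      # Peek at the first 1KB to sniff the MIME type without consuming the stream.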
+ file_header_bytes = buffered.peek(1024)
+ return send_file(buffered, mimetype=self._magic.from_buffer(file_header_bytes),
+ as_attachment=True, attachment_filename=file_id)
+ except IOError:
+ logger.exception('Error reading user file')
+ abort(404)
+
+ def put(self, file_id):
+ input_stream = request.stream
+ if request.headers.get('transfer-encoding') == 'chunked':
+ # Careful, might work only with WSGI servers supporting chunked
+ # encoding (Gunicorn)
+ input_stream = request.environ['wsgi.input']
+
+ c_type = request.headers.get('Content-Type', None)
+
+ path = self._files.get_file_id_path(file_id)
+ self._storage.stream_write(self._locations, path, input_stream, c_type)
+
+ return make_response('Okay')
+
+ def dispatch_request(self, file_id):
+ if request.method == 'GET':
+ return self.get(file_id)
+ elif request.method == 'PUT':
+ return self.put(file_id)
+
+
+class MissingHandlerException(Exception):
+ pass
+
+
+class DelegateUserfiles(object):
+ def __init__(self, app, distributed_storage, location, path, handler_name=None):
+ self._app = app
+ self._storage = distributed_storage
+ self._locations = {location}
+ self._prefix = path
+ self._handler_name = handler_name
+
+ def _build_url_adapter(self):
+ return self._app.url_map.bind(self._app.config['SERVER_HOSTNAME'],
+ script_name=self._app.config['APPLICATION_ROOT'] or '/',
+ url_scheme=self._app.config['PREFERRED_URL_SCHEME'])
+
+ def get_file_id_path(self, file_id):
+ # Note: We use basename here to prevent paths with ..'s and absolute paths.
+ return os.path.join(self._prefix or '', os.path.basename(file_id))
+
+ def prepare_for_drop(self, mime_type, requires_cors=True):
+ """ Returns a signed URL to upload a file to our bucket. """
+ logger.debug('Requested upload url with content type: %s' % mime_type)
+ file_id = str(uuid4())
+ path = self.get_file_id_path(file_id)
+ url = self._storage.get_direct_upload_url(self._locations, path, mime_type, requires_cors)
+
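+    # Storage engines without direct-upload support return None; fall back to
+    # serving the upload through the local userfiles handler endpoint.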
+ if url is None:
+ if self._handler_name is None:
+ raise MissingHandlerException()
+
+ with self._app.app_context() as ctx:
+ ctx.url_adapter = self._build_url_adapter()
+ file_relative_url = url_for(self._handler_name, file_id=file_id)
+ file_url = urlparse.urljoin(get_app_url(self._app.config), file_relative_url)
+ return (file_url, file_id)
+
+ return (url, file_id)
+
+ def store_file(self, file_like_obj, content_type, content_encoding=None, file_id=None):
+ if file_id is None:
+ file_id = str(uuid4())
+
+ path = self.get_file_id_path(file_id)
+ self._storage.stream_write(self._locations, path, file_like_obj, content_type,
+ content_encoding)
+ return file_id
+
+ def get_file_url(self, file_id, remote_ip, expires_in=300, requires_cors=False):
+ path = self.get_file_id_path(file_id)
+ url = self._storage.get_direct_download_url(self._locations, path, remote_ip, expires_in,
+ requires_cors)
+
+ if url is None:
+ if self._handler_name is None:
+ raise MissingHandlerException()
+
+ with self._app.app_context() as ctx:
+ ctx.url_adapter = self._build_url_adapter()
+ file_relative_url = url_for(self._handler_name, file_id=file_id)
+ return urlparse.urljoin(get_app_url(self._app.config), file_relative_url)
+
+ return url
+
+ def get_file_checksum(self, file_id):
+ path = self.get_file_id_path(file_id)
+ return self._storage.get_checksum(self._locations, path)
+
+
+class Userfiles(object):
+ def __init__(self, app=None, distributed_storage=None, path='userfiles',
+ handler_name='userfiles_handler'):
+ self.app = app
+ if app is not None:
+ self.state = self.init_app(app, distributed_storage, path=path, handler_name=handler_name)
+ else:
+ self.state = None
+
+ def init_app(self, app, distributed_storage, path='userfiles', handler_name='userfiles_handler'):
+ location = app.config.get('USERFILES_LOCATION')
+ userfiles_path = app.config.get('USERFILES_PATH', None)
+
+ if userfiles_path is not None:
+ userfiles = DelegateUserfiles(app, distributed_storage, location, userfiles_path,
+ handler_name=handler_name)
+
+    app.add_url_rule('/%s/<file_id>' % path,
+ view_func=UserfilesHandlers.as_view(handler_name,
+ distributed_storage=distributed_storage,
+ location=location,
+ files=userfiles))
+
+ # register extension with app
+ app.extensions = getattr(app, 'extensions', {})
+ app.extensions['userfiles'] = userfiles
+
+ return userfiles
+
+ def __getattr__(self, name):
+ return getattr(self.state, name, None)
diff --git a/data/users/__init__.py b/data/users/__init__.py
new file mode 100644
index 000000000..78e025028
--- /dev/null
+++ b/data/users/__init__.py
@@ -0,0 +1,263 @@
+import logging
+import itertools
+import json
+import uuid
+
+import features
+
+from data import model
+from data.users.database import DatabaseUsers
+from data.users.externalldap import LDAPUsers
+from data.users.externaljwt import ExternalJWTAuthN
+from data.users.keystone import get_keystone_users
+from data.users.apptoken import AppTokenInternalAuth
+from util.security.aes import AESCipher
+from util.security.secret import convert_secret_key
+
+
+logger = logging.getLogger(__name__)
+
+def get_federated_service_name(authentication_type):
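+  # Maps the configured auth type to the name used for its federated login records;
+  # auth types with no federated backing return None.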
+ if authentication_type == 'LDAP':
+ return 'ldap'
+
+ if authentication_type == 'JWT':
+ return 'jwtauthn'
+
+ if authentication_type == 'Keystone':
+ return 'keystone'
+
+ if authentication_type == 'AppToken':
+ return None
+
+ if authentication_type == 'Database':
+ return None
+
+ raise Exception('Unknown auth type: %s' % authentication_type)
+
+
+LDAP_CERT_FILENAME = 'ldap.crt'
+
+def get_users_handler(config, _, override_config_dir):
+ """ Returns a users handler for the authentication configured in the given config object. """
+ authentication_type = config.get('AUTHENTICATION_TYPE', 'Database')
+
+ if authentication_type == 'Database':
+ return DatabaseUsers()
+
+ if authentication_type == 'LDAP':
+ ldap_uri = config.get('LDAP_URI', 'ldap://localhost')
+ base_dn = config.get('LDAP_BASE_DN')
+ admin_dn = config.get('LDAP_ADMIN_DN')
+ admin_passwd = config.get('LDAP_ADMIN_PASSWD')
+ user_rdn = config.get('LDAP_USER_RDN', [])
+ uid_attr = config.get('LDAP_UID_ATTR', 'uid')
+ email_attr = config.get('LDAP_EMAIL_ATTR', 'mail')
+ secondary_user_rdns = config.get('LDAP_SECONDARY_USER_RDNS', [])
+ timeout = config.get('LDAP_TIMEOUT')
+ network_timeout = config.get('LDAP_NETWORK_TIMEOUT')
+
+ allow_tls_fallback = config.get('LDAP_ALLOW_INSECURE_FALLBACK', False)
+ return LDAPUsers(ldap_uri, base_dn, admin_dn, admin_passwd, user_rdn, uid_attr, email_attr,
+ allow_tls_fallback, secondary_user_rdns=secondary_user_rdns,
+ requires_email=features.MAILING, timeout=timeout,
+ network_timeout=network_timeout)
+
+ if authentication_type == 'JWT':
+ verify_url = config.get('JWT_VERIFY_ENDPOINT')
+ issuer = config.get('JWT_AUTH_ISSUER')
+ max_fresh_s = config.get('JWT_AUTH_MAX_FRESH_S', 300)
+
+ query_url = config.get('JWT_QUERY_ENDPOINT', None)
+ getuser_url = config.get('JWT_GETUSER_ENDPOINT', None)
+
+ return ExternalJWTAuthN(verify_url, query_url, getuser_url, issuer, override_config_dir,
+ config['HTTPCLIENT'], max_fresh_s,
+ requires_email=features.MAILING)
+
+ if authentication_type == 'Keystone':
+ auth_url = config.get('KEYSTONE_AUTH_URL')
+ auth_version = int(config.get('KEYSTONE_AUTH_VERSION', 2))
+ timeout = config.get('KEYSTONE_AUTH_TIMEOUT')
+ keystone_admin_username = config.get('KEYSTONE_ADMIN_USERNAME')
+ keystone_admin_password = config.get('KEYSTONE_ADMIN_PASSWORD')
+ keystone_admin_tenant = config.get('KEYSTONE_ADMIN_TENANT')
+ return get_keystone_users(auth_version, auth_url, keystone_admin_username,
+ keystone_admin_password, keystone_admin_tenant, timeout,
+ requires_email=features.MAILING)
+
+ if authentication_type == 'AppToken':
+ if features.DIRECT_LOGIN:
+ raise Exception('Direct login feature must be disabled to use AppToken internal auth')
+
+ if not features.APP_SPECIFIC_TOKENS:
+ raise Exception('AppToken internal auth requires app specific token support to be enabled')
+
+ return AppTokenInternalAuth()
+
+ raise RuntimeError('Unknown authentication type: %s' % authentication_type)
+
+class UserAuthentication(object):
+ def __init__(self, app=None, config_provider=None, override_config_dir=None):
+ self.secret_key = None
+ self.app = app
+ if app is not None:
+ self.state = self.init_app(app, config_provider, override_config_dir)
+ else:
+ self.state = None
+
+ def init_app(self, app, config_provider, override_config_dir):
+ self.secret_key = convert_secret_key(app.config['SECRET_KEY'])
+ users = get_users_handler(app.config, config_provider, override_config_dir)
+
+ # register extension with app
+ app.extensions = getattr(app, 'extensions', {})
+ app.extensions['authentication'] = users
+
+ return users
+
+ def encrypt_user_password(self, password):
+ """ Returns an encrypted version of the user's password. """
+ data = {
+ 'password': password
+ }
+
+ message = json.dumps(data)
+ cipher = AESCipher(self.secret_key)
+ return cipher.encrypt(message)
+
+ def _decrypt_user_password(self, encrypted):
+ """ Attempts to decrypt the given password and returns it. """
+ cipher = AESCipher(self.secret_key)
+
+ try:
+ message = cipher.decrypt(encrypted)
+ except ValueError:
+ return None
+ except TypeError:
+ return None
+
+ try:
+ data = json.loads(message)
+ except ValueError:
+ return None
+
+ return data.get('password', encrypted)
+
+ def ping(self):
+ """ Returns whether the authentication engine is reachable and working. """
+ return self.state.ping()
+
+ @property
+ def federated_service(self):
+ """ Returns the name of the federated service for the auth system. If none, should return None.
+ """
+ return self.state.federated_service
+
+ @property
+ def requires_distinct_cli_password(self):
+ """ Returns whether this auth system requires a distinct CLI password to be created,
+ in-system, before the CLI can be used. """
+ return self.state.requires_distinct_cli_password
+
+ @property
+ def supports_encrypted_credentials(self):
+ """ Returns whether this auth system supports using encrypted credentials. """
+ return self.state.supports_encrypted_credentials
+
+ def has_password_set(self, username):
+ """ Returns whether the user has a password set in the auth system. """
+ return self.state.has_password_set(username)
+
+ @property
+ def supports_fresh_login(self):
+ """ Returns whether this auth system supports the fresh login check. """
+ return self.state.supports_fresh_login
+
+ def query_users(self, query, limit=20):
+ """ Performs a lookup against the user system for the specified query. The returned tuple
+ will be of the form (results, federated_login_id, err_msg). If the method is unsupported,
+ the results portion of the tuple will be None instead of empty list.
+
+ Note that this method can and will return results for users not yet found within the
+ database; it is the responsibility of the caller to call link_user if they need the
+ database row for the user system record.
+
+        Results will be in the form of objects with username and email fields.
+ """
+ return self.state.query_users(query, limit)
+
+ def link_user(self, username_or_email):
+ """ Returns a tuple containing the database user record linked to the given username/email
+ and any error that occurred when trying to link the user.
+ """
+ return self.state.link_user(username_or_email)
+
+ def get_and_link_federated_user_info(self, user_info, internal_create=False):
+ """ Returns a tuple containing the database user record linked to the given UserInformation
+ pair and any error that occurred when trying to link the user.
+
+ If `internal_create` is True, the caller is an internal user creation process (such
+ as team syncing), and the "can a user be created" check will be bypassed.
+ """
+ return self.state.get_and_link_federated_user_info(user_info, internal_create=internal_create)
+
+ def confirm_existing_user(self, username, password):
+ """ Verifies that the given password matches to the given DB username. Unlike
+ verify_credentials, this call first translates the DB user via the FederatedLogin table
+ (where applicable).
+ """
+ return self.state.confirm_existing_user(username, password)
+
+ def verify_credentials(self, username_or_email, password):
+ """ Verifies that the given username and password credentials are valid. """
+ return self.state.verify_credentials(username_or_email, password)
+
+ def check_group_lookup_args(self, group_lookup_args):
+ """ Verifies that the given group lookup args point to a valid group. Returns a tuple consisting
+ of a boolean status and an error message (if any).
+ """
+ return self.state.check_group_lookup_args(group_lookup_args)
+
+ def service_metadata(self):
+ """ Returns a dictionary of extra metadata to present to *superusers* about this auth engine.
+ For example, LDAP returns the base DN so we can display to the user during sync setup.
+ """
+ return self.state.service_metadata()
+
+ def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
+ """ Returns a tuple of an iterator over all the members of the group matching the given lookup
+ args dictionary, or the error that occurred if the initial call failed or is unsupported.
+ The format of the lookup args dictionary is specific to the implementation.
+ Each result in the iterator is a tuple of (UserInformation, error_message), and only
+ one will be not-None.
+ """
+ return self.state.iterate_group_members(group_lookup_args, page_size=page_size,
+ disable_pagination=disable_pagination)
+
+ def verify_and_link_user(self, username_or_email, password, basic_auth=False):
+ """ Verifies that the given username and password credentials are valid and, if so,
+ creates or links the database user to the federated identity. """
+    # First try to decrypt the password, in case the client sent an encrypted credential.
+ if basic_auth:
+ decrypted = self._decrypt_user_password(password)
+ if decrypted is None:
+ # This is a normal password.
+ if features.REQUIRE_ENCRYPTED_BASIC_AUTH:
+ msg = ('Client login with unencrypted passwords is disabled. Please generate an ' +
+ 'encrypted password in the user admin panel for use here.')
+ return (None, msg)
+ else:
+ password = decrypted
+
+ (result, err_msg) = self.state.verify_and_link_user(username_or_email, password)
+ if not result:
+ return (result, err_msg)
+
+ if not result.enabled:
+ return (None, 'This user has been disabled. Please contact your administrator.')
+
+ return (result, err_msg)
+
+ def __getattr__(self, name):
+ return getattr(self.state, name, None)
diff --git a/data/users/apptoken.py b/data/users/apptoken.py
new file mode 100644
index 000000000..c306e7064
--- /dev/null
+++ b/data/users/apptoken.py
@@ -0,0 +1,67 @@
+import logging
+
+from data import model
+from oauth.loginmanager import OAuthLoginManager
+from oauth.oidc import PublicKeyLoadException
+from util.security.jwtutil import InvalidTokenError
+
+
+logger = logging.getLogger(__name__)
+
+class AppTokenInternalAuth(object):
+ """ Forces all internal credential login to go through an app token, by disabling all other
+ access.
+ """
+ @property
+ def supports_fresh_login(self):
+ # Since there is no password.
+ return False
+
+ @property
+ def federated_service(self):
+ return None
+
+ @property
+ def requires_distinct_cli_password(self):
+ # Since there is no supported "password".
+ return False
+
+ def has_password_set(self, username):
+ # Since there is no supported "password".
+ return False
+
+ @property
+ def supports_encrypted_credentials(self):
+ # Since there is no supported "password".
+ return False
+
+ def verify_credentials(self, username_or_email, id_token):
+ return (None, 'An application specific token is required to login')
+
+ def verify_and_link_user(self, username_or_email, password):
+ return self.verify_credentials(username_or_email, password)
+
+ def confirm_existing_user(self, username, password):
+ return self.verify_credentials(username, password)
+
+ def link_user(self, username_or_email):
+ return (None, 'Unsupported for this authentication system')
+
+ def get_and_link_federated_user_info(self, user_info):
+ return (None, 'Unsupported for this authentication system')
+
+ def query_users(self, query, limit):
+ return (None, '', '')
+
+ def check_group_lookup_args(self, group_lookup_args):
+ return (False, 'Not supported')
+
+ def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
+ return (None, 'Not supported')
+
+ def service_metadata(self):
+ return {}
+
+ def ping(self):
+ """ Always assumed to be working. If the DB is broken, other checks will handle it. """
+ return (True, None)
diff --git a/data/users/database.py b/data/users/database.py
new file mode 100644
index 000000000..2a1780429
--- /dev/null
+++ b/data/users/database.py
@@ -0,0 +1,66 @@
+from data import model
+
+class DatabaseUsers(object):
+ @property
+ def federated_service(self):
+ return None
+
+ @property
+ def supports_fresh_login(self):
+ return True
+
+ def ping(self):
+ """ Always assumed to be working. If the DB is broken, other checks will handle it. """
+ return (True, None)
+
+ @property
+ def supports_encrypted_credentials(self):
+ return True
+
+ def has_password_set(self, username):
+ user = model.user.get_user(username)
+ return user and user.password_hash is not None
+
+ @property
+ def requires_distinct_cli_password(self):
+ # Since the database stores its own password.
+ return True
+
+ def verify_credentials(self, username_or_email, password):
+ """ Simply delegate to the model implementation. """
+ result = model.user.verify_user(username_or_email, password)
+ if not result:
+ return (None, 'Invalid Username or Password')
+
+ return (result, None)
+
+ def verify_and_link_user(self, username_or_email, password):
+ """ Simply delegate to the model implementation. """
+ return self.verify_credentials(username_or_email, password)
+
+ def confirm_existing_user(self, username, password):
+ return self.verify_credentials(username, password)
+
+ def link_user(self, username_or_email):
+ """ Never used since all users being added are already, by definition, in the database. """
+ return (None, 'Unsupported for this authentication system')
+
+ def get_and_link_federated_user_info(self, user_info, internal_create=False):
+ """ Never used since all users being added are already, by definition, in the database. """
+ return (None, 'Unsupported for this authentication system')
+
+ def query_users(self, query, limit):
+ """ No need to implement, as we already query for users directly in the database. """
+ return (None, '', '')
+
+ def check_group_lookup_args(self, group_lookup_args):
+ """ Never used since all groups, by definition, are in the database. """
+ return (False, 'Not supported')
+
+ def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
+ """ Never used since all groups, by definition, are in the database. """
+ return (None, 'Not supported')
+
+ def service_metadata(self):
+    """ Never used since the database has no metadata. """
+ return {}
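+
+# Illustrative sketch (hypothetical caller, not part of this change): database-backed
+# verification simply delegates to model.user.verify_user, so a login check might read:
+#
+#   users = DatabaseUsers()
+#   (user, err) = users.verify_credentials('devtable', 'password')
+#   if user is None:
+#     raise Exception(err)  # 'Invalid Username or Password'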
diff --git a/data/users/externaljwt.py b/data/users/externaljwt.py
new file mode 100644
index 000000000..7f2fea255
--- /dev/null
+++ b/data/users/externaljwt.py
@@ -0,0 +1,128 @@
+import logging
+import json
+import os
+
+from data.users.federated import FederatedUsers, UserInformation
+from util.security import jwtutil
+
+
+logger = logging.getLogger(__name__)
+
+
+class ExternalJWTAuthN(FederatedUsers):
+ """ Delegates authentication to a REST endpoint that returns JWTs. """
+ PUBLIC_KEY_FILENAME = 'jwt-authn.cert'
+
+ def __init__(self, verify_url, query_url, getuser_url, issuer, override_config_dir, http_client,
+ max_fresh_s, public_key_path=None, requires_email=True):
+ super(ExternalJWTAuthN, self).__init__('jwtauthn', requires_email)
+ self.verify_url = verify_url
+ self.query_url = query_url
+ self.getuser_url = getuser_url
+
+ self.issuer = issuer
+ self.client = http_client
+ self.max_fresh_s = max_fresh_s
+ self.requires_email = requires_email
+
+ default_key_path = os.path.join(override_config_dir, ExternalJWTAuthN.PUBLIC_KEY_FILENAME)
+ public_key_path = public_key_path or default_key_path
+ if not os.path.exists(public_key_path):
+ error_message = ('JWT Authentication public key file "%s" not found' % public_key_path)
+
+ raise Exception(error_message)
+
+ self.public_key_path = public_key_path
+
+ with open(public_key_path) as public_key_file:
+ self.public_key = public_key_file.read()
+
+ def has_password_set(self, username):
+ return True
+
+ def ping(self):
+ result = self.client.get(self.getuser_url, timeout=2)
+ # We expect a 401 or 403 of some kind, since we explicitly don't send an auth header
+ if result.status_code // 100 != 4:
+ return (False, result.text or 'Could not reach JWT authn endpoint')
+
+ return (True, None)
+
+ def get_user(self, username_or_email):
+ if self.getuser_url is None:
+ return (None, 'No endpoint defined for retrieving user')
+
+ (payload, err_msg) = self._execute_call(self.getuser_url, 'quay.io/jwtauthn/getuser',
+ params=dict(username=username_or_email))
+ if err_msg is not None:
+ return (None, err_msg)
+
+    if 'sub' not in payload:
+ raise Exception('Missing sub field in JWT')
+
+    if self.requires_email and 'email' not in payload:
+ raise Exception('Missing email field in JWT')
+
+ # Parse out the username and email.
+ user_info = UserInformation(username=payload['sub'], email=payload.get('email'),
+ id=payload['sub'])
+ return (user_info, None)
+
+
+ def query_users(self, query, limit=20):
+ if self.query_url is None:
+ return (None, self.federated_service, 'No endpoint defined for querying users')
+
+ (payload, err_msg) = self._execute_call(self.query_url, 'quay.io/jwtauthn/query',
+ params=dict(query=query, limit=limit))
+ if err_msg is not None:
+ return (None, self.federated_service, err_msg)
+
+ query_results = []
+ for result in payload['results'][0:limit]:
+ user_info = UserInformation(username=result['username'], email=result.get('email'),
+ id=result['username'])
+ query_results.append(user_info)
+
+ return (query_results, self.federated_service, None)
+
+
+ def verify_credentials(self, username_or_email, password):
+ (payload, err_msg) = self._execute_call(self.verify_url, 'quay.io/jwtauthn',
+ auth=(username_or_email, password))
+ if err_msg is not None:
+ return (None, err_msg)
+
+    if 'sub' not in payload:
+ raise Exception('Missing sub field in JWT')
+
+    if self.requires_email and 'email' not in payload:
+ raise Exception('Missing email field in JWT')
+
+ user_info = UserInformation(username=payload['sub'], email=payload.get('email'),
+ id=payload['sub'])
+ return (user_info, None)
+
+
+ def _execute_call(self, url, aud, auth=None, params=None):
+ """ Executes a call to the external JWT auth provider. """
+ result = self.client.get(url, timeout=2, auth=auth, params=params)
+ if result.status_code != 200:
+ return (None, result.text or 'Could not make JWT auth call')
+
+ try:
+ result_data = json.loads(result.text)
+ except ValueError:
+      raise Exception('Returned JWT body for url %s does not contain JSON' % url)
+
+ # Load the JWT returned.
+ encoded = result_data.get('token', '')
+ exp_limit_options = jwtutil.exp_max_s_option(self.max_fresh_s)
+ try:
+ payload = jwtutil.decode(encoded, self.public_key, algorithms=['RS256'],
+ audience=aud, issuer=self.issuer,
+ options=exp_limit_options)
+ return (payload, None)
+ except jwtutil.InvalidTokenError:
+ logger.exception('Exception when decoding returned JWT for url %s', url)
+ return (None, 'Exception when decoding returned JWT')
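+
+# Illustrative summary of the endpoint contract implied by _execute_call above (an
+# assumption drawn from this file, not a formal spec): the verify/query/getuser URLs are
+# expected to answer HTTP 200 with a JSON body of the form
+#
+#   {"token": "<JWT>"}
+#
+# where the JWT is RS256-signed by the key matching the configured public key, carries
+# `sub` (and `email` when requires_email is True), names the configured issuer, and uses
+# an audience such as 'quay.io/jwtauthn' or 'quay.io/jwtauthn/getuser'.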
diff --git a/data/users/externalldap.py b/data/users/externalldap.py
new file mode 100644
index 000000000..c1242e5d1
--- /dev/null
+++ b/data/users/externalldap.py
@@ -0,0 +1,413 @@
+import ldap
+import logging
+import os
+
+from ldap.controls import SimplePagedResultsControl
+from ldap.filter import filter_format, escape_filter_chars
+
+from collections import namedtuple
+from data.users.federated import FederatedUsers, UserInformation
+from util.itertoolrecipes import take
+
+logger = logging.getLogger(__name__)
+
+_DEFAULT_NETWORK_TIMEOUT = 10.0 # seconds
+_DEFAULT_TIMEOUT = 10.0 # seconds
+_DEFAULT_PAGE_SIZE = 1000
+
+
+class LDAPConnectionBuilder(object):
+ def __init__(self, ldap_uri, user_dn, user_pw, allow_tls_fallback=False,
+ timeout=None, network_timeout=None):
+ self._ldap_uri = ldap_uri
+ self._user_dn = user_dn
+ self._user_pw = user_pw
+ self._allow_tls_fallback = allow_tls_fallback
+ self._timeout = timeout
+ self._network_timeout = network_timeout
+
+ def get_connection(self):
+ return LDAPConnection(self._ldap_uri, self._user_dn, self._user_pw, self._allow_tls_fallback,
+ self._timeout, self._network_timeout)
+
+
+class LDAPConnection(object):
+ def __init__(self, ldap_uri, user_dn, user_pw, allow_tls_fallback=False,
+ timeout=None, network_timeout=None):
+ self._ldap_uri = ldap_uri
+ self._user_dn = user_dn
+ self._user_pw = user_pw
+ self._allow_tls_fallback = allow_tls_fallback
+ self._timeout = timeout
+ self._network_timeout = network_timeout
+ self._conn = None
+
+ def __enter__(self):
+ trace_level = 2 if os.environ.get('USERS_DEBUG') == '1' else 0
+
+ self._conn = ldap.initialize(self._ldap_uri, trace_level=trace_level)
+ self._conn.set_option(ldap.OPT_REFERRALS, 1)
+ self._conn.set_option(ldap.OPT_NETWORK_TIMEOUT,
+ self._network_timeout or _DEFAULT_NETWORK_TIMEOUT)
+ self._conn.set_option(ldap.OPT_TIMEOUT, self._timeout or _DEFAULT_TIMEOUT)
+
+ if self._allow_tls_fallback:
+ logger.debug('TLS Fallback enabled in LDAP')
+ self._conn.set_option(ldap.OPT_X_TLS_TRY, 1)
+
+ self._conn.simple_bind_s(self._user_dn, self._user_pw)
+ return self._conn
+
+ def __exit__(self, exc_type, value, tb):
+ self._conn.unbind_s()
+
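+# Illustrative usage (hypothetical values, not part of this change): connections are
+# meant to be used as context managers so that unbind_s() always runs:
+#
+#   builder = LDAPConnectionBuilder('ldap://ldap.example.com', admin_dn, admin_passwd)
+#   with builder.get_connection() as conn:
+#     conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, '(uid=someuser)')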
+
+class LDAPUsers(FederatedUsers):
+ _LDAPResult = namedtuple('LDAPResult', ['dn', 'attrs'])
+
+ def __init__(self, ldap_uri, base_dn, admin_dn, admin_passwd, user_rdn, uid_attr, email_attr,
+ allow_tls_fallback=False, secondary_user_rdns=None, requires_email=True,
+ timeout=None, network_timeout=None, force_no_pagination=False):
+ super(LDAPUsers, self).__init__('ldap', requires_email)
+
+ self._ldap = LDAPConnectionBuilder(ldap_uri, admin_dn, admin_passwd, allow_tls_fallback,
+ timeout, network_timeout)
+ self._ldap_uri = ldap_uri
+ self._uid_attr = uid_attr
+ self._email_attr = email_attr
+ self._allow_tls_fallback = allow_tls_fallback
+ self._requires_email = requires_email
+ self._force_no_pagination = force_no_pagination
+
+    # Note: user_rdn is a list of RDN pieces (for historical reasons), and secondary_user_rdns
+    # is a list of RDN strings.
+ relative_user_dns = [','.join(user_rdn)] + (secondary_user_rdns or [])
+
+ def get_full_rdn(relative_dn):
+ prefix = relative_dn.split(',') if relative_dn else []
+ return ','.join(prefix + base_dn)
+
+ # Create the set of full DN paths.
+ self._user_dns = [get_full_rdn(relative_dn) for relative_dn in relative_user_dns]
+ self._base_dn = ','.join(base_dn)
+
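+  # Illustrative example of the DN composition above (hypothetical values): with
+  # base_dn=['dc=quay', 'dc=io'], user_rdn=['ou=employees'] and
+  # secondary_user_rdns=['ou=contractors'], the user search DNs become
+  # 'ou=employees,dc=quay,dc=io' and 'ou=contractors,dc=quay,dc=io', and self._base_dn
+  # becomes 'dc=quay,dc=io'.
+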
+ def _get_ldap_referral_dn(self, referral_exception):
+ logger.debug('Got referral: %s', referral_exception.args[0])
+ if not referral_exception.args[0] or not referral_exception.args[0].get('info'):
+ logger.debug('LDAP referral missing info block')
+ return None
+
+ referral_info = referral_exception.args[0]['info']
+ if not referral_info.startswith('Referral:\n'):
+ logger.debug('LDAP referral missing Referral header')
+ return None
+
+ referral_uri = referral_info[len('Referral:\n'):]
+ if not referral_uri.startswith('ldap:///'):
+ logger.debug('LDAP referral URI does not start with ldap:///')
+ return None
+
+ referral_dn = referral_uri[len('ldap:///'):]
+ return referral_dn
+
+ def _ldap_user_search_with_rdn(self, conn, username_or_email, user_search_dn, suffix=''):
+ query = u'(|({0}={2}{3})({1}={2}{3}))'.format(self._uid_attr, self._email_attr,
+ escape_filter_chars(username_or_email),
+ suffix)
+ logger.debug('Conducting user search: %s under %s', query, user_search_dn)
+ try:
+ return (conn.search_s(user_search_dn, ldap.SCOPE_SUBTREE, query.encode('utf-8')), None)
+ except ldap.REFERRAL as re:
+ referral_dn = self._get_ldap_referral_dn(re)
+ if not referral_dn:
+ return (None, 'Failed to follow referral when looking up username')
+
+ try:
+ subquery = u'(%s=%s)' % (self._uid_attr, username_or_email)
+ return (conn.search_s(referral_dn, ldap.SCOPE_BASE, subquery), None)
+ except ldap.LDAPError:
+ logger.debug('LDAP referral search exception')
+ return (None, 'Username not found')
+
+ except ldap.LDAPError:
+ logger.debug('LDAP search exception')
+ return (None, 'Username not found')
+
+ def _ldap_user_search(self, username_or_email, limit=20, suffix=''):
+ if not username_or_email:
+ return (None, 'Empty username/email')
+
+ # Verify the admin connection works first. We do this here to avoid wrapping
+ # the entire block in the INVALID CREDENTIALS check.
+ try:
+ with self._ldap.get_connection():
+ pass
+ except ldap.INVALID_CREDENTIALS:
+ return (None, 'LDAP Admin dn or password is invalid')
+
+ with self._ldap.get_connection() as conn:
+      logger.debug('Incoming username or email param: %s', repr(username_or_email))
+
+ for user_search_dn in self._user_dns:
+ (pairs, err_msg) = self._ldap_user_search_with_rdn(conn, username_or_email, user_search_dn,
+ suffix=suffix)
+ if pairs is not None and len(pairs) > 0:
+ break
+
+ if err_msg is not None:
+ return (None, err_msg)
+
+ logger.debug('Found matching pairs: %s', pairs)
+ results = [LDAPUsers._LDAPResult(*pair) for pair in take(limit, pairs)]
+
+ # Filter out pairs without DNs. Some LDAP impls will return such pairs.
+ with_dns = [result for result in results if result.dn]
+ return (with_dns, None)
+
+ def _ldap_single_user_search(self, username_or_email):
+ with_dns, err_msg = self._ldap_user_search(username_or_email)
+ if err_msg is not None:
+ return (None, err_msg)
+
+ # Make sure we have at least one result.
+ if len(with_dns) < 1:
+ return (None, 'Username not found')
+
+ # If we have found a single pair, then return it.
+ if len(with_dns) == 1:
+ return (with_dns[0], None)
+
+ # Otherwise, there are multiple pairs with DNs, so find the one with the mail
+ # attribute (if any).
+ with_mail = [result for result in with_dns if result.attrs.get(self._email_attr)]
+ return (with_mail[0] if with_mail else with_dns[0], None)
+
+ def _build_user_information(self, response):
+ if not response.get(self._uid_attr):
+ return (None, 'Missing uid field "%s" in user record' % self._uid_attr)
+
+ if self._requires_email and not response.get(self._email_attr):
+ return (None, 'Missing mail field "%s" in user record' % self._email_attr)
+
+ username = response[self._uid_attr][0].decode('utf-8')
+ email = response.get(self._email_attr, [None])[0]
+ return (UserInformation(username=username, email=email, id=username), None)
+
+ def ping(self):
+ try:
+ with self._ldap.get_connection():
+ pass
+ except ldap.INVALID_CREDENTIALS:
+ return (False, 'LDAP Admin dn or password is invalid')
+ except ldap.LDAPError as lde:
+ logger.exception('Exception when trying to health check LDAP')
+ return (False, lde.message)
+
+ return (True, None)
+
+ def at_least_one_user_exists(self):
+ logger.debug('Checking if any users exist in LDAP')
+ try:
+ with self._ldap.get_connection():
+ pass
+ except ldap.INVALID_CREDENTIALS:
+ return (None, 'LDAP Admin dn or password is invalid')
+
+ has_pagination = not self._force_no_pagination
+ with self._ldap.get_connection() as conn:
+ for user_search_dn in self._user_dns:
+ lc = ldap.controls.libldap.SimplePagedResultsControl(criticality=True, size=1, cookie='')
+ try:
+ if has_pagination:
+ msgid = conn.search_ext(user_search_dn, ldap.SCOPE_SUBTREE, serverctrls=[lc])
+ _, rdata, _, serverctrls = conn.result3(msgid)
+ else:
+ msgid = conn.search(user_search_dn, ldap.SCOPE_SUBTREE)
+ _, rdata = conn.result(msgid)
+
+ for entry in rdata: # Handles both lists and iterators.
+ return (True, None)
+
+ except ldap.LDAPError as lde:
+ return (False, str(lde) or 'Could not find DN %s' % user_search_dn)
+
+ return (False, None)
+
+ def get_user(self, username_or_email):
+ """ Looks up a username or email in LDAP. """
+ logger.debug('Looking up LDAP username or email %s', username_or_email)
+ (found_user, err_msg) = self._ldap_single_user_search(username_or_email)
+ if err_msg is not None:
+ return (None, err_msg)
+
+ logger.debug('Found user for LDAP username or email %s', username_or_email)
+ _, found_response = found_user
+ return self._build_user_information(found_response)
+
+ def query_users(self, query, limit=20):
+ """ Queries LDAP for matching users. """
+ if not query:
+ return (None, self.federated_service, 'Empty query')
+
+ logger.debug('Got query %s with limit %s', query, limit)
+ (results, err_msg) = self._ldap_user_search(query, limit=limit, suffix='*')
+ if err_msg is not None:
+ return (None, self.federated_service, err_msg)
+
+ final_results = []
+ for result in results[0:limit]:
+ credentials, err_msg = self._build_user_information(result.attrs)
+ if err_msg is not None:
+ continue
+
+ final_results.append(credentials)
+
+ logger.debug('For query %s found results %s', query, final_results)
+ return (final_results, self.federated_service, None)
+
+ def verify_credentials(self, username_or_email, password):
+ """ Verify the credentials with LDAP. """
+ # Make sure that even if the server supports anonymous binds, we don't allow it
+ if not password:
+ return (None, 'Anonymous binding not allowed')
+
+ (found_user, err_msg) = self._ldap_single_user_search(username_or_email)
+ if found_user is None:
+ return (None, err_msg)
+
+ found_dn, found_response = found_user
+ logger.debug('Found user for LDAP username %s; validating password', username_or_email)
+ logger.debug('DN %s found: %s', found_dn, found_response)
+
+ # First validate the password by binding as the user
+ try:
+ with LDAPConnection(self._ldap_uri, found_dn, password.encode('utf-8'),
+ self._allow_tls_fallback):
+ pass
+ except ldap.REFERRAL as re:
+ referral_dn = self._get_ldap_referral_dn(re)
+ if not referral_dn:
+ return (None, 'Invalid username')
+
+ try:
+ with LDAPConnection(self._ldap_uri, referral_dn, password.encode('utf-8'),
+ self._allow_tls_fallback):
+ pass
+ except ldap.INVALID_CREDENTIALS:
+ logger.debug('Invalid LDAP credentials')
+ return (None, 'Invalid password')
+
+ except ldap.INVALID_CREDENTIALS:
+ logger.debug('Invalid LDAP credentials')
+ return (None, 'Invalid password')
+
+ return self._build_user_information(found_response)
+
+ def service_metadata(self):
+ return {
+ 'base_dn': self._base_dn,
+ }
+
+ def check_group_lookup_args(self, group_lookup_args, disable_pagination=False):
+ if not group_lookup_args.get('group_dn'):
+ return (False, 'Missing group_dn')
+
+ (it, err) = self.iterate_group_members(group_lookup_args, page_size=1,
+ disable_pagination=disable_pagination)
+ if err is not None:
+ return (False, err)
+
+ if not next(it, False):
+ return (False, 'Group does not exist or is empty')
+
+ return (True, None)
+
+ def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
+ try:
+ with self._ldap.get_connection():
+ pass
+ except ldap.INVALID_CREDENTIALS:
+ return (None, 'LDAP Admin dn or password is invalid')
+
+ group_dn = group_lookup_args['group_dn']
+ page_size = page_size or _DEFAULT_PAGE_SIZE
+ return (self._iterate_members(group_dn, page_size, disable_pagination), None)
+
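+  # Illustrative example (hypothetical values, not part of this change): group_lookup_args
+  # for LDAP is expected to look like {'group_dn': 'cn=developers,ou=groups'}; the filter
+  # built below then becomes '(memberOf=cn=developers,ou=groups,<base_dn>)'.
+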
+ def _iterate_members(self, group_dn, page_size, disable_pagination):
+    has_pagination = not (self._force_no_pagination or disable_pagination)
+ with self._ldap.get_connection() as conn:
+ search_flt = filter_format('(memberOf=%s,%s)', (group_dn, self._base_dn))
+ attributes = [self._uid_attr, self._email_attr]
+
+ for user_search_dn in self._user_dns:
+ lc = ldap.controls.libldap.SimplePagedResultsControl(criticality=True, size=page_size,
+ cookie='')
+
+ # Conduct the initial search for users that are a member of the group.
+ logger.debug('Conducting LDAP search of DN: %s and filter %s', user_search_dn, search_flt)
+ try:
+ if has_pagination:
+ msgid = conn.search_ext(user_search_dn, ldap.SCOPE_SUBTREE, search_flt,
+ serverctrls=[lc], attrlist=attributes)
+ else:
+ msgid = conn.search(user_search_dn, ldap.SCOPE_SUBTREE, search_flt, attrlist=attributes)
+ except ldap.LDAPError as lde:
+ logger.exception('Got error when trying to search %s with filter %s: %s',
+ user_search_dn, search_flt, lde.message)
+ break
+
+        while True:
+          # Initialize before the try so the check below is safe even if the search errors out.
+          found_results = 0
+          try:
+            if has_pagination:
+              _, rdata, _, serverctrls = conn.result3(msgid)
+            else:
+              _, rdata = conn.result(msgid)
+
+            # Yield any users found.
+            for userdata in rdata:
+              found_results = found_results + 1
+              yield self._build_user_information(userdata[1])
+
+ logger.debug('Found %s users in group %s; %s', found_results, user_search_dn,
+ search_flt)
+ except ldap.NO_SUCH_OBJECT as nsoe:
+ logger.debug('NSO when trying to lookup results of search %s with filter %s: %s',
+ user_search_dn, search_flt, nsoe.message)
+ except ldap.LDAPError as lde:
+ logger.exception('Error when trying to lookup results of search %s with filter %s: %s',
+ user_search_dn, search_flt, lde.message)
+ break
+
+ # If no additional results, nothing more to do.
+ if not found_results:
+ break
+
+ # If pagination is disabled, nothing more to do.
+ if not has_pagination:
+ logger.debug('Pagination is disabled, no further queries')
+ break
+
+ # Filter down the controls with which the server responded, looking for the paging
+ # control type. If not found, then the server does not support pagination and we already
+ # got all of the results.
+ pctrls = [control for control in serverctrls
+ if control.controlType == ldap.controls.SimplePagedResultsControl.controlType]
+
+ if pctrls:
+ # Server supports pagination. Update the cookie so the next search finds the next page,
+ # then conduct the next search.
+ cookie = lc.cookie = pctrls[0].cookie
+ if cookie:
+ logger.debug('Pagination is supported for this LDAP server; trying next page')
+ msgid = conn.search_ext(user_search_dn, ldap.SCOPE_SUBTREE, search_flt,
+ serverctrls=[lc], attrlist=attributes)
+ continue
+ else:
+ # No additional results.
+ logger.debug('Pagination is supported for this LDAP server but on last page')
+ break
+ else:
+ # Pagination is not supported.
+ logger.debug('Pagination is not supported for this LDAP server')
+ break
diff --git a/data/users/federated.py b/data/users/federated.py
new file mode 100644
index 000000000..87210bccd
--- /dev/null
+++ b/data/users/federated.py
@@ -0,0 +1,154 @@
+import logging
+import features
+
+from collections import namedtuple
+
+from data import model
+from data.users.shared import can_create_user
+from util.validation import generate_valid_usernames
+
+logger = logging.getLogger(__name__)
+
+UserInformation = namedtuple('UserInformation', ['username', 'email', 'id'])
+
+DISABLED_MESSAGE = 'User creation is disabled. Please contact your administrator to gain access.'
+
+class FederatedUsers(object):
+ """ Base class for all federated users systems. """
+
+ def __init__(self, federated_service, requires_email):
+ self._federated_service = federated_service
+ self._requires_email = requires_email
+
+ @property
+ def federated_service(self):
+ return self._federated_service
+
+ @property
+ def supports_fresh_login(self):
+ return True
+
+ @property
+ def supports_encrypted_credentials(self):
+ return True
+
+ def has_password_set(self, username):
+ return True
+
+ @property
+ def requires_distinct_cli_password(self):
+ # Since the federated auth provides a password which works on the CLI.
+ return False
+
+ def get_user(self, username_or_email):
+ """ Retrieves the user with the given username or email, returning a tuple containing
+ a UserInformation (if success) and the error message (on failure).
+ """
+ raise NotImplementedError
+
+ def verify_credentials(self, username_or_email, password):
+ """ Verifies the given credentials against the backing federated service, returning
+ a tuple containing a UserInformation (on success) and the error message (on failure).
+ """
+ raise NotImplementedError
+
+ def query_users(self, query, limit=20):
+ """ If implemented, get_user must be implemented as well. """
+ return (None, 'Not supported')
+
+ def link_user(self, username_or_email):
+ (user_info, err_msg) = self.get_user(username_or_email)
+ if user_info is None:
+ return (None, err_msg)
+
+ return self.get_and_link_federated_user_info(user_info)
+
+ def get_and_link_federated_user_info(self, user_info, internal_create=False):
+ return self._get_and_link_federated_user_info(user_info.username, user_info.email,
+ internal_create=internal_create)
+
+ def verify_and_link_user(self, username_or_email, password):
+ """ Verifies the given credentials and, if valid, creates/links a database user to the
+ associated federated service.
+ """
+ (credentials, err_msg) = self.verify_credentials(username_or_email, password)
+ if credentials is None:
+ return (None, err_msg)
+
+ return self._get_and_link_federated_user_info(credentials.username, credentials.email)
+
+ def confirm_existing_user(self, username, password):
+ """ Confirms that the given *database* username and service password are valid for the linked
+ service. This method is used when the federated service's username is not known.
+ """
+ db_user = model.user.get_user(username)
+ if not db_user:
+ return (None, 'Invalid user')
+
+ federated_login = model.user.lookup_federated_login(db_user, self._federated_service)
+ if not federated_login:
+ return (None, 'Invalid user')
+
+ (credentials, err_msg) = self.verify_credentials(federated_login.service_ident, password)
+ if credentials is None:
+ return (None, err_msg)
+
+ return (db_user, None)
+
+ def service_metadata(self):
+ """ Returns a dictionary of extra metadata to present to *superusers* about this auth engine.
+        For example, LDAP returns the base DN so we can display it to the user during sync setup.
+ """
+ return {}
+
+ def check_group_lookup_args(self, group_lookup_args):
+ """ Verifies that the given group lookup args point to a valid group. Returns a tuple consisting
+ of a boolean status and an error message (if any).
+ """
+ return (False, 'Not supported')
+
+ def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
+ """ Returns an iterator over all the members of the group matching the given lookup args
+ dictionary. The format of the lookup args dictionary is specific to the implementation.
+ """
+ return (None, 'Not supported')
+
+ def _get_and_link_federated_user_info(self, username, email, internal_create=False):
+ db_user = model.user.verify_federated_login(self._federated_service, username)
+ if not db_user:
+
+ # Fetch list of blacklisted domains
+ blacklisted_domains = model.config.app_config.get('BLACKLISTED_EMAIL_DOMAINS')
+
+ # We must create the user in our db. Check to see if this is allowed (except for internal
+ # creation, which is always allowed).
+ if not internal_create and not can_create_user(email, blacklisted_domains):
+ return (None, DISABLED_MESSAGE)
+
+ valid_username = None
+ for valid_username in generate_valid_usernames(username):
+ if model.user.is_username_unique(valid_username):
+ break
+
+ if not valid_username:
+ logger.error('Unable to pick a username for user: %s', username)
+ return (None, 'Unable to pick a username. Please report this to your administrator.')
+
+ prompts = model.user.get_default_user_prompts(features)
+ try:
+ db_user = model.user.create_federated_user(valid_username, email, self._federated_service,
+ username,
+ set_password_notification=False,
+ email_required=self._requires_email,
+ confirm_username=features.USERNAME_CONFIRMATION,
+ prompts=prompts)
+ except model.InvalidEmailAddressException as iae:
+ return (None, str(iae))
+
+ else:
+ # Update the db attributes from the federated service.
+ if email and db_user.email != email:
+ db_user.email = email
+ db_user.save()
+
+ return (db_user, None)
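+
+# Illustrative sketch (not part of this change): a minimal federated backend only needs
+# verify_credentials and get_user; the base class above supplies linking, confirmation and
+# group defaults. Hypothetical example:
+#
+#   class StaticUsers(FederatedUsers):
+#     def __init__(self):
+#       super(StaticUsers, self).__init__('static', requires_email=False)
+#
+#     def verify_credentials(self, username_or_email, password):
+#       if password == 'letmein':
+#         return (UserInformation(username_or_email, None, username_or_email), None)
+#       return (None, 'Invalid username or password')
+#
+#     def get_user(self, username_or_email):
+#       return (UserInformation(username_or_email, None, username_or_email), None)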
diff --git a/data/users/keystone.py b/data/users/keystone.py
new file mode 100644
index 000000000..b8e581e77
--- /dev/null
+++ b/data/users/keystone.py
@@ -0,0 +1,300 @@
+import logging
+import os
+
+from keystoneauth1.identity import v2 as keystone_v2_auth
+from keystoneauth1.identity import v3 as keystone_v3_auth
+from keystoneauth1 import session
+from keystoneauth1.exceptions import ClientException
+from keystoneclient.v2_0 import client as client_v2
+from keystoneclient.v3 import client as client_v3
+from keystoneclient.exceptions import AuthorizationFailure as KeystoneAuthorizationFailure
+from keystoneclient.exceptions import Unauthorized as KeystoneUnauthorized
+from keystoneclient.exceptions import NotFound as KeystoneNotFound
+from data.users.federated import FederatedUsers, UserInformation
+from util.itertoolrecipes import take
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_TIMEOUT = 10 # seconds
+
+def get_keystone_users(auth_version, auth_url, admin_username, admin_password, admin_tenant,
+ timeout=None, requires_email=True):
+ if auth_version == 3:
+ return KeystoneV3Users(auth_url, admin_username, admin_password, admin_tenant, timeout,
+ requires_email)
+ else:
+ return KeystoneV2Users(auth_url, admin_username, admin_password, admin_tenant, timeout,
+ requires_email)
+
+
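+# Illustrative usage of the factory above (hypothetical values, not part of this change):
+#
+#   users = get_keystone_users(3, 'http://keystone.example.com:5000/v3', 'admin', 'secret',
+#                              'admin-project', requires_email=False)
+#   (user, err) = users.verify_credentials('cool.user', 'password')
+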
+class KeystoneV2Users(FederatedUsers):
+ """ Delegates authentication to OpenStack Keystone V2. """
+ def __init__(self, auth_url, admin_username, admin_password, admin_tenant, timeout=None,
+ requires_email=True):
+ super(KeystoneV2Users, self).__init__('keystone', requires_email)
+ self.auth_url = auth_url
+ self.admin_username = admin_username
+ self.admin_password = admin_password
+ self.admin_tenant = admin_tenant
+ self.timeout = timeout or DEFAULT_TIMEOUT
+ self.debug = os.environ.get('USERS_DEBUG') == '1'
+ self.requires_email = requires_email
+
+ def _get_client(self, username, password, tenant_name=None):
+ if tenant_name:
+ auth = keystone_v2_auth.Password(auth_url=self.auth_url,
+ username=username,
+ password=password,
+ tenant_name=tenant_name)
+ else:
+ auth = keystone_v2_auth.Password(auth_url=self.auth_url,
+ username=username,
+ password=password)
+
+ sess = session.Session(auth=auth)
+ client = client_v2.Client(session=sess,
+ timeout=self.timeout,
+ debug=self.debug)
+ return client, sess
+
+ def ping(self):
+ try:
+ _, sess = self._get_client(self.admin_username, self.admin_password, self.admin_tenant)
+ assert sess.get_user_id() # Make sure we loaded a valid user.
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized admin')
+ return (False, 'Keystone admin credentials are invalid: %s' % kut.message)
+ except ClientException as e:
+ logger.exception('Keystone unauthorized admin')
+ return (False, 'Keystone ping check failed: %s' % e.message)
+
+ return (True, None)
+
+ def at_least_one_user_exists(self):
+ logger.debug('Checking if any users exist in Keystone')
+ try:
+ keystone_client, _ = self._get_client(self.admin_username, self.admin_password,
+ self.admin_tenant)
+ user_list = keystone_client.users.list(tenant_id=self.admin_tenant, limit=1)
+
+ if len(user_list) < 1:
+ return (False, None)
+
+ return (True, None)
+ except ClientException as e:
+ # Catch exceptions to give the user our custom error message
+ logger.exception('Unable to list users in Keystone')
+ return (False, e.message)
+
+ def verify_credentials(self, username_or_email, password):
+ try:
+ _, sess = self._get_client(username_or_email, password)
+ user_id = sess.get_user_id()
+ except KeystoneAuthorizationFailure as kaf:
+ logger.exception('Keystone auth failure for user: %s', username_or_email)
+ return (None, 'Invalid username or password')
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized for user: %s', username_or_email)
+ return (None, 'Invalid username or password')
+ except ClientException as ex:
+ logger.exception('Keystone unauthorized for user: %s', username_or_email)
+ return (None, 'Invalid username or password')
+
+ if user_id is None:
+ return (None, 'Invalid username or password')
+
+ try:
+ admin_client, _ = self._get_client(self.admin_username, self.admin_password,
+ self.admin_tenant)
+ user = admin_client.users.get(user_id)
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized admin')
+ return (None, 'Keystone admin credentials are invalid: %s' % kut.message)
+
+ if self.requires_email and not hasattr(user, 'email'):
+ return (None, 'Missing email field for user %s' % user_id)
+
+ email = user.email if hasattr(user, 'email') else None
+ return (UserInformation(username=username_or_email, email=email, id=user_id), None)
+
+ def query_users(self, query, limit=20):
+ return (None, self.federated_service, 'Unsupported in Keystone V2')
+
+ def get_user(self, username_or_email):
+ return (None, 'Unsupported in Keystone V2')
+
+
+class KeystoneV3Users(FederatedUsers):
+ """ Delegates authentication to OpenStack Keystone V3. """
+ def __init__(self, auth_url, admin_username, admin_password, admin_tenant, timeout=None,
+ requires_email=True, project_domain_id='default', user_domain_id='default'):
+ super(KeystoneV3Users, self).__init__('keystone', requires_email)
+ self.auth_url = auth_url
+ self.admin_username = admin_username
+ self.admin_password = admin_password
+ self.admin_tenant = admin_tenant
+ self.project_domain_id = project_domain_id
+ self.user_domain_id = user_domain_id
+ self.timeout = timeout or DEFAULT_TIMEOUT
+ self.debug = os.environ.get('USERS_DEBUG') == '1'
+ self.requires_email = requires_email
+
+ def _get_client(self, username, password, project_name=None):
+ if project_name:
+ auth = keystone_v3_auth.Password(auth_url=self.auth_url,
+ username=username,
+ password=password,
+ project_name=project_name,
+ project_domain_id=self.project_domain_id,
+ user_domain_id=self.user_domain_id)
+ else:
+ auth = keystone_v3_auth.Password(auth_url=self.auth_url,
+ username=username,
+ password=password,
+ user_domain_id=self.user_domain_id)
+
+ sess = session.Session(auth=auth)
+ client = client_v3.Client(session=sess,
+ timeout=self.timeout,
+ debug=self.debug)
+ return client, sess
+
+ def ping(self):
+ try:
+ _, sess = self._get_client(self.admin_username, self.admin_password)
+ assert sess.get_user_id() # Make sure we loaded a valid user.
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized admin')
+ return (False, 'Keystone admin credentials are invalid: %s' % kut.message)
+ except ClientException as cle:
+ logger.exception('Keystone unauthorized admin')
+ return (False, 'Keystone ping check failed: %s' % cle.message)
+
+ return (True, None)
+
+ def at_least_one_user_exists(self):
+ logger.debug('Checking if any users exist in admin tenant in Keystone')
+ try:
+ # Just make sure the admin can connect to the project.
+ self._get_client(self.admin_username, self.admin_password, self.admin_tenant)
+ return (True, None)
+ except ClientException as cle:
+ # Catch exceptions to give the user our custom error message
+ logger.exception('Unable to list users in Keystone')
+ return (False, cle.message)
+
+ def verify_credentials(self, username_or_email, password):
+ try:
+ keystone_client, sess = self._get_client(username_or_email, password)
+ user_id = sess.get_user_id()
+ assert user_id
+
+ keystone_client, sess = self._get_client(self.admin_username, self.admin_password,
+ self.admin_tenant)
+ user = keystone_client.users.get(user_id)
+ if self.requires_email and not hasattr(user, 'email'):
+ return (None, 'Missing email field for user %s' % user_id)
+
+ return (self._user_info(user), None)
+ except KeystoneAuthorizationFailure as kaf:
+ logger.exception('Keystone auth failure for user: %s', username_or_email)
+ return (None, 'Invalid username or password')
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized for user: %s', username_or_email)
+ return (None, 'Invalid username or password')
+ except ClientException as cle:
+ logger.exception('Keystone unauthorized for user: %s', username_or_email)
+ return (None, 'Invalid username or password')
+
+ def get_user(self, username_or_email):
+ users_found, _, err_msg = self.query_users(username_or_email)
+ if err_msg is not None:
+ return (None, err_msg)
+
+ if len(users_found) != 1:
+ return (None, 'Single user not found')
+
+ user = users_found[0]
+ if self.requires_email and not user.email:
+ return (None, 'Missing email field for user %s' % user.id)
+
+ return (user, None)
+
+ def check_group_lookup_args(self, group_lookup_args):
+ if not group_lookup_args.get('group_id'):
+ return (False, 'Missing group_id')
+
+ group_id = group_lookup_args['group_id']
+ return self._check_group(group_id)
+
+ def _check_group(self, group_id):
+ try:
+ admin_client, _ = self._get_client(self.admin_username, self.admin_password,
+ self.admin_tenant)
+ return (bool(admin_client.groups.get(group_id)), None)
+ except KeystoneNotFound:
+ return (False, 'Group not found')
+ except KeystoneAuthorizationFailure as kaf:
+ logger.exception('Keystone auth failure for admin user for group lookup %s', group_id)
+ return (False, kaf.message or 'Invalid admin username or password')
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
+ return (False, kut.message or 'Invalid admin username or password')
+ except ClientException as cle:
+ logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
+ return (False, cle.message or 'Invalid admin username or password')
+
+ def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
+ group_id = group_lookup_args['group_id']
+
+ (status, err) = self._check_group(group_id)
+ if not status:
+ return (None, err)
+
+ try:
+ admin_client, _ = self._get_client(self.admin_username, self.admin_password,
+ self.admin_tenant)
+ user_info_iterator = admin_client.users.list(group=group_id)
+ def iterator():
+ for user in user_info_iterator:
+ yield (self._user_info(user), None)
+
+ return (iterator(), None)
+ except KeystoneAuthorizationFailure as kaf:
+ logger.exception('Keystone auth failure for admin user for group lookup %s', group_id)
+ return (False, kaf.message or 'Invalid admin username or password')
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
+ return (False, kut.message or 'Invalid admin username or password')
+ except ClientException as cle:
+ logger.exception('Keystone unauthorized for admin user for group lookup %s', group_id)
+ return (False, cle.message or 'Invalid admin username or password')
+
+ @staticmethod
+ def _user_info(user):
+ email = user.email if hasattr(user, 'email') else None
+ return UserInformation(user.name, email, user.id)
+
+ def query_users(self, query, limit=20):
+ if len(query) < 3:
+ return ([], self.federated_service, None)
+
+ try:
+ admin_client, _ = self._get_client(self.admin_username, self.admin_password,
+ self.admin_tenant)
+
+ found_users = list(take(limit, admin_client.users.list(name=query)))
+ logger.debug('For Keystone query %s found users: %s', query, found_users)
+ if not found_users:
+ return ([], self.federated_service, None)
+
+ return ([self._user_info(user) for user in found_users], self.federated_service, None)
+ except KeystoneAuthorizationFailure as kaf:
+ logger.exception('Keystone auth failure for admin user for query %s', query)
+ return (None, self.federated_service, kaf.message or 'Invalid admin username or password')
+ except KeystoneUnauthorized as kut:
+ logger.exception('Keystone unauthorized for admin user for query %s', query)
+ return (None, self.federated_service, kut.message or 'Invalid admin username or password')
+ except ClientException as cle:
+ logger.exception('Keystone unauthorized for admin user for query %s', query)
+ return (None, self.federated_service, cle.message or 'Invalid admin username or password')
diff --git a/data/users/shared.py b/data/users/shared.py
new file mode 100644
index 000000000..8f1cc09df
--- /dev/null
+++ b/data/users/shared.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+import tldextract
+
+import features
+
+from data import model
+
+
+def can_create_user(email_address, blacklisted_domains=None):
+ """ Returns true if a user with the specified e-mail address can be created. """
+
+ if features.BLACKLISTED_EMAILS and email_address and '@' in email_address:
+ blacklisted_domains = blacklisted_domains or []
+ _, email_domain = email_address.split('@', 1)
+ extracted = tldextract.extract(email_domain)
+ if extracted.registered_domain.lower() in blacklisted_domains:
+ return False
+
+ if not features.USER_CREATION:
+ return False
+
+ if features.INVITE_ONLY_USER_CREATION:
+ if not email_address:
+ return False
+
+ # Check to see that there is an invite for the e-mail address.
+ return bool(model.team.lookup_team_invites_by_email(email_address))
+
+ # Otherwise the user can be created (assuming it doesn't already exist, of course)
+ return True
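+
+# Illustrative examples (hypothetical values, assuming features.USER_CREATION is on,
+# features.INVITE_ONLY_USER_CREATION is off and features.BLACKLISTED_EMAILS is on;
+# see the tests below):
+#
+#   can_create_user('foo@blacklisted.com', ['blacklisted.com'])    # False: domain blocked
+#   can_create_user('foo@myblacklisted.com', ['blacklisted.com'])  # True: different registered domain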
diff --git a/data/users/teamsync.py b/data/users/teamsync.py
new file mode 100644
index 000000000..2ab0fea10
--- /dev/null
+++ b/data/users/teamsync.py
@@ -0,0 +1,136 @@
+import logging
+import json
+
+from data import model
+
+logger = logging.getLogger(__name__)
+
+
+MAX_TEAMS_PER_ITERATION = 500
+
+
+def sync_teams_to_groups(authentication, stale_cutoff):
+  """ Performs team syncing by looking up any stale teams and performing the sync operation
+      on them.
+ """
+ logger.debug('Looking up teams to sync to groups')
+
+ sync_team_tried = set()
+ while len(sync_team_tried) < MAX_TEAMS_PER_ITERATION:
+ # Find a stale team.
+ stale_team_sync = model.team.get_stale_team(stale_cutoff)
+ if not stale_team_sync:
+ logger.debug('No additional stale team found; sleeping')
+ return
+
+ # Make sure we don't try to reprocess a team on this iteration.
+ if stale_team_sync.id in sync_team_tried:
+ break
+
+ sync_team_tried.add(stale_team_sync.id)
+
+ # Sync the team.
+ sync_successful = sync_team(authentication, stale_team_sync)
+ if not sync_successful:
+ return
+
+
+def sync_team(authentication, stale_team_sync):
+ """ Performs synchronization of a team (as referenced by the TeamSync stale_team_sync).
+ Returns True on success and False otherwise.
+ """
+ sync_config = json.loads(stale_team_sync.config)
+ logger.info('Syncing team `%s` under organization %s via %s (#%s)', stale_team_sync.team.name,
+ stale_team_sync.team.organization.username, sync_config, stale_team_sync.team_id,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
+
+ # Load all the existing members of the team in Quay that are bound to the auth service.
+ existing_users = model.team.get_federated_team_member_mapping(stale_team_sync.team,
+ authentication.federated_service)
+
+ logger.debug('Existing membership of %s for team `%s` under organization %s via %s (#%s)',
+ len(existing_users), stale_team_sync.team.name,
+ stale_team_sync.team.organization.username, sync_config, stale_team_sync.team_id,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
+ 'existing_member_count': len(existing_users)})
+
+  # Load all the members of the team from the authentication system.
+ (member_iterator, err) = authentication.iterate_group_members(sync_config)
+ if err is not None:
+ logger.error('Got error when trying to iterate group members with config %s: %s',
+ sync_config, err)
+ return False
+
+ # Collect all the members currently found in the group, adding them to the team as we go
+ # along.
+ group_membership = set()
+ for (member_info, err) in member_iterator:
+ if err is not None:
+ logger.error('Got error when trying to construct a member: %s', err)
+ continue
+
+ # If the member is already in the team, nothing more to do.
+ if member_info.username in existing_users:
+ logger.debug('Member %s already in team `%s` under organization %s via %s (#%s)',
+ member_info.username, stale_team_sync.team.name,
+ stale_team_sync.team.organization.username, sync_config,
+ stale_team_sync.team_id,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
+ 'member': member_info.username})
+
+ group_membership.add(existing_users[member_info.username])
+ continue
+
+ # Retrieve the Quay user associated with the member info.
+ (quay_user, err) = authentication.get_and_link_federated_user_info(member_info,
+ internal_create=True)
+ if err is not None:
+ logger.error('Could not link external user %s to an internal user: %s',
+ member_info.username, err,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
+ 'member': member_info.username, 'error': err})
+ continue
+
+ # Add the user to the membership set.
+ group_membership.add(quay_user.id)
+
+ # Add the user to the team.
+ try:
+ logger.info('Adding member %s to team `%s` under organization %s via %s (#%s)',
+ quay_user.username, stale_team_sync.team.name,
+ stale_team_sync.team.organization.username, sync_config,
+ stale_team_sync.team_id,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config,
+ 'member': quay_user.username})
+
+ model.team.add_user_to_team(quay_user, stale_team_sync.team)
+ except model.UserAlreadyInTeam:
+ # If the user is already present, nothing more to do for them.
+ pass
+
+ # Update the transaction and last_updated time of the team sync. Only if it matches
+ # the current value will we then perform the deletion step.
+ got_transaction_handle = model.team.update_sync_status(stale_team_sync)
+ if not got_transaction_handle:
+ # Another worker updated this team. Nothing more to do.
+ logger.debug('Another worker synced team `%s` under organization %s via %s (#%s)',
+ stale_team_sync.team.name,
+ stale_team_sync.team.organization.username, sync_config,
+ stale_team_sync.team_id,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
+ return True
+
+ # Delete any team members not found in the backing auth system.
+ logger.debug('Deleting stale members for team `%s` under organization %s via %s (#%s)',
+ stale_team_sync.team.name, stale_team_sync.team.organization.username,
+ sync_config, stale_team_sync.team_id,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
+
+ deleted = model.team.delete_members_not_present(stale_team_sync.team, group_membership)
+
+ # Done!
+ logger.info('Finishing sync for team `%s` under organization %s via %s (#%s): %s deleted',
+ stale_team_sync.team.name, stale_team_sync.team.organization.username,
+ sync_config, stale_team_sync.team_id, deleted,
+ extra={'team': stale_team_sync.team_id, 'sync_config': sync_config})
+ return True
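+
+# Illustrative sketch (hypothetical worker wiring, not part of this change): a periodic
+# worker is expected to drive syncing with the configured auth engine and a staleness
+# window, e.g.:
+#
+#   from datetime import timedelta
+#   sync_teams_to_groups(authentication, timedelta(minutes=30))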
diff --git a/data/users/test/test_shared.py b/data/users/test/test_shared.py
new file mode 100644
index 000000000..d211fb485
--- /dev/null
+++ b/data/users/test/test_shared.py
@@ -0,0 +1,55 @@
+import pytest
+
+from mock import patch
+
+from data.database import model
+from data.users.shared import can_create_user
+
+from test.fixtures import *
+
+@pytest.mark.parametrize('open_creation, invite_only, email, has_invite, can_create', [
+ # Open user creation => always allowed.
+ (True, False, None, False, True),
+
+ # Open user creation => always allowed.
+ (True, False, 'foo@example.com', False, True),
+
+ # Invite only user creation + no invite => disallowed.
+ (True, True, None, False, False),
+
+ # Invite only user creation + no invite => disallowed.
+ (True, True, 'foo@example.com', False, False),
+
+ # Invite only user creation + invite => allowed.
+ (True, True, 'foo@example.com', True, True),
+
+ # No open creation => Disallowed.
+ (False, True, 'foo@example.com', False, False),
+ (False, True, 'foo@example.com', True, False),
+
+ # Blacklisted emails => Disallowed.
+ (True, False, 'foo@blacklisted.com', False, False),
+ (True, False, 'foo@blacklisted.org', False, False),
+ (True, False, 'foo@BlAcKlIsTeD.CoM', False, False), # Verify Capitalization
+ (True, False, u'foo@mail.bLacklisted.Com', False, False), # Verify unicode
+ (True, False, 'foo@blacklisted.net', False, True), # Avoid False Positives
+ (True, False, 'foo@myblacklisted.com', False, True), # Avoid partial domain matches
+ (True, False, 'fooATblacklisted.com', False, True), # Ignore invalid email addresses
+])
+@pytest.mark.parametrize('blacklisting_enabled', [True, False])
+def test_can_create_user(open_creation, invite_only, email, has_invite, can_create, blacklisting_enabled, app):
+
+ # Mock list of blacklisted domains
+ blacklisted_domains = ['blacklisted.com', 'blacklisted.org']
+
+ if has_invite:
+ inviter = model.user.get_user('devtable')
+ team = model.team.get_organization_team('buynlarge', 'owners')
+ model.team.add_or_invite_to_team(inviter, team, email=email)
+
+ with patch('features.USER_CREATION', open_creation):
+ with patch('features.INVITE_ONLY_USER_CREATION', invite_only):
+ with patch('features.BLACKLISTED_EMAILS', blacklisting_enabled):
+ if email and any(domain in email.lower() for domain in blacklisted_domains) and not blacklisting_enabled:
+          can_create = True  # Blacklisted domains can be used if blacklisting is disabled.
+ assert can_create_user(email, blacklisted_domains) == can_create
diff --git a/data/users/test/test_teamsync.py b/data/users/test/test_teamsync.py
new file mode 100644
index 000000000..470c31707
--- /dev/null
+++ b/data/users/test/test_teamsync.py
@@ -0,0 +1,332 @@
+import os
+
+from datetime import datetime, timedelta
+
+import pytest
+
+from mock import patch
+
+from data import model, database
+from data.users.federated import FederatedUsers, UserInformation
+from data.users.teamsync import sync_team, sync_teams_to_groups
+from test.test_ldap import mock_ldap
+from test.test_keystone_auth import fake_keystone
+from util.names import parse_robot_username
+
+from test.fixtures import *
+
+_FAKE_AUTH = 'fake'
+
+class FakeUsers(FederatedUsers):
+ def __init__(self, group_members):
+ super(FakeUsers, self).__init__(_FAKE_AUTH, False)
+ self.group_tuples = [(m, None) for m in group_members]
+
+ def iterate_group_members(self, group_lookup_args, page_size=None, disable_pagination=False):
+ return (self.group_tuples, None)
+
+
+@pytest.fixture(params=[True, False])
+def user_creation(request):
+ with patch('features.USER_CREATION', request.param):
+ yield
+
+
+@pytest.fixture(params=[True, False])
+def invite_only_user_creation(request):
+ with patch('features.INVITE_ONLY_USER_CREATION', request.param):
+ yield
+
+
+@pytest.fixture(params=[True, False])
+def blacklisted_emails(request):
+ mock_blacklisted_domains = {'BLACKLISTED_EMAIL_DOMAINS': ['blacklisted.com', 'blacklisted.net']}
+ with patch('features.BLACKLISTED_EMAILS', request.param):
+ with patch.dict('data.model.config.app_config', mock_blacklisted_domains):
+ yield
+
+
+@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('postgres') >= 0,
+ reason="Postgres fails when existing members are added under the savepoint")
+@pytest.mark.parametrize('starting_membership,group_membership,expected_membership', [
+ # Empty team + single member in group => Single member in team.
+ ([],
+ [
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ ],
+ ['someuser']),
+
+ # Team with a Quay user + empty group => empty team.
+ ([('someuser', None)],
+ [],
+ []),
+
+ # Team with an existing external user + user is in the group => no changes.
+ ([
+ ('someuser', 'someuser'),
+ ],
+ [
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ ],
+ ['someuser']),
+
+ # Team with an existing external user (with a different Quay username) + user is in the group.
+ # => no changes
+ ([
+ ('anotherquayname', 'someuser'),
+ ],
+ [
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ ],
+ ['someuser']),
+
+ # Team missing a few members that are in the group => members added.
+ ([('someuser', 'someuser')],
+ [
+ UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ UserInformation('thirduser', 'thirduser', 'thirduser@devtable.com'),
+ ],
+ ['anotheruser', 'someuser', 'thirduser']),
+
+ # Team has a few extra members no longer in the group => members removed.
+ ([
+ ('anotheruser', 'anotheruser'),
+ ('someuser', 'someuser'),
+ ('thirduser', 'thirduser'),
+ ('nontestuser', None),
+ ],
+ [
+ UserInformation('thirduser', 'thirduser', 'thirduser@devtable.com'),
+ ],
+ ['thirduser']),
+
+ # Team has different membership than the group => members added and removed.
+ ([
+ ('anotheruser', 'anotheruser'),
+ ('someuser', 'someuser'),
+ ('nontestuser', None),
+ ],
+ [
+ UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
+ UserInformation('missinguser', 'missinguser', 'missinguser@devtable.com'),
+ ],
+ ['anotheruser', 'missinguser']),
+
+ # Team has same membership but some robots => robots remain and no other changes.
+ ([
+ ('someuser', 'someuser'),
+ ('buynlarge+anotherbot', None),
+ ('buynlarge+somerobot', None),
+ ],
+ [
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ ],
+ ['someuser', 'buynlarge+somerobot', 'buynlarge+anotherbot']),
+
+ # Team has an extra member and some robots => member removed and robots remain.
+ ([
+ ('someuser', 'someuser'),
+ ('buynlarge+anotherbot', None),
+ ('buynlarge+somerobot', None),
+ ],
+ [
+ # No members.
+ ],
+ ['buynlarge+somerobot', 'buynlarge+anotherbot']),
+
+ # Team has a different member and some robots => member changed and robots remain.
+ ([
+ ('someuser', 'someuser'),
+ ('buynlarge+anotherbot', None),
+ ('buynlarge+somerobot', None),
+ ],
+ [
+ UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
+ ],
+ ['anotheruser', 'buynlarge+somerobot', 'buynlarge+anotherbot']),
+
+ # Team with an existing external user (with a different Quay username) + user is in the group.
+ # => no changes and robots remain.
+ ([
+ ('anotherquayname', 'someuser'),
+ ('buynlarge+anotherbot', None),
+ ],
+ [
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ ],
+ ['someuser', 'buynlarge+anotherbot']),
+
+ # Team which returns the same member twice, as pagination in some engines (like LDAP) is not
+ # stable.
+ ([],
+ [
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ UserInformation('anotheruser', 'anotheruser', 'anotheruser@devtable.com'),
+ UserInformation('someuser', 'someuser', 'someuser@devtable.com'),
+ ],
+ ['anotheruser', 'someuser']),
+])
+def test_syncing(user_creation, invite_only_user_creation, starting_membership, group_membership,
+ expected_membership, blacklisted_emails, app):
+ org = model.organization.get_organization('buynlarge')
+
+ # Necessary for the fake auth entries to be created in FederatedLogin.
+ database.LoginService.create(name=_FAKE_AUTH)
+
+ # Assert the team is empty, so we have a clean slate.
+ sync_team_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ assert len(list(model.team.list_team_users(sync_team_info.team))) == 0
+
+ # Add the existing starting members to the team.
+ for starting_member in starting_membership:
+ (quay_username, fakeauth_username) = starting_member
+ if '+' in quay_username:
+ # Add a robot.
+ (_, shortname) = parse_robot_username(quay_username)
+ robot, _ = model.user.create_robot(shortname, org)
+ model.team.add_user_to_team(robot, sync_team_info.team)
+ else:
+ email = quay_username + '@devtable.com'
+
+ if fakeauth_username is None:
+ quay_user = model.user.create_user_noverify(quay_username, email)
+ else:
+ quay_user = model.user.create_federated_user(quay_username, email, _FAKE_AUTH,
+ fakeauth_username, False)
+
+ model.team.add_user_to_team(quay_user, sync_team_info.team)
+
+ # Call syncing on the team.
+ fake_auth = FakeUsers(group_membership)
+ assert sync_team(fake_auth, sync_team_info)
+
+ # Ensure the last updated time and transaction_id's have changed.
+ updated_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ assert updated_sync_info.last_updated is not None
+ assert updated_sync_info.transaction_id != sync_team_info.transaction_id
+
+ users_expected = set([name for name in expected_membership if '+' not in name])
+ robots_expected = set([name for name in expected_membership if '+' in name])
+ assert len(users_expected) + len(robots_expected) == len(expected_membership)
+
+ # Check that the team's users match those expected.
+ service_user_map = model.team.get_federated_team_member_mapping(sync_team_info.team,
+ _FAKE_AUTH)
+ assert set(service_user_map.keys()) == users_expected
+
+ quay_users = model.team.list_team_users(sync_team_info.team)
+ assert len(quay_users) == len(users_expected)
+
+ for quay_user in quay_users:
+ fakeauth_record = model.user.lookup_federated_login(quay_user, _FAKE_AUTH)
+ assert fakeauth_record is not None
+ assert fakeauth_record.service_ident in users_expected
+ assert service_user_map[fakeauth_record.service_ident] == quay_user.id
+
+ # Check that the team's robots match those expected.
+ robots_found = set([r.username for r in model.team.list_team_robots(sync_team_info.team)])
+ assert robots_expected == robots_found
+
+
+def test_sync_teams_to_groups(user_creation, invite_only_user_creation, blacklisted_emails, app):
+ # Necessary for the fake auth entries to be created in FederatedLogin.
+ database.LoginService.create(name=_FAKE_AUTH)
+
+ # Assert the team has not yet been updated.
+ sync_team_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ assert sync_team_info.last_updated is None
+
+ # Call to sync all teams.
+ fake_auth = FakeUsers([])
+ sync_teams_to_groups(fake_auth, timedelta(seconds=1))
+
+ # Ensure the team was synced.
+ updated_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ assert updated_sync_info.last_updated is not None
+ assert updated_sync_info.transaction_id != sync_team_info.transaction_id
+
+ # Set the stale threshold to a high amount and ensure the team is not resynced.
+ current_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ current_info.last_updated = datetime.now() - timedelta(seconds=2)
+ current_info.save()
+
+ sync_teams_to_groups(fake_auth, timedelta(seconds=120))
+
+ third_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ assert third_sync_info.transaction_id == updated_sync_info.transaction_id
+
+ # Set the stale threshold to 10 seconds, and ensure the team is resynced, after making it
+ # "updated" 20s ago.
+ current_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ current_info.last_updated = datetime.now() - timedelta(seconds=20)
+ current_info.save()
+
+ sync_teams_to_groups(fake_auth, timedelta(seconds=10))
+
+ fourth_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
+ assert fourth_sync_info.transaction_id != updated_sync_info.transaction_id
+
+
+@pytest.mark.parametrize('auth_system_builder,config', [
+ (mock_ldap, {'group_dn': 'cn=AwesomeFolk'}),
+ (fake_keystone, {'group_id': 'somegroupid'}),
+])
+def test_teamsync_end_to_end(user_creation, invite_only_user_creation, auth_system_builder, config,
+ blacklisted_emails, app):
+ with auth_system_builder() as auth:
+    # Create a new team to sync.
+ org = model.organization.get_organization('buynlarge')
+ new_synced_team = model.team.create_team('synced2', org, 'member', 'Some synced team.')
+ sync_team_info = model.team.set_team_syncing(new_synced_team, auth.federated_service, config)
+
+ # Sync the team.
+ assert sync_team(auth, sync_team_info)
+
+ # Ensure we now have members.
+ msg = 'Auth system: %s' % auth.federated_service
+ sync_team_info = model.team.get_team_sync_information('buynlarge', 'synced2')
+ team_members = list(model.team.list_team_users(sync_team_info.team))
+ assert len(team_members) > 1, msg
+
+ it, _ = auth.iterate_group_members(config)
+ assert len(team_members) == len(list(it)), msg
+
+ sync_team_info.last_updated = datetime.now() - timedelta(hours=6)
+ sync_team_info.save()
+
+ # Remove one of the members and force a sync again to ensure we re-link the correct users.
+ first_member = team_members[0]
+ model.team.remove_user_from_team('buynlarge', 'synced2', first_member.username, 'devtable')
+
+ team_members2 = list(model.team.list_team_users(sync_team_info.team))
+ assert len(team_members2) == 1, msg
+ assert sync_team(auth, sync_team_info)
+
+ team_members3 = list(model.team.list_team_users(sync_team_info.team))
+ assert len(team_members3) > 1, msg
+ assert set([m.id for m in team_members]) == set([m.id for m in team_members3])
+
+
+@pytest.mark.parametrize('auth_system_builder,config', [
+ (mock_ldap, {'group_dn': 'cn=AwesomeFolk'}),
+ (fake_keystone, {'group_id': 'somegroupid'}),
+])
+def test_teamsync_existing_email(user_creation, invite_only_user_creation, auth_system_builder,
+ blacklisted_emails, config, app):
+ with auth_system_builder() as auth:
+    # Create a new team to sync.
+ org = model.organization.get_organization('buynlarge')
+ new_synced_team = model.team.create_team('synced2', org, 'member', 'Some synced team.')
+ sync_team_info = model.team.set_team_syncing(new_synced_team, auth.federated_service, config)
+
+ # Add a new *unlinked* user with the same email address as one of the team members.
+ it, _ = auth.iterate_group_members(config)
+ members = list(it)
+ model.user.create_user_noverify('someusername', members[0][0].email)
+
+ # Sync the team and ensure it doesn't fail.
+ assert sync_team(auth, sync_team_info)
+
+ team_members = list(model.team.list_team_users(sync_team_info.team))
+ assert len(team_members) > 0
diff --git a/data/users/test/test_users.py b/data/users/test/test_users.py
new file mode 100644
index 000000000..81f6660bd
--- /dev/null
+++ b/data/users/test/test_users.py
@@ -0,0 +1,99 @@
+import pytest
+
+from contextlib import contextmanager
+from mock import patch
+
+from data.database import model
+from data.users.federated import DISABLED_MESSAGE
+from test.test_ldap import mock_ldap
+from test.test_keystone_auth import fake_keystone
+from test.test_external_jwt_authn import fake_jwt
+
+from test.fixtures import *
+
+@pytest.mark.parametrize('auth_system_builder, user1, user2', [
+ (mock_ldap, ('someuser', 'somepass'), ('testy', 'password')),
+ (fake_keystone, ('cool.user', 'password'), ('some.neat.user', 'foobar')),
+])
+def test_auth_createuser(auth_system_builder, user1, user2, config, app):
+ with auth_system_builder() as auth:
+ # Login as a user and ensure a row in the database is created for them.
+ user, err = auth.verify_and_link_user(*user1)
+ assert err is None
+ assert user
+
+ federated_info = model.user.lookup_federated_login(user, auth.federated_service)
+ assert federated_info is not None
+
+ # Disable user creation.
+ with patch('features.USER_CREATION', False):
+ # Ensure that the existing user can login.
+ user_again, err = auth.verify_and_link_user(*user1)
+ assert err is None
+ assert user_again.id == user.id
+
+ # Ensure that a new user cannot.
+ new_user, err = auth.verify_and_link_user(*user2)
+ assert new_user is None
+ assert err == DISABLED_MESSAGE
+
+
+@pytest.mark.parametrize(
+ 'email, blacklisting_enabled, can_create',
+ [
+ # Blacklisting Enabled, Blacklisted Domain => Blocked
+ ('foo@blacklisted.net', True, False),
+ ('foo@blacklisted.com', True, False),
+
+ # Blacklisting Enabled, similar to blacklisted domain => Allowed
+ ('foo@notblacklisted.com', True, True),
+ ('foo@blacklisted.org', True, True),
+
+ # Blacklisting *Disabled*, Blacklisted Domain => Allowed
+ ('foo@blacklisted.com', False, True),
+ ('foo@blacklisted.net', False, True),
+ ]
+)
+@pytest.mark.parametrize('auth_system_builder', [mock_ldap, fake_keystone, fake_jwt])
+def test_createuser_with_blacklist(auth_system_builder, email, blacklisting_enabled, can_create, config, app):
+ """Verify email blacklisting with User Creation"""
+
+ MOCK_CONFIG = {'BLACKLISTED_EMAIL_DOMAINS': ['blacklisted.com', 'blacklisted.net']}
+ MOCK_PASSWORD = 'somepass'
+
+ with auth_system_builder() as auth:
+ with patch('features.BLACKLISTED_EMAILS', blacklisting_enabled):
+ with patch.dict('data.model.config.app_config', MOCK_CONFIG):
+ with patch('features.USER_CREATION', True):
+ new_user, err = auth.verify_and_link_user(email, MOCK_PASSWORD)
+ if can_create:
+ assert err is None
+ assert new_user
+ else:
+ assert err
+ assert new_user is None
+
+
+@pytest.mark.parametrize('auth_system_builder,auth_kwargs', [
+ (mock_ldap, {}),
+ (fake_keystone, {'version': 3}),
+ (fake_keystone, {'version': 2}),
+ (fake_jwt, {}),
+])
+def test_ping(auth_system_builder, auth_kwargs, app):
+ with auth_system_builder(**auth_kwargs) as auth:
+ status, err = auth.ping()
+ assert status
+ assert err is None
+
+
+@pytest.mark.parametrize('auth_system_builder,auth_kwargs', [
+ (mock_ldap, {}),
+ (fake_keystone, {'version': 3}),
+ (fake_keystone, {'version': 2}),
+])
+def test_at_least_one_user_exists(auth_system_builder, auth_kwargs, app):
+ with auth_system_builder(**auth_kwargs) as auth:
+ status, err = auth.at_least_one_user_exists()
+ assert status
+ assert err is None
diff --git a/deploy/README.md b/deploy/README.md
new file mode 100644
index 000000000..6885fa85f
--- /dev/null
+++ b/deploy/README.md
@@ -0,0 +1,8 @@
+# Quay Deployment Manifests for Kubernetes/OpenShift
+
+OpenShift deployments should use the Quay Setup Operator. The manifests provided here are for manual deployment, or for situations where using the Operator is not possible.
+
+Instructions for Deploying on OpenShift
+
+
+
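+As a rough sketch (assuming a cluster context is already configured and the manifests in `deploy/k8s/` are used as-is), the manifests can be applied with `kubectl` or `oc`, creating the namespace first:
+
+```
+kubectl apply -f deploy/k8s/quay-enterprise-namespace.yaml
+kubectl apply -f deploy/k8s/
+```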
diff --git a/deploy/k8s/clair-config.yaml b/deploy/k8s/clair-config.yaml
new file mode 100644
index 000000000..6c69579db
--- /dev/null
+++ b/deploy/k8s/clair-config.yaml
@@ -0,0 +1,75 @@
+---
+clair:
+ database:
+ type: pgsql
+ options:
+ # Check that the database options match those set earlier in postgres-clair-deployment.yaml.
+ source: host=postgres-clair port=5432 dbname=clair user=clair password=test123 sslmode=disable
+ cachesize: 16384
+ api:
+ # The port at which Clair will report its health status. For example, if Clair is running at
+ # https://clair.mycompany.com, the health will be reported at
+ # http://clair.mycompany.com:6061/health.
+ healthport: 6061
+
+ port: 6062
+ timeout: 900s
+
+ # paginationkey can be any random set of characters. *Must be the same across all Clair
+ # instances*.
+ paginationkey: "XxoPtCUzrUv4JV5dS+yQ+MdW7yLEJnRMwigVY/bpgtQ="
+
+ updater:
+ # interval defines how often Clair will check for updates from its upstream vulnerability databases.
+ interval: 6h
+ notifier:
+ attempts: 3
+ renotifyinterval: 1h
+ http:
+ # QUAY_ENDPOINT defines the endpoint at which Quay Enterprise is running.
+ # For example: https://myregistry.mycompany.com
+ endpoint: http://quay-enterprise-clusterip/secscan/notify
+ proxy: http://localhost:6063
+
+jwtproxy:
+ signer_proxy:
+ enabled: true
+ listen_addr: :6063
+ ca_key_file: /certificates/mitm.key # Generated internally, do not change.
+ ca_crt_file: /certificates/mitm.crt # Generated internally, do not change.
+ signer:
+ issuer: security_scanner
+ expiration_time: 5m
+ max_skew: 1m
+ nonce_length: 32
+ private_key:
+ type: preshared
+ options:
+ # The ID of the service key generated for Clair. The ID is returned when setting up
+ # the key in [Quay Enterprise Setup](security-scanning.md)
+ key_id: cd40f1c6a63f574c68ce882258925374882fac2b2f535ae5f8157c429e0c4b2e
+ private_key_path: /clair/config/security_scanner.pem
+
+ verifier_proxies:
+ - enabled: true
+ # The port at which Clair will listen.
+ listen_addr: :6060
+
+ # If Clair is to be served via TLS, uncomment these lines. See the "Running Clair under TLS"
+ # section below for more information.
+ # key_file: /config/clair.key
+ # crt_file: /config/clair.crt
+
+ verifier:
+ # CLAIR_ENDPOINT is the endpoint at which this Clair will be accessible. Note that the port
+ # specified here must match the listen_addr port a few lines above this.
+ # Example: https://myclair.mycompany.com:6060
+ audience: http://clair-service:6060
+
+ upstream: http://localhost:6062
+ key_server:
+ type: keyregistry
+ options:
+ # QUAY_ENDPOINT defines the endpoint at which Quay Enterprise is running.
+ # Example: https://myregistry.mycompany.com
+ registry: http://quay-enterprise-clusterip/keys/
diff --git a/deploy/k8s/clair-deployment.yaml b/deploy/k8s/clair-deployment.yaml
new file mode 100644
index 000000000..994c3770c
--- /dev/null
+++ b/deploy/k8s/clair-deployment.yaml
@@ -0,0 +1,40 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ quay-enterprise-component: clair-scanner
+ name: clair-scanner
+ namespace: quay-enterprise
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ quay-enterprise-component: clair-scanner
+ template:
+ metadata:
+ labels:
+ quay-enterprise-component: clair-scanner
+ namespace: quay-enterprise
+ spec:
+ containers:
+ - image: quay.io/redhat/clair-jwt:v3.0.4
+ imagePullPolicy: IfNotPresent
+ name: clair-scanner
+ ports:
+ - containerPort: 6060
+ name: clair-api
+ protocol: TCP
+ - containerPort: 6061
+ name: clair-health
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /clair/config
+ name: configvolume
+ imagePullSecrets:
+ - name: redhat-quay-pull-secret
+ restartPolicy: Always
+ volumes:
+ - name: configvolume
+ secret:
+ secretName: clair-scanner-config-secret
\ No newline at end of file
diff --git a/deploy/k8s/clair-service.yaml b/deploy/k8s/clair-service.yaml
new file mode 100644
index 000000000..cfb8c0cb4
--- /dev/null
+++ b/deploy/k8s/clair-service.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: clair-service
+ namespace: quay-enterprise
+spec:
+ ports:
+ - name: clair-api
+ port: 6060
+ protocol: TCP
+ targetPort: 6060
+ - name: clair-health
+ port: 6061
+ protocol: TCP
+ targetPort: 6061
+ selector:
+ quay-enterprise-component: clair-scanner
+ type: ClusterIP
\ No newline at end of file
diff --git a/deploy/k8s/db-pvc.yaml b/deploy/k8s/db-pvc.yaml
new file mode 100644
index 000000000..30a9f1d56
--- /dev/null
+++ b/deploy/k8s/db-pvc.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: postgres-storage
+ namespace: quay-enterprise
+spec:
+ accessModes:
+ - ReadWriteOnce
+ volumeMode: Filesystem
+ resources:
+ requests:
+ # The 10Gi creates 10 gigabytes of storage for use by the Postgres database.
+ storage: 10Gi
+ storageClassName: quay-storageclass
diff --git a/deploy/k8s/postgres-clair-deployment.yaml b/deploy/k8s/postgres-clair-deployment.yaml
new file mode 100644
index 000000000..66cd62c66
--- /dev/null
+++ b/deploy/k8s/postgres-clair-deployment.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: postgres-clair
+ name: postgres-clair
+ namespace: quay-enterprise
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: postgres-clair
+ template:
+ metadata:
+ labels:
+ app: postgres-clair
+ spec:
+ containers:
+ - env:
+ - name: POSTGRESQL_USER
+ # Set the username for the Clair postgres database (clair by default)
+ value: clair
+ - name: POSTGRESQL_DATABASE
+ # Set the name of the Clair postgres database
+ value: clair
+ - name: POSTGRESQL_PASSWORD
+          # Set the password for the Clair postgres user
+ value: test123
+ image: registry.access.redhat.com/rhscl/postgresql-10-rhel7:1-35
+ imagePullPolicy: IfNotPresent
+ name: postgres-clair
+ ports:
+ - containerPort: 5432
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /var/lib/pgsql/data
+ name: postgredb
+ serviceAccount: postgres
+ serviceAccountName: postgres
+ volumes:
+ - name: postgredb
+ persistentVolumeClaim:
+ claimName: postgres-clair-storage
\ No newline at end of file
diff --git a/deploy/k8s/postgres-clair-service.yaml b/deploy/k8s/postgres-clair-service.yaml
new file mode 100644
index 000000000..a44ee33de
--- /dev/null
+++ b/deploy/k8s/postgres-clair-service.yaml
@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: postgres-clair
+ name: postgres-clair
+ namespace: quay-enterprise
+spec:
+ ports:
+ - nodePort: 30680
+ port: 5432
+ protocol: TCP
+ targetPort: 5432
+ selector:
+ app: postgres-clair
+ type: NodePort
\ No newline at end of file
diff --git a/deploy/k8s/postgres-clair-storage.yaml b/deploy/k8s/postgres-clair-storage.yaml
new file mode 100644
index 000000000..ba941ffce
--- /dev/null
+++ b/deploy/k8s/postgres-clair-storage.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: postgres-clair-storage
+ namespace: quay-enterprise
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: quay-storageclass
\ No newline at end of file
diff --git a/deploy/k8s/postgres-deployment.yaml b/deploy/k8s/postgres-deployment.yaml
new file mode 100644
index 000000000..c2f38bb1f
--- /dev/null
+++ b/deploy/k8s/postgres-deployment.yaml
@@ -0,0 +1,38 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: postgres
+ namespace: quay-enterprise
+spec:
+ # Only one instance of the postgres database is defined here. Adjust replicas based on demand.
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: postgres
+ spec:
+ containers:
+ - name: postgres
+ image: registry.access.redhat.com/rhscl/postgresql-10-rhel7:1-35
+ imagePullPolicy: "IfNotPresent"
+ ports:
+ - containerPort: 5432
+ env:
+ - name: POSTGRESQL_USER
+ # Replace "username" with a name for your Postgres user
+ value: "username"
+        - name: POSTGRESQL_DATABASE
+          # The name of the database that Quay will use
+          value: "quay"
+        - name: POSTGRESQL_PASSWORD
+          # Replace "password" with a password for your Postgres user
+          value: "password"
+ volumeMounts:
+ - mountPath: /var/lib/pgsql/data
+ name: postgredb
+ serviceAccount: postgres
+ serviceAccountName: postgres
+ volumes:
+ - name: postgredb
+ persistentVolumeClaim:
+ claimName: postgres-storage
\ No newline at end of file
diff --git a/deploy/k8s/postgres-service.yaml b/deploy/k8s/postgres-service.yaml
new file mode 100644
index 000000000..898123c43
--- /dev/null
+++ b/deploy/k8s/postgres-service.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: postgres
+ namespace: quay-enterprise
+ labels:
+ app: postgres
+spec:
+ type: NodePort
+ ports:
+ - port: 5432
+ selector:
+ app: postgres
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-app-rc.yaml b/deploy/k8s/quay-enterprise-app-rc.yaml
new file mode 100644
index 000000000..f65bf6045
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-app-rc.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-app
+ labels:
+ quay-enterprise-component: app
+spec:
+ # Only one instance of the quay container is defined here. Adjust replicas based on demand.
+ replicas: 1
+ selector:
+ matchLabels:
+ quay-enterprise-component: app
+ template:
+ metadata:
+ namespace: quay-enterprise
+ labels:
+ quay-enterprise-component: app
+ spec:
+ volumes:
+ - name: configvolume
+ secret:
+ secretName: quay-enterprise-secret
+ containers:
+ - name: quay-enterprise-app
+ # Pick the Quay version you wish to run
+ image: quay.io/redhat/quay:v3.1.0
+ ports:
+ - containerPort: 8443
+ volumeMounts:
+ - name: configvolume
+ readOnly: false
+ mountPath: /conf/stack
+ imagePullSecrets:
+ - name: redhat-quay-pull-secret
diff --git a/deploy/k8s/quay-enterprise-config-secret.yaml b/deploy/k8s/quay-enterprise-config-secret.yaml
new file mode 100644
index 000000000..1b74c4fa3
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-config-secret.yaml
@@ -0,0 +1,6 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-config-secret
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-config-service-clusterip.yaml b/deploy/k8s/quay-enterprise-config-service-clusterip.yaml
new file mode 100644
index 000000000..4f6a90b34
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-config-service-clusterip.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-config
+spec:
+ type: ClusterIP
+ ports:
+ - protocol: TCP
+ name: https
+ port: 443
+ targetPort: 8443
+ selector:
+ quay-enterprise-component: config-app
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-config.yaml b/deploy/k8s/quay-enterprise-config.yaml
new file mode 100644
index 000000000..4b6647ec9
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-config.yaml
@@ -0,0 +1,28 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-config-app
+ labels:
+ quay-enterprise-component: config-app
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ quay-enterprise-component: config-app
+ template:
+ metadata:
+ namespace: quay-enterprise
+ labels:
+ quay-enterprise-component: config-app
+ spec:
+ containers:
+ - name: quay-enterprise-config-app
+ image: quay.io/redhat/quay:v3.1.0
+ ports:
+ - containerPort: 8443
+ command: ["/quay-registry/quay-entrypoint.sh"]
+ args: ["config", "secret"]
+ imagePullSecrets:
+ - name: redhat-quay-pull-secret
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-mirror.yaml b/deploy/k8s/quay-enterprise-mirror.yaml
new file mode 100644
index 000000000..86b923d3b
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-mirror.yaml
@@ -0,0 +1,28 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-mirror
+ labels:
+ quay-enterprise-component: mirror-app
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ quay-enterprise-component: mirror-app
+ template:
+ metadata:
+ namespace: quay-enterprise
+ labels:
+ quay-enterprise-component: mirror-app
+ spec:
+ containers:
+ - name: quay-enterprise-mirror-app
+ image: quay.io/redhat/quay:v3.1.0
+ ports:
+ - containerPort: 8443
+ command: ["/quay-registry/quay-entrypoint.sh"]
+ args: ["repomirror"]
+ imagePullSecrets:
+ - name: redhat-quay-pull-secret
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-namespace.yaml b/deploy/k8s/quay-enterprise-namespace.yaml
new file mode 100644
index 000000000..9c8b90322
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-namespace.yaml
@@ -0,0 +1,5 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: quay-enterprise
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-redhat-quay-pull-secret.yaml b/deploy/k8s/quay-enterprise-redhat-quay-pull-secret.yaml
new file mode 100644
index 000000000..7fb445ac0
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-redhat-quay-pull-secret.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ namespace: quay-enterprise
+ name: redhat-quay-pull-secret
+data:
+ # Change to include the credentials shown from https://access.redhat.com/solutions/3533201
+ .dockerconfigjson:
+type: kubernetes.io/dockerconfigjson
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-redis.yaml b/deploy/k8s/quay-enterprise-redis.yaml
new file mode 100644
index 000000000..2a77f41d4
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-redis.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-redis
+ labels:
+ quay-enterprise-component: redis
+spec:
+ # Only one instance of the redis database is defined here. Adjust replicas based on demand.
+ replicas: 1
+ selector:
+ matchLabels:
+ quay-enterprise-component: redis
+ template:
+ metadata:
+ namespace: quay-enterprise
+ labels:
+ quay-enterprise-component: redis
+ spec:
+ containers:
+ - name: redis-master
+ image: registry.access.redhat.com/rhscl/redis-32-rhel7
+ imagePullPolicy: "IfNotPresent"
+ ports:
+ - containerPort: 6379
+---
+apiVersion: v1
+kind: Service
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-redis
+ labels:
+ quay-enterprise-component: redis
+spec:
+ ports:
+ - port: 6379
+ selector:
+ quay-enterprise-component: redis
\ No newline at end of file
diff --git a/deploy/k8s/quay-enterprise-service-clusterip.yaml b/deploy/k8s/quay-enterprise-service-clusterip.yaml
new file mode 100644
index 000000000..7ae0e3d84
--- /dev/null
+++ b/deploy/k8s/quay-enterprise-service-clusterip.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ namespace: quay-enterprise
+ name: quay-enterprise-clusterip
+spec:
+ type: ClusterIP
+ ports:
+ - protocol: TCP
+ name: https
+ port: 443
+ targetPort: 8443
+ selector:
+ quay-enterprise-component: app
\ No newline at end of file
diff --git a/deploy/k8s/quay-servicetoken-role-binding-k8s1-6.yaml b/deploy/k8s/quay-servicetoken-role-binding-k8s1-6.yaml
new file mode 100644
index 000000000..a882af64b
--- /dev/null
+++ b/deploy/k8s/quay-servicetoken-role-binding-k8s1-6.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: quay-enterprise-secret-writer
+ namespace: quay-enterprise
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: quay-enterprise-serviceaccount
+subjects:
+- kind: ServiceAccount
+ name: default
\ No newline at end of file
diff --git a/deploy/k8s/quay-servicetoken-role-k8s1-6.yaml b/deploy/k8s/quay-servicetoken-role-k8s1-6.yaml
new file mode 100644
index 000000000..0dc5de79c
--- /dev/null
+++ b/deploy/k8s/quay-servicetoken-role-k8s1-6.yaml
@@ -0,0 +1,33 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: quay-enterprise-serviceaccount
+ namespace: quay-enterprise
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - put
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+- apiGroups:
+ - extensions
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
\ No newline at end of file
diff --git a/deploy/k8s/quay-storageclass.yaml b/deploy/k8s/quay-storageclass.yaml
new file mode 100644
index 000000000..44cdbaced
--- /dev/null
+++ b/deploy/k8s/quay-storageclass.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: quay-storageclass
+parameters:
+ type: gp2
+  # Uncomment the following lines to encrypt the volume (AWS EBS example with xfs shown)
+ #encrypted: "true"
+ #fsType: xfs
+ #kmsKeyId:
+provisioner: kubernetes.io/aws-ebs
+reclaimPolicy: Delete
\ No newline at end of file
diff --git a/deploy/openshift/quay-enterprise-app-route.yaml b/deploy/openshift/quay-enterprise-app-route.yaml
new file mode 100644
index 000000000..0de2dc570
--- /dev/null
+++ b/deploy/openshift/quay-enterprise-app-route.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Route
+metadata:
+ name: quay-enterprise
+ namespace: quay-enterprise
+spec:
+ to:
+ kind: Service
+ name: quay-enterprise-clusterip
+ tls:
+ termination: passthrough
\ No newline at end of file
diff --git a/deploy/openshift/quay-enterprise-config-route.yaml b/deploy/openshift/quay-enterprise-config-route.yaml
new file mode 100644
index 000000000..b5ddf7fb9
--- /dev/null
+++ b/deploy/openshift/quay-enterprise-config-route.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Route
+metadata:
+ name: quay-enterprise-config
+ namespace: quay-enterprise
+spec:
+ to:
+ kind: Service
+ name: quay-enterprise-config
+ tls:
+ termination: passthrough
\ No newline at end of file
diff --git a/dev.df b/dev.df
new file mode 100644
index 000000000..57db0befa
--- /dev/null
+++ b/dev.df
@@ -0,0 +1,28 @@
+# vim:ft=dockerfile
+
+FROM phusion/baseimage:0.9.18
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV HOME /root
+
+# Install the dependencies.
+RUN apt-get update # 24JUN2015
+
+# New ubuntu packages should be added as their own apt-get install lines below the existing install commands
+RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev libgpgme11 libgpgme11-dev
+
+# Build the python dependencies
+ADD requirements.txt requirements.txt
+RUN virtualenv --distribute venv
+RUN venv/bin/pip install -r requirements.txt
+
+ARG src_subdir
+
+RUN apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D \
+ && echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" > /etc/apt/sources.list.d/docker.list \
+ && apt-get update \
+ && apt-get install -y docker-engine
+
+ENV PYTHONPATH=/
+ENV PATH=/venv/bin:$PATH
+WORKDIR /src/$src_subdir
diff --git a/digest/__init__.py b/digest/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/digest/checksums.py b/digest/checksums.py
new file mode 100644
index 000000000..dd7c6f2ba
--- /dev/null
+++ b/digest/checksums.py
@@ -0,0 +1,98 @@
+import hashlib
+import logging
+import tarfile
+
+
+TarError = tarfile.TarError
+logger = logging.getLogger(__name__)
+
+
+def sha256_file(fp, data=None):
+ h = hashlib.sha256(data or '')
+ if not fp:
+ return h.hexdigest()
+ while True:
+ buf = fp.read(4096)
+ if not buf:
+ break
+ h.update(buf)
+ return h.hexdigest()
+
+
+def sha256_string(s):
+ return hashlib.sha256(s).hexdigest()
+
+
+def compute_tarsum(fp, json_data):
+ header_fields = ('name', 'mode', 'uid', 'gid', 'size', 'mtime',
+ 'type', 'linkname', 'uname', 'gname', 'devmajor',
+ 'devminor')
+ tar = None
+ hashes = []
+ try:
+ tar = tarfile.open(mode='r|*', fileobj=fp)
+ for member in tar:
+ header = ''
+ for field in header_fields:
+ value = getattr(member, field)
+ if field == 'type':
+ field = 'typeflag'
+ elif field == 'name':
+ if member.isdir() and not value.endswith('/'):
+ value += '/'
+ header += '{0}{1}'.format(field, value)
+ h = None
+ try:
+ if member.size > 0:
+ f = tar.extractfile(member)
+ h = sha256_file(f, header)
+ else:
+ h = sha256_string(header)
+ except KeyError:
+ h = sha256_string(header)
+ hashes.append(h)
+ hashes.sort()
+ except tarfile.ReadError as e:
+ if e.message != 'empty file':
+ # NOTE(samalba): ignore empty tarfiles but still let the tarsum
+ # compute with json data
+ raise
+ finally:
+ if tar:
+ tar.close()
+ data = json_data + ''.join(hashes)
+ tarsum = 'tarsum+sha256:{0}'.format(sha256_string(data))
+ logger.debug('checksums.compute_tarsum: return %s', tarsum)
+ return tarsum
+
+
+def simple_checksum_handler(json_data):
+ h = hashlib.sha256(json_data.encode('utf8') + '\n')
+
+ def fn(buf):
+ h.update(buf)
+ return h, fn
+
+
+def content_checksum_handler():
+ h = hashlib.sha256()
+
+ def fn(buf):
+ h.update(buf)
+ return h, fn
+
+
+def compute_simple(fp, json_data):
+ data = json_data + '\n'
+ return 'sha256:{0}'.format(sha256_file(fp, data))
+
+
+if __name__ == '__main__':
+ import sys
+ if len(sys.argv) < 3:
+ print 'Usage: {0} json_file layer_file'.format(sys.argv[0])
+ sys.exit(1)
+ json_data = file(sys.argv[1]).read()
+ fp = open(sys.argv[2])
+ print compute_simple(fp, json_data)
+ print compute_tarsum(fp, json_data)
diff --git a/digest/digest_tools.py b/digest/digest_tools.py
new file mode 100644
index 000000000..212088236
--- /dev/null
+++ b/digest/digest_tools.py
@@ -0,0 +1,82 @@
+import re
+import os.path
+import hashlib
+
+
+DIGEST_PATTERN = r'([A-Za-z0-9_+.-]+):([A-Fa-f0-9]+)'
+REPLACE_WITH_PATH = re.compile(r'[+.]')
+REPLACE_DOUBLE_SLASHES = re.compile(r'/+')
+
+class InvalidDigestException(RuntimeError):
+ pass
+
+
+class Digest(object):
+ DIGEST_REGEX = re.compile(DIGEST_PATTERN)
+
+ def __init__(self, hash_alg, hash_bytes):
+ self._hash_alg = hash_alg
+ self._hash_bytes = hash_bytes
+
+ def __str__(self):
+ return '{0}:{1}'.format(self._hash_alg, self._hash_bytes)
+
+ def __eq__(self, rhs):
+ return isinstance(rhs, Digest) and str(self) == str(rhs)
+
+ @staticmethod
+ def parse_digest(digest):
+ """ Returns the digest parsed out to its components. """
+ match = Digest.DIGEST_REGEX.match(digest)
+ if match is None or match.end() != len(digest):
+      raise InvalidDigestException('Not a valid digest: %s' % digest)
+
+ return Digest(match.group(1), match.group(2))
+
+ @property
+ def hash_alg(self):
+ return self._hash_alg
+
+ @property
+ def hash_bytes(self):
+ return self._hash_bytes
+
+
+def content_path(digest):
+ """ Returns a relative path to the parsed digest. """
+ parsed = Digest.parse_digest(digest)
+ components = []
+
+ # Generate a prefix which is always two characters, and which will be filled with leading zeros
+ # if the input does not contain at least two characters. e.g. ABC -> AB, A -> 0A
+ prefix = parsed.hash_bytes[0:2].zfill(2)
+ pathish = REPLACE_WITH_PATH.sub('/', parsed.hash_alg)
+ normalized = REPLACE_DOUBLE_SLASHES.sub('/', pathish).lstrip('/')
+ components.extend([normalized, prefix, parsed.hash_bytes])
+ return os.path.join(*components)
+
+
+def sha256_digest(content):
+ """ Returns a sha256 hash of the content bytes in digest form. """
+ def single_chunk_generator():
+ yield content
+ return sha256_digest_from_generator(single_chunk_generator())
+
+
+def sha256_digest_from_generator(content_generator):
+ """ Reads all of the data from the iterator and creates a sha256 digest from the content
+ """
+ digest = hashlib.sha256()
+ for chunk in content_generator:
+ digest.update(chunk)
+ return 'sha256:{0}'.format(digest.hexdigest())
+
+
+def sha256_digest_from_hashlib(sha256_hash_obj):
+ return 'sha256:{0}'.format(sha256_hash_obj.hexdigest())
+
+
+def digests_equal(lhs_digest_string, rhs_digest_string):
+ """ Parse and compare the two digests, returns True if the digests are equal, False otherwise.
+ """
+ return Digest.parse_digest(lhs_digest_string) == Digest.parse_digest(rhs_digest_string)
diff --git a/digest/test/test_digest_tools.py b/digest/test/test_digest_tools.py
new file mode 100644
index 000000000..b04f64c6f
--- /dev/null
+++ b/digest/test/test_digest_tools.py
@@ -0,0 +1,43 @@
+import pytest
+
+from digest.digest_tools import Digest, content_path, InvalidDigestException
+
+@pytest.mark.parametrize('digest, output_args', [
+ ('tarsum.v123123+sha1:123deadbeef', ('tarsum.v123123+sha1', '123deadbeef')),
+ ('tarsum.v1+sha256:123123', ('tarsum.v1+sha256', '123123')),
+ ('tarsum.v0+md5:abc', ('tarsum.v0+md5', 'abc')),
+ ('tarsum+sha1:abc', ('tarsum+sha1', 'abc')),
+ ('sha1:123deadbeef', ('sha1', '123deadbeef')),
+ ('sha256:123123', ('sha256', '123123')),
+ ('md5:abc', ('md5', 'abc')),
+])
+def test_parse_good(digest, output_args):
+ assert Digest.parse_digest(digest) == Digest(*output_args)
+ assert str(Digest.parse_digest(digest)) == digest
+
+
+@pytest.mark.parametrize('bad_digest', [
+ 'tarsum.v+md5:abc:',
+ 'sha1:123deadbeefzxczxv',
+ 'sha256123123',
+ 'tarsum.v1+',
+ 'tarsum.v1123+sha1:',
+])
+def test_parse_fail(bad_digest):
+ with pytest.raises(InvalidDigestException):
+ Digest.parse_digest(bad_digest)
+
+
+@pytest.mark.parametrize('digest, path', [
+ ('tarsum.v123123+sha1:123deadbeef', 'tarsum/v123123/sha1/12/123deadbeef'),
+ ('tarsum.v1+sha256:123123', 'tarsum/v1/sha256/12/123123'),
+ ('tarsum.v0+md5:abc', 'tarsum/v0/md5/ab/abc'),
+ ('sha1:123deadbeef', 'sha1/12/123deadbeef'),
+ ('sha256:123123', 'sha256/12/123123'),
+ ('md5:abc', 'md5/ab/abc'),
+ ('md5:1', 'md5/01/1'),
+ ('md5.....+++:1', 'md5/01/1'),
+ ('.md5.:1', 'md5/01/1'),
+])
+def test_paths(digest, path):
+ assert content_path(digest) == path
diff --git a/displayversion.py b/displayversion.py
new file mode 100644
index 000000000..463271059
--- /dev/null
+++ b/displayversion.py
@@ -0,0 +1,12 @@
+from _init import __version__, __gitrev__
+
+def displayversion():
+ version_string = ('Quay %s (%s)' % (__version__, __gitrev__.strip())).strip()
+
+ print '=' * (len(version_string) + 4)
+ print '= ' + version_string + ' ='
+ print '=' * (len(version_string) + 4)
+ print ""
+
+if __name__ == "__main__":
+ displayversion()
diff --git a/docs/development-container.md b/docs/development-container.md
new file mode 100644
index 000000000..444054429
--- /dev/null
+++ b/docs/development-container.md
@@ -0,0 +1,121 @@
+# Quay Development Container
+
+The instructions below describe how to create and set up a container for working with local source changes and testing. They are meant for developers familiar with Python, JavaScript, and the associated development tools.
+
+## Quay Source
+
+For convenience, the environment variable `QUAY_DEVEL_HOME` is used below to refer to the parent directory that contains the source checkout and the other directories created in these steps.
+
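+For example (the location is arbitrary; adjust the path to your own workspace):
+```
+export QUAY_DEVEL_HOME=$HOME/quay-devel
+mkdir -p $QUAY_DEVEL_HOME
+```
+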
+The directory `$QUAY_DEVEL_HOME/quay` is your _development_ git checkout of [quay](https://github.com/quay/projectquay).
+```
+cd $QUAY_DEVEL_HOME
+git clone https://github.com/quay/projectquay.git
+```
+
+For local storage, create the directory where images will be written to disk.
+```
+cd $QUAY_DEVEL_HOME
+mkdir quay-storage
+```
+
+The Quay config app should have been run previously and the resulting _quay-config.tar.gz_ expanded into place. The tarball is copied into this directory for convenience only: keeping it available allows it to be uploaded back into the config app for later modifications.
+```
+cd $QUAY_DEVEL_HOME
+mkdir quay-config
+cp ~/Downloads/quay-config.tar.gz $QUAY_DEVEL_HOME/quay-config/quay-config.tar.gz
+tar xvf $QUAY_DEVEL_HOME/quay-config/quay-config.tar.gz -C $QUAY_DEVEL_HOME/quay-config
+```
+
+## Building Development Container
+
+Build an image from a clean git checkout of the master branch. It is important not to do this in your development directory, as files there may conflict with or break a clean build.
+```
+cd $QUAY_DEVEL_HOME/quay
+docker build -t quay:devel -f Dockerfile .
+```
+
+## Run Development Container
+
+Note: This assumes the config app has been run successfully and the database is available. Both can be done by following the regular user documentation, using the `quay:devel` image built above.
+
+```
+docker run --rm --name quay \
+ -v $QUAY_DEVEL_HOME/quay-config:/conf/stack \
+ -v $QUAY_DEVEL_HOME/quay-storage:/datastorage \
+ -v $QUAY_DEVEL_HOME/quay:$QUAY_DEVEL_HOME/quay \
+ -p 8080:8080 \
+ -p 8443:8443 \
+ -p 9092:9092 \
+ -e QUAY_DEVEL_HOME=$QUAY_DEVEL_HOME \
+ quay:devel
+```
+
+This starts the Quay container with all services running. The web UI is available on port 8443.
+
+## Switch Services to Development
+
+### Inside the Container
+
+When exec'ing into the development container, it is best to run under the [SCLs](https://www.softwarecollections.org) used during production. This will provide the correct paths to python and other executables.
+
+```
+docker exec -it quay scl enable python27 rh-nginx112 bash
+```
+
+The following sections are perhaps easiest to run in separate `docker exec` sessions, which is how they will be described. Some or all could be run in the background and managed differently than described here.
+
+### Stop Services
+
+When the Quay container is running, all regular services are started by default. To run a service locally, first stop its default instance. Below are the steps for developing the web UI and the web backend Python service.
+
+Stop services:
+```
+supervisorctl -c /quay-registry/conf/supervisord.conf stop gunicorn-web
+```
+
+Change the web resources to the devel location:
+```
+cd /quay-registry
+mv static static.bak
+ln -s $QUAY_DEVEL_HOME/static
+```
+
+Build web assets:
+```
+cd $QUAY_DEVEL_HOME
+mkdir -p static/webfonts
+mkdir -p static/fonts
+mkdir -p static/ldn
+PYTHONPATH=. python -m external_libraries
+
+yarn build
+```
+
+### Run Migrations
+
+If `$QUAY_DEVEL_HOME/quay`, which presumably contains your local code changes, includes migrations, take special care when switching between different versions of the code.
+
+To run a migration:
+```
+cd $QUAY_DEVEL_HOME
+PYTHONPATH=. alembic upgrade 5248ddf35167
+```
+
+To revert a migration:
+```
+cd $QUAY_DEVEL_HOME
+PYTHONPATH=. alembic downgrade -1
+```
+
+### Web UI Assets
+
+```
+cd $QUAY_DEVEL_HOME
+yarn build && npm run watch
+```
+
+### Run Web Server
+```
+cd $QUAY_DEVEL_HOME
+PYTHONPATH=. gunicorn -c conf/gunicorn_web.py web:application
+```
diff --git a/docs/talks.md b/docs/talks.md
new file mode 100644
index 000000000..6d2bd6225
--- /dev/null
+++ b/docs/talks.md
@@ -0,0 +1,17 @@
+# Talks
+
+The following is a chronological list of presentations given by various members of the community.
+
+## 2017
+
+* [Towards a standard Containerized App Registry - Antoine Legrand, Jimmy Zelinskie](https://youtu.be/zGJsXyzE5A8)
+* [Demoing a Kubernetes CI/CD Pipeline using App Registry - Antoine Legrand, Jimmy Zelinskie](https://youtu.be/6Gpuj_cCZII)
+
+## 2016
+
+* [Better Image Distribution to Worldwide Clusters - Joey Schorr](https://youtu.be/dX9-ComoJTs)
+* [Container Management at eBay - Thuc Nguyen](https://youtu.be/h4f7nqYRPK8)
+
+## 2015
+
+* [Continuous Containerization - Jake Moshenko, Joey Schorr](https://youtu.be/anfmeB_JzB0)
diff --git a/emails/base.html b/emails/base.html
new file mode 100644
index 000000000..52004713f
--- /dev/null
+++ b/emails/base.html
@@ -0,0 +1,76 @@
+{% if with_base_template %}
+
+
+
+
+
+ {{ subject }}
+
+ {% if action_metadata %}
+
+ {% endif %}
+
+
+
+
+
+
+
+
+
+{% endif %}
+ {% block content %}{% endblock %}
+{% if with_base_template %}
+
+
+
+ If you have any questions, respond to this email and we’ll be happy to help!
+
+
+
+
+
+
+
+ {% if hosted %}
+
+
+ Quay [ builds, analyzes, distributes ] your container images
+
+
+
+ {% endif %}
+
+
+{% endif %}
diff --git a/emails/changeemail.html b/emails/changeemail.html
new file mode 100644
index 000000000..ee239f3fb
--- /dev/null
+++ b/emails/changeemail.html
@@ -0,0 +1,26 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+E-mail Address Change Requested
+
+This email address was added to the {{ app_title }} account {{ username }} .
+
+
+
+
+ If you did not add this address to {{ username }} , you can safely ignore this message.
+
+
+Best Regards,
+
+The {{ app_title }} Team
+
+
+
+
+{% endblock %}
diff --git a/emails/confirmemail.html b/emails/confirmemail.html
new file mode 100644
index 000000000..11ea31d00
--- /dev/null
+++ b/emails/confirmemail.html
@@ -0,0 +1,30 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Confirm email for new user: {{ username }}
+
+
+
+
+ This email address was used to register user {{ username }}.
+
+
+
+
+
+ Once you confirm this email, you’ll be able to access your {{ app_title }} account.
+
+
+
+
+
+
+Welcome!
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/email-template-viewer.html b/emails/email-template-viewer.html
new file mode 100644
index 000000000..f3f6f9a38
--- /dev/null
+++ b/emails/email-template-viewer.html
@@ -0,0 +1,17 @@
+
+
+
+ Email Template Viewer
+
+
+ Email Template Viewer
+ Here is a list of the templates available:
+
+ {% for template in templates %}
+ {% if template != 'email-template-viewer' %}
+ {{template}}
+ {% endif %}
+ {% endfor %}
+
+
+
\ No newline at end of file
diff --git a/emails/emailchanged.html b/emails/emailchanged.html
new file mode 100644
index 000000000..e5b12aef3
--- /dev/null
+++ b/emails/emailchanged.html
@@ -0,0 +1,29 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Account e-mail address changed: {{ username }}
+
+
+
+
+ The e-mail address for user {{ username }} has been changed.
+
+
+
+
+
+ No action is required if you made this change.
+
+
+
+
+
+Best wishes,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/logsexported.html b/emails/logsexported.html
new file mode 100644
index 000000000..945ddedcc
--- /dev/null
+++ b/emails/logsexported.html
@@ -0,0 +1,44 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Usage Logs Export has completed
+Export ID: {{ export_id }}
+
+
+{% if status == 'success' %}
+
+
+ The exported logs information can be found at {{ exported_data_url }} and will remain accessible for {{ exported_data_expiration }} seconds before being deleted.
+
+
+{% elif status == 'failed' %}
+
+
+ The attempt to export the logs in the specified range has failed. This operation will be retried up to 3 times. Please contact support if this problem persists.
+
+
+{% elif status == 'timedout' %}
+
+
+ The attempt to export the logs in the specified range has timed out. Please contact support if this problem persists.
+
+
+{% elif status == 'invalidrequest' %}
+
+
+ The attempt to export the logs failed due to an invalid request. Please contact support if this problem persists.
+
+
+{% endif %}
+
+
+
+ If you did not initiate this operation, please delete this e-mail.
+
+
+
+Best Wishes,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/orgrecovery.html b/emails/orgrecovery.html
new file mode 100644
index 000000000..8d5cd3072
--- /dev/null
+++ b/emails/orgrecovery.html
@@ -0,0 +1,45 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Organization recovery: {{ organization }}
+
+
+
+
+ A user at {{ app_link() }} has attempted to recover access to organization {{ organization }} via this email address.
+
+
+
+
+
+ Please login with one of the following user accounts to access this organization:
+
+
+
+
+
+ {% for admin_user in admin_usernames %}
+ {{ admin_user | user_reference }}
+ {% endfor %}
+
+
+
+
+
+
+
+
+ If you did not make this request, your organization has not been compromised and the user was not given access. You can safely ignore this message.
+
+
+
+
+Best Wishes,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/passwordchanged.html b/emails/passwordchanged.html
new file mode 100644
index 000000000..870ef7981
--- /dev/null
+++ b/emails/passwordchanged.html
@@ -0,0 +1,30 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Account password changed: {{ username }}
+
+
+
+
+ The password for user {{ username }} has been changed.
+
+
+
+
+
+ No action is required if you made this change.
+
+
+
+
+
+
+Best Wishes,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/paymentfailure.html b/emails/paymentfailure.html
new file mode 100644
index 000000000..b901d597f
--- /dev/null
+++ b/emails/paymentfailure.html
@@ -0,0 +1,30 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Subscription payment failure: {{ username }}
+
+
+
+
+ A recent payment for account {{ username }} failed.
+
+
+
+
+
+ If you would like to continue to use the account {{ username }} without interruption, update your payment information.
+
+
+
+
+
+
+Thank you,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/recovery.html b/emails/recovery.html
new file mode 100644
index 000000000..27d3eaf48
--- /dev/null
+++ b/emails/recovery.html
@@ -0,0 +1,30 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Account recovery
+
+
+
+
+ A user at {{ app_title }} has attempted to recover their access to the account registered to this email address.
+
+
+
+
+
+
+
+ If you did not request this password reset, you can safely ignore this message and the account password and access will not change.
+
+
+
+
+Best Wishes,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/repoauthorizeemail.html b/emails/repoauthorizeemail.html
new file mode 100644
index 000000000..7d779d852
--- /dev/null
+++ b/emails/repoauthorizeemail.html
@@ -0,0 +1,30 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Verify e-mail to receive {{namespace}}/{{repository}} notifications
+
+
+
+
+ A request has been made to send notifications to this email address for the repository {{namespace}}/{{repository}} .
+
+
+
+
+
+
+
+ If you do not wish to receive notifications for {{namespace}}/{{repository}} , you can ignore this message.
+
+
+
+
+Thank you,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/emails/teaminvite.html b/emails/teaminvite.html
new file mode 100644
index 000000000..0e4f11198
--- /dev/null
+++ b/emails/teaminvite.html
@@ -0,0 +1,30 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Invitation to join team: {{ organization }}/{{ teamname }}
+
+
+
+
+ You’ve been invited to join the team {{ teamname }} in the organization {{ organization }} by user {{ inviter | user_reference }} .
+
+
+
+
+
+
+
+ If you were not expecting this invitation, you can safely ignore this email.
+
+
+
+
+Thank you,
+The {{ app_title }} Team
+
+{% endblock %}
diff --git a/endpoints/__init__.py b/endpoints/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/endpoints/api/__init__.py b/endpoints/api/__init__.py
new file mode 100644
index 000000000..8dcabe6a3
--- /dev/null
+++ b/endpoints/api/__init__.py
@@ -0,0 +1,448 @@
+import logging
+import datetime
+
+from calendar import timegm
+from email.utils import formatdate
+from functools import partial, wraps
+
+from flask import Blueprint, request, session
+from flask_restful import Resource, abort, Api, reqparse
+from flask_restful.utils.cors import crossdomain
+from jsonschema import validate, ValidationError
+
+from app import app, metric_queue, authentication
+from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission,
+ AdministerRepositoryPermission, UserReadPermission,
+ UserAdminPermission)
+from auth import scopes
+from auth.auth_context import (get_authenticated_context, get_authenticated_user,
+ get_validated_oauth_token)
+from auth.decorators import process_oauth
+from data import model as data_model
+from data.logs_model import logs_model
+from data.database import RepositoryState
+from endpoints.csrf import csrf_protect
+from endpoints.exception import (Unauthorized, InvalidRequest, InvalidResponse,
+ FreshLoginRequired, NotFound)
+from endpoints.decorators import check_anon_protection, require_xhr_from_browser, check_readonly
+from util.metrics.metricqueue import time_decorator
+from util.names import parse_namespace_repository
+from util.pagination import encrypt_page_token, decrypt_page_token
+from util.request import get_request_ip
+from __init__models_pre_oci import pre_oci_model as model
+
+
+logger = logging.getLogger(__name__)
+api_bp = Blueprint('api', __name__)
+
+
+CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With']
+
+class ApiExceptionHandlingApi(Api):
+ @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS)
+ def handle_error(self, error):
+ return super(ApiExceptionHandlingApi, self).handle_error(error)
+
+
+api = ApiExceptionHandlingApi()
+api.init_app(api_bp)
+api.decorators = [csrf_protect(),
+ crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS),
+ process_oauth, time_decorator(api_bp.name, metric_queue),
+ require_xhr_from_browser]
+
+
+def resource(*urls, **kwargs):
+ def wrapper(api_resource):
+ if not api_resource:
+ return None
+
+ api_resource.registered = True
+ api.add_resource(api_resource, *urls, **kwargs)
+ return api_resource
+ return wrapper
+
+
+def show_if(value):
+ def f(inner):
+ if hasattr(inner, 'registered') and inner.registered:
+ msg = ('API endpoint %s is already registered; please switch the ' +
+ '@show_if to be *below* the @resource decorator')
+ raise Exception(msg % inner)
+
+ if not value:
+ return None
+
+ return inner
+ return f
+
+
+def hide_if(value):
+ def f(inner):
+ if hasattr(inner, 'registered') and inner.registered:
+ msg = ('API endpoint %s is already registered; please switch the ' +
+ '@hide_if to be *below* the @resource decorator')
+ raise Exception(msg % inner)
+
+ if value:
+ return None
+
+ return inner
+ return f
+
+
+def truthy_bool(param):
+ return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'}
+
+
+def format_date(date):
+ """ Output an RFC822 date format. """
+ if date is None:
+ return None
+ return formatdate(timegm(date.utctimetuple()))
+
+
+def add_method_metadata(name, value):
+ def modifier(func):
+ if func is None:
+ return None
+
+ if '__api_metadata' not in dir(func):
+ func.__api_metadata = {}
+ func.__api_metadata[name] = value
+ return func
+ return modifier
+
+
+def method_metadata(func, name):
+ if func is None:
+ return None
+
+ if '__api_metadata' in dir(func):
+ return func.__api_metadata.get(name, None)
+ return None
+
+
+
+nickname = partial(add_method_metadata, 'nickname')
+related_user_resource = partial(add_method_metadata, 'related_user_resource')
+internal_only = add_method_metadata('internal', True)
+
+
+def path_param(name, description):
+ def add_param(func):
+ if not func:
+ return func
+
+ if '__api_path_params' not in dir(func):
+ func.__api_path_params = {}
+ func.__api_path_params[name] = {
+ 'name': name,
+ 'description': description
+ }
+ return func
+ return add_param
+
+
+def query_param(name, help_str, type=reqparse.text_type, default=None,
+ choices=(), required=False):
+ def add_param(func):
+ if '__api_query_params' not in dir(func):
+ func.__api_query_params = []
+ func.__api_query_params.append({
+ 'name': name,
+ 'type': type,
+ 'help': help_str,
+ 'default': default,
+ 'choices': choices,
+ 'required': required,
+ 'location': ('args')
+ })
+ return func
+ return add_param
+
+def page_support(page_token_kwarg='page_token', parsed_args_kwarg='parsed_args'):
+ def inner(func):
+ """ Adds pagination support to an API endpoint. The decorated API will have an
+ added query parameter named 'next_page'. Works in tandem with the
+ modelutil paginate method.
+ """
+ @wraps(func)
+ @query_param('next_page', 'The page token for the next page', type=str)
+ def wrapper(self, *args, **kwargs):
+ # Note: if page_token is None, we'll receive the first page of results back.
+ page_token = decrypt_page_token(kwargs[parsed_args_kwarg]['next_page'])
+ kwargs[page_token_kwarg] = page_token
+
+ (result, next_page_token) = func(self, *args, **kwargs)
+ if next_page_token is not None:
+ result['next_page'] = encrypt_page_token(next_page_token)
+
+ return result
+ return wrapper
+ return inner
+
+def parse_args(kwarg_name='parsed_args'):
+ def inner(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ if '__api_query_params' not in dir(func):
+ abort(500)
+
+ parser = reqparse.RequestParser()
+ for arg_spec in func.__api_query_params:
+ parser.add_argument(**arg_spec)
+ kwargs[kwarg_name] = parser.parse_args()
+
+ return func(self, *args, **kwargs)
+ return wrapper
+ return inner
+
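+# A hypothetical usage sketch showing how @parse_args and @page_support compose
+# (the resource path, class name and lookup helper are illustrative only, not part
+# of this module):
+#
+#   @resource('/v1/widgets')
+#   class WidgetList(ApiResource):
+#     @parse_args()
+#     @page_support()
+#     def get(self, parsed_args, page_token):
+#       # The endpoint returns (result_dict, next_page_token); page_support adds
+#       # an encrypted 'next_page' key to the result when more pages remain.
+#       widgets, next_page_token = lookup_widgets(page_token=page_token)
+#       return {'widgets': widgets}, next_page_token
+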
+def parse_repository_name(func):
+ @wraps(func)
+ def wrapper(repository, *args, **kwargs):
+ (namespace, repository) = parse_namespace_repository(repository, app.config['LIBRARY_NAMESPACE'])
+ return func(namespace, repository, *args, **kwargs)
+ return wrapper
+
+
+class ApiResource(Resource):
+ registered = False
+ method_decorators = [check_anon_protection, check_readonly]
+
+ def options(self):
+ return None, 200
+
+
+class RepositoryParamResource(ApiResource):
+ method_decorators = [check_anon_protection, parse_repository_name, check_readonly]
+
+
+def disallow_for_app_repositories(func):
+ @wraps(func)
+ def wrapped(self, namespace_name, repository_name, *args, **kwargs):
+ # Lookup the repository with the given namespace and name and ensure it is not an application
+ # repository.
+ if model.is_app_repository(namespace_name, repository_name):
+ abort(501)
+
+ return func(self, namespace_name, repository_name, *args, **kwargs)
+
+ return wrapped
+
+
+def disallow_for_non_normal_repositories(func):
+ @wraps(func)
+ def wrapped(self, namespace_name, repository_name, *args, **kwargs):
+ repo = data_model.repository.get_repository(namespace_name, repository_name)
+ if repo and repo.state != RepositoryState.NORMAL:
+ abort(503, message='Repository is in read only or mirror mode: %s' % repo.state)
+
+ return func(self, namespace_name, repository_name, *args, **kwargs)
+ return wrapped
+
+
+def require_repo_permission(permission_class, scope, allow_public=False):
+ def wrapper(func):
+ @add_method_metadata('oauth2_scope', scope)
+ @wraps(func)
+ def wrapped(self, namespace, repository, *args, **kwargs):
+ logger.debug('Checking permission %s for repo: %s/%s', permission_class, namespace,
+ repository)
+ permission = permission_class(namespace, repository)
+ if (permission.can() or
+ (allow_public and
+ model.repository_is_public(namespace, repository))):
+ return func(self, namespace, repository, *args, **kwargs)
+ raise Unauthorized()
+ return wrapped
+ return wrapper
+
+
+require_repo_read = require_repo_permission(ReadRepositoryPermission, scopes.READ_REPO, True)
+require_repo_write = require_repo_permission(ModifyRepositoryPermission, scopes.WRITE_REPO)
+require_repo_admin = require_repo_permission(AdministerRepositoryPermission, scopes.ADMIN_REPO)
+
+
+def require_user_permission(permission_class, scope=None):
+ def wrapper(func):
+ @add_method_metadata('oauth2_scope', scope)
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ user = get_authenticated_user()
+ if not user:
+ raise Unauthorized()
+
+ logger.debug('Checking permission %s for user %s', permission_class, user.username)
+ permission = permission_class(user.username)
+ if permission.can():
+ return func(self, *args, **kwargs)
+ raise Unauthorized()
+ return wrapped
+ return wrapper
+
+
+require_user_read = require_user_permission(UserReadPermission, scopes.READ_USER)
+require_user_admin = require_user_permission(UserAdminPermission, scopes.ADMIN_USER)
+
+
+def verify_not_prod(func):
+ @add_method_metadata('enterprise_only', True)
+ @wraps(func)
+ def wrapped(*args, **kwargs):
+ # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail.
+ # This should never happen (because of the feature-flag on SUPER_USERS), but we want to be
+ # absolutely sure.
+ if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0:
+ logger.error('!!! Super user method called IN PRODUCTION !!!')
+ raise NotFound()
+
+ return func(*args, **kwargs)
+
+ return wrapped
+
+
+def require_fresh_login(func):
+ @add_method_metadata('requires_fresh_login', True)
+ @wraps(func)
+ def wrapped(*args, **kwargs):
+ user = get_authenticated_user()
+ if not user:
+ raise Unauthorized()
+
+ if get_validated_oauth_token():
+ return func(*args, **kwargs)
+
+ logger.debug('Checking fresh login for user %s', user.username)
+
+ last_login = session.get('login_time', datetime.datetime.min)
+ valid_span = datetime.datetime.now() - datetime.timedelta(minutes=10)
+
+ if (not user.password_hash or last_login >= valid_span or
+ not authentication.supports_fresh_login):
+ return func(*args, **kwargs)
+
+ raise FreshLoginRequired()
+ return wrapped
+
+
+def require_scope(scope_object):
+ def wrapper(func):
+ @add_method_metadata('oauth2_scope', scope_object)
+ @wraps(func)
+ def wrapped(*args, **kwargs):
+ return func(*args, **kwargs)
+ return wrapped
+ return wrapper
+
+
+def max_json_size(max_size):
+ def wrapper(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ if request.is_json and len(request.get_data()) > max_size:
+ raise InvalidRequest()
+
+ return func(self, *args, **kwargs)
+ return wrapped
+ return wrapper
+
+
+def validate_json_request(schema_name, optional=False):
+ def wrapper(func):
+ @add_method_metadata('request_schema', schema_name)
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ schema = self.schemas[schema_name]
+ try:
+ json_data = request.get_json()
+ if json_data is None:
+ if not optional:
+ raise InvalidRequest('Missing JSON body')
+ else:
+ validate(json_data, schema)
+ return func(self, *args, **kwargs)
+ except ValidationError as ex:
+ raise InvalidRequest(str(ex))
+ return wrapped
+ return wrapper
+
+
+def request_error(exception=None, **kwargs):
+ data = kwargs.copy()
+ message = 'Request error.'
+ if exception:
+ message = str(exception)
+
+ message = data.pop('message', message)
+ raise InvalidRequest(message, data)
+
+
+def log_action(kind, user_or_orgname, metadata=None, repo=None, repo_name=None):
+ if not metadata:
+ metadata = {}
+
+ oauth_token = get_validated_oauth_token()
+ if oauth_token:
+ metadata['oauth_token_id'] = oauth_token.id
+ metadata['oauth_token_application_id'] = oauth_token.application.client_id
+ metadata['oauth_token_application'] = oauth_token.application.name
+
+ performer = get_authenticated_user()
+
+ if repo_name is not None:
+ repo = data_model.repository.get_repository(user_or_orgname, repo_name)
+
+ logs_model.log_action(kind, user_or_orgname,
+ repository=repo,
+ performer=performer,
+ ip=get_request_ip(),
+ metadata=metadata)
+
+
+def define_json_response(schema_name):
+ def wrapper(func):
+ @add_method_metadata('response_schema', schema_name)
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ schema = self.schemas[schema_name]
+ resp = func(self, *args, **kwargs)
+
+ if app.config['TESTING']:
+ try:
+ validate(resp, schema)
+ except ValidationError as ex:
+ raise InvalidResponse(str(ex))
+
+ return resp
+ return wrapped
+ return wrapper
+
+
+import endpoints.api.appspecifictokens
+import endpoints.api.billing
+import endpoints.api.build
+import endpoints.api.discovery
+import endpoints.api.error
+import endpoints.api.globalmessages
+import endpoints.api.image
+import endpoints.api.logs
+import endpoints.api.manifest
+import endpoints.api.organization
+import endpoints.api.permission
+import endpoints.api.prototype
+import endpoints.api.repository
+import endpoints.api.repositorynotification
+import endpoints.api.repoemail
+import endpoints.api.repotoken
+import endpoints.api.robot
+import endpoints.api.search
+import endpoints.api.suconfig
+import endpoints.api.superuser
+import endpoints.api.tag
+import endpoints.api.team
+import endpoints.api.trigger
+import endpoints.api.user
+import endpoints.api.secscan
+import endpoints.api.signing
+import endpoints.api.mirror
diff --git a/endpoints/api/__init__models_interface.py b/endpoints/api/__init__models_interface.py
new file mode 100644
index 000000000..974d9e0e1
--- /dev/null
+++ b/endpoints/api/__init__models_interface.py
@@ -0,0 +1,54 @@
+from abc import ABCMeta, abstractmethod
+
+from six import add_metaclass
+
+
+@add_metaclass(ABCMeta)
+class InitDataInterface(object):
+ """
+ Interface that represents all data store interactions required by __init__.
+ """
+
+ @abstractmethod
+ def is_app_repository(self, namespace_name, repository_name):
+ """
+
+ Args:
+ namespace_name: namespace or user
+ repository_name: repository
+
+ Returns:
+ Boolean
+ """
+ pass
+
+ @abstractmethod
+ def repository_is_public(self, namespace_name, repository_name):
+ """
+
+ Args:
+ namespace_name: namespace or user
+ repository_name: repository
+
+ Returns:
+ Boolean
+ """
+ pass
+
+ @abstractmethod
+ def log_action(self, kind, namespace_name, repository_name, performer, ip, metadata):
+ """
+
+ Args:
+ kind: type of log
+ namespace_name: name of the user or organization
+ repository_name: name of the repository the action relates to
+ performer: user doing the action
+ ip: originating ip
+ metadata: metadata about the action
+
+ Returns:
+ None
+ """
+ pass
+
diff --git a/endpoints/api/__init__models_pre_oci.py b/endpoints/api/__init__models_pre_oci.py
new file mode 100644
index 000000000..f14e7267c
--- /dev/null
+++ b/endpoints/api/__init__models_pre_oci.py
@@ -0,0 +1,19 @@
+from __init__models_interface import InitDataInterface
+
+from data import model
+from data.logs_model import logs_model
+
+class PreOCIModel(InitDataInterface):
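+ """ PreOCIModel implements the InitDataInterface on top of the pre-OCI data model. """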
+ def is_app_repository(self, namespace_name, repository_name):
+ return model.repository.get_repository(namespace_name, repository_name,
+ kind_filter='application') is not None
+
+ def repository_is_public(self, namespace_name, repository_name):
+ return model.repository.repository_is_public(namespace_name, repository_name)
+
+ def log_action(self, kind, namespace_name, repository_name, performer, ip, metadata):
+ repository = model.repository.get_repository(namespace_name, repository_name)
+ logs_model.log_action(kind, namespace_name, performer=performer, ip=ip, metadata=metadata,
+ repository=repository)
+
+pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/appspecifictokens.py b/endpoints/api/appspecifictokens.py
new file mode 100644
index 000000000..1e886c385
--- /dev/null
+++ b/endpoints/api/appspecifictokens.py
@@ -0,0 +1,133 @@
+""" Manages app specific tokens for the current user. """
+
+import logging
+import math
+
+from datetime import timedelta
+from flask import request
+
+import features
+
+from app import app
+from auth.auth_context import get_authenticated_user
+from data import model
+from endpoints.api import (ApiResource, nickname, resource, validate_json_request,
+ log_action, require_user_admin, require_fresh_login,
+ path_param, NotFound, format_date, show_if, query_param, parse_args,
+ truthy_bool)
+from util.timedeltastring import convert_to_timedelta
+
+logger = logging.getLogger(__name__)
+
+
+def token_view(token, include_code=False):
+ data = {
+ 'uuid': token.uuid,
+ 'title': token.title,
+ 'last_accessed': format_date(token.last_accessed),
+ 'created': format_date(token.created),
+ 'expiration': format_date(token.expiration),
+ }
+
+ if include_code:
+ data.update({
+ 'token_code': model.appspecifictoken.get_full_token_string(token),
+ })
+
+ return data
+
+
+# The default window to use when looking up tokens that will be expiring.
+_DEFAULT_TOKEN_EXPIRATION_WINDOW = '4w'
+
+
+@resource('/v1/user/apptoken')
+@show_if(features.APP_SPECIFIC_TOKENS)
+class AppTokens(ApiResource):
+ """ Lists all app specific tokens for a user """
+ schemas = {
+ 'NewToken': {
+ 'type': 'object',
+ 'required': [
+ 'title',
+ ],
+ 'properties': {
+ 'title': {
+ 'type': 'string',
+ 'description': 'The user-defined title for the token',
+ },
+ }
+ },
+ }
+
+ @require_user_admin
+ @nickname('listAppTokens')
+ @parse_args()
+ @query_param('expiring', 'If true, only returns those tokens expiring soon', type=truthy_bool)
+ def get(self, parsed_args):
+ """ Lists the app specific tokens for the user. """
+ expiring = parsed_args['expiring']
+ if expiring:
+ expiration = app.config.get('APP_SPECIFIC_TOKEN_EXPIRATION')
+ token_expiration = convert_to_timedelta(expiration or _DEFAULT_TOKEN_EXPIRATION_WINDOW)
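+ # Treat tokens as "expiring soon" if they expire within 10% of the configured lifetime
+ # (at least one second).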
+ seconds = math.ceil(token_expiration.total_seconds() * 0.1) or 1
+ soon = timedelta(seconds=seconds)
+ tokens = model.appspecifictoken.get_expiring_tokens(get_authenticated_user(), soon)
+ else:
+ tokens = model.appspecifictoken.list_tokens(get_authenticated_user())
+
+ return {
+ 'tokens': [token_view(token, include_code=False) for token in tokens],
+ 'only_expiring': expiring,
+ }
+
+ @require_user_admin
+ @require_fresh_login
+ @nickname('createAppToken')
+ @validate_json_request('NewToken')
+ def post(self):
+ """ Create a new app specific token for user. """
+ title = request.get_json()['title']
+ token = model.appspecifictoken.create_token(get_authenticated_user(), title)
+
+ log_action('create_app_specific_token', get_authenticated_user().username,
+ {'app_specific_token_title': token.title,
+ 'app_specific_token': token.uuid})
+
+ return {
+ 'token': token_view(token, include_code=True),
+ }
+
+
+@resource('/v1/user/apptoken/<token_uuid>')
+@show_if(features.APP_SPECIFIC_TOKENS)
+@path_param('token_uuid', 'The uuid of the app specific token')
+class AppToken(ApiResource):
+ """ Provides operations on an app specific token """
+ @require_user_admin
+ @require_fresh_login
+ @nickname('getAppToken')
+ def get(self, token_uuid):
+ """ Returns a specific app token for the user. """
+ token = model.appspecifictoken.get_token_by_uuid(token_uuid, owner=get_authenticated_user())
+ if token is None:
+ raise NotFound()
+
+ return {
+ 'token': token_view(token, include_code=True),
+ }
+
+ @require_user_admin
+ @require_fresh_login
+ @nickname('revokeAppToken')
+ def delete(self, token_uuid):
+ """ Revokes a specific app token for the user. """
+ token = model.appspecifictoken.revoke_token_by_uuid(token_uuid, owner=get_authenticated_user())
+ if token is None:
+ raise NotFound()
+
+ log_action('revoke_app_specific_token', get_authenticated_user().username,
+ {'app_specific_token_title': token.title,
+ 'app_specific_token': token.uuid})
+
+ return '', 204
diff --git a/endpoints/api/billing.py b/endpoints/api/billing.py
new file mode 100644
index 000000000..db7158d12
--- /dev/null
+++ b/endpoints/api/billing.py
@@ -0,0 +1,607 @@
+""" Billing information, subscriptions, and plan information. """
+
+import stripe
+
+from flask import request
+from app import billing
+from endpoints.api import (resource, nickname, ApiResource, validate_json_request, log_action,
+ related_user_resource, internal_only, require_user_admin, show_if,
+ path_param, require_scope, abort)
+from endpoints.exception import Unauthorized, NotFound
+from endpoints.api.subscribe import subscribe, subscription_view
+from auth.permissions import AdministerOrganizationPermission
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from data import model
+from data.billing import PLANS, get_plan
+
+import features
+import uuid
+import json
+
+def get_namespace_plan(namespace):
+ """ Returns the plan of the given namespace. """
+ namespace_user = model.user.get_namespace_user(namespace)
+ if namespace_user is None:
+ return None
+
+ if not namespace_user.stripe_id:
+ return None
+
+ # Ask Stripe for the subscribed plan.
+ # TODO: Can we cache this or make it faster somehow?
+ try:
+ cus = billing.Customer.retrieve(namespace_user.stripe_id)
+ except stripe.error.APIConnectionError:
+ abort(503, message='Cannot contact Stripe')
+
+ if not cus.subscription:
+ return None
+
+ return get_plan(cus.subscription.plan.id)
+
+
+def lookup_allowed_private_repos(namespace):
+ """ Returns false if the given namespace has used its allotment of private repositories. """
+ current_plan = get_namespace_plan(namespace)
+ if current_plan is None:
+ return False
+
+ # Find the number of private repositories used by the namespace and compare it to the
+ # plan subscribed.
+ private_repos = model.user.get_private_repo_count(namespace)
+
+ return private_repos < current_plan['privateRepos']
+
+
+def carderror_response(e):
+ return {'carderror': str(e)}, 402
+
+
+def get_card(user):
+ card_info = {
+ 'is_valid': False
+ }
+
+ if user.stripe_id:
+ try:
+ cus = billing.Customer.retrieve(user.stripe_id)
+ except stripe.error.APIConnectionError as e:
+ abort(503, message='Cannot contact Stripe')
+
+ if cus and cus.default_card:
+ # Find the default card.
+ default_card = None
+ for card in cus.cards.data:
+ if card.id == cus.default_card:
+ default_card = card
+ break
+
+ if default_card:
+ card_info = {
+ 'owner': default_card.name,
+ 'type': default_card.type,
+ 'last4': default_card.last4,
+ 'exp_month': default_card.exp_month,
+ 'exp_year': default_card.exp_year
+ }
+
+ return {'card': card_info}
+
+
+def set_card(user, token):
+ if user.stripe_id:
+ try:
+ cus = billing.Customer.retrieve(user.stripe_id)
+ except stripe.error.APIConnectionError as e:
+ abort(503, message='Cannot contact Stripe')
+
+ if cus:
+ try:
+ cus.card = token
+ cus.save()
+ except stripe.error.CardError as exc:
+ return carderror_response(exc)
+ except stripe.error.InvalidRequestError as exc:
+ return carderror_response(exc)
+ except stripe.error.APIConnectionError as e:
+ return carderror_response(e)
+
+ return get_card(user)
+
+
+def get_invoices(customer_id):
+ def invoice_view(i):
+ return {
+ 'id': i.id,
+ 'date': i.date,
+ 'period_start': i.period_start,
+ 'period_end': i.period_end,
+ 'paid': i.paid,
+ 'amount_due': i.amount_due,
+ 'next_payment_attempt': i.next_payment_attempt,
+ 'attempted': i.attempted,
+ 'closed': i.closed,
+ 'total': i.total,
+ 'plan': i.lines.data[0].plan.id if i.lines.data[0].plan else None
+ }
+
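+ # Ask Stripe for at most twelve invoices for this customer.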
+ try:
+ invoices = billing.Invoice.list(customer=customer_id, count=12)
+ except stripe.error.APIConnectionError as e:
+ abort(503, message='Cannot contact Stripe')
+
+ return {
+ 'invoices': [invoice_view(i) for i in invoices.data]
+ }
+
+
+def get_invoice_fields(user):
+ try:
+ cus = billing.Customer.retrieve(user.stripe_id)
+ except stripe.error.APIConnectionError:
+ abort(503, message='Cannot contact Stripe')
+
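+ # Custom invoice fields are stored as a JSON-encoded list in the Stripe customer's metadata.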
+ if not 'metadata' in cus:
+ cus.metadata = {}
+
+ return json.loads(cus.metadata.get('invoice_fields') or '[]'), cus
+
+
+def create_billing_invoice_field(user, title, value):
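+ # Only the first segment of a random UUID is kept as a short identifier for the new field.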
+ new_field = {
+ 'uuid': str(uuid.uuid4()).split('-')[0],
+ 'title': title,
+ 'value': value
+ }
+
+ invoice_fields, cus = get_invoice_fields(user)
+ invoice_fields.append(new_field)
+
+ if not 'metadata' in cus:
+ cus.metadata = {}
+
+ cus.metadata['invoice_fields'] = json.dumps(invoice_fields)
+ cus.save()
+ return new_field
+
+
+def delete_billing_invoice_field(user, field_uuid):
+ invoice_fields, cus = get_invoice_fields(user)
+ invoice_fields = [field for field in invoice_fields if not field['uuid'] == field_uuid]
+
+ if not 'metadata' in cus:
+ cus.metadata = {}
+
+ cus.metadata['invoice_fields'] = json.dumps(invoice_fields)
+ cus.save()
+ return True
+
+
+@resource('/v1/plans/')
+@show_if(features.BILLING)
+class ListPlans(ApiResource):
+ """ Resource for listing the available plans. """
+ @nickname('listPlans')
+ def get(self):
+ """ List the avaialble plans. """
+ return {
+ 'plans': PLANS,
+ }
+
+
+@resource('/v1/user/card')
+@internal_only
+@show_if(features.BILLING)
+class UserCard(ApiResource):
+ """ Resource for managing a user's credit card. """
+ schemas = {
+ 'UserCard': {
+ 'id': 'UserCard',
+ 'type': 'object',
+ 'description': 'Description of a user card',
+ 'required': [
+ 'token',
+ ],
+ 'properties': {
+ 'token': {
+ 'type': 'string',
+ 'description': 'Stripe token that is generated by stripe checkout.js',
+ },
+ },
+ },
+ }
+
+ @require_user_admin
+ @nickname('getUserCard')
+ def get(self):
+ """ Get the user's credit card. """
+ user = get_authenticated_user()
+ return get_card(user)
+
+ @require_user_admin
+ @nickname('setUserCard')
+ @validate_json_request('UserCard')
+ def post(self):
+ """ Update the user's credit card. """
+ user = get_authenticated_user()
+ token = request.get_json()['token']
+ response = set_card(user, token)
+ log_action('account_change_cc', user.username)
+ return response
+
+
+@resource('/v1/organization/<orgname>/card')
+@path_param('orgname', 'The name of the organization')
+@internal_only
+@related_user_resource(UserCard)
+@show_if(features.BILLING)
+class OrganizationCard(ApiResource):
+ """ Resource for managing an organization's credit card. """
+ schemas = {
+ 'OrgCard': {
+ 'id': 'OrgCard',
+ 'type': 'object',
+ 'description': 'Description of an organization card',
+ 'required': [
+ 'token',
+ ],
+ 'properties': {
+ 'token': {
+ 'type': 'string',
+ 'description': 'Stripe token that is generated by stripe checkout.js',
+ },
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrgCard')
+ def get(self, orgname):
+ """ Get the organization's credit card. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ organization = model.organization.get_organization(orgname)
+ return get_card(organization)
+
+ raise Unauthorized()
+
+ @nickname('setOrgCard')
+ @validate_json_request('OrgCard')
+ def post(self, orgname):
+ """ Update the orgnaization's credit card. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ organization = model.organization.get_organization(orgname)
+ token = request.get_json()['token']
+ response = set_card(organization, token)
+ log_action('account_change_cc', orgname)
+ return response
+
+ raise Unauthorized()
+
+
+@resource('/v1/user/plan')
+@internal_only
+@show_if(features.BILLING)
+class UserPlan(ApiResource):
+ """ Resource for managing a user's subscription. """
+ schemas = {
+ 'UserSubscription': {
+ 'id': 'UserSubscription',
+ 'type': 'object',
+ 'description': 'Description of a user subscription',
+ 'required': [
+ 'plan',
+ ],
+ 'properties': {
+ 'token': {
+ 'type': 'string',
+ 'description': 'Stripe token that is generated by stripe checkout.js',
+ },
+ 'plan': {
+ 'type': 'string',
+ 'description': 'Plan name to which the user wants to subscribe',
+ },
+ },
+ },
+ }
+
+ @require_user_admin
+ @nickname('updateUserSubscription')
+ @validate_json_request('UserSubscription')
+ def put(self):
+ """ Create or update the user's subscription. """
+ request_data = request.get_json()
+ plan = request_data['plan']
+ token = request_data['token'] if 'token' in request_data else None
+ user = get_authenticated_user()
+ return subscribe(user, plan, token, False) # Business features not required
+
+ @require_user_admin
+ @nickname('getUserSubscription')
+ def get(self):
+ """ Fetch any existing subscription for the user. """
+ cus = None
+ user = get_authenticated_user()
+ private_repos = model.user.get_private_repo_count(user.username)
+
+ if user.stripe_id:
+ try:
+ cus = billing.Customer.retrieve(user.stripe_id)
+ except stripe.error.APIConnectionError as e:
+ abort(503, message='Cannot contact Stripe')
+
+ if cus.subscription:
+ return subscription_view(cus.subscription, private_repos)
+
+ return {
+ 'hasSubscription': False,
+ 'isExistingCustomer': cus is not None,
+ 'plan': 'free',
+ 'usedPrivateRepos': private_repos,
+ }
+
+
+@resource('/v1/organization/<orgname>/plan')
+@path_param('orgname', 'The name of the organization')
+@internal_only
+@related_user_resource(UserPlan)
+@show_if(features.BILLING)
+class OrganizationPlan(ApiResource):
+ """ Resource for managing a org's subscription. """
+ schemas = {
+ 'OrgSubscription': {
+ 'id': 'OrgSubscription',
+ 'type': 'object',
+ 'description': 'Description of an organization subscription',
+ 'required': [
+ 'plan',
+ ],
+ 'properties': {
+ 'token': {
+ 'type': 'string',
+ 'description': 'Stripe token that is generated by stripe checkout.js',
+ },
+ 'plan': {
+ 'type': 'string',
+ 'description': 'Plan name to which the user wants to subscribe',
+ },
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('updateOrgSubscription')
+ @validate_json_request('OrgSubscription')
+ def put(self, orgname):
+ """ Create or update the org's subscription. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ request_data = request.get_json()
+ plan = request_data['plan']
+ token = request_data['token'] if 'token' in request_data else None
+ organization = model.organization.get_organization(orgname)
+ return subscribe(organization, plan, token, True) # Business plan required
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrgSubscription')
+ def get(self, orgname):
+ """ Fetch any existing subscription for the org. """
+ cus = None
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ private_repos = model.user.get_private_repo_count(orgname)
+ organization = model.organization.get_organization(orgname)
+ if organization.stripe_id:
+ try:
+ cus = billing.Customer.retrieve(organization.stripe_id)
+ except stripe.error.APIConnectionError as e:
+ abort(503, message='Cannot contact Stripe')
+
+ if cus.subscription:
+ return subscription_view(cus.subscription, private_repos)
+
+ return {
+ 'hasSubscription': False,
+ 'isExistingCustomer': cus is not None,
+ 'plan': 'free',
+ 'usedPrivateRepos': private_repos,
+ }
+
+ raise Unauthorized()
+
+
+@resource('/v1/user/invoices')
+@internal_only
+@show_if(features.BILLING)
+class UserInvoiceList(ApiResource):
+ """ Resource for listing a user's invoices. """
+ @require_user_admin
+ @nickname('listUserInvoices')
+ def get(self):
+ """ List the invoices for the current user. """
+ user = get_authenticated_user()
+ if not user.stripe_id:
+ raise NotFound()
+
+ return get_invoices(user.stripe_id)
+
+
+@resource('/v1/organization/<orgname>/invoices')
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(UserInvoiceList)
+@show_if(features.BILLING)
+class OrganizationInvoiceList(ApiResource):
+ """ Resource for listing an orgnaization's invoices. """
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('listOrgInvoices')
+ def get(self, orgname):
+ """ List the invoices for the specified orgnaization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ organization = model.organization.get_organization(orgname)
+ if not organization.stripe_id:
+ raise NotFound()
+
+ return get_invoices(organization.stripe_id)
+
+ raise Unauthorized()
+
+
+@resource('/v1/user/invoice/fields')
+@internal_only
+@show_if(features.BILLING)
+class UserInvoiceFieldList(ApiResource):
+ """ Resource for listing and creating a user's custom invoice fields. """
+ schemas = {
+ 'InvoiceField': {
+ 'id': 'InvoiceField',
+ 'type': 'object',
+ 'description': 'Description of an invoice field',
+ 'required': [
+ 'title', 'value'
+ ],
+ 'properties': {
+ 'title': {
+ 'type': 'string',
+ 'description': 'The title of the field being added',
+ },
+ 'value': {
+ 'type': 'string',
+ 'description': 'The value of the field being added',
+ },
+ },
+ },
+ }
+
+ @require_user_admin
+ @nickname('listUserInvoiceFields')
+ def get(self):
+ """ List the invoice fields for the current user. """
+ user = get_authenticated_user()
+ if not user.stripe_id:
+ raise NotFound()
+
+ return {'fields': get_invoice_fields(user)[0]}
+
+ @require_user_admin
+ @nickname('createUserInvoiceField')
+ @validate_json_request('InvoiceField')
+ def post(self):
+ """ Creates a new invoice field. """
+ user = get_authenticated_user()
+ if not user.stripe_id:
+ raise NotFound()
+
+ data = request.get_json()
+ created_field = create_billing_invoice_field(user, data['title'], data['value'])
+ return created_field
+
+
+@resource('/v1/user/invoice/field/<field_uuid>')
+@internal_only
+@show_if(features.BILLING)
+class UserInvoiceField(ApiResource):
+ """ Resource for deleting a user's custom invoice fields. """
+ @require_user_admin
+ @nickname('deleteUserInvoiceField')
+ def delete(self, field_uuid):
+ """ Deletes the invoice field for the current user. """
+ user = get_authenticated_user()
+ if not user.stripe_id:
+ raise NotFound()
+
+ result = delete_billing_invoice_field(user, field_uuid)
+ if not result:
+ abort(404)
+
+ return 'Okay', 201
+
+
+@resource('/v1/organization/<orgname>/invoice/fields')
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(UserInvoiceFieldList)
+@internal_only
+@show_if(features.BILLING)
+class OrganizationInvoiceFieldList(ApiResource):
+ """ Resource for listing and creating an organization's custom invoice fields. """
+ schemas = {
+ 'InvoiceField': {
+ 'id': 'InvoiceField',
+ 'type': 'object',
+ 'description': 'Description of an invoice field',
+ 'required': [
+ 'title', 'value'
+ ],
+ 'properties': {
+ 'title': {
+ 'type': 'string',
+ 'description': 'The title of the field being added',
+ },
+ 'value': {
+ 'type': 'string',
+ 'description': 'The value of the field being added',
+ },
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('listOrgInvoiceFields')
+ def get(self, orgname):
+ """ List the invoice fields for the organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ organization = model.organization.get_organization(orgname)
+ if not organization.stripe_id:
+ raise NotFound()
+
+ return {'fields': get_invoice_fields(organization)[0]}
+
+ abort(403)
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('createOrgInvoiceField')
+ @validate_json_request('InvoiceField')
+ def post(self, orgname):
+ """ Creates a new invoice field. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ organization = model.organization.get_organization(orgname)
+ if not organization.stripe_id:
+ raise NotFound()
+
+ data = request.get_json()
+ created_field = create_billing_invoice_field(organization, data['title'], data['value'])
+ return created_field
+
+ abort(403)
+
+
+@resource('/v1/organization/<orgname>/invoice/field/<field_uuid>')
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(UserInvoiceField)
+@internal_only
+@show_if(features.BILLING)
+class OrganizationInvoiceField(ApiResource):
+ """ Resource for deleting an organization's custom invoice fields. """
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('deleteOrgInvoiceField')
+ def delete(self, orgname, field_uuid):
+ """ Deletes the invoice field for the current user. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ organization = model.organization.get_organization(orgname)
+ if not organization.stripe_id:
+ raise NotFound()
+
+ result = delete_billing_invoice_field(organization, field_uuid)
+ if not result:
+ abort(404)
+
+ return 'Okay', 201
+
+ abort(403)
diff --git a/endpoints/api/build.py b/endpoints/api/build.py
new file mode 100644
index 000000000..d7fb55ae1
--- /dev/null
+++ b/endpoints/api/build.py
@@ -0,0 +1,485 @@
+""" Create, list, cancel and get status/logs of repository builds. """
+import datetime
+import hashlib
+import json
+import logging
+import os
+
+from flask import request
+from urlparse import urlparse
+
+import features
+
+from app import userfiles as user_files, build_logs, log_archive, dockerfile_build_queue
+from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission,
+ AdministerRepositoryPermission, AdministerOrganizationPermission,
+ SuperUserPermission)
+from buildtrigger.basehandler import BuildTriggerHandler
+from data import database
+from data import model
+from data.buildlogs import BuildStatusRetrievalError
+from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
+ require_repo_read, require_repo_write, validate_json_request,
+ ApiResource, internal_only, format_date, api, path_param,
+ require_repo_admin, abort, disallow_for_app_repositories,
+ disallow_for_non_normal_repositories)
+from endpoints.building import (start_build, PreparedBuild, MaximumBuildsQueuedException,
+ BuildTriggerDisabledException)
+from endpoints.exception import Unauthorized, NotFound, InvalidRequest
+from util.names import parse_robot_username
+from util.request import get_request_ip
+
+logger = logging.getLogger(__name__)
+
+
+def get_trigger_config(trigger):
+ try:
+ return json.loads(trigger.config)
+ except:
+ return {}
+
+
+def get_job_config(build_obj):
+ try:
+ return json.loads(build_obj.job_config)
+ except:
+ return {}
+
+
+def user_view(user):
+ return {
+ 'name': user.username,
+ 'kind': 'user',
+ 'is_robot': user.robot,
+ }
+
+
+def trigger_view(trigger, can_read=False, can_admin=False, for_build=False):
+ if trigger and trigger.uuid:
+ build_trigger = BuildTriggerHandler.get_handler(trigger)
+ build_source = build_trigger.config.get('build_source')
+
+ repo_url = build_trigger.get_repository_url() if build_source else None
+ can_read = can_read or can_admin
+
+ trigger_data = {
+ 'id': trigger.uuid,
+ 'service': trigger.service.name,
+ 'is_active': build_trigger.is_active(),
+
+ 'build_source': build_source if can_read else None,
+ 'repository_url': repo_url if can_read else None,
+
+ 'config': build_trigger.config if can_admin else {},
+ 'can_invoke': can_admin,
+ 'enabled': trigger.enabled,
+ 'disabled_reason': trigger.disabled_reason.name if trigger.disabled_reason else None,
+ }
+
+ if not for_build and can_admin and trigger.pull_robot:
+ trigger_data['pull_robot'] = user_view(trigger.pull_robot)
+
+ return trigger_data
+
+ return None
+
+
+def _get_build_status(build_obj):
+ """ Returns the updated build phase, status and (if any) error for the build object. """
+ phase = build_obj.phase
+ status = {}
+ error = None
+
+ # If the build is currently running, then load its "real-time" status from Redis.
+ if not database.BUILD_PHASE.is_terminal_phase(phase):
+ try:
+ status = build_logs.get_status(build_obj.uuid)
+ except BuildStatusRetrievalError as bsre:
+ phase = 'cannot_load'
+ if SuperUserPermission().can():
+ error = str(bsre)
+ else:
+ error = 'Redis may be down. Please contact support.'
+
+ if phase != 'cannot_load':
+ # If the status contains a heartbeat, check whether it has been written within the last
+ # minute. If not, the build has timed out.
+ if status is not None and 'heartbeat' in status and status['heartbeat']:
+ heartbeat = datetime.datetime.utcfromtimestamp(status['heartbeat'])
+ if datetime.datetime.utcnow() - heartbeat > datetime.timedelta(minutes=1):
+ phase = database.BUILD_PHASE.INTERNAL_ERROR
+
+ # If the phase is an internal error and the queue item has no retries remaining,
+ # report the phase as 'expired' instead.
+ if phase == database.BUILD_PHASE.INTERNAL_ERROR:
+ retry = (build_obj.queue_id and
+ dockerfile_build_queue.has_retries_remaining(build_obj.queue_id))
+ if not retry:
+ phase = 'expired'
+
+ return (phase, status, error)
+
+
+def build_status_view(build_obj):
+ phase, status, error = _get_build_status(build_obj)
+ repo_namespace = build_obj.repository.namespace_user.username
+ repo_name = build_obj.repository.name
+
+ can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
+ can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
+ can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
+
+ job_config = get_job_config(build_obj)
+
+ resp = {
+ 'id': build_obj.uuid,
+ 'phase': phase,
+ 'started': format_date(build_obj.started),
+ 'display_name': build_obj.display_name,
+ 'status': status or {},
+ 'subdirectory': job_config.get('build_subdir', ''),
+ 'dockerfile_path': job_config.get('build_subdir', ''),
+ 'context': job_config.get('context', ''),
+ 'tags': job_config.get('docker_tags', []),
+ 'manual_user': job_config.get('manual_user', None),
+ 'is_writer': can_write,
+ 'trigger': trigger_view(build_obj.trigger, can_read, can_admin, for_build=True),
+ 'trigger_metadata': job_config.get('trigger_metadata', None) if can_read else None,
+ 'resource_key': build_obj.resource_key,
+ 'pull_robot': user_view(build_obj.pull_robot) if build_obj.pull_robot else None,
+ 'repository': {
+ 'namespace': repo_namespace,
+ 'name': repo_name
+ },
+ 'error': error,
+ }
+
+ if can_write or features.READER_BUILD_LOGS:
+ if build_obj.resource_key is not None:
+ resp['archive_url'] = user_files.get_file_url(build_obj.resource_key,
+ get_request_ip(), requires_cors=True)
+ elif job_config.get('archive_url', None):
+ resp['archive_url'] = job_config['archive_url']
+
+ return resp
+
+
+@resource('/v1/repository/<apirepopath:repository>/build/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryBuildList(RepositoryParamResource):
+ """ Resource related to creating and listing repository builds. """
+ schemas = {
+ 'RepositoryBuildRequest': {
+ 'type': 'object',
+ 'description': 'Description of a new repository build.',
+ 'properties': {
+ 'file_id': {
+ 'type': 'string',
+ 'description': 'The file id that was generated when the build spec was uploaded',
+ },
+ 'archive_url': {
+ 'type': 'string',
+ 'description': 'The URL of the .tar.gz to build. Must start with "http" or "https".',
+ },
+ 'subdirectory': {
+ 'type': 'string',
+ 'description': 'Subdirectory in which the Dockerfile can be found. You can only specify this or dockerfile_path',
+ },
+ 'dockerfile_path': {
+ 'type': 'string',
+ 'description': 'Path to a dockerfile. You can only specify this or subdirectory.',
+ },
+ 'context': {
+ 'type': 'string',
+ 'description': 'Pass in the context for the dockerfile. This is optional.',
+ },
+ 'pull_robot': {
+ 'type': 'string',
+ 'description': 'Username of a Quay robot account to use as pull credentials',
+ },
+ 'docker_tags': {
+ 'type': 'array',
+ 'description': 'The tags to which the built images will be pushed. ' +
+ 'If none specified, "latest" is used.',
+ 'items': {
+ 'type': 'string'
+ },
+ 'minItems': 1,
+ 'uniqueItems': True
+ }
+ },
+ },
+ }
+
+ @require_repo_read
+ @parse_args()
+ @query_param('limit', 'The maximum number of builds to return', type=int, default=5)
+ @query_param('since', 'Returns all builds since the given unix timecode', type=int, default=None)
+ @nickname('getRepoBuilds')
+ @disallow_for_app_repositories
+ def get(self, namespace, repository, parsed_args):
+ """ Get the list of repository builds. """
+ limit = parsed_args.get('limit', 5)
+ since = parsed_args.get('since', None)
+
+ if since is not None:
+ since = datetime.datetime.utcfromtimestamp(since)
+
+ builds = model.build.list_repository_builds(namespace, repository, limit, since=since)
+ return {
+ 'builds': [build_status_view(build) for build in builds]
+ }
+
+ @require_repo_write
+ @nickname('requestRepoBuild')
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @validate_json_request('RepositoryBuildRequest')
+ def post(self, namespace, repository):
+ """ Request that a repository be built and pushed from the specified input. """
+ logger.debug('User requested repository initialization.')
+ request_json = request.get_json()
+
+ dockerfile_id = request_json.get('file_id', None)
+ archive_url = request_json.get('archive_url', None)
+
+ if not dockerfile_id and not archive_url:
+ raise InvalidRequest('file_id or archive_url required')
+
+ if archive_url:
+ archive_match = None
+ try:
+ archive_match = urlparse(archive_url)
+ except ValueError:
+ pass
+
+ if not archive_match:
+ raise InvalidRequest('Invalid Archive URL: Must be a valid URI')
+
+ scheme = archive_match.scheme
+ if scheme != 'http' and scheme != 'https':
+ raise InvalidRequest('Invalid Archive URL: Must be http or https')
+
+ context, subdir = self.get_dockerfile_context(request_json)
+ tags = request_json.get('docker_tags', ['latest'])
+ pull_robot_name = request_json.get('pull_robot', None)
+
+ # Verify that the requesting user is allowed to use the named pull robot.
+ if pull_robot_name:
+ result = parse_robot_username(pull_robot_name)
+ if result:
+ try:
+ model.user.lookup_robot(pull_robot_name)
+ except model.InvalidRobotException:
+ raise NotFound()
+
+ # Make sure the user has administer permissions for the robot's namespace.
+ (robot_namespace, _) = result
+ if not AdministerOrganizationPermission(robot_namespace).can():
+ raise Unauthorized()
+ else:
+ raise Unauthorized()
+
+ # Check if the dockerfile resource has already been used. If so, then it
+ # can only be reused if the user has access to the repository in which the
+ # dockerfile was previously built.
+ if dockerfile_id:
+ associated_repository = model.build.get_repository_for_resource(dockerfile_id)
+ if associated_repository:
+ if not ModifyRepositoryPermission(associated_repository.namespace_user.username,
+ associated_repository.name).can():
+ raise Unauthorized()
+
+ # Start the build.
+ repo = model.repository.get_repository(namespace, repository)
+ if repo is None:
+ raise NotFound()
+
+ try:
+ build_name = (user_files.get_file_checksum(dockerfile_id)
+ if dockerfile_id
+ else hashlib.sha224(archive_url).hexdigest()[0:7])
+ except IOError:
+ raise InvalidRequest('File %s could not be found or is invalid' % dockerfile_id)
+
+ prepared = PreparedBuild()
+ prepared.build_name = build_name
+ prepared.dockerfile_id = dockerfile_id
+ prepared.archive_url = archive_url
+ prepared.tags = tags
+ prepared.subdirectory = subdir
+ prepared.context = context
+ prepared.is_manual = True
+ prepared.metadata = {}
+ try:
+ build_request = start_build(repo, prepared, pull_robot_name=pull_robot_name)
+ except MaximumBuildsQueuedException:
+ abort(429, message='Maximum queued build rate exceeded.')
+ except BuildTriggerDisabledException:
+ abort(400, message='Build trigger is disabled')
+
+ resp = build_status_view(build_request)
+ repo_string = '%s/%s' % (namespace, repository)
+ headers = {
+ 'Location': api.url_for(RepositoryBuildStatus, repository=repo_string,
+ build_uuid=build_request.uuid),
+ }
+ return resp, 201, headers
+
+ @staticmethod
+ def get_dockerfile_context(request_json):
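+ """ Extracts the build context and Dockerfile path from the request body: 'dockerfile_path'
+ takes precedence when present; otherwise 'subdirectory' (or the context itself) is treated
+ as the directory containing a file named 'Dockerfile'. """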
+ context = request_json['context'] if 'context' in request_json else os.path.sep
+ if 'dockerfile_path' in request_json:
+ subdir = request_json['dockerfile_path']
+ if 'context' not in request_json:
+ context = os.path.dirname(subdir)
+ return context, subdir
+
+ if 'subdirectory' in request_json:
+ subdir = request_json['subdirectory']
+ context = subdir
+ if not subdir.endswith(os.path.sep):
+ subdir += os.path.sep
+
+ subdir += 'Dockerfile'
+ else:
+ if context.endswith(os.path.sep):
+ subdir = context + 'Dockerfile'
+ else:
+ subdir = context + os.path.sep + 'Dockerfile'
+
+ return context, subdir
+
+@resource('/v1/repository/<apirepopath:repository>/build/<build_uuid>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('build_uuid', 'The UUID of the build')
+class RepositoryBuildResource(RepositoryParamResource):
+ """ Resource for dealing with repository builds. """
+ @require_repo_read
+ @nickname('getRepoBuild')
+ @disallow_for_app_repositories
+ def get(self, namespace, repository, build_uuid):
+ """ Returns information about a build. """
+ try:
+ build = model.build.get_repository_build(build_uuid)
+ except model.build.InvalidRepositoryBuildException:
+ raise NotFound()
+
+ if build.repository.name != repository or build.repository.namespace_user.username != namespace:
+ raise NotFound()
+
+ return build_status_view(build)
+
+ @require_repo_admin
+ @nickname('cancelRepoBuild')
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ def delete(self, namespace, repository, build_uuid):
+ """ Cancels a repository build. """
+ try:
+ build = model.build.get_repository_build(build_uuid)
+ except model.build.InvalidRepositoryBuildException:
+ raise NotFound()
+
+ if build.repository.name != repository or build.repository.namespace_user.username != namespace:
+ raise NotFound()
+
+ if model.build.cancel_repository_build(build, dockerfile_build_queue):
+ return 'Okay', 201
+ else:
+ raise InvalidRequest('Build is currently running or has finished')
+
+
+@resource('/v1/repository/<apirepopath:repository>/build/<build_uuid>/status')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('build_uuid', 'The UUID of the build')
+class RepositoryBuildStatus(RepositoryParamResource):
+ """ Resource for dealing with repository build status. """
+ @require_repo_read
+ @nickname('getRepoBuildStatus')
+ @disallow_for_app_repositories
+ def get(self, namespace, repository, build_uuid):
+ """ Return the status for the builds specified by the build uuids. """
+ build = model.build.get_repository_build(build_uuid)
+ if (not build or build.repository.name != repository or
+ build.repository.namespace_user.username != namespace):
+ raise NotFound()
+
+ return build_status_view(build)
+
+
+def get_logs_or_log_url(build):
+ # If the logs have been archived, just return a URL of the completed archive
+ if build.logs_archived:
+ return {
+ 'logs_url': log_archive.get_file_url(build.uuid, get_request_ip(), requires_cors=True)
+ }
+ start = int(request.args.get('start', 0))
+
+ try:
+ count, logs = build_logs.get_log_entries(build.uuid, start)
+ except BuildStatusRetrievalError:
+ count, logs = (0, [])
+
+ response_obj = {}
+ response_obj.update({
+ 'start': start,
+ 'total': count,
+ 'logs': [log for log in logs],
+ })
+
+ return response_obj
+
+
+@resource('/v1/repository/<apirepopath:repository>/build/<build_uuid>/logs')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('build_uuid', 'The UUID of the build')
+class RepositoryBuildLogs(RepositoryParamResource):
+ """ Resource for loading repository build logs. """
+ @require_repo_read
+ @nickname('getRepoBuildLogs')
+ @disallow_for_app_repositories
+ def get(self, namespace, repository, build_uuid):
+ """ Return the build logs for the build specified by the build uuid. """
+ can_write = ModifyRepositoryPermission(namespace, repository).can()
+ if not features.READER_BUILD_LOGS and not can_write:
+ raise Unauthorized()
+
+ build = model.build.get_repository_build(build_uuid)
+ if (not build or build.repository.name != repository or
+ build.repository.namespace_user.username != namespace):
+ raise NotFound()
+
+ return get_logs_or_log_url(build)
+
+
+@resource('/v1/filedrop/')
+@internal_only
+class FileDropResource(ApiResource):
+ """ Custom verb for setting up a client side file transfer. """
+ schemas = {
+ 'FileDropRequest': {
+ 'type': 'object',
+ 'description': 'Description of the file that the user wishes to upload.',
+ 'required': [
+ 'mimeType',
+ ],
+ 'properties': {
+ 'mimeType': {
+ 'type': 'string',
+ 'description': 'Type of the file which is about to be uploaded',
+ },
+ },
+ },
+ }
+
+ @nickname('getFiledropUrl')
+ @validate_json_request('FileDropRequest')
+ def post(self):
+ """ Request a URL to which a file may be uploaded. """
+ mime_type = request.get_json()['mimeType']
+ (url, file_id) = user_files.prepare_for_drop(mime_type, requires_cors=True)
+ return {
+ 'url': url,
+ 'file_id': str(file_id),
+ }
diff --git a/endpoints/api/discovery.py b/endpoints/api/discovery.py
new file mode 100644
index 000000000..66e7c74a3
--- /dev/null
+++ b/endpoints/api/discovery.py
@@ -0,0 +1,334 @@
+# TODO: extract the discovery logic into a top-level util and use it both here and in
+# config_app's discovery.py.
+""" API discovery information. """
+
+import re
+import logging
+import sys
+
+from collections import OrderedDict
+
+from flask_restful import reqparse
+
+from app import app
+from auth import scopes
+from endpoints.api import (ApiResource, resource, method_metadata, nickname, truthy_bool,
+ parse_args, query_param)
+from endpoints.decorators import anon_allowed
+
+
+logger = logging.getLogger(__name__)
+
+
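+# Matches Flask URL rule parameters such as <converter:name> so they can be rewritten as
+# Swagger-style {name} placeholders.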
+PARAM_REGEX = re.compile(r'<([^:>]+:)*([\w]+)>')
+
+
+TYPE_CONVERTER = {
+ truthy_bool: 'boolean',
+ str: 'string',
+ basestring: 'string',
+ reqparse.text_type: 'string',
+ int: 'integer',
+}
+
+PREFERRED_URL_SCHEME = app.config['PREFERRED_URL_SCHEME']
+SERVER_HOSTNAME = app.config['SERVER_HOSTNAME']
+
+
+def fully_qualified_name(method_view_class):
+ return '%s.%s' % (method_view_class.__module__, method_view_class.__name__)
+
+
+def swagger_route_data(include_internal=False, compact=False):
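+ # Walks the Flask URL map and assembles a Swagger 2.0 document describing every registered
+ # API resource; internal-only endpoints are included only when include_internal is True.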
+ def swagger_parameter(name, description, kind='path', param_type='string', required=True,
+ enum=None, schema=None):
+ # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject
+ parameter_info = {
+ 'name': name,
+ 'in': kind,
+ 'required': required
+ }
+
+ if not compact:
+ parameter_info['description'] = description or ''
+
+ if schema:
+ parameter_info['schema'] = {
+ '$ref': '#/definitions/%s' % schema
+ }
+ else:
+ parameter_info['type'] = param_type
+
+ if enum is not None and len(list(enum)) > 0:
+ parameter_info['enum'] = list(enum)
+
+ return parameter_info
+
+ paths = {}
+ models = {}
+ tags = []
+ tags_added = set()
+ operationIds = set()
+
+ for rule in app.url_map.iter_rules():
+ endpoint_method = app.view_functions[rule.endpoint]
+
+ # Verify that we have a view class for this API method.
+ if not 'view_class' in dir(endpoint_method):
+ continue
+
+ view_class = endpoint_method.view_class
+
+ # Hide the class if it is internal.
+ internal = method_metadata(view_class, 'internal')
+ if not include_internal and internal:
+ continue
+
+ # Build the tag.
+ parts = fully_qualified_name(view_class).split('.')
+ tag_name = parts[-2]
+ if not tag_name in tags_added:
+ tags_added.add(tag_name)
+ tags.append({
+ 'name': tag_name,
+ 'description': (sys.modules[view_class.__module__].__doc__ or '').strip()
+ })
+
+ # Build the Swagger data for the path.
+ swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule)
+ full_name = fully_qualified_name(view_class)
+ path_swagger = {
+ 'x-name': full_name,
+ 'x-path': swagger_path,
+ 'x-tag': tag_name
+ }
+
+ if include_internal:
+ related_user_res = method_metadata(view_class, 'related_user_resource')
+ if related_user_res is not None:
+ path_swagger['x-user-related'] = fully_qualified_name(related_user_res)
+
+ paths[swagger_path] = path_swagger
+
+ # Add any global path parameters.
+ param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {}
+ if param_data_map:
+ path_parameters_swagger = []
+ for path_parameter in param_data_map:
+ description = param_data_map[path_parameter].get('description')
+ path_parameters_swagger.append(swagger_parameter(path_parameter, description))
+
+ path_swagger['parameters'] = path_parameters_swagger
+
+ # Add the individual HTTP operations.
+ method_names = list(rule.methods.difference(['HEAD', 'OPTIONS']))
+ for method_name in method_names:
+ # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object
+ method = getattr(view_class, method_name.lower(), None)
+ if method is None:
+ logger.debug('Unable to find method for %s in class %s', method_name, view_class)
+ continue
+
+ operationId = method_metadata(method, 'nickname')
+ operation_swagger = {
+ 'operationId': operationId,
+ 'parameters': [],
+ }
+
+ if operationId is None:
+ continue
+
+ if operationId in operationIds:
+ raise Exception('Duplicate operation Id: %s' % operationId)
+
+ operationIds.add(operationId)
+
+ if not compact:
+ operation_swagger.update({
+ 'description': method.__doc__.strip() if method.__doc__ else '',
+ 'tags': [tag_name]
+ })
+
+ # Mark the method as internal.
+ internal = method_metadata(method, 'internal')
+ if internal is not None:
+ operation_swagger['x-internal'] = True
+
+ if include_internal:
+ requires_fresh_login = method_metadata(method, 'requires_fresh_login')
+ if requires_fresh_login is not None:
+ operation_swagger['x-requires-fresh-login'] = True
+
+ # Add the path parameters.
+ if rule.arguments:
+ for path_parameter in rule.arguments:
+ description = param_data_map.get(path_parameter, {}).get('description')
+ operation_swagger['parameters'].append(swagger_parameter(path_parameter, description))
+
+ # Add the query parameters.
+ if '__api_query_params' in dir(method):
+ for query_parameter_info in method.__api_query_params:
+ name = query_parameter_info['name']
+ description = query_parameter_info['help']
+ param_type = TYPE_CONVERTER[query_parameter_info['type']]
+ required = query_parameter_info['required']
+
+ operation_swagger['parameters'].append(
+ swagger_parameter(name, description, kind='query',
+ param_type=param_type,
+ required=required,
+ enum=query_parameter_info['choices']))
+
+ # Add the OAuth security block.
+ # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject
+ scope = method_metadata(method, 'oauth2_scope')
+ if scope and not compact:
+ operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}]
+
+ # Add the responses block.
+ # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject
+ response_schema_name = method_metadata(method, 'response_schema')
+ if not compact:
+ if response_schema_name:
+ models[response_schema_name] = view_class.schemas[response_schema_name]
+
+ models['ApiError'] = {
+ 'type': 'object',
+ 'properties': {
+ 'status': {
+ 'type': 'integer',
+ 'description': 'Status code of the response.'
+ },
+ 'type': {
+ 'type': 'string',
+ 'description': 'Reference to the type of the error.'
+ },
+ 'detail': {
+ 'type': 'string',
+ 'description': 'Details about the specific instance of the error.'
+ },
+ 'title': {
+ 'type': 'string',
+ 'description': 'Unique error code to identify the type of error.'
+ },
+ 'error_message': {
+ 'type': 'string',
+ 'description': 'Deprecated; alias for detail'
+ },
+ 'error_type': {
+ 'type': 'string',
+ 'description': 'Deprecated; alias for detail'
+ }
+ },
+ 'required': [
+ 'status',
+ 'type',
+ 'title',
+ ]
+ }
+
+ responses = {
+ '400': {
+ 'description': 'Bad Request',
+ },
+
+ '401': {
+ 'description': 'Session required',
+ },
+
+ '403': {
+ 'description': 'Unauthorized access',
+ },
+
+ '404': {
+ 'description': 'Not found',
+ },
+ }
+
+ for _, body in responses.items():
+ body['schema'] = {'$ref': '#/definitions/ApiError'}
+
+ if method_name == 'DELETE':
+ responses['204'] = {
+ 'description': 'Deleted'
+ }
+ elif method_name == 'POST':
+ responses['201'] = {
+ 'description': 'Successful creation'
+ }
+ else:
+ responses['200'] = {
+ 'description': 'Successful invocation'
+ }
+
+ if response_schema_name:
+ responses['200']['schema'] = {
+ '$ref': '#/definitions/%s' % response_schema_name
+ }
+
+ operation_swagger['responses'] = responses
+
+
+ # Add the request block.
+ request_schema_name = method_metadata(method, 'request_schema')
+ if request_schema_name and not compact:
+ models[request_schema_name] = view_class.schemas[request_schema_name]
+
+ operation_swagger['parameters'].append(
+ swagger_parameter('body', 'Request body contents.', kind='body',
+ schema=request_schema_name))
+
+ # Add the operation to the parent path.
+ if not internal or (internal and include_internal):
+ path_swagger[method_name.lower()] = operation_swagger
+
+ tags.sort(key=lambda t: t['name'])
+ paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag']))
+
+ if compact:
+ return {'paths': paths}
+
+ swagger_data = {
+ 'swagger': '2.0',
+ 'host': SERVER_HOSTNAME,
+ 'basePath': '/',
+ 'schemes': [
+ PREFERRED_URL_SCHEME
+ ],
+ 'info': {
+ 'version': 'v1',
+ 'title': 'Quay Frontend',
+ 'description': ('This API allows you to perform many of the operations required to work '
+ 'with Quay repositories, users, and organizations. You can find out more '
+ 'at Quay.'),
+ 'termsOfService': 'https://quay.io/tos',
+ 'contact': {
+ 'email': 'support@quay.io'
+ }
+ },
+ 'securityDefinitions': {
+ 'oauth2_implicit': {
+ "type": "oauth2",
+ "flow": "implicit",
+ "authorizationUrl": "%s://%s/oauth/authorize" % (PREFERRED_URL_SCHEME, SERVER_HOSTNAME),
+ 'scopes': {scope.scope:scope.description
+ for scope in scopes.app_scopes(app.config).values()},
+ },
+ },
+ 'paths': paths,
+ 'definitions': models,
+ 'tags': tags
+ }
+
+ return swagger_data
+
+
+@resource('/v1/discovery')
+class DiscoveryResource(ApiResource):
+ """Ability to inspect the API for usage information and documentation."""
+ @parse_args()
+ @query_param('internal', 'Whether to include internal APIs.', type=truthy_bool, default=False)
+ @nickname('discovery')
+ @anon_allowed
+ def get(self, parsed_args):
+ """ List all of the API endpoints available in the swagger API format."""
+ return swagger_route_data(parsed_args['internal'])
diff --git a/endpoints/api/error.py b/endpoints/api/error.py
new file mode 100644
index 000000000..bfa80efe2
--- /dev/null
+++ b/endpoints/api/error.py
@@ -0,0 +1,61 @@
+""" Error details API """
+from flask import url_for
+
+from endpoints.api import (resource, nickname, ApiResource, path_param,
+ define_json_response)
+from endpoints.exception import NotFound, ApiErrorType, ERROR_DESCRIPTION
+
+def error_view(error_type):
+ return {
+ 'type': url_for('api.error', error_type=error_type, _external=True),
+ 'title': error_type,
+ 'description': ERROR_DESCRIPTION[error_type]
+ }
+
+
+@resource('/v1/error/<error_type>')
+@path_param('error_type', 'The error code identifying the type of error.')
+class Error(ApiResource):
+ """ Resource for Error Descriptions"""
+ schemas = {
+ 'ApiErrorDescription': {
+ 'type': 'object',
+ 'description': 'Description of an error',
+ 'required': [
+ 'type',
+ 'description',
+ 'title',
+ ],
+ 'properties': {
+ 'type': {
+ 'type': 'string',
+ 'description': 'A reference to the error type resource'
+ },
+ 'title': {
+ 'type': 'string',
+ 'description': (
+ 'The title of the error. Can be used to uniquely identify the kind'
+ ' of error.'
+ ),
+ 'enum': list(ApiErrorType.__members__)
+ },
+ 'description': {
+ 'type': 'string',
+ 'description': (
+ 'A more detailed description of the error that may include help for'
+ ' fixing the issue.'
+ )
+ }
+ },
+ },
+ }
+
+ @define_json_response('ApiErrorDescription')
+ @nickname('getErrorDescription')
+ def get(self, error_type):
+ """ Get a detailed description of the error """
+ if error_type in ERROR_DESCRIPTION.keys():
+ return error_view(error_type)
+
+ raise NotFound()
+
diff --git a/endpoints/api/globalmessages.py b/endpoints/api/globalmessages.py
new file mode 100644
index 000000000..43ea58083
--- /dev/null
+++ b/endpoints/api/globalmessages.py
@@ -0,0 +1,128 @@
+""" Messages API. """
+from flask import abort
+from flask import make_response
+from flask import request
+
+import features
+from auth import scopes
+from auth.permissions import SuperUserPermission
+from endpoints.api import (ApiResource, resource, nickname,
+ require_fresh_login, verify_not_prod, validate_json_request,
+ require_scope, show_if,)
+from globalmessages_models_pre_oci import pre_oci_model as model
+
+
+@resource('/v1/messages')
+class GlobalUserMessages(ApiResource):
+ """ Resource for getting a list of super user messages """
+ schemas = {
+ 'GetMessage': {
+ 'id': 'GetMessage',
+ 'type': 'object',
+ 'description': 'Messages that a super user has saved in the past',
+ 'properties': {
+ 'message': {
+ 'type': 'array',
+ 'description': 'A list of messages',
+ 'itemType': {
+ 'type': 'object',
+ 'properties': {
+ 'uuid': {
+ 'type': 'string',
+ 'description': 'The message id',
+ },
+ 'content': {
+ 'type': 'string',
+ 'description': 'The actual message',
+ },
+ 'media_type': {
+ 'type': 'string',
+ 'description': 'The media type of the message',
+ 'enum': ['text/plain', 'text/markdown'],
+ },
+ 'severity': {
+ 'type': 'string',
+ 'description': 'The severity of the message',
+ 'enum': ['info', 'warning', 'error'],
+ },
+ },
+ },
+ },
+ },
+ },
+ 'CreateMessage': {
+ 'id': 'CreateMessage',
+ 'type': 'object',
+ 'description': 'Create a new message',
+ 'properties': {
+ 'message': {
+ 'type': 'object',
+ 'description': 'A single message',
+ 'required': [
+ 'content',
+ 'media_type',
+ 'severity',
+ ],
+ 'properties': {
+ 'content': {
+ 'type': 'string',
+ 'description': 'The actual message',
+ },
+ 'media_type': {
+ 'type': 'string',
+ 'description': 'The media type of the message',
+ 'enum': ['text/plain', 'text/markdown'],
+ },
+ 'severity': {
+ 'type': 'string',
+ 'description': 'The severity of the message',
+ 'enum': ['info', 'warning', 'error'],
+ },
+ },
+ },
+ },
+ }
+ }
+
+ @nickname('getGlobalMessages')
+ def get(self):
+ """ Return a super users messages """
+ return {
+ 'messages': [m.to_dict() for m in model.get_all_messages()],
+ }
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('createGlobalMessage')
+ @validate_json_request('CreateMessage')
+ @require_scope(scopes.SUPERUSER)
+ def post(self):
+ """ Create a message """
+ if not features.SUPER_USERS:
+ abort(404)
+
+ if SuperUserPermission().can():
+ message_req = request.get_json()['message']
+ message = model.create_message(message_req['severity'], message_req['media_type'], message_req['content'])
+ if message is None:
+ abort(400)
+ return make_response('', 201)
+
+ abort(403)
+
+
+@resource('/v1/message/<uuid>')
+@show_if(features.SUPER_USERS)
+class GlobalUserMessage(ApiResource):
+ """ Resource for managing individual messages """
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('deleteGlobalMessage')
+ @require_scope(scopes.SUPERUSER)
+ def delete(self, uuid):
+ """ Delete a message """
+ if SuperUserPermission().can():
+ model.delete_message(uuid)
+ return make_response('', 204)
+
+ abort(403)
diff --git a/endpoints/api/globalmessages_models_interface.py b/endpoints/api/globalmessages_models_interface.py
new file mode 100644
index 000000000..679462c1d
--- /dev/null
+++ b/endpoints/api/globalmessages_models_interface.py
@@ -0,0 +1,54 @@
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+
+from six import add_metaclass
+
+class GlobalMessage(
+ namedtuple('GlobalMessage', [
+ 'uuid',
+ 'content',
+ 'severity',
+ 'media_type_name',
+ ])):
+
+ def to_dict(self):
+ return {
+ 'uuid': self.uuid,
+ 'content': self.content,
+ 'severity': self.severity,
+ 'media_type': self.media_type_name,
+ }
+
+
+
+@add_metaclass(ABCMeta)
+class GlobalMessageDataInterface(object):
+ """
+ Data interface for globalmessages API
+ """
+
+ @abstractmethod
+ def get_all_messages(self):
+ """
+
+ Returns:
+ list(GlobalMessage)
+ """
+
+ @abstractmethod
+ def create_message(self, severity, media_type_name, content):
+ """
+
+ Returns:
+ GlobalMessage or None
+ """
+
+ @abstractmethod
+ def delete_message(self, uuid):
+ """
+
+ Returns:
+ void
+ """
+
+
\ No newline at end of file
diff --git a/endpoints/api/globalmessages_models_pre_oci.py b/endpoints/api/globalmessages_models_pre_oci.py
new file mode 100644
index 000000000..d9a623f1b
--- /dev/null
+++ b/endpoints/api/globalmessages_models_pre_oci.py
@@ -0,0 +1,33 @@
+from globalmessages_models_interface import GlobalMessageDataInterface, GlobalMessage
+from data import model
+
+
+class GlobalMessagePreOCI(GlobalMessageDataInterface):
+
+ def get_all_messages(self):
+ messages = model.message.get_messages()
+ return [self._message(m) for m in messages]
+
+ def create_message(self, severity, media_type_name, content):
+ message = {
+ 'severity': severity,
+ 'media_type': media_type_name,
+ 'content': content
+ }
+ messages = model.message.create([message])
+ return self._message(messages[0])
+
+ def delete_message(self, uuid):
+ model.message.delete_message([uuid])
+
+ def _message(self, message_obj):
+ if message_obj is None:
+ return None
+ return GlobalMessage(
+ uuid=message_obj.uuid,
+ content=message_obj.content,
+ severity=message_obj.severity,
+ media_type_name=message_obj.media_type.name,
+ )
+
+pre_oci_model = GlobalMessagePreOCI()
\ No newline at end of file
diff --git a/endpoints/api/image.py b/endpoints/api/image.py
new file mode 100644
index 000000000..3a9dcd82c
--- /dev/null
+++ b/endpoints/api/image.py
@@ -0,0 +1,77 @@
+""" List and lookup repository images. """
+import json
+
+from data.registry_model import registry_model
+from endpoints.api import (resource, nickname, require_repo_read, RepositoryParamResource,
+ path_param, disallow_for_app_repositories, format_date)
+from endpoints.exception import NotFound
+
+
+def image_dict(image, with_history=False, with_tags=False):
+ parsed_command = None
+ if image.command:
+ try:
+ parsed_command = json.loads(image.command)
+ except (ValueError, TypeError):
+ parsed_command = {'error': 'Could not parse command'}
+
+ image_data = {
+ 'id': image.docker_image_id,
+ 'created': format_date(image.created),
+ 'comment': image.comment,
+ 'command': parsed_command,
+ 'size': image.image_size,
+ 'uploading': image.uploading,
+ 'sort_index': len(image.parents),
+ }
+
+ if with_tags:
+ image_data['tags'] = [tag.name for tag in image.tags]
+
+ if with_history:
+ image_data['history'] = [image_dict(parent) for parent in image.parents]
+
+ # Calculate the ancestors string, with the DBID's replaced with the docker IDs.
+ parent_docker_ids = [parent_image.docker_image_id for parent_image in image.parents]
+ image_data['ancestors'] = '/{0}/'.format('/'.join(parent_docker_ids))
+ return image_data
+
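+# Example for clarity (docker IDs are placeholders): for an image whose parents have
+# docker_image_id values ['aaa', 'bbb'], the dict above ends up with
+#   image_data['sort_index'] == 2 and image_data['ancestors'] == '/aaa/bbb/'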
+
+@resource('/v1/repository/<apirepopath:repository>/image/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryImageList(RepositoryParamResource):
+ """ Resource for listing repository images. """
+
+ @require_repo_read
+ @nickname('listRepositoryImages')
+ @disallow_for_app_repositories
+ def get(self, namespace, repository):
+ """ List the images for the specified repository. """
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ images = registry_model.get_legacy_images(repo_ref)
+ return {'images': [image_dict(image, with_tags=True) for image in images]}
+
+
+@resource('/v1/repository/<apirepopath:repository>/image/<image_id>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('image_id', 'The Docker image ID')
+class RepositoryImage(RepositoryParamResource):
+ """ Resource for handling repository images. """
+
+ @require_repo_read
+ @nickname('getImage')
+ @disallow_for_app_repositories
+ def get(self, namespace, repository, image_id):
+ """ Get the information available for the specified image. """
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ image = registry_model.get_legacy_image(repo_ref, image_id, include_parents=True)
+ if image is None:
+ raise NotFound()
+
+ return image_dict(image, with_history=True)
diff --git a/endpoints/api/logs.py b/endpoints/api/logs.py
new file mode 100644
index 000000000..1760a2e9b
--- /dev/null
+++ b/endpoints/api/logs.py
@@ -0,0 +1,344 @@
+""" Access usage logs for organizations or repositories. """
+from datetime import datetime, timedelta
+
+from flask import request
+
+import features
+
+from app import app, export_action_logs_queue, avatar
+from auth.permissions import AdministerOrganizationPermission
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from data.logs_model import logs_model
+from data.registry_model import registry_model
+from endpoints.api import (resource, nickname, ApiResource, query_param, parse_args,
+ RepositoryParamResource, require_repo_admin, related_user_resource,
+ format_date, require_user_admin, path_param, require_scope, page_support,
+ validate_json_request, InvalidRequest, show_if)
+from endpoints.exception import Unauthorized, NotFound
+
+
+LOGS_PER_PAGE = 20
+SERVICE_LEVEL_LOG_KINDS = set(['service_key_create', 'service_key_approve', 'service_key_delete',
+ 'service_key_modify', 'service_key_extend', 'service_key_rotate'])
+
+
+def _parse_datetime(dt_string):
+ if not dt_string:
+ return None
+
+ try:
+ return datetime.strptime(dt_string + ' UTC', '%m/%d/%Y %Z')
+ except ValueError:
+ return None
+
+
+def _validate_logs_arguments(start_time, end_time):
+ start_time = _parse_datetime(start_time) or (datetime.today() - timedelta(days=1))
+ end_time = _parse_datetime(end_time) or datetime.today()
+ end_time = end_time + timedelta(days=1)
+ return start_time, end_time
+
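+# Example for clarity (dates are placeholders): with starttime='01/15/2019' and no endtime,
+# _parse_datetime returns datetime(2019, 1, 15) and the end bound defaults to today plus one
+# day, so the window always includes the whole final day.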
+
+def _get_logs(start_time, end_time, performer_name=None, repository_name=None, namespace_name=None,
+ page_token=None, filter_kinds=None):
+ (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
+ log_entry_page = logs_model.lookup_logs(start_time, end_time, performer_name, repository_name,
+ namespace_name, filter_kinds, page_token,
+ app.config['ACTION_LOG_MAX_PAGE'])
+ include_namespace = namespace_name is None and repository_name is None
+ return {
+ 'start_time': format_date(start_time),
+ 'end_time': format_date(end_time),
+ 'logs': [log.to_dict(avatar, include_namespace) for log in log_entry_page.logs],
+ }, log_entry_page.next_page_token
+
+
+def _get_aggregate_logs(start_time, end_time, performer_name=None, repository=None, namespace=None,
+ filter_kinds=None):
+ (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
+ aggregated_logs = logs_model.get_aggregated_log_counts(start_time, end_time,
+ performer_name=performer_name,
+ repository_name=repository,
+ namespace_name=namespace,
+ filter_kinds=filter_kinds)
+
+ return {
+ 'aggregated': [log.to_dict() for log in aggregated_logs]
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/logs')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryLogs(RepositoryParamResource):
+ """ Resource for fetching logs for the specific repository. """
+
+ @require_repo_admin
+ @nickname('listRepoLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @page_support()
+ def get(self, namespace, repository, page_token, parsed_args):
+ """ List the logs for the specified repository. """
+ if registry_model.lookup_repository(namespace, repository) is None:
+ raise NotFound()
+
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+ return _get_logs(start_time, end_time,
+ repository_name=repository,
+ page_token=page_token,
+ namespace_name=namespace)
+
+
+@resource('/v1/user/logs')
+class UserLogs(ApiResource):
+ """ Resource for fetching logs for the current user. """
+
+ @require_user_admin
+ @nickname('listUserLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('performer', 'Username for which to filter logs.', type=str)
+ @page_support()
+ def get(self, parsed_args, page_token):
+ """ List the logs for the current user. """
+ performer_name = parsed_args['performer']
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+
+ user = get_authenticated_user()
+ return _get_logs(start_time, end_time,
+ performer_name=performer_name,
+ namespace_name=user.username,
+ page_token=page_token,
+ filter_kinds=SERVICE_LEVEL_LOG_KINDS)
+
+
+@resource('/v1/organization/<orgname>/logs')
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(UserLogs)
+class OrgLogs(ApiResource):
+ """ Resource for fetching logs for the entire organization. """
+
+ @nickname('listOrgLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('performer', 'Username for which to filter logs.', type=str)
+ @page_support()
+ @require_scope(scopes.ORG_ADMIN)
+ def get(self, orgname, page_token, parsed_args):
+ """ List the logs for the specified organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ performer_name = parsed_args['performer']
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+
+ return _get_logs(start_time, end_time,
+ namespace_name=orgname,
+ performer_name=performer_name,
+ page_token=page_token)
+
+ raise Unauthorized()
+
+
+@resource('/v1/repository/<apirepopath:repository>/aggregatelogs')
+@show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL)
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryAggregateLogs(RepositoryParamResource):
+ """ Resource for fetching aggregated logs for the specific repository. """
+
+ @require_repo_admin
+ @nickname('getAggregateRepoLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ def get(self, namespace, repository, parsed_args):
+ """ Returns the aggregated logs for the specified repository. """
+ if registry_model.lookup_repository(namespace, repository) is None:
+ raise NotFound()
+
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+ return _get_aggregate_logs(start_time, end_time,
+ repository=repository,
+ namespace=namespace)
+
+
+@resource('/v1/user/aggregatelogs')
+@show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL)
+class UserAggregateLogs(ApiResource):
+ """ Resource for fetching aggregated logs for the current user. """
+
+ @require_user_admin
+ @nickname('getAggregateUserLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('performer', 'Username for which to filter logs.', type=str)
+ def get(self, parsed_args):
+ """ Returns the aggregated logs for the current user. """
+ performer_name = parsed_args['performer']
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+
+ user = get_authenticated_user()
+ return _get_aggregate_logs(start_time, end_time,
+ performer_name=performer_name,
+ namespace=user.username,
+ filter_kinds=SERVICE_LEVEL_LOG_KINDS)
+
+
+@resource('/v1/organization/<orgname>/aggregatelogs')
+@show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL)
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(UserLogs)
+class OrgAggregateLogs(ApiResource):
+ """ Resource for fetching aggregate logs for the entire organization. """
+
+ @nickname('getAggregateOrgLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('performer', 'Username for which to filter logs.', type=str)
+ @require_scope(scopes.ORG_ADMIN)
+ def get(self, orgname, parsed_args):
+ """ Gets the aggregated logs for the specified organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ performer_name = parsed_args['performer']
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+
+ return _get_aggregate_logs(start_time, end_time,
+ namespace=orgname,
+ performer_name=performer_name)
+
+ raise Unauthorized()
+
+
+EXPORT_LOGS_SCHEMA = {
+ 'type': 'object',
+ 'description': 'Configuration for an export logs operation',
+ 'properties': {
+ 'callback_url': {
+ 'type': 'string',
+ 'description': 'The callback URL to invoke with a link to the exported logs',
+ },
+ 'callback_email': {
+ 'type': 'string',
+ 'description': 'The e-mail address at which to e-mail a link to the exported logs',
+ },
+ },
+}
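+# Illustrative request body for EXPORT_LOGS_SCHEMA (placeholder values; both fields optional):
+#   {"callback_url": "https://example.com/logs-ready", "callback_email": "admin@example.com"}
+# _queue_logs_export below rejects non-http(s) callback URLs and addresses without an '@'.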
+
+
+def _queue_logs_export(start_time, end_time, options, namespace_name, repository_name=None):
+ callback_url = options.get('callback_url')
+ if callback_url:
+ if not callback_url.startswith('https://') and not callback_url.startswith('http://'):
+ raise InvalidRequest('Invalid callback URL')
+
+ callback_email = options.get('callback_email')
+ if callback_email:
+ if callback_email.find('@') < 0:
+ raise InvalidRequest('Invalid callback e-mail')
+
+ (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
+ export_id = logs_model.queue_logs_export(start_time, end_time, export_action_logs_queue,
+ namespace_name, repository_name, callback_url,
+ callback_email)
+ if export_id is None:
+ raise InvalidRequest('Invalid export request')
+
+ return export_id
+
+
+@resource('/v1/repository/<apirepopath:repository>/exportlogs')
+@show_if(features.LOG_EXPORT)
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class ExportRepositoryLogs(RepositoryParamResource):
+ """ Resource for exporting the logs for the specific repository. """
+ schemas = {
+ 'ExportLogs': EXPORT_LOGS_SCHEMA
+ }
+
+ @require_repo_admin
+ @nickname('exportRepoLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @validate_json_request('ExportLogs')
+ def post(self, namespace, repository, parsed_args):
+ """ Queues an export of the logs for the specified repository. """
+ if registry_model.lookup_repository(namespace, repository) is None:
+ raise NotFound()
+
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+ export_id = _queue_logs_export(start_time, end_time, request.get_json(), namespace,
+ repository_name=repository)
+ return {
+ 'export_id': export_id,
+ }
+
+
+@resource('/v1/user/exportlogs')
+@show_if(features.LOG_EXPORT)
+class ExportUserLogs(ApiResource):
+ """ Resource for exporting the logs for the current user repository. """
+ schemas = {
+ 'ExportLogs': EXPORT_LOGS_SCHEMA
+ }
+
+ @require_user_admin
+ @nickname('exportUserLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @validate_json_request('ExportLogs')
+ def post(self, parsed_args):
+ """ Returns the aggregated logs for the current user. """
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+
+ user = get_authenticated_user()
+ export_id = _queue_logs_export(start_time, end_time, request.get_json(), user.username)
+ return {
+ 'export_id': export_id,
+ }
+
+
+@resource('/v1/organization/<orgname>/exportlogs')
+@show_if(features.LOG_EXPORT)
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(ExportUserLogs)
+class ExportOrgLogs(ApiResource):
+ """ Resource for exporting the logs for an entire organization. """
+ schemas = {
+ 'ExportLogs': EXPORT_LOGS_SCHEMA
+ }
+
+ @nickname('exportOrgLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @require_scope(scopes.ORG_ADMIN)
+ @validate_json_request('ExportLogs')
+ def post(self, orgname, parsed_args):
+ """ Exports the logs for the specified organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+
+ export_id = _queue_logs_export(start_time, end_time, request.get_json(), orgname)
+ return {
+ 'export_id': export_id,
+ }
+
+ raise Unauthorized()
diff --git a/endpoints/api/manifest.py b/endpoints/api/manifest.py
new file mode 100644
index 000000000..1370fa743
--- /dev/null
+++ b/endpoints/api/manifest.py
@@ -0,0 +1,273 @@
+""" Manage the manifests of a repository. """
+import json
+import logging
+
+from flask import request
+
+from app import label_validator, storage
+from data.model import InvalidLabelKeyException, InvalidMediaTypeException
+from data.registry_model import registry_model
+from digest import digest_tools
+from endpoints.api import (resource, nickname, require_repo_read, require_repo_write,
+ RepositoryParamResource, log_action, validate_json_request,
+ path_param, parse_args, query_param, abort, api,
+ disallow_for_app_repositories, format_date,
+ disallow_for_non_normal_repositories)
+from endpoints.api.image import image_dict
+from endpoints.exception import NotFound
+from util.validation import VALID_LABEL_KEY_REGEX
+
+
+BASE_MANIFEST_ROUTE = '/v1/repository/<apirepopath:repository>/manifest/<regex("{0}"):manifestref>'
+MANIFEST_DIGEST_ROUTE = BASE_MANIFEST_ROUTE.format(digest_tools.DIGEST_PATTERN)
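+# Note for clarity: MANIFEST_DIGEST_ROUTE substitutes the digest regex into the base route, so
+# a concrete request path looks roughly like (example values only):
+#   /v1/repository/myorg/myrepo/manifest/sha256:0123abcd...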
+ALLOWED_LABEL_MEDIA_TYPES = ['text/plain', 'application/json']
+
+
+logger = logging.getLogger(__name__)
+
+def _label_dict(label):
+ return {
+ 'id': label.uuid,
+ 'key': label.key,
+ 'value': label.value,
+ 'source_type': label.source_type_name,
+ 'media_type': label.media_type_name,
+ }
+
+
+def _layer_dict(manifest_layer, index):
+ # NOTE: The `command` in the layer is either a JSON string of an array (schema 1) or
+ # a single string (schema 2). The block below normalizes it to have the same format.
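+ # For illustration (example values): a schema 1 command might arrive as the JSON string
+ # '["/bin/sh", "-c", "apt-get update"]' and json.loads() yields the list directly, while a
+ # schema 2 command such as '/bin/sh -c apt-get update' fails to parse and is wrapped as
+ # [manifest_layer.command].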
+ command = None
+ if manifest_layer.command:
+ try:
+ command = json.loads(manifest_layer.command)
+ except (TypeError, ValueError):
+ command = [manifest_layer.command]
+
+ return {
+ 'index': index,
+ 'compressed_size': manifest_layer.compressed_size,
+ 'is_remote': manifest_layer.is_remote,
+ 'urls': manifest_layer.urls,
+ 'command': command,
+ 'comment': manifest_layer.comment,
+ 'author': manifest_layer.author,
+ 'blob_digest': str(manifest_layer.blob_digest),
+ 'created_datetime': format_date(manifest_layer.created_datetime),
+ }
+
+
+def _manifest_dict(manifest):
+ image = None
+ if manifest.legacy_image_if_present is not None:
+ image = image_dict(manifest.legacy_image, with_history=True)
+
+ layers = None
+ if not manifest.is_manifest_list:
+ layers = registry_model.list_manifest_layers(manifest, storage)
+ if layers is None:
+ logger.debug('Missing layers for manifest `%s`', manifest.digest)
+ abort(404)
+
+ return {
+ 'digest': manifest.digest,
+ 'is_manifest_list': manifest.is_manifest_list,
+ 'manifest_data': manifest.internal_manifest_bytes.as_unicode(),
+ 'image': image,
+ 'layers': ([_layer_dict(lyr.layer_info, idx) for idx, lyr in enumerate(layers)]
+ if layers else None),
+ }
+
+
+@resource(MANIFEST_DIGEST_ROUTE)
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('manifestref', 'The digest of the manifest')
+class RepositoryManifest(RepositoryParamResource):
+ """ Resource for retrieving a specific repository manifest. """
+ @require_repo_read
+ @nickname('getRepoManifest')
+ @disallow_for_app_repositories
+ def get(self, namespace_name, repository_name, manifestref):
+ repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
+ if repo_ref is None:
+ raise NotFound()
+
+ manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref,
+ include_legacy_image=True)
+ if manifest is None:
+ raise NotFound()
+
+ return _manifest_dict(manifest)
+
+
+@resource(MANIFEST_DIGEST_ROUTE + '/labels')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('manifestref', 'The digest of the manifest')
+class RepositoryManifestLabels(RepositoryParamResource):
+ """ Resource for listing the labels on a specific repository manifest. """
+ schemas = {
+ 'AddLabel': {
+ 'type': 'object',
+ 'description': 'Adds a label to a manifest',
+ 'required': [
+ 'key',
+ 'value',
+ 'media_type',
+ ],
+ 'properties': {
+ 'key': {
+ 'type': 'string',
+ 'description': 'The key for the label',
+ },
+ 'value': {
+ 'type': 'string',
+ 'description': 'The value for the label',
+ },
+ 'media_type': {
+ 'type': ['string', 'null'],
+ 'description': 'The media type for this label',
+ 'enum': ALLOWED_LABEL_MEDIA_TYPES + [None],
+ },
+ },
+ },
+ }
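+ # Example 'AddLabel' payload (illustrative values only):
+ #   {"key": "com.example.release", "value": "1.2.3", "media_type": "text/plain"}
+ # media_type may also be null, per the enum above.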
+
+ @require_repo_read
+ @nickname('listManifestLabels')
+ @disallow_for_app_repositories
+ @parse_args()
+ @query_param('filter', 'If specified, only labels matching the given prefix will be returned',
+ type=str, default=None)
+ def get(self, namespace_name, repository_name, manifestref, parsed_args):
+ repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
+ if repo_ref is None:
+ raise NotFound()
+
+ manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
+ if manifest is None:
+ raise NotFound()
+
+ labels = registry_model.list_manifest_labels(manifest, parsed_args['filter'])
+ if labels is None:
+ raise NotFound()
+
+ return {
+ 'labels': [_label_dict(label) for label in labels]
+ }
+
+ @require_repo_write
+ @nickname('addManifestLabel')
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @validate_json_request('AddLabel')
+ def post(self, namespace_name, repository_name, manifestref):
+ """ Adds a new label into the tag manifest. """
+ label_data = request.get_json()
+
+ # Check for any reserved prefixes.
+ if label_validator.has_reserved_prefix(label_data['key']):
+ abort(400, message='Label has a reserved prefix')
+
+ repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
+ if repo_ref is None:
+ raise NotFound()
+
+ manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
+ if manifest is None:
+ raise NotFound()
+
+ label = None
+ try:
+ label = registry_model.create_manifest_label(manifest,
+ label_data['key'],
+ label_data['value'],
+ 'api',
+ label_data['media_type'])
+ except InvalidLabelKeyException:
+ message = ('Label is of an invalid format or missing; please ' +
+ 'use the %s format for labels' % VALID_LABEL_KEY_REGEX)
+ abort(400, message=message)
+ except InvalidMediaTypeException:
+ message = 'Media type is invalid; please use a valid media type: text/plain or application/json'
+ abort(400, message=message)
+
+ if label is None:
+ raise NotFound()
+
+ metadata = {
+ 'id': label.uuid,
+ 'key': label.key,
+ 'value': label.value,
+ 'manifest_digest': manifestref,
+ 'media_type': label.media_type_name,
+ 'namespace': namespace_name,
+ 'repo': repository_name,
+ }
+
+ log_action('manifest_label_add', namespace_name, metadata, repo_name=repository_name)
+
+ resp = {'label': _label_dict(label)}
+ repo_string = '%s/%s' % (namespace_name, repository_name)
+ headers = {
+ 'Location': api.url_for(ManageRepositoryManifestLabel, repository=repo_string,
+ manifestref=manifestref, labelid=label.uuid),
+ }
+ return resp, 201, headers
+
+
+@resource(MANIFEST_DIGEST_ROUTE + '/labels/<labelid>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('manifestref', 'The digest of the manifest')
+@path_param('labelid', 'The ID of the label')
+class ManageRepositoryManifestLabel(RepositoryParamResource):
+ """ Resource for managing the labels on a specific repository manifest. """
+ @require_repo_read
+ @nickname('getManifestLabel')
+ @disallow_for_app_repositories
+ def get(self, namespace_name, repository_name, manifestref, labelid):
+ """ Retrieves the label with the specific ID under the manifest. """
+ repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
+ if repo_ref is None:
+ raise NotFound()
+
+ manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
+ if manifest is None:
+ raise NotFound()
+
+ label = registry_model.get_manifest_label(manifest, labelid)
+ if label is None:
+ raise NotFound()
+
+ return _label_dict(label)
+
+
+ @require_repo_write
+ @nickname('deleteManifestLabel')
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ def delete(self, namespace_name, repository_name, manifestref, labelid):
+ """ Deletes an existing label from a manifest. """
+ repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
+ if repo_ref is None:
+ raise NotFound()
+
+ manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
+ if manifest is None:
+ raise NotFound()
+
+ deleted = registry_model.delete_manifest_label(manifest, labelid)
+ if deleted is None:
+ raise NotFound()
+
+ metadata = {
+ 'id': labelid,
+ 'key': deleted.key,
+ 'value': deleted.value,
+ 'manifest_digest': manifestref,
+ 'namespace': namespace_name,
+ 'repo': repository_name,
+ }
+
+ log_action('manifest_label_delete', namespace_name, metadata, repo_name=repository_name)
+ return '', 204
diff --git a/endpoints/api/mirror.py b/endpoints/api/mirror.py
new file mode 100644
index 000000000..cac7f9caa
--- /dev/null
+++ b/endpoints/api/mirror.py
@@ -0,0 +1,467 @@
+# -*- coding: utf-8 -*-
+import logging
+
+from email.utils import parsedate_tz, mktime_tz
+from datetime import datetime
+
+from jsonschema import ValidationError
+from flask import request
+
+import features
+
+from auth.auth_context import get_authenticated_user
+from data import model
+from endpoints.api import (RepositoryParamResource, nickname, path_param, require_repo_admin,
+ resource, validate_json_request, define_json_response, show_if,
+ format_date)
+from endpoints.exception import NotFound
+from util.audit import track_and_log, wrap_repository
+from util.names import parse_robot_username
+
+
+common_properties = {
+ 'is_enabled': {
+ 'type': 'boolean',
+ 'description': 'Used to enable or disable synchronizations.',
+ },
+ 'external_reference': {
+ 'type': 'string',
+ 'description': 'Location of the external repository.'
+ },
+ 'external_registry_username': {
+ 'type': ['string', 'null'],
+ 'description': 'Username used to authenticate with external registry.',
+ },
+ 'external_registry_password': {
+ 'type': ['string', 'null'],
+ 'description': 'Password used to authenticate with external registry.',
+ },
+ 'sync_start_date': {
+ 'type': 'string',
+ 'description': 'Determines the next time this repository is ready for synchronization.',
+ },
+ 'sync_interval': {
+ 'type': 'integer',
+ 'minimum': 0,
+ 'description': 'Number of seconds after sync_start_date to begin synchronizing.'
+ },
+ 'robot_username': {
+ 'type': 'string',
+ 'description': 'Username of robot which will be used for image pushes.'
+ },
+ 'root_rule': {
+ 'type': 'object',
+ 'required': [
+ 'rule_type',
+ 'rule_value'
+ ],
+ 'properties': {
+ 'rule_type': {
+ 'type': 'string',
+ 'description': 'Rule type must be "TAG_GLOB_CSV"'
+ },
+ 'rule_value': {
+ 'type': 'array',
+ 'description': 'Array of tag patterns',
+ 'items': {
+ 'type': 'string'
+ }
+ }
+ },
+ 'description': 'A list of glob-patterns used to determine which tags should be synchronized.'
+ },
+ 'external_registry_config': {
+ 'type': 'object',
+ 'properties': {
+ 'verify_tls': {
+ 'type': 'boolean',
+ 'description': (
+ 'Determines whether HTTPs is required and the certificate is verified when '
+ 'communicating with the external repository.'
+ ),
+ },
+ 'proxy': {
+ 'type': 'object',
+ 'description': 'Proxy configuration for use during synchronization.',
+ 'properties': {
+ 'https_proxy': {
+ 'type': ['string', 'null'],
+ 'description': 'Value for HTTPS_PROXY environment variable during sync.'
+ },
+ 'http_proxy': {
+ 'type': ['string', 'null'],
+ 'description': 'Value for HTTP_PROXY environment variable during sync.'
+ },
+ 'no_proxy': {
+ 'type': ['string', 'null'],
+ 'description': 'Value for NO_PROXY environment variable during sync.'
+ }
+ }
+ }
+ }
+ }
+}
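+# Illustrative 'root_rule' value (tag patterns are placeholders):
+#   {'rule_type': 'TAG_GLOB_CSV', 'rule_value': ['v3.*', 'latest']}
+# Tags matching any of the listed glob patterns are considered for synchronization.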
+
+
+@resource('/v1/repository/<apirepopath:repository>/mirror/sync-now')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@show_if(features.REPO_MIRROR)
+class RepoMirrorSyncNowResource(RepositoryParamResource):
+ """ A resource for managing RepoMirrorConfig.sync_status """
+
+ @require_repo_admin
+ @nickname('syncNow')
+ def post(self, namespace_name, repository_name):
+ """ Update the sync_status for a given Repository's mirroring configuration. """
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if not repo:
+ raise NotFound()
+
+ mirror = model.repo_mirror.get_mirror(repository=repo)
+ if not mirror:
+ raise NotFound()
+
+ if mirror and model.repo_mirror.update_sync_status_to_sync_now(mirror):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="sync_status", to="SYNC_NOW")
+ return '', 204
+
+ raise NotFound()
+
+
+@resource('/v1/repository/<apirepopath:repository>/mirror/sync-cancel')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@show_if(features.REPO_MIRROR)
+class RepoMirrorSyncCancelResource(RepositoryParamResource):
+ """ A resource for managing RepoMirrorConfig.sync_status """
+
+ @require_repo_admin
+ @nickname('syncCancel')
+ def post(self, namespace_name, repository_name):
+ """ Update the sync_status for a given Repository's mirroring configuration. """
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if not repo:
+ raise NotFound()
+
+ mirror = model.repo_mirror.get_mirror(repository=repo)
+ if not mirror:
+ raise NotFound()
+
+ if mirror and model.repo_mirror.update_sync_status_to_cancel(mirror):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="sync_status", to="SYNC_CANCEL")
+ return '', 204
+
+ raise NotFound()
+
+
+@resource('/v1/repository/<apirepopath:repository>/mirror')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@show_if(features.REPO_MIRROR)
+class RepoMirrorResource(RepositoryParamResource):
+ """
+ Resource for managing repository mirroring.
+ """
+ schemas = {
+ 'CreateMirrorConfig': {
+ 'description': 'Create the repository mirroring configuration.',
+ 'type': 'object',
+ 'required': [
+ 'external_reference',
+ 'sync_interval',
+ 'sync_start_date',
+ 'root_rule'
+ ],
+ 'properties': common_properties
+ },
+ 'UpdateMirrorConfig': {
+ 'description': 'Update the repository mirroring configuration.',
+ 'type': 'object',
+ 'properties': common_properties
+ },
+ 'ViewMirrorConfig': {
+ 'description': 'View the repository mirroring configuration.',
+ 'type': 'object',
+ 'required': [
+ 'is_enabled',
+ 'mirror_type',
+ 'external_reference',
+ 'external_registry_username',
+ 'external_registry_config',
+ 'sync_interval',
+ 'sync_start_date',
+ 'sync_expiration_date',
+ 'sync_retries_remaining',
+ 'sync_status',
+ 'root_rule',
+ 'robot_username',
+ ],
+ 'properties': common_properties
+ }
+ }
+
+ @require_repo_admin
+ @define_json_response('ViewMirrorConfig')
+ @nickname('getRepoMirrorConfig')
+ def get(self, namespace_name, repository_name):
+ """ Return the Mirror configuration for a given Repository. """
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if not repo:
+ raise NotFound()
+
+ mirror = model.repo_mirror.get_mirror(repo)
+ if not mirror:
+ raise NotFound()
+
+ # Transformations
+ rules = mirror.root_rule.rule_value
+ username = self._decrypt_username(mirror.external_registry_username)
+ sync_start_date = self._dt_to_string(mirror.sync_start_date)
+ sync_expiration_date = self._dt_to_string(mirror.sync_expiration_date)
+ robot = mirror.internal_robot.username if mirror.internal_robot is not None else None
+
+ return {
+ 'is_enabled': mirror.is_enabled,
+ 'mirror_type': mirror.mirror_type.name,
+ 'external_reference': mirror.external_reference,
+ 'external_registry_username': username,
+ 'external_registry_config': mirror.external_registry_config or {},
+ 'sync_interval': mirror.sync_interval,
+ 'sync_start_date': sync_start_date,
+ 'sync_expiration_date': sync_expiration_date,
+ 'sync_retries_remaining': mirror.sync_retries_remaining,
+ 'sync_status': mirror.sync_status.name,
+ 'root_rule': {
+ 'rule_type': 'TAG_GLOB_CSV',
+ 'rule_value': rules
+ },
+ 'robot_username': robot,
+ }
+
+ @require_repo_admin
+ @nickname('createRepoMirrorConfig')
+ @validate_json_request('CreateMirrorConfig')
+ def post(self, namespace_name, repository_name):
+ """ Create a RepoMirrorConfig for a given Repository. """
+ # TODO: Tidy up this function
+ # TODO: Specify only the data we want to pass on when creating the RepoMirrorConfig. Avoid
+ # the possibility of data injection.
+
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if not repo:
+ raise NotFound()
+
+ if model.repo_mirror.get_mirror(repo):
+ return {'detail': 'Mirror configuration already exists for repository %s/%s' % (
+ namespace_name, repository_name)}, 409
+
+ data = request.get_json()
+
+ data['sync_start_date'] = self._string_to_dt(data['sync_start_date'])
+
+ rule = model.repo_mirror.create_rule(repo, data['root_rule']['rule_value'])
+ del data['root_rule']
+
+ # Verify the robot is part of the Repository's namespace
+ robot = self._setup_robot_for_mirroring(namespace_name, repository_name, data['robot_username'])
+ del data['robot_username']
+
+ mirror = model.repo_mirror.enable_mirroring_for_repository(repo, root_rule=rule,
+ internal_robot=robot, **data)
+ if mirror:
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_reference', to=data['external_reference'])
+ return '', 201
+ else:
+ # TODO: Determine appropriate Response
+ return {'detail': 'RepoMirrorConfig already exists for this repository.'}, 409
+
+ @require_repo_admin
+ @validate_json_request('UpdateMirrorConfig')
+ @nickname('changeRepoMirrorConfig')
+ def put(self, namespace_name, repository_name):
+ """ Allow users to modifying the repository's mirroring configuration. """
+ values = request.get_json()
+
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if not repo:
+ raise NotFound()
+
+ mirror = model.repo_mirror.get_mirror(repo)
+ if not mirror:
+ raise NotFound()
+
+ if 'is_enabled' in values:
+ if values['is_enabled'] == True:
+ if model.repo_mirror.enable_mirror(repo):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='is_enabled', to=True)
+ if values['is_enabled'] == False:
+ if model.repo_mirror.disable_mirror(repo):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='is_enabled', to=False)
+
+ if 'external_reference' in values:
+ if values['external_reference'] == '':
+ return {'detail': 'Empty string is an invalid repository location.'}, 400
+ if model.repo_mirror.change_remote(repo, values['external_reference']):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_reference', to=values['external_reference'])
+
+ if 'robot_username' in values:
+ robot_username = values['robot_username']
+ robot = self._setup_robot_for_mirroring(namespace_name, repository_name, robot_username)
+ if model.repo_mirror.set_mirroring_robot(repo, robot):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='robot_username', to=robot_username)
+
+ if 'sync_start_date' in values:
+ try:
+ sync_start_date = self._string_to_dt(values['sync_start_date'])
+ except ValueError as e:
+ return {'detail': 'Incorrect DateTime format for sync_start_date.'}, 400
+ if model.repo_mirror.change_sync_start_date(repo, sync_start_date):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='sync_start_date', to=sync_start_date)
+
+ if 'sync_interval' in values:
+ if model.repo_mirror.change_sync_interval(repo, values['sync_interval']):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='sync_interval', to=values['sync_interval'])
+
+ if 'external_registry_username' in values and 'external_registry_password' in values:
+ username = values['external_registry_username']
+ password = values['external_registry_password']
+ if username is None and password is not None:
+ return {'detail': 'Unable to delete username while setting a password.'}, 400
+ if model.repo_mirror.change_credentials(repo, username, password):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_username', to=username)
+ if password is None:
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_password', to=None)
+ else:
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_password', to="********")
+
+ elif 'external_registry_username' in values:
+ username = values['external_registry_username']
+ if model.repo_mirror.change_username(repo, username):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_username', to=username)
+
+ # Do not allow specifying a password without setting a username
+ if 'external_registry_password' in values and 'external_registry_username' not in values:
+ return {'detail': 'Unable to set a new password without also specifying a username.'}, 400
+
+ if 'external_registry_config' in values:
+ external_registry_config = values.get('external_registry_config', {})
+
+ if 'verify_tls' in external_registry_config:
+ updates = {'verify_tls': external_registry_config['verify_tls']}
+ if model.repo_mirror.change_external_registry_config(repo, updates):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='verify_tls', to=external_registry_config['verify_tls'])
+
+ if 'proxy' in external_registry_config:
+ proxy_values = external_registry_config.get('proxy', {})
+
+ if 'http_proxy' in proxy_values:
+ updates = {'proxy': {'http_proxy': proxy_values['http_proxy']}}
+ if model.repo_mirror.change_external_registry_config(repo, updates):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='http_proxy', to=proxy_values['http_proxy'])
+
+ if 'https_proxy' in proxy_values:
+ updates = {'proxy': {'https_proxy': proxy_values['https_proxy']}}
+ if model.repo_mirror.change_external_registry_config(repo, updates):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='https_proxy', to=proxy_values['https_proxy'])
+
+ if 'no_proxy' in proxy_values:
+ updates = {'proxy': {'no_proxy': proxy_values['no_proxy']}}
+ if model.repo_mirror.change_external_registry_config(repo, updates):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='no_proxy', to=proxy_values['no_proxy'])
+
+ return '', 201
+
+ def _setup_robot_for_mirroring(self, namespace_name, repo_name, robot_username):
+ """ Validate robot exists and give write permissions. """
+ robot = model.user.lookup_robot(robot_username)
+ assert robot.robot
+
+ namespace, _ = parse_robot_username(robot_username)
+ if namespace != namespace_name:
+ raise model.DataModelException('Invalid robot')
+
+ # Ensure the robot specified has access to the repository. If not, grant it.
+ permissions = model.permission.get_user_repository_permissions(robot, namespace_name, repo_name)
+ if not permissions or permissions[0].role.name == 'read':
+ model.permission.set_user_repo_permission(robot.username, namespace_name, repo_name, 'write')
+
+ return robot
+
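+ # Note for clarity (naming convention assumed): robot_username is expected in the
+ # 'namespace+shortname' form, e.g. 'myorg+mirror_bot'; parse_robot_username splits off the
+ # namespace so it can be compared against namespace_name above.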
+ def _string_to_dt(self, string):
+ """ Convert String to correct DateTime format. """
+ if string is None:
+ return None
+
+ """
+ # TODO: Use RFC2822. This doesn't work consistently.
+ # TODO: Move this to same module as `format_date` once fixed.
+ tup = parsedate_tz(string)
+ if len(tup) == 8:
+ tup = tup + (0,) # If TimeZone is omitted, assume UTC
+ ts = mktime_tz(tup)
+ dt = datetime.fromtimestamp(ts, pytz.UTC)
+ return dt
+ """
+ assert isinstance(string, (str, unicode))
+ dt = datetime.strptime(string, "%Y-%m-%dT%H:%M:%SZ")
+ return dt
+
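+ # Example for clarity: _string_to_dt("2020-01-02T06:30:00Z") returns
+ # datetime(2020, 1, 2, 6, 30), a naive datetime that is treated as UTC.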
+ def _dt_to_string(self, dt):
+ """ Convert DateTime to correctly formatted String."""
+ if dt is None:
+ return None
+
+ """
+ # TODO: Use RFC2822. Need to make it work bi-directionally.
+ return format_date(dt)
+ """
+
+ assert isinstance(dt, datetime)
+ string = dt.isoformat() + 'Z'
+ return string
+
+ def _decrypt_username(self, username):
+ if username is None:
+ return None
+ return username.decrypt()
+
+
+@resource('/v1/repository/<apirepopath:repository>/mirror/rules')
+@show_if(features.REPO_MIRROR)
+class ManageRepoMirrorRule(RepositoryParamResource):
+ """
+ Operations to manage a single Repository Mirroring Rule.
+ TODO: At the moment, we are only dealing with a single rule associated with the mirror.
+ This should change to update the rule and address it using its UUID.
+ """
+ schemas = {
+ 'MirrorRule': {
+ 'type': 'object',
+ 'description': 'A rule used to define how a repository is mirrored.',
+ 'required': ['root_rule'],
+ 'properties': {
+ 'root_rule': common_properties['root_rule']
+ }
+ }
+ }
+
+ @require_repo_admin
+ @nickname('changeRepoMirrorRule')
+ @validate_json_request('MirrorRule')
+ def put(self, namespace_name, repository_name):
+ """
+ Update an existing RepoMirrorRule
+ """
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if not repo:
+ raise NotFound()
+
+ rule = model.repo_mirror.get_root_rule(repo)
+ if not rule:
+ return {'detail': 'The rule appears to be missing.'}, 400
+
+ data = request.get_json()
+ if model.repo_mirror.change_rule_value(rule, data['root_rule']['rule_value']):
+ track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="mirror_rule", to=data['root_rule']['rule_value'])
+ return '', 200
+ else:
+ return {'detail': 'Unable to update rule.'}, 400
diff --git a/endpoints/api/organization.py b/endpoints/api/organization.py
new file mode 100644
index 000000000..e53bba6b9
--- /dev/null
+++ b/endpoints/api/organization.py
@@ -0,0 +1,740 @@
+""" Manage organizations, members and OAuth applications. """
+
+import logging
+import recaptcha2
+
+from flask import request
+
+import features
+
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from app import (billing as stripe, avatar, all_queues, authentication, namespace_gc_queue,
+ ip_resolver, app)
+from endpoints.api import (resource, nickname, ApiResource, validate_json_request, request_error,
+ related_user_resource, internal_only, require_user_admin, log_action,
+ show_if, path_param, require_scope, require_fresh_login)
+from endpoints.exception import Unauthorized, NotFound
+from endpoints.api.user import User, PrivateRepositories
+from auth.permissions import (AdministerOrganizationPermission, OrganizationMemberPermission,
+ CreateRepositoryPermission, ViewTeamPermission)
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from data import model
+from data.billing import get_plan
+from util.names import parse_robot_username
+from util.request import get_request_ip
+
+
+logger = logging.getLogger(__name__)
+
+
+def team_view(orgname, team):
+ return {
+ 'name': team.name,
+ 'description': team.description,
+ 'role': team.role_name,
+ 'avatar': avatar.get_data_for_team(team),
+ 'can_view': ViewTeamPermission(orgname, team.name).can(),
+
+ 'repo_count': team.repo_count,
+ 'member_count': team.member_count,
+
+ 'is_synced': team.is_synced,
+ }
+
+
+def org_view(o, teams):
+ is_admin = AdministerOrganizationPermission(o.username).can()
+ is_member = OrganizationMemberPermission(o.username).can()
+
+ view = {
+ 'name': o.username,
+ 'email': o.email if is_admin else '',
+ 'avatar': avatar.get_data_for_user(o),
+ 'is_admin': is_admin,
+ 'is_member': is_member
+ }
+
+ if teams is not None:
+ teams = sorted(teams, key=lambda team: team.id)
+ view['teams'] = {t.name : team_view(o.username, t) for t in teams}
+ view['ordered_teams'] = [team.name for team in teams]
+
+ if is_admin:
+ view['invoice_email'] = o.invoice_email
+ view['invoice_email_address'] = o.invoice_email_address
+ view['tag_expiration_s'] = o.removed_tag_expiration_s
+ view['is_free_account'] = o.stripe_id is None
+
+ return view
+
+
+@resource('/v1/organization/')
+class OrganizationList(ApiResource):
+ """ Resource for creating organizations. """
+ schemas = {
+ 'NewOrg': {
+ 'type': 'object',
+ 'description': 'Description of a new organization.',
+ 'required': [
+ 'name',
+ ],
+ 'properties': {
+ 'name': {
+ 'type': 'string',
+ 'description': 'Organization username',
+ },
+ 'email': {
+ 'type': 'string',
+ 'description': 'Organization contact email',
+ },
+ 'recaptcha_response': {
+ 'type': 'string',
+ 'description': 'The recaptcha response code for verification (only checked when recaptcha is enabled)',
+ },
+ },
+ },
+ }
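+ # Example 'NewOrg' payload (placeholder values):
+ #   {"name": "example-org", "email": "owner@example.com", "recaptcha_response": "..."}
+ # 'email' is required only when the MAILING feature is enabled, and the recaptcha field is
+ # verified only when the RECAPTCHA feature is enabled (see post() below).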
+
+ @require_user_admin
+ @nickname('createOrganization')
+ @validate_json_request('NewOrg')
+ def post(self):
+ """ Create a new organization. """
+ user = get_authenticated_user()
+ org_data = request.get_json()
+ existing = None
+
+ try:
+ existing = model.organization.get_organization(org_data['name'])
+ except model.InvalidOrganizationException:
+ pass
+
+ if not existing:
+ existing = model.user.get_user(org_data['name'])
+
+ if existing:
+ msg = 'A user or organization with this name already exists'
+ raise request_error(message=msg)
+
+ if features.MAILING and not org_data.get('email'):
+ raise request_error(message='Email address is required')
+
+ # If recaptcha is enabled, then verify the user is a human.
+ if features.RECAPTCHA:
+ recaptcha_response = org_data.get('recaptcha_response', '')
+ result = recaptcha2.verify(app.config['RECAPTCHA_SECRET_KEY'],
+ recaptcha_response,
+ get_request_ip())
+
+ if not result['success']:
+ return {
+ 'message': 'Are you a bot? If not, please revalidate the captcha.'
+ }, 400
+
+ is_possible_abuser = ip_resolver.is_ip_possible_threat(get_request_ip())
+ try:
+ model.organization.create_organization(org_data['name'], org_data.get('email'), user,
+ email_required=features.MAILING,
+ is_possible_abuser=is_possible_abuser)
+ return 'Created', 201
+ except model.DataModelException as ex:
+ raise request_error(exception=ex)
+
+
+@resource('/v1/organization/<orgname>')
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(User)
+class Organization(ApiResource):
+ """ Resource for managing organizations. """
+ schemas = {
+ 'UpdateOrg': {
+ 'type': 'object',
+ 'description': 'Description of updates for an existing organization',
+ 'properties': {
+ 'email': {
+ 'type': 'string',
+ 'description': 'Organization contact email',
+ },
+ 'invoice_email': {
+ 'type': 'boolean',
+ 'description': 'Whether the organization desires to receive emails for invoices',
+ },
+ 'invoice_email_address': {
+ 'type': ['string', 'null'],
+ 'description': 'The email address at which to receive invoices',
+ },
+ 'tag_expiration_s': {
+ 'type': 'integer',
+ 'minimum': 0,
+ 'description': 'The number of seconds for tag expiration',
+ },
+ },
+ },
+ }
+
+ @nickname('getOrganization')
+ def get(self, orgname):
+ """ Get the details for the specified organization """
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ teams = None
+ if OrganizationMemberPermission(orgname).can():
+ has_syncing = features.TEAM_SYNCING and bool(authentication.federated_service)
+ teams = model.team.get_teams_within_org(org, has_syncing)
+
+ return org_view(org, teams)
+
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('changeOrganizationDetails')
+ @validate_json_request('UpdateOrg')
+ def put(self, orgname):
+ """ Change the details for the specified organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ org_data = request.get_json()
+ if 'invoice_email' in org_data:
+ logger.debug('Changing invoice_email for organization: %s', org.username)
+ model.user.change_send_invoice_email(org, org_data['invoice_email'])
+
+ if ('invoice_email_address' in org_data and
+ org_data['invoice_email_address'] != org.invoice_email_address):
+ new_email = org_data['invoice_email_address']
+ logger.debug('Changing invoice email address for organization: %s', org.username)
+ model.user.change_invoice_email_address(org, new_email)
+
+ if 'email' in org_data and org_data['email'] != org.email:
+ new_email = org_data['email']
+ if model.user.find_user_by_email(new_email):
+ raise request_error(message='E-mail address already used')
+
+ logger.debug('Changing email address for organization: %s', org.username)
+ model.user.update_email(org, new_email)
+
+ if features.CHANGE_TAG_EXPIRATION and 'tag_expiration_s' in org_data:
+ logger.debug('Changing organization tag expiration to: %ss', org_data['tag_expiration_s'])
+ model.user.change_user_tag_expiration(org, org_data['tag_expiration_s'])
+
+ teams = model.team.get_teams_within_org(org)
+ return org_view(org, teams)
+ raise Unauthorized()
+
+
+ @require_scope(scopes.ORG_ADMIN)
+ @require_fresh_login
+ @nickname('deleteAdminedOrganization')
+ def delete(self, orgname):
+ """ Deletes the specified organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue)
+ return '', 204
+
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/private')
+@path_param('orgname', 'The name of the organization')
+@internal_only
+@related_user_resource(PrivateRepositories)
+@show_if(features.BILLING)
+class OrgPrivateRepositories(ApiResource):
+ """ Custom verb to compute whether additional private repositories are available. """
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrganizationPrivateAllowed')
+ def get(self, orgname):
+ """ Return whether or not this org is allowed to create new private repositories. """
+ permission = CreateRepositoryPermission(orgname)
+ if permission.can():
+ organization = model.organization.get_organization(orgname)
+ private_repos = model.user.get_private_repo_count(organization.username)
+ data = {
+ 'privateAllowed': False
+ }
+
+ if organization.stripe_id:
+ cus = stripe.Customer.retrieve(organization.stripe_id)
+ if cus.subscription:
+ repos_allowed = 0
+ plan = get_plan(cus.subscription.plan.id)
+ if plan:
+ repos_allowed = plan['privateRepos']
+
+ data['privateAllowed'] = (private_repos < repos_allowed)
+
+
+ if AdministerOrganizationPermission(orgname).can():
+ data['privateCount'] = private_repos
+
+ return data
+
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/collaborators')
+@path_param('orgname', 'The name of the organization')
+class OrganizationCollaboratorList(ApiResource):
+ """ Resource for listing outside collaborators of an organization.
+
+ Collaborators are users that do not belong to any team in the
+ organization, but who have direct permissions on one or more
+ repositories belonging to the organization.
+ """
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrganizationCollaborators')
+ def get(self, orgname):
+ """ List outside collaborators of the specified organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if not permission.can():
+ raise Unauthorized()
+
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ all_perms = model.permission.list_organization_member_permissions(org)
+ membership = model.team.list_organization_members_by_teams(org)
+
+ org_members = set(m.user.username for m in membership)
+
+ collaborators = {}
+ for perm in all_perms:
+ username = perm.user.username
+
+ # Only interested in non-member permissions.
+ if username in org_members:
+ continue
+
+ if username not in collaborators:
+ collaborators[username] = {
+ 'kind': 'user',
+ 'name': username,
+ 'avatar': avatar.get_data_for_user(perm.user),
+ 'repositories': [],
+ }
+
+ collaborators[username]['repositories'].append(perm.repository.name)
+
+ return {'collaborators': collaborators.values()}
+
+
+@resource('/v1/organization/<orgname>/members')
+@path_param('orgname', 'The name of the organization')
+class OrganizationMemberList(ApiResource):
+ """ Resource for listing the members of an organization. """
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrganizationMembers')
+ def get(self, orgname):
+ """ List the human members of the specified organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ # Loop to create the members dictionary. Note that the members collection
+ # will return an entry for *every team* a member is on, so we will have
+ # duplicate keys (which is why we pre-build the dictionary).
+ members_dict = {}
+ members = model.team.list_organization_members_by_teams(org)
+ for member in members:
+ if member.user.robot:
+ continue
+
+ if not member.user.username in members_dict:
+ member_data = {
+ 'name': member.user.username,
+ 'kind': 'user',
+ 'avatar': avatar.get_data_for_user(member.user),
+ 'teams': [],
+ 'repositories': []
+ }
+
+ members_dict[member.user.username] = member_data
+
+ members_dict[member.user.username]['teams'].append({
+ 'name': member.team.name,
+ 'avatar': avatar.get_data_for_team(member.team),
+ })
+
+ # Loop to add direct repository permissions.
+ for permission in model.permission.list_organization_member_permissions(org):
+ username = permission.user.username
+ if not username in members_dict:
+ continue
+
+ members_dict[username]['repositories'].append(permission.repository.name)
+
+ return {'members': members_dict.values()}
+
+ raise Unauthorized()
+
+
+
+@resource('/v1/organization/<orgname>/members/<membername>')
+@path_param('orgname', 'The name of the organization')
+@path_param('membername', 'The username of the organization member')
+class OrganizationMember(ApiResource):
+ """ Resource for managing individual organization members. """
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrganizationMember')
+ def get(self, orgname, membername):
+ """ Retrieves the details of a member of the organization.
+ """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ # Lookup the user.
+ member = model.user.get_user(membername)
+ if not member:
+ raise NotFound()
+
+ organization = model.user.get_user_or_org(orgname)
+ if not organization:
+ raise NotFound()
+
+ # Lookup the user's information in the organization.
+ teams = list(model.team.get_user_teams_within_org(membername, organization))
+ if not teams:
+ # 404 if the user is not a robot under the organization, as that means the referenced
+ # user or robot is not a member of this organization.
+ if not member.robot:
+ raise NotFound()
+
+ namespace, _ = parse_robot_username(member.username)
+ if namespace != orgname:
+ raise NotFound()
+
+ repo_permissions = model.permission.list_organization_member_permissions(organization, member)
+
+ def local_team_view(team):
+ return {
+ 'name': team.name,
+ 'avatar': avatar.get_data_for_team(team),
+ }
+
+ return {
+ 'name': member.username,
+ 'kind': 'robot' if member.robot else 'user',
+ 'avatar': avatar.get_data_for_user(member),
+ 'teams': [local_team_view(team) for team in teams],
+ 'repositories': [permission.repository.name for permission in repo_permissions]
+ }
+
+ raise Unauthorized()
+
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('removeOrganizationMember')
+ def delete(self, orgname, membername):
+ """ Removes a member from an organization, revoking all its repository
+ privileges and removing it from all teams in the organization.
+ """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ # Lookup the user.
+ user = model.user.get_nonrobot_user(membername)
+ if not user:
+ raise NotFound()
+
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ # Remove the user from the organization.
+ model.organization.remove_organization_member(org, user)
+ return '', 204
+
+ raise Unauthorized()
+
+
+@resource('/v1/app/<client_id>')
+@path_param('client_id', 'The OAuth client ID')
+class ApplicationInformation(ApiResource):
+ """ Resource that returns public information about a registered application. """
+
+ @nickname('getApplicationInformation')
+ def get(self, client_id):
+ """ Get information on the specified application. """
+ application = model.oauth.get_application_for_client_id(client_id)
+ if not application:
+ raise NotFound()
+
+ app_email = application.avatar_email or application.organization.email
+ app_data = avatar.get_data(application.name, app_email, 'app')
+
+ return {
+ 'name': application.name,
+ 'description': application.description,
+ 'uri': application.application_uri,
+ 'avatar': app_data,
+ 'organization': org_view(application.organization, [])
+ }
+
+
+def app_view(application):
+ is_admin = AdministerOrganizationPermission(application.organization.username).can()
+ client_secret = None
+ if is_admin:
+ # TODO(remove-unenc): Remove legacy lookup.
+ client_secret = None
+ if application.secure_client_secret is not None:
+ client_secret = application.secure_client_secret.decrypt()
+
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and client_secret is None:
+ client_secret = application.client_secret
+
+ assert (client_secret is not None) == is_admin
+ return {
+ 'name': application.name,
+ 'description': application.description,
+ 'application_uri': application.application_uri,
+ 'client_id': application.client_id,
+ 'client_secret': client_secret,
+ 'redirect_uri': application.redirect_uri if is_admin else None,
+ 'avatar_email': application.avatar_email if is_admin else None,
+ }
+
+
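+# Illustrative sketch of the dictionary returned by app_view above; the constant
+# name and all values are hypothetical. client_secret, redirect_uri and
+# avatar_email are only populated when the caller administers the organization.
+EXAMPLE_APP_VIEW = {
+  'name': 'ci-builder',
+  'description': 'Continuous integration bot',
+  'application_uri': 'https://ci.example.com',
+  'client_id': 'SOMECLIENTID',
+  'client_secret': None,
+  'redirect_uri': None,
+  'avatar_email': None,
+}
+
+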
+@resource('/v1/organization/<orgname>/applications')
+@path_param('orgname', 'The name of the organization')
+class OrganizationApplications(ApiResource):
+ """ Resource for managing applications defined by an organization. """
+ schemas = {
+ 'NewApp': {
+ 'type': 'object',
+ 'description': 'Description of a new organization application.',
+ 'required': [
+ 'name',
+ ],
+ 'properties': {
+ 'name': {
+ 'type': 'string',
+ 'description': 'The name of the application',
+ },
+ 'redirect_uri': {
+ 'type': 'string',
+ 'description': 'The URI for the application\'s OAuth redirect',
+ },
+ 'application_uri': {
+ 'type': 'string',
+ 'description': 'The URI for the application\'s homepage',
+ },
+ 'description': {
+ 'type': 'string',
+ 'description': 'The human-readable description for the application',
+ },
+ 'avatar_email': {
+ 'type': 'string',
+ 'description': 'The e-mail address of the avatar to use for the application',
+ }
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrganizationApplications')
+ def get(self, orgname):
+ """ List the applications for the specified organization """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ applications = model.oauth.list_applications_for_org(org)
+ return {'applications': [app_view(application) for application in applications]}
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('createOrganizationApplication')
+ @validate_json_request('NewApp')
+ def post(self, orgname):
+ """ Creates a new application under this organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ app_data = request.get_json()
+ application = model.oauth.create_application(org, app_data['name'],
+ app_data.get('application_uri', ''),
+ app_data.get('redirect_uri', ''),
+ description=app_data.get('description', ''),
+ avatar_email=app_data.get('avatar_email', None))
+
+ app_data.update({
+ 'application_name': application.name,
+ 'client_id': application.client_id
+ })
+
+ log_action('create_application', orgname, app_data)
+
+ return app_view(application)
+ raise Unauthorized()
+
+
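+# Illustrative sketch of a request body accepted by createOrganizationApplication
+# under the 'NewApp' schema above; the constant name and all field values are
+# hypothetical.
+EXAMPLE_NEW_APP_BODY = {
+  'name': 'ci-builder',
+  'redirect_uri': 'https://ci.example.com/oauth/callback',
+  'application_uri': 'https://ci.example.com',
+  'description': 'Continuous integration bot',
+  'avatar_email': 'ci@example.com',
+}
+
+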
+@resource('/v1/organization/<orgname>/applications/<client_id>')
+@path_param('orgname', 'The name of the organization')
+@path_param('client_id', 'The OAuth client ID')
+class OrganizationApplicationResource(ApiResource):
+ """ Resource for managing an application defined by an organizations. """
+ schemas = {
+ 'UpdateApp': {
+ 'type': 'object',
+ 'description': 'Description of an updated application.',
+ 'required': [
+ 'name',
+ 'redirect_uri',
+ 'application_uri'
+ ],
+ 'properties': {
+ 'name': {
+ 'type': 'string',
+ 'description': 'The name of the application',
+ },
+ 'redirect_uri': {
+ 'type': 'string',
+ 'description': 'The URI for the application\'s OAuth redirect',
+ },
+ 'application_uri': {
+ 'type': 'string',
+ 'description': 'The URI for the application\'s homepage',
+ },
+ 'description': {
+ 'type': 'string',
+ 'description': 'The human-readable description for the application',
+ },
+ 'avatar_email': {
+ 'type': 'string',
+ 'description': 'The e-mail address of the avatar to use for the application',
+ }
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrganizationApplication')
+ def get(self, orgname, client_id):
+ """ Retrieves the application with the specified client_id under the specified organization """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ application = model.oauth.lookup_application(org, client_id)
+ if not application:
+ raise NotFound()
+
+ return app_view(application)
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('updateOrganizationApplication')
+ @validate_json_request('UpdateApp')
+ def put(self, orgname, client_id):
+ """ Updates an application under this organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ application = model.oauth.lookup_application(org, client_id)
+ if not application:
+ raise NotFound()
+
+ app_data = request.get_json()
+ application.name = app_data['name']
+ application.application_uri = app_data['application_uri']
+ application.redirect_uri = app_data['redirect_uri']
+ application.description = app_data.get('description', '')
+ application.avatar_email = app_data.get('avatar_email', None)
+ application.save()
+
+ app_data.update({
+ 'application_name': application.name,
+ 'client_id': application.client_id
+ })
+
+ log_action('update_application', orgname, app_data)
+
+ return app_view(application)
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('deleteOrganizationApplication')
+ def delete(self, orgname, client_id):
+ """ Deletes the application under this organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ application = model.oauth.delete_application(org, client_id)
+ if not application:
+ raise NotFound()
+
+ log_action('delete_application', orgname,
+ {'application_name': application.name, 'client_id': client_id})
+
+ return '', 204
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/applications/<client_id>/resetclientsecret')
+@path_param('orgname', 'The name of the organization')
+@path_param('client_id', 'The OAuth client ID')
+@internal_only
+class OrganizationApplicationResetClientSecret(ApiResource):
+ """ Custom verb for resetting the client secret of an application. """
+ @nickname('resetOrganizationApplicationClientSecret')
+ def post(self, orgname, client_id):
+ """ Resets the client secret of the application. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ application = model.oauth.lookup_application(org, client_id)
+ if not application:
+ raise NotFound()
+
+ application = model.oauth.reset_client_secret(application)
+ log_action('reset_application_client_secret', orgname,
+ {'application_name': application.name, 'client_id': client_id})
+
+ return app_view(application)
+ raise Unauthorized()
diff --git a/endpoints/api/permission.py b/endpoints/api/permission.py
new file mode 100644
index 000000000..e85c6480e
--- /dev/null
+++ b/endpoints/api/permission.py
@@ -0,0 +1,209 @@
+""" Manage repository permissions. """
+
+import logging
+
+from flask import request
+
+from endpoints.api import (resource, nickname, require_repo_admin, RepositoryParamResource,
+ log_action, request_error, validate_json_request, path_param)
+from endpoints.exception import NotFound
+from permission_models_pre_oci import pre_oci_model as model
+from permission_models_interface import DeleteException, SaveException
+
+logger = logging.getLogger(__name__)
+
+
+@resource('/v1/repository/<apirepopath:repository>/permissions/team/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryTeamPermissionList(RepositoryParamResource):
+ """ Resource for repository team permissions. """
+ @require_repo_admin
+ @nickname('listRepoTeamPermissions')
+ def get(self, namespace_name, repository_name):
+ """ List all team permission. """
+ repo_perms = model.get_repo_permissions_by_team(namespace_name, repository_name)
+
+ return {
+ 'permissions': {repo_perm.team_name: repo_perm.to_dict()
+ for repo_perm in repo_perms}
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/permissions/user/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryUserPermissionList(RepositoryParamResource):
+ """ Resource for repository user permissions. """
+ @require_repo_admin
+ @nickname('listRepoUserPermissions')
+ def get(self, namespace_name, repository_name):
+ """ List all user permissions. """
+ perms = model.get_repo_permissions_by_user(namespace_name, repository_name)
+ return {'permissions': {p.username: p.to_dict() for p in perms}}
+
+
+@resource('/v1/repository/<apirepopath:repository>/permissions/user/<username>/transitive')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('username', 'The username of the user to which the permissions apply')
+class RepositoryUserTransitivePermission(RepositoryParamResource):
+ """ Resource for retrieving whether a user has access to a repository, either directly
+ or via a team. """
+ @require_repo_admin
+ @nickname('getUserTransitivePermission')
+ def get(self, namespace_name, repository_name, username):
+ """ Get the fetch the permission for the specified user. """
+
+ roles = model.get_repo_roles(username, namespace_name, repository_name)
+
+ if not roles:
+ raise NotFound
+
+ return {
+ 'permissions': [r.to_dict() for r in roles]
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/permissions/user/<username>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('username', 'The username of the user to which the permission applies')
+class RepositoryUserPermission(RepositoryParamResource):
+ """ Resource for managing individual user permissions. """
+ schemas = {
+ 'UserPermission': {
+ 'type': 'object',
+ 'description': 'Description of a user permission.',
+ 'required': [
+ 'role',
+ ],
+ 'properties': {
+ 'role': {
+ 'type': 'string',
+ 'description': 'Role to use for the user',
+ 'enum': [
+ 'read',
+ 'write',
+ 'admin',
+ ],
+ },
+ },
+ },
+ }
+
+ @require_repo_admin
+ @nickname('getUserPermissions')
+ def get(self, namespace_name, repository_name, username):
+ """ Get the permission for the specified user. """
+ logger.debug('Get repo: %s/%s permissions for user %s', namespace_name, repository_name, username)
+ perm = model.get_repo_permission_for_user(username, namespace_name, repository_name)
+ return perm.to_dict()
+
+ @require_repo_admin
+ @nickname('changeUserPermissions')
+ @validate_json_request('UserPermission')
+ def put(self, namespace_name, repository_name, username): # Also needs to respond to post
+ """ Update the perimssions for an existing repository. """
+ new_permission = request.get_json()
+
+ logger.debug('Setting permission to: %s for user %s', new_permission['role'], username)
+
+ try:
+ perm = model.set_repo_permission_for_user(username, namespace_name, repository_name,
+ new_permission['role'])
+ resp = perm.to_dict()
+ except SaveException as ex:
+ raise request_error(exception=ex)
+
+ log_action('change_repo_permission', namespace_name,
+ {'username': username, 'repo': repository_name,
+ 'namespace': namespace_name,
+ 'role': new_permission['role']},
+ repo_name=repository_name)
+
+ return resp, 200
+
+ @require_repo_admin
+ @nickname('deleteUserPermissions')
+ def delete(self, namespace_name, repository_name, username):
+ """ Delete the permission for the user. """
+ try:
+ model.delete_repo_permission_for_user(username, namespace_name, repository_name)
+ except DeleteException as ex:
+ raise request_error(exception=ex)
+
+ log_action('delete_repo_permission', namespace_name,
+ {'username': username, 'repo': repository_name, 'namespace': namespace_name},
+ repo_name=repository_name)
+
+ return '', 204
+
+
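+# Illustrative sketch of the 'UserPermission' request body and of the dictionary
+# returned by perm.to_dict(); the constant names and the username are
+# hypothetical, and the avatar payload is omitted. 'is_org_member' is only
+# present for repositories that live under an organization.
+EXAMPLE_ROLE_BODY = {'role': 'write'}
+EXAMPLE_USER_PERMISSION_RESPONSE = {
+  'role': 'write',
+  'name': 'someuser',
+  'is_robot': False,
+  'avatar': {},  # avatar payload omitted
+  'is_org_member': True,
+}
+
+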
+@resource('/v1/repository/<apirepopath:repository>/permissions/team/<teamname>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('teamname', 'The name of the team to which the permission applies')
+class RepositoryTeamPermission(RepositoryParamResource):
+ """ Resource for managing individual team permissions. """
+ schemas = {
+ 'TeamPermission': {
+ 'type': 'object',
+ 'description': 'Description of a team permission.',
+ 'required': [
+ 'role',
+ ],
+ 'properties': {
+ 'role': {
+ 'type': 'string',
+ 'description': 'Role to use for the team',
+ 'enum': [
+ 'read',
+ 'write',
+ 'admin',
+ ],
+ },
+ },
+ },
+ }
+
+ @require_repo_admin
+ @nickname('getTeamPermissions')
+ def get(self, namespace_name, repository_name, teamname):
+ """ Fetch the permission for the specified team. """
+ logger.debug('Get repo: %s/%s permissions for team %s', namespace_name, repository_name, teamname)
+ role = model.get_repo_role_for_team(teamname, namespace_name, repository_name)
+ return role.to_dict()
+
+ @require_repo_admin
+ @nickname('changeTeamPermissions')
+ @validate_json_request('TeamPermission')
+ def put(self, namespace_name, repository_name, teamname):
+ """ Update the existing team permission. """
+ new_permission = request.get_json()
+
+ logger.debug('Setting permission to: %s for team %s', new_permission['role'], teamname)
+
+ try:
+ perm = model.set_repo_permission_for_team(teamname, namespace_name, repository_name,
+ new_permission['role'])
+ resp = perm.to_dict()
+ except SaveException as ex:
+ raise request_error(exception=ex)
+
+
+ log_action('change_repo_permission', namespace_name,
+ {'team': teamname, 'repo': repository_name,
+ 'role': new_permission['role']},
+ repo_name=repository_name)
+ return resp, 200
+
+ @require_repo_admin
+ @nickname('deleteTeamPermissions')
+ def delete(self, namespace_name, repository_name, teamname):
+ """ Delete the permission for the specified team. """
+ try:
+ model.delete_repo_permission_for_team(teamname, namespace_name, repository_name)
+ except DeleteException as ex:
+ raise request_error(exception=ex)
+
+ log_action('delete_repo_permission', namespace_name,
+ {'team': teamname, 'repo': repository_name},
+ repo_name=repository_name)
+
+ return '', 204
diff --git a/endpoints/api/permission_models_interface.py b/endpoints/api/permission_models_interface.py
new file mode 100644
index 000000000..49c24744c
--- /dev/null
+++ b/endpoints/api/permission_models_interface.py
@@ -0,0 +1,208 @@
+import sys
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+
+from six import add_metaclass
+
+
+class SaveException(Exception):
+ def __init__(self, other):
+ self.traceback = sys.exc_info()
+ super(SaveException, self).__init__(str(other))
+
+class DeleteException(Exception):
+ def __init__(self, other):
+ self.traceback = sys.exc_info()
+ super(DeleteException, self).__init__(str(other))
+
+
+class Role(namedtuple('Role', ['role_name'])):
+ def to_dict(self):
+ return {
+ 'role': self.role_name,
+ }
+
+class UserPermission(namedtuple('UserPermission', [
+ 'role_name',
+ 'username',
+ 'is_robot',
+ 'avatar',
+ 'is_org_member',
+ 'has_org',
+ ])):
+
+ def to_dict(self):
+ perm_dict = {
+ 'role': self.role_name,
+ 'name': self.username,
+ 'is_robot': self.is_robot,
+ 'avatar': self.avatar,
+ }
+ if self.has_org:
+ perm_dict['is_org_member'] = self.is_org_member
+ return perm_dict
+
+
+class RobotPermission(namedtuple('RobotPermission', [
+ 'role_name',
+ 'username',
+ 'is_robot',
+ 'is_org_member',
+])):
+
+ def to_dict(self, user=None, team=None, org_members=None):
+ return {
+ 'role': self.role_name,
+ 'name': self.username,
+ 'is_robot': True,
+ 'is_org_member': self.is_org_member,
+ }
+
+
+class TeamPermission(namedtuple('TeamPermission', [
+ 'role_name',
+ 'team_name',
+ 'avatar',
+])):
+
+ def to_dict(self):
+ return {
+ 'role': self.role_name,
+ 'name': self.team_name,
+ 'avatar': self.avatar,
+ }
+
+@add_metaclass(ABCMeta)
+class PermissionDataInterface(object):
+ """
+ Data interface used by permissions API
+ """
+
+ @abstractmethod
+ def get_repo_permissions_by_user(self, namespace_name, repository_name):
+ """
+
+ Args:
+ namespace_name: string
+ repository_name: string
+
+ Returns:
+ list(UserPermission)
+ """
+
+ @abstractmethod
+ def get_repo_roles(self, username, namespace_name, repository_name):
+ """
+
+ Args:
+ username: string
+ namespace_name: string
+ repository_name: string
+
+ Returns:
+ list(Role) or None
+ """
+
+ @abstractmethod
+ def get_repo_permission_for_user(self, username, namespace_name, repository_name):
+ """
+
+ Args:
+ username: string
+ namespace_name: string
+ repository_name: string
+
+ Returns:
+ UserPermission
+ """
+
+ @abstractmethod
+ def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
+ """
+
+ Args:
+ username: string
+ namespace_name: string
+ repository_name: string
+ role_name: string
+
+ Returns:
+ UserPermission
+
+ Raises:
+ SaveException
+ """
+
+ @abstractmethod
+ def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
+ """
+
+ Args:
+ username: string
+ namespace_name: string
+ repository_name: string
+
+ Returns:
+ void
+
+ Raises:
+ DeleteException
+ """
+
+ @abstractmethod
+ def get_repo_permissions_by_team(self, namespace_name, repository_name):
+ """
+
+ Args:
+ namespace_name: string
+ repository_name: string
+
+ Returns:
+ list(TeamPermission)
+ """
+
+ @abstractmethod
+ def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
+ """
+
+ Args:
+ team_name: string
+ namespace_name: string
+ repository_name: string
+
+ Returns:
+ Role
+ """
+
+ @abstractmethod
+ def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
+ """
+
+ Args:
+ team_name: string
+ namespace_name: string
+ repository_name: string
+ permission: string
+
+ Returns:
+ TeamPermission
+
+ Raises:
+ SaveException
+ """
+
+ @abstractmethod
+ def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
+ """
+
+ Args:
+ team_name: string
+ namespace_name: string
+ repository_name: string
+
+ Returns:
+ TeamPermission
+
+ Raises:
+ DeleteException
+ """
\ No newline at end of file
diff --git a/endpoints/api/permission_models_pre_oci.py b/endpoints/api/permission_models_pre_oci.py
new file mode 100644
index 000000000..1f19cad10
--- /dev/null
+++ b/endpoints/api/permission_models_pre_oci.py
@@ -0,0 +1,115 @@
+from app import avatar
+from data import model
+from permission_models_interface import PermissionDataInterface, UserPermission, TeamPermission, Role, SaveException, DeleteException
+
+
+class PreOCIModel(PermissionDataInterface):
+ """
+ PreOCIModel implements the data model for Permission using a database schema
+ before it was changed to support the OCI specification.
+ """
+
+ def get_repo_permissions_by_user(self, namespace_name, repository_name):
+ org = None
+ try:
+ org = model.organization.get_organization(namespace_name) # Will raise an error if not org
+ except model.InvalidOrganizationException:
+ # This repository isn't under an org
+ pass
+
+ # Load the permissions.
+ repo_perms = model.user.get_all_repo_users(namespace_name, repository_name)
+
+ if org:
+ users_filter = {perm.user for perm in repo_perms}
+ org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
+
+ def is_org_member(user):
+ if not org:
+ return False
+
+ return user.robot or user.username in org_members
+
+ return [self._user_permission(perm, org is not None, is_org_member(perm.user)) for perm in repo_perms]
+
+ def get_repo_roles(self, username, namespace_name, repository_name):
+ user = model.user.get_user(username)
+ if not user:
+ return None
+
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if not repo:
+ return None
+
+ return [self._role(r) for r in model.permission.get_user_repo_permissions(user, repo)]
+
+ def get_repo_permission_for_user(self, username, namespace_name, repository_name):
+ perm = model.permission.get_user_reponame_permission(username, namespace_name, repository_name)
+ org = None
+ try:
+ org = model.organization.get_organization(namespace_name)
+ org_members = model.organization.get_organization_member_set(org, users_filter={perm.user})
+ is_org_member = perm.user.robot or perm.user.username in org_members
+ except model.InvalidOrganizationException:
+ # This repository is not part of an organization
+ is_org_member = False
+
+ return self._user_permission(perm, org is not None, is_org_member)
+
+ def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
+ try:
+ perm = model.permission.set_user_repo_permission(username, namespace_name, repository_name, role_name)
+ org = None
+ try:
+ org = model.organization.get_organization(namespace_name)
+ org_members = model.organization.get_organization_member_set(org, users_filter={perm.user})
+ is_org_member = perm.user.robot or perm.user.username in org_members
+ except model.InvalidOrganizationException:
+ # This repository is not part of an organization
+ is_org_member = False
+ return self._user_permission(perm, org is not None, is_org_member)
+ except model.DataModelException as ex:
+ raise SaveException(ex)
+
+ def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
+ try:
+ model.permission.delete_user_permission(username, namespace_name, repository_name)
+ except model.DataModelException as ex:
+ raise DeleteException(ex)
+
+ def get_repo_permissions_by_team(self, namespace_name, repository_name):
+ repo_perms = model.permission.get_all_repo_teams(namespace_name, repository_name)
+ return [self._team_permission(perm, perm.team.name) for perm in repo_perms]
+
+ def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
+ return self._role(model.permission.get_team_reponame_permission(team_name, namespace_name, repository_name))
+
+ def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, role_name):
+ try:
+ return self._team_permission(model.permission.set_team_repo_permission(team_name, namespace_name, repository_name, role_name), team_name)
+ except model.DataModelException as ex:
+ raise SaveException(ex)
+
+ def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
+ try:
+ model.permission.delete_team_permission(team_name, namespace_name, repository_name)
+ except model.DataModelException as ex:
+ raise DeleteException(ex)
+
+ def _role(self, permission_obj):
+ return Role(role_name=permission_obj.role.name)
+
+ def _user_permission(self, permission_obj, has_org, is_org_member):
+ return UserPermission(role_name=permission_obj.role.name,
+ username=permission_obj.user.username,
+ is_robot=permission_obj.user.robot,
+ avatar=avatar.get_data_for_user(permission_obj.user),
+ is_org_member=is_org_member,
+ has_org=has_org)
+
+ def _team_permission(self, permission_obj, team_name):
+ return TeamPermission(role_name=permission_obj.role.name,
+ team_name=permission_obj.team.name,
+ avatar=avatar.get_data_for_team(permission_obj.team))
+
+pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/prototype.py b/endpoints/api/prototype.py
new file mode 100644
index 000000000..2944aab60
--- /dev/null
+++ b/endpoints/api/prototype.py
@@ -0,0 +1,270 @@
+""" Manage default permissions added to repositories. """
+
+from flask import request
+
+from endpoints.api import (resource, nickname, ApiResource, validate_json_request, request_error,
+ log_action, path_param, require_scope)
+from endpoints.exception import Unauthorized, NotFound
+from auth.permissions import AdministerOrganizationPermission
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from data import model
+from app import avatar
+
+
+def prototype_view(proto, org_members):
+ def prototype_user_view(user):
+ return {
+ 'name': user.username,
+ 'is_robot': user.robot,
+ 'kind': 'user',
+ 'is_org_member': user.robot or user.username in org_members,
+ 'avatar': avatar.get_data_for_user(user)
+ }
+
+ if proto.delegate_user:
+ delegate_view = prototype_user_view(proto.delegate_user)
+ else:
+ delegate_view = {
+ 'name': proto.delegate_team.name,
+ 'kind': 'team',
+ 'avatar': avatar.get_data_for_team(proto.delegate_team)
+ }
+
+ return {
+ 'activating_user': (prototype_user_view(proto.activating_user)
+ if proto.activating_user else None),
+ 'delegate': delegate_view,
+ 'role': proto.role.name,
+ 'id': proto.uuid,
+ }
+
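+# Illustrative sketch of the dictionary produced by prototype_view; the constant
+# name, the team name and the id are hypothetical, and the avatar payload is
+# omitted.
+EXAMPLE_PROTOTYPE_VIEW = {
+  'activating_user': None,  # or a prototype_user_view() dictionary
+  'delegate': {'name': 'readers', 'kind': 'team', 'avatar': {}},
+  'role': 'read',
+  'id': 'some-prototype-uuid',
+}
+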
+def log_prototype_action(action_kind, orgname, prototype, **kwargs):
+ username = get_authenticated_user().username
+ log_params = {
+ 'prototypeid': prototype.uuid,
+ 'username': username,
+ 'activating_username': (prototype.activating_user.username
+ if prototype.activating_user else None),
+ 'role': prototype.role.name
+ }
+
+ for key, value in kwargs.items():
+ log_params[key] = value
+
+ if prototype.delegate_user:
+ log_params['delegate_user'] = prototype.delegate_user.username
+ elif prototype.delegate_team:
+ log_params['delegate_team'] = prototype.delegate_team.name
+
+ log_action(action_kind, orgname, log_params)
+
+
+@resource('/v1/organization/<orgname>/prototypes')
+@path_param('orgname', 'The name of the organization')
+class PermissionPrototypeList(ApiResource):
+ """ Resource for listing and creating permission prototypes. """
+ schemas = {
+ 'NewPrototype': {
+ 'type': 'object',
+ 'description': 'Description of a new prototype',
+ 'required': [
+ 'role',
+ 'delegate',
+ ],
+ 'properties': {
+ 'role': {
+ 'type': 'string',
+ 'description': 'Role that should be applied to the delegate',
+ 'enum': [
+ 'read',
+ 'write',
+ 'admin',
+ ],
+ },
+ 'activating_user': {
+ 'type': 'object',
+ 'description': 'Repository creating user to whom the rule should apply',
+ 'required': [
+ 'name',
+ ],
+ 'properties': {
+ 'name': {
+ 'type': 'string',
+ 'description': 'The username for the activating_user',
+ },
+ },
+ },
+ 'delegate': {
+ 'type': 'object',
+ 'description': 'Information about the user or team to which the rule grants access',
+ 'required': [
+ 'name',
+ 'kind',
+ ],
+ 'properties': {
+ 'name': {
+ 'type': 'string',
+ 'description': 'The name for the delegate team or user',
+ },
+ 'kind': {
+ 'type': 'string',
+ 'description': 'Whether the delegate is a user or a team',
+ 'enum': [
+ 'user',
+ 'team',
+ ],
+ },
+ },
+ },
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrganizationPrototypePermissions')
+ def get(self, orgname):
+ """ List the existing prototypes for this organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ permissions = model.permission.get_prototype_permissions(org)
+
+ users_filter = ({p.activating_user for p in permissions} |
+ {p.delegate_user for p in permissions})
+ org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
+ return {'prototypes': [prototype_view(p, org_members) for p in permissions]}
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('createOrganizationPrototypePermission')
+ @validate_json_request('NewPrototype')
+ def post(self, orgname):
+ """ Create a new permission prototype. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ details = request.get_json()
+ activating_username = None
+
+ if ('activating_user' in details and details['activating_user'] and
+ 'name' in details['activating_user']):
+ activating_username = details['activating_user']['name']
+
+ delegate = details['delegate'] if 'delegate' in details else {}
+ delegate_kind = delegate.get('kind', None)
+ delegate_name = delegate.get('name', None)
+
+ delegate_username = delegate_name if delegate_kind == 'user' else None
+ delegate_teamname = delegate_name if delegate_kind == 'team' else None
+
+ activating_user = (model.user.get_user(activating_username) if activating_username else None)
+ delegate_user = (model.user.get_user(delegate_username) if delegate_username else None)
+ delegate_team = (model.team.get_organization_team(orgname, delegate_teamname)
+ if delegate_teamname else None)
+
+ if activating_username and not activating_user:
+ raise request_error(message='Unknown activating user')
+
+ if not delegate_user and not delegate_team:
+ raise request_error(message='Missing delegate user or team')
+
+ role_name = details['role']
+
+ prototype = model.permission.add_prototype_permission(org, role_name, activating_user,
+ delegate_user, delegate_team)
+ log_prototype_action('create_prototype_permission', orgname, prototype)
+
+ users_filter = {prototype.activating_user, prototype.delegate_user}
+ org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
+ return prototype_view(prototype, org_members)
+
+ raise Unauthorized()
+
+
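+# Illustrative sketch of a request body accepted by
+# createOrganizationPrototypePermission under the 'NewPrototype' schema above;
+# the constant name and the user/team names are hypothetical.
+EXAMPLE_NEW_PROTOTYPE_BODY = {
+  'role': 'read',
+  'activating_user': {'name': 'someuser'},
+  'delegate': {'name': 'readers', 'kind': 'team'},
+}
+
+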
+@resource('/v1/organization/<orgname>/prototypes/<prototypeid>')
+@path_param('orgname', 'The name of the organization')
+@path_param('prototypeid', 'The ID of the prototype')
+class PermissionPrototype(ApiResource):
+ """ Resource for managingin individual permission prototypes. """
+ schemas = {
+ 'PrototypeUpdate': {
+ 'type': 'object',
+ 'description': 'Description of the new prototype role',
+ 'required': [
+ 'role',
+ ],
+ 'properties': {
+ 'role': {
+ 'type': 'string',
+ 'description': 'Role that should be applied to the permission',
+ 'enum': [
+ 'read',
+ 'write',
+ 'admin',
+ ],
+ },
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('deleteOrganizationPrototypePermission')
+ def delete(self, orgname, prototypeid):
+ """ Delete an existing permission prototype. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ prototype = model.permission.delete_prototype_permission(org, prototypeid)
+ if not prototype:
+ raise NotFound()
+
+ log_prototype_action('delete_prototype_permission', orgname, prototype)
+
+ return '', 204
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('updateOrganizationPrototypePermission')
+ @validate_json_request('PrototypeUpdate')
+ def put(self, orgname, prototypeid):
+ """ Update the role of an existing permission prototype. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ org = model.organization.get_organization(orgname)
+ except model.InvalidOrganizationException:
+ raise NotFound()
+
+ existing = model.permission.get_prototype_permission(org, prototypeid)
+ if not existing:
+ raise NotFound()
+
+ details = request.get_json()
+ role_name = details['role']
+ prototype = model.permission.update_prototype_permission(org, prototypeid, role_name)
+ if not prototype:
+ raise NotFound()
+
+ log_prototype_action('modify_prototype_permission', orgname, prototype,
+ original_role=existing.role.name)
+
+ users_filter = {prototype.activating_user, prototype.delegate_user}
+ org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
+ return prototype_view(prototype, org_members)
+
+ raise Unauthorized()
diff --git a/endpoints/api/repoemail.py b/endpoints/api/repoemail.py
new file mode 100644
index 000000000..3edccb4cc
--- /dev/null
+++ b/endpoints/api/repoemail.py
@@ -0,0 +1,52 @@
+""" Authorize repository to send e-mail notifications. """
+
+import logging
+
+from flask import request, abort
+
+from endpoints.api import (resource, nickname, require_repo_admin, RepositoryParamResource,
+ log_action, validate_json_request, internal_only, path_param, show_if)
+from endpoints.api.repoemail_models_pre_oci import pre_oci_model as model
+from endpoints.exception import NotFound
+from app import tf
+from data.database import db
+from util.useremails import send_repo_authorization_email
+
+import features
+
+logger = logging.getLogger(__name__)
+
+
+@internal_only
+@resource('/v1/repository/<apirepopath:repository>/authorizedemail/<email>')
+@show_if(features.MAILING)
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('email', 'The e-mail address')
+class RepositoryAuthorizedEmail(RepositoryParamResource):
+ """ Resource for checking and authorizing e-mail addresses to receive repo notifications. """
+
+ @require_repo_admin
+ @nickname('checkRepoEmailAuthorized')
+ def get(self, namespace, repository, email):
+ """ Checks to see if the given e-mail address is authorized on this repository. """
+ record = model.get_email_authorized_for_repo(namespace, repository, email)
+ if not record:
+ abort(404)
+
+ return record.to_dict()
+
+ @require_repo_admin
+ @nickname('sendAuthorizeRepoEmail')
+ def post(self, namespace, repository, email):
+ """ Starts the authorization process for an e-mail address on a repository. """
+
+ with tf(db):
+ record = model.get_email_authorized_for_repo(namespace, repository, email)
+ if record and record.confirmed:
+ return record.to_dict()
+
+ if not record:
+ record = model.create_email_authorization_for_repo(namespace, repository, email)
+
+ send_repo_authorization_email(namespace, repository, email, record.code)
+ return record.to_dict()
diff --git a/endpoints/api/repoemail_models_interface.py b/endpoints/api/repoemail_models_interface.py
new file mode 100644
index 000000000..2aae7ab9c
--- /dev/null
+++ b/endpoints/api/repoemail_models_interface.py
@@ -0,0 +1,50 @@
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+
+from six import add_metaclass
+
+
+class RepositoryAuthorizedEmail(
+ namedtuple('RepositoryAuthorizedEmail', [
+ 'email',
+ 'repository_name',
+ 'namespace_name',
+ 'confirmed',
+ 'code',
+ ])):
+ """
+ Represents an e-mail address authorized (or pending authorization) to receive repository notifications.
+ :type email: string
+ :type repository_name: string
+ :type namespace_name: string
+ :type confirmed: boolean
+ :type code: string
+ """
+
+ def to_dict(self):
+ return {
+ 'email': self.email,
+ 'repository': self.repository_name,
+ 'namespace': self.namespace_name,
+ 'confirmed': self.confirmed,
+ 'code': self.code
+ }
+
+
+@add_metaclass(ABCMeta)
+class RepoEmailDataInterface(object):
+ """
+ Interface that represents all data store interactions required by a Repo Email.
+ """
+
+ @abstractmethod
+ def get_email_authorized_for_repo(self, namespace_name, repository_name, email):
+ """
+ Returns a RepositoryAuthorizedEmail if available else None
+ """
+
+ @abstractmethod
+ def create_email_authorization_for_repo(self, namespace_name, repository_name, email):
+ """
+ Returns the newly created repository authorized email.
+ """
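+
+
+# Illustrative sketch of a minimal in-memory implementation of the interface
+# above, e.g. as a test double; the class name and the placeholder code value
+# are hypothetical and nothing in the application uses it.
+class InMemoryRepoEmailModel(RepoEmailDataInterface):
+  def __init__(self):
+    self._records = {}
+
+  def get_email_authorized_for_repo(self, namespace_name, repository_name, email):
+    # Returns the stored RepositoryAuthorizedEmail, or None if never authorized.
+    return self._records.get((namespace_name, repository_name, email))
+
+  def create_email_authorization_for_repo(self, namespace_name, repository_name, email):
+    # Creates an unconfirmed authorization record and remembers it.
+    record = RepositoryAuthorizedEmail(email, repository_name, namespace_name, False, 'dummy-code')
+    self._records[(namespace_name, repository_name, email)] = record
+    return record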
diff --git a/endpoints/api/repoemail_models_pre_oci.py b/endpoints/api/repoemail_models_pre_oci.py
new file mode 100644
index 000000000..80a65c995
--- /dev/null
+++ b/endpoints/api/repoemail_models_pre_oci.py
@@ -0,0 +1,28 @@
+from data import model
+from endpoints.api.repoemail_models_interface import RepoEmailDataInterface, RepositoryAuthorizedEmail
+
+
+def _return_none_or_data(func, namespace_name, repository_name, email):
+ data = func(namespace_name, repository_name, email)
+ if data is None:
+ return data
+ return RepositoryAuthorizedEmail(email, repository_name, namespace_name, data.confirmed,
+ data.code)
+
+
+class PreOCIModel(RepoEmailDataInterface):
+ """
+ PreOCIModel implements the data model for the Repo Email using a database schema
+ before it was changed to support the OCI specification.
+ """
+
+ def get_email_authorized_for_repo(self, namespace_name, repository_name, email):
+ return _return_none_or_data(model.repository.get_email_authorized_for_repo, namespace_name,
+ repository_name, email)
+
+ def create_email_authorization_for_repo(self, namespace_name, repository_name, email):
+ return _return_none_or_data(model.repository.create_email_authorization_for_repo,
+ namespace_name, repository_name, email)
+
+
+pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/repository.py b/endpoints/api/repository.py
new file mode 100644
index 000000000..d117f238d
--- /dev/null
+++ b/endpoints/api/repository.py
@@ -0,0 +1,404 @@
+""" List, create and manage repositories. """
+
+import logging
+import datetime
+import features
+
+from collections import defaultdict
+from datetime import timedelta, datetime
+
+from flask import request, abort
+
+from app import dockerfile_build_queue, tuf_metadata_api
+from data.database import RepositoryState
+from endpoints.api import (
+ format_date, nickname, log_action, validate_json_request, require_repo_read, require_repo_write,
+ require_repo_admin, RepositoryParamResource, resource, parse_args, ApiResource, request_error,
+ require_scope, path_param, page_support, query_param, truthy_bool, show_if)
+from endpoints.api.repository_models_pre_oci import pre_oci_model as model
+from endpoints.exception import (
+ Unauthorized, NotFound, InvalidRequest, ExceedsLicenseException, DownstreamIssue)
+from endpoints.api.billing import lookup_allowed_private_repos, get_namespace_plan
+from endpoints.api.subscribe import check_repository_usage
+
+from auth.permissions import (ModifyRepositoryPermission, AdministerRepositoryPermission,
+ CreateRepositoryPermission, ReadRepositoryPermission)
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from util.names import REPOSITORY_NAME_REGEX
+
+logger = logging.getLogger(__name__)
+
+REPOS_PER_PAGE = 100
+MAX_DAYS_IN_3_MONTHS = 92
+
+
+def check_allowed_private_repos(namespace):
+ """ Checks to see if the given namespace has reached its private repository limit. If so,
+ raises a ExceedsLicenseException.
+ """
+ # Not enabled if billing is disabled.
+ if not features.BILLING:
+ return
+
+ if not lookup_allowed_private_repos(namespace):
+ raise ExceedsLicenseException()
+
+
+@resource('/v1/repository')
+class RepositoryList(ApiResource):
+ """Operations for creating and listing repositories."""
+ schemas = {
+ 'NewRepo': {
+ 'type': 'object',
+ 'description': 'Description of a new repository',
+ 'required': [
+ 'repository',
+ 'visibility',
+ 'description',
+ ],
+ 'properties': {
+ 'repository': {
+ 'type': 'string',
+ 'description': 'Repository name',
+ },
+ 'visibility': {
+ 'type': 'string',
+ 'description': 'Visibility which the repository will start with',
+ 'enum': [
+ 'public',
+ 'private',
+ ],
+ },
+ 'namespace': {
+ 'type': 'string',
+ 'description': ('Namespace in which the repository should be created. If omitted, the '
+ 'username of the caller is used'),
+ },
+ 'description': {
+ 'type': 'string',
+ 'description': 'Markdown encoded description for the repository',
+ },
+ 'repo_kind': {
+ 'type': ['string', 'null'],
+ 'description': 'The kind of repository',
+ 'enum': ['image', 'application', None],
+ }
+ },
+ },
+ }
+
+ @require_scope(scopes.CREATE_REPO)
+ @nickname('createRepo')
+ @validate_json_request('NewRepo')
+ def post(self):
+ """Create a new repository."""
+ owner = get_authenticated_user()
+ req = request.get_json()
+
+ if owner is None and 'namespace' not in req:
+ raise InvalidRequest('Must provide a namespace or must be logged in.')
+
+ namespace_name = req['namespace'] if 'namespace' in req else owner.username
+
+ permission = CreateRepositoryPermission(namespace_name)
+ if permission.can():
+ repository_name = req['repository']
+ visibility = req['visibility']
+
+ if model.repo_exists(namespace_name, repository_name):
+ raise request_error(message='Repository already exists')
+
+ if visibility == 'private':
+ check_allowed_private_repos(namespace_name)
+
+ # Verify that the repository name is valid.
+ if not REPOSITORY_NAME_REGEX.match(repository_name):
+ raise InvalidRequest('Invalid repository name')
+
+ kind = req.get('repo_kind', 'image') or 'image'
+ model.create_repo(namespace_name, repository_name, owner, req['description'],
+ visibility=visibility, repo_kind=kind)
+
+ log_action('create_repo', namespace_name,
+ {'repo': repository_name,
+ 'namespace': namespace_name}, repo_name=repository_name)
+ return {
+ 'namespace': namespace_name,
+ 'name': repository_name,
+ 'kind': kind,
+ }, 201
+
+ raise Unauthorized()
+
+ @require_scope(scopes.READ_REPO)
+ @nickname('listRepos')
+ @parse_args()
+ @query_param('namespace', 'Filters the repositories returned to this namespace', type=str)
+ @query_param('starred', 'Filters the repositories returned to those starred by the user',
+ type=truthy_bool, default=False)
+ @query_param('public', 'Adds any repositories visible to the user by virtue of being public',
+ type=truthy_bool, default=False)
+ @query_param('last_modified', 'Whether to include when the repository was last modified.',
+ type=truthy_bool, default=False)
+ @query_param('popularity', 'Whether to include the repository\'s popularity metric.',
+ type=truthy_bool, default=False)
+ @query_param('repo_kind', 'The kind of repositories to return', type=str, default='image')
+ @page_support()
+ def get(self, page_token, parsed_args):
+ """ Fetch the list of repositories visible to the current user under a variety of situations.
+ """
+ # Ensure that the user requests either filtered by a namespace, only starred repositories,
+ # or public repositories. This ensures that the user is not requesting *all* visible repos,
+ # which can cause a surge in DB CPU usage.
+ if not parsed_args['namespace'] and not parsed_args['starred'] and not parsed_args['public']:
+ raise InvalidRequest('namespace, starred or public are required for this API call')
+
+ user = get_authenticated_user()
+ username = user.username if user else None
+ last_modified = parsed_args['last_modified']
+ popularity = parsed_args['popularity']
+
+ if parsed_args['starred'] and not username:
+ # No repositories should be returned, as there is no user.
+ abort(400)
+
+ repos, next_page_token = model.get_repo_list(
+ parsed_args['starred'], user, parsed_args['repo_kind'], parsed_args['namespace'], username,
+ parsed_args['public'], page_token, last_modified, popularity)
+
+ return {'repositories': [repo.to_dict() for repo in repos]}, next_page_token
+
+
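+# Illustrative sketch of a request body accepted by createRepo under the
+# 'NewRepo' schema above and of the 201 response it yields; the constant names,
+# the namespace and the repository name are hypothetical.
+EXAMPLE_NEW_REPO_BODY = {
+  'repository': 'web-frontend',
+  'visibility': 'private',
+  'namespace': 'someorg',
+  'description': 'Images for the web frontend service',
+  'repo_kind': 'image',
+}
+EXAMPLE_CREATE_REPO_RESPONSE = {
+  'namespace': 'someorg',
+  'name': 'web-frontend',
+  'kind': 'image',
+}
+
+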
+@resource('/v1/repository/<apirepopath:repository>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class Repository(RepositoryParamResource):
+ """Operations for managing a specific repository."""
+ schemas = {
+ 'RepoUpdate': {
+ 'type': 'object',
+ 'description': 'Fields which can be updated in a repository.',
+ 'required': ['description',],
+ 'properties': {
+ 'description': {
+ 'type': 'string',
+ 'description': 'Markdown encoded description for the repository',
+ },
+ }
+ }
+ }
+
+ @parse_args()
+ @query_param('includeStats', 'Whether to include action statistics', type=truthy_bool,
+ default=False)
+ @query_param('includeTags', 'Whether to include repository tags', type=truthy_bool,
+ default=True)
+ @require_repo_read
+ @nickname('getRepo')
+ def get(self, namespace, repository, parsed_args):
+ """Fetch the specified repository."""
+ logger.debug('Get repo: %s/%s' % (namespace, repository))
+ include_tags = parsed_args['includeTags']
+ max_tags = 500
+ repo = model.get_repo(namespace, repository, get_authenticated_user(), include_tags, max_tags)
+ if repo is None:
+ raise NotFound()
+
+ has_write_permission = ModifyRepositoryPermission(namespace, repository).can()
+ has_write_permission = has_write_permission and repo.state == RepositoryState.NORMAL
+
+ repo_data = repo.to_dict()
+ repo_data['can_write'] = has_write_permission
+ repo_data['can_admin'] = AdministerRepositoryPermission(namespace, repository).can()
+
+ if parsed_args['includeStats'] and repo.repository_base_elements.kind_name != 'application':
+ stats = []
+ found_dates = {}
+
+ for count in repo.counts:
+ stats.append(count.to_dict())
+ found_dates['%s/%s' % (count.date.month, count.date.day)] = True
+
+ # Fill in any missing stats with zeros.
+ for day in range(1, MAX_DAYS_IN_3_MONTHS):
+ day_date = datetime.now() - timedelta(days=day)
+ key = '%s/%s' % (day_date.month, day_date.day)
+ if key not in found_dates:
+ stats.append({
+ 'date': day_date.date().isoformat(),
+ 'count': 0,
+ })
+
+ repo_data['stats'] = stats
+ return repo_data
+
+ @require_repo_write
+ @nickname('updateRepo')
+ @validate_json_request('RepoUpdate')
+ def put(self, namespace, repository):
+ """ Update the description in the specified repository. """
+ if not model.repo_exists(namespace, repository):
+ raise NotFound()
+
+ values = request.get_json()
+ model.set_description(namespace, repository, values['description'])
+
+ log_action('set_repo_description', namespace,
+ {'repo': repository,
+ 'namespace': namespace,
+ 'description': values['description']}, repo_name=repository)
+ return {'success': True}
+
+ @require_repo_admin
+ @nickname('deleteRepository')
+ def delete(self, namespace, repository):
+ """ Delete a repository. """
+ username = model.purge_repository(namespace, repository)
+
+ if features.BILLING:
+ plan = get_namespace_plan(namespace)
+ model.check_repository_usage(username, plan)
+
+ # Remove any builds from the queue.
+ dockerfile_build_queue.delete_namespaced_items(namespace, repository)
+
+ log_action('delete_repo', namespace, {'repo': repository, 'namespace': namespace})
+ return '', 204
+
+
+@resource('/v1/repository/<apirepopath:repository>/changevisibility')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryVisibility(RepositoryParamResource):
+ """ Custom verb for changing the visibility of the repository. """
+ schemas = {
+ 'ChangeVisibility': {
+ 'type': 'object',
+ 'description': 'Change the visibility for the repository.',
+ 'required': ['visibility',],
+ 'properties': {
+ 'visibility': {
+ 'type': 'string',
+ 'description': 'Visibility which the repository will start with',
+ 'enum': [
+ 'public',
+ 'private',
+ ],
+ },
+ }
+ }
+ }
+
+ @require_repo_admin
+ @nickname('changeRepoVisibility')
+ @validate_json_request('ChangeVisibility')
+ def post(self, namespace, repository):
+ """ Change the visibility of a repository. """
+ if model.repo_exists(namespace, repository):
+ values = request.get_json()
+ visibility = values['visibility']
+ if visibility == 'private':
+ check_allowed_private_repos(namespace)
+
+ model.set_repository_visibility(namespace, repository, visibility)
+ log_action('change_repo_visibility', namespace,
+ {'repo': repository,
+ 'namespace': namespace,
+ 'visibility': values['visibility']}, repo_name=repository)
+ return {'success': True}
+
+
+@resource('/v1/repository/<apirepopath:repository>/changetrust')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryTrust(RepositoryParamResource):
+ """ Custom verb for changing the trust settings of the repository. """
+ schemas = {
+ 'ChangeRepoTrust': {
+ 'type': 'object',
+ 'description': 'Change the trust settings for the repository.',
+ 'required': ['trust_enabled',],
+ 'properties': {
+ 'trust_enabled': {
+ 'type': 'boolean',
+ 'description': 'Whether or not signing is enabled for the repository.'
+ },
+ }
+ }
+ }
+
+ @show_if(features.SIGNING)
+ @require_repo_admin
+ @nickname('changeRepoTrust')
+ @validate_json_request('ChangeRepoTrust')
+ def post(self, namespace, repository):
+ """ Change the visibility of a repository. """
+ if not model.repo_exists(namespace, repository):
+ raise NotFound()
+
+ tags, _ = tuf_metadata_api.get_default_tags_with_expiration(namespace, repository)
+ if tags and not tuf_metadata_api.delete_metadata(namespace, repository):
+ raise DownstreamIssue('Unable to delete downstream trust metadata')
+
+ values = request.get_json()
+ model.set_trust(namespace, repository, values['trust_enabled'])
+
+ log_action(
+ 'change_repo_trust', namespace,
+ {'repo': repository,
+ 'namespace': namespace,
+ 'trust_enabled': values['trust_enabled']}, repo_name=repository)
+
+ return {'success': True}
+
+
+@resource('/v1/repository/<apirepopath:repository>/changestate')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@show_if(features.REPO_MIRROR)
+class RepositoryStateResource(RepositoryParamResource):
+ """ Custom verb for changing the state of the repository. """
+ schemas = {
+ 'ChangeRepoState': {
+ 'type': 'object',
+ 'description': 'Change the state of the repository.',
+ 'required': ['state'],
+ 'properties': {
+ 'state': {
+ 'type': 'string',
+ 'description': 'Determines whether pushes are allowed.',
+ 'enum': ['NORMAL', 'READ_ONLY', 'MIRROR'],
+ },
+ }
+ }
+ }
+
+ @require_repo_admin
+ @nickname('changeRepoState')
+ @validate_json_request('ChangeRepoState')
+ def put(self, namespace, repository):
+ """ Change the state of a repository. """
+ if not model.repo_exists(namespace, repository):
+ raise NotFound()
+
+ values = request.get_json()
+ state_name = values['state']
+
+ try:
+ state = RepositoryState[state_name]
+ except KeyError:
+ state = None
+
+ if state == RepositoryState.MIRROR and not features.REPO_MIRROR:
+ return {'detail': 'Unknown Repository State: %s' % state_name}, 400
+
+ if state is None:
+ return {'detail': '%s is not a valid Repository state.' % state_name}, 400
+
+ model.set_repository_state(namespace, repository, state)
+
+ log_action('change_repo_state', namespace,
+ {'repo': repository,
+ 'namespace': namespace,
+ 'state_changed': state_name}, repo_name=repository)
+
+ return {'success': True}
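+
+
+# Illustrative sketch of request bodies for the custom verbs above; the constant
+# names are hypothetical. The changetrust endpoint is only exposed when signing
+# is enabled, and the MIRROR state only when repository mirroring is.
+EXAMPLE_CHANGE_VISIBILITY_BODY = {'visibility': 'public'}
+EXAMPLE_CHANGE_TRUST_BODY = {'trust_enabled': True}
+EXAMPLE_CHANGE_STATE_BODY = {'state': 'READ_ONLY'}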
diff --git a/endpoints/api/repository_models_interface.py b/endpoints/api/repository_models_interface.py
new file mode 100644
index 000000000..3b5e06a2f
--- /dev/null
+++ b/endpoints/api/repository_models_interface.py
@@ -0,0 +1,279 @@
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple, defaultdict
+
+from datetime import datetime
+from six import add_metaclass
+
+import features
+from data.database import RepositoryState
+from endpoints.api import format_date
+
+
+class RepositoryBaseElement(
+ namedtuple('RepositoryBaseElement', [
+ 'namespace_name', 'repository_name', 'is_starred', 'is_public', 'kind_name', 'description',
+ 'namespace_user_organization', 'namespace_user_removed_tag_expiration_s', 'last_modified',
+ 'action_count', 'should_last_modified', 'should_popularity', 'should_is_starred',
+ 'is_free_account', 'state'
+ ])):
+ """
+ Represents a single Quay repository.
+ :type namespace_name: string
+ :type repository_name: string
+ :type is_starred: boolean
+ :type is_public: boolean
+ :type kind_name: string
+ :type description: string
+ :type namespace_user_organization: boolean
+ :type should_last_modified: boolean
+ :type should_popularity: boolean
+ :type should_is_starred: boolean
+ """
+
+ def to_dict(self):
+ repo = {
+ 'namespace': self.namespace_name,
+ 'name': self.repository_name,
+ 'description': self.description,
+ 'is_public': self.is_public,
+ 'kind': self.kind_name,
+ 'state': self.state.name if self.state is not None else None,
+ }
+
+ if self.should_last_modified:
+ repo['last_modified'] = self.last_modified
+
+ if self.should_popularity:
+ repo['popularity'] = float(self.action_count if self.action_count else 0)
+
+ if self.should_is_starred:
+ repo['is_starred'] = self.is_starred
+
+ return repo
+
+
+class ApplicationRepository(
+ namedtuple('ApplicationRepository', ['repository_base_elements', 'channels', 'releases', 'state'])):
+ """
+ Represents a single Quay application repository.
+ :type repository_base_elements: RepositoryBaseElement
+ :type channels: [Channel]
+ :type releases: [Release]
+ """
+
+ def to_dict(self):
+ repo_data = {
+ 'namespace': self.repository_base_elements.namespace_name,
+ 'name': self.repository_base_elements.repository_name,
+ 'kind': self.repository_base_elements.kind_name,
+ 'description': self.repository_base_elements.description,
+ 'is_public': self.repository_base_elements.is_public,
+ 'is_organization': self.repository_base_elements.namespace_user_organization,
+ 'is_starred': self.repository_base_elements.is_starred,
+ 'channels': [chan.to_dict() for chan in self.channels],
+ 'releases': [release.to_dict() for release in self.releases],
+ 'state': self.state.name if self.state is not None else None,
+ 'is_free_account': self.repository_base_elements.is_free_account,
+ }
+
+ return repo_data
+
+
+class ImageRepositoryRepository(
+ namedtuple('NonApplicationRepository',
+ ['repository_base_elements', 'tags', 'counts', 'badge_token', 'trust_enabled',
+ 'state'])):
+ """
+ Represents a single Quay image repository.
+ :type repository_base_elements: RepositoryBaseElement
+ :type tags: [Tag]
+ :type counts: [count]
+ :type badge_token: string
+ :type trust_enabled: boolean
+ """
+
+ def to_dict(self):
+ img_repo = {
+ 'namespace': self.repository_base_elements.namespace_name,
+ 'name': self.repository_base_elements.repository_name,
+ 'kind': self.repository_base_elements.kind_name,
+ 'description': self.repository_base_elements.description,
+ 'is_public': self.repository_base_elements.is_public,
+ 'is_organization': self.repository_base_elements.namespace_user_organization,
+ 'is_starred': self.repository_base_elements.is_starred,
+ 'status_token': self.badge_token if not self.repository_base_elements.is_public else '',
+ 'trust_enabled': bool(features.SIGNING) and self.trust_enabled,
+ 'tag_expiration_s': self.repository_base_elements.namespace_user_removed_tag_expiration_s,
+ 'is_free_account': self.repository_base_elements.is_free_account,
+ 'state': self.state.name if self.state is not None else None
+ }
+
+ if self.tags is not None:
+ img_repo['tags'] = {tag.name: tag.to_dict() for tag in self.tags}
+
+ if self.repository_base_elements.state:
+ img_repo['state'] = self.repository_base_elements.state.name
+
+ return img_repo
+
+
+class Repository(namedtuple('Repository', [
+ 'namespace_name',
+ 'repository_name',
+])):
+ """
+ Represents a reference to a single Quay repository.
+ :type namespace_name: string
+ :type repository_name: string
+ """
+
+
+class Channel(namedtuple('Channel', ['name', 'linked_tag_name', 'linked_tag_lifetime_start'])):
+ """
+ Represents a release channel within an application repository.
+ :type name: string
+ :type linked_tag_name: string
+ :type linked_tag_lifetime_start: string
+ """
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'release': self.linked_tag_name,
+ 'last_modified': format_date(datetime.fromtimestamp(self.linked_tag_lifetime_start / 1000)),
+ }
+
+
+class Release(
+ namedtuple('Release', ['name', 'lifetime_start', 'releases_channels_map'])):
+ """
+ Represents a release within an application repository.
+ :type name: string
+ :type lifetime_start: int
+ :type releases_channels_map: {string -> string}
+ """
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'last_modified': format_date(datetime.fromtimestamp(self.lifetime_start / 1000)),
+ 'channels': self.releases_channels_map[self.name],
+ }
+
+
+class Tag(
+ namedtuple('Tag', [
+ 'name', 'image_docker_image_id', 'image_aggregate_size', 'lifetime_start_ts',
+ 'tag_manifest_digest', 'lifetime_end_ts',
+ ])):
+ """
+ :type name: string
+ :type image_docker_image_id: string
+ :type image_aggregate_size: int
+ :type lifetime_start_ts: int
+ :type lifetime_end_ts: int|None
+ :type tag_manifest_digest: string
+
+ """
+
+ def to_dict(self):
+ tag_info = {
+ 'name': self.name,
+ 'image_id': self.image_docker_image_id,
+ 'size': self.image_aggregate_size
+ }
+
+ if self.lifetime_start_ts > 0:
+ last_modified = format_date(datetime.fromtimestamp(self.lifetime_start_ts))
+ tag_info['last_modified'] = last_modified
+
+ if self.lifetime_end_ts:
+ expiration = format_date(datetime.fromtimestamp(self.lifetime_end_ts))
+ tag_info['expiration'] = expiration
+
+ if self.tag_manifest_digest is not None:
+ tag_info['manifest_digest'] = self.tag_manifest_digest
+
+ return tag_info
+
+
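+# Illustrative sketch of constructing a Tag; the constant name, digest,
+# timestamps and size are hypothetical. Tag.to_dict() renders these fields into
+# the tag dictionary returned by the repository API.
+EXAMPLE_TAG = Tag(
+  name='latest',
+  image_docker_image_id='0123456789ab',
+  image_aggregate_size=52428800,
+  lifetime_start_ts=1546300800,
+  tag_manifest_digest='sha256:dummydigest',
+  lifetime_end_ts=None,
+)
+
+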
+class Count(namedtuple('Count', ['date', 'count'])):
+ """
+ date: DateTime
+ count: int
+ """
+
+ def to_dict(self):
+ return {
+ 'date': self.date.isoformat(),
+ 'count': self.count,
+ }
+
+
+@add_metaclass(ABCMeta)
+class RepositoryDataInterface(object):
+ """
+ Interface that represents all data store interactions required by a Repository.
+ """
+
+ @abstractmethod
+ def get_repo(self, namespace_name, repository_name, user, include_tags=True, max_tags=500):
+ """
+ Returns a repository
+ """
+
+ @abstractmethod
+ def repo_exists(self, namespace_name, repository_name):
+ """
+ Returns true if a repo exists and false if not
+ """
+
+ @abstractmethod
+ def create_repo(self, namespace, name, creating_user, description, visibility='private',
+ repo_kind='image'):
+ """
+ Creates a new repository
+ """
+
+ @abstractmethod
+ def get_repo_list(self, starred, user, repo_kind, namespace, username, public, page_token,
+ last_modified, popularity):
+ """
+ Returns a list of RepositoryBaseElements and a token for the next page
+ """
+
+ @abstractmethod
+ def set_repository_visibility(self, namespace_name, repository_name, visibility):
+ """
+ Sets a repository's visibility if it is found
+ """
+
+ @abstractmethod
+ def set_trust(self, namespace_name, repository_name, trust):
+ """
+ Sets a repository's trust_enabled field if it is found
+ """
+
+ @abstractmethod
+ def set_description(self, namespace_name, repository_name, description):
+ """
+ Sets a repository's description if it is found.
+ """
+
+ @abstractmethod
+ def purge_repository(self, namespace_name, repository_name):
+ """
+ Removes a repository
+ """
+
+ @abstractmethod
+ def check_repository_usage(self, user_name, plan_found):
+ """
+ Creates a notification for a user if they are over or under on their repository usage
+ """
+
+ @abstractmethod
+ def set_repository_state(self, namespace_name, repository_name, state):
+ """
+    Sets the state of the repository.
+ """
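# A minimal sketch of how endpoint code consumes this interface; it is wired to the concrete
# pre_oci_model defined in the next file. The namespace and repository names are placeholders.
from endpoints.api.repository_models_pre_oci import pre_oci_model as model

if model.repo_exists('devtable', 'simple'):
  model.set_description('devtable', 'simple', 'demo repository')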
diff --git a/endpoints/api/repository_models_pre_oci.py b/endpoints/api/repository_models_pre_oci.py
new file mode 100644
index 000000000..328c5443e
--- /dev/null
+++ b/endpoints/api/repository_models_pre_oci.py
@@ -0,0 +1,190 @@
+from collections import defaultdict
+
+from datetime import datetime, timedelta
+
+from auth.permissions import ReadRepositoryPermission
+from data.database import Repository as RepositoryTable, RepositoryState
+from data import model
+from data.appr_model import channel as channel_model, release as release_model
+from data.registry_model import registry_model
+from data.registry_model.datatypes import RepositoryReference
+from endpoints.appr.models_cnr import model as appr_model
+from endpoints.api.repository_models_interface import RepositoryDataInterface, RepositoryBaseElement, Repository, \
+ ApplicationRepository, ImageRepositoryRepository, Tag, Channel, Release, Count
+
+MAX_DAYS_IN_3_MONTHS = 92
+REPOS_PER_PAGE = 100
+
+
+def _create_channel(channel, releases_channels_map):
+ releases_channels_map[channel.linked_tag.name].append(channel.name)
+ return Channel(channel.name, channel.linked_tag.name, channel.linked_tag.lifetime_start)
+
+
+class PreOCIModel(RepositoryDataInterface):
+ """
+  PreOCIModel implements the data model for the Repository API using a database schema
+ before it was changed to support the OCI specification.
+ """
+
+ def check_repository_usage(self, username, plan_found):
+ private_repos = model.user.get_private_repo_count(username)
+ if plan_found is None:
+ repos_allowed = 0
+ else:
+ repos_allowed = plan_found['privateRepos']
+
+ user_or_org = model.user.get_namespace_user(username)
+ if private_repos > repos_allowed:
+ model.notification.create_unique_notification('over_private_usage', user_or_org,
+ {'namespace': username})
+ else:
+ model.notification.delete_notifications_by_kind(user_or_org, 'over_private_usage')
+
+ def purge_repository(self, namespace_name, repository_name):
+ model.gc.purge_repository(namespace_name, repository_name)
+ user = model.user.get_namespace_user(namespace_name)
+ return user.username
+
+ def set_description(self, namespace_name, repository_name, description):
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ model.repository.set_description(repo, description)
+
+ def set_trust(self, namespace_name, repository_name, trust):
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ model.repository.set_trust(repo, trust)
+
+ def set_repository_visibility(self, namespace_name, repository_name, visibility):
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ model.repository.set_repository_visibility(repo, visibility)
+
+ def set_repository_state(self, namespace_name, repository_name, state):
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ model.repository.set_repository_state(repo, state)
+
+ def get_repo_list(self, starred, user, repo_kind, namespace, username, public, page_token,
+ last_modified, popularity):
+ next_page_token = None
+ # Lookup the requested repositories (either starred or non-starred.)
+ if starred:
+ # Return the full list of repos starred by the current user that are still visible to them.
+ def can_view_repo(repo):
+ can_view = ReadRepositoryPermission(repo.namespace_user.username, repo.name).can()
+ return can_view or model.repository.is_repository_public(repo)
+
+ unfiltered_repos = model.repository.get_user_starred_repositories(user,
+ kind_filter=repo_kind)
+ repos = [repo for repo in unfiltered_repos if can_view_repo(repo)]
+ elif namespace:
+ # Repositories filtered by namespace do not need pagination (their results are fairly small),
+ # so we just do the lookup directly.
+ repos = list(
+ model.repository.get_visible_repositories(username=username, include_public=public,
+ namespace=namespace, kind_filter=repo_kind))
+ else:
+ # Determine the starting offset for pagination. Note that we don't use the normal
+ # model.modelutil.paginate method here, as that does not operate over UNION queries, which
+ # get_visible_repositories will return if there is a logged-in user (for performance reasons).
+ #
+ # Also note the +1 on the limit, as paginate_query uses the extra result to determine whether
+ # there is a next page.
+ start_id = model.modelutil.pagination_start(page_token)
+ repo_query = model.repository.get_visible_repositories(
+ username=username, include_public=public, start_id=start_id, limit=REPOS_PER_PAGE + 1,
+ kind_filter=repo_kind)
+
+ repos, next_page_token = model.modelutil.paginate_query(repo_query, limit=REPOS_PER_PAGE,
+ sort_field_name='rid')
+
+    # Collect the IDs of the repositories found for subsequent lookup of popularity
+ # and/or last modified.
+ last_modified_map = {}
+ action_sum_map = {}
+ if last_modified or popularity:
+ repository_refs = [RepositoryReference.for_id(repo.rid) for repo in repos]
+ repository_ids = [repo.rid for repo in repos]
+
+ if last_modified:
+ last_modified_map = registry_model.get_most_recent_tag_lifetime_start(repository_refs)
+
+ if popularity:
+ action_sum_map = model.log.get_repositories_action_sums(repository_ids)
+
+ # Collect the IDs of the repositories that are starred for the user, so we can mark them
+ # in the returned results.
+ star_set = set()
+ if username:
+ starred_repos = model.repository.get_user_starred_repositories(user)
+ star_set = {starred.id for starred in starred_repos}
+
+ return [
+ RepositoryBaseElement(repo.namespace_user.username, repo.name, repo.id in star_set,
+ repo.visibility_id == model.repository.get_public_repo_visibility().id,
+ repo_kind, repo.description, repo.namespace_user.organization,
+ repo.namespace_user.removed_tag_expiration_s,
+ last_modified_map.get(repo.rid),
+ action_sum_map.get(repo.rid), last_modified, popularity, username,
+ None, repo.state)
+ for repo in repos
+ ], next_page_token
+
+ def repo_exists(self, namespace_name, repository_name):
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if repo is None:
+ return False
+
+ return True
+
+ def create_repo(self, namespace_name, repository_name, owner, description, visibility='private',
+ repo_kind='image'):
+ repo = model.repository.create_repository(namespace_name, repository_name, owner, visibility,
+ repo_kind=repo_kind, description=description)
+ return Repository(namespace_name, repository_name)
+
+ def get_repo(self, namespace_name, repository_name, user, include_tags=True, max_tags=500):
+ repo = model.repository.get_repository(namespace_name, repository_name)
+ if repo is None:
+ return None
+
+ is_starred = model.repository.repository_is_starred(user, repo) if user else False
+ is_public = model.repository.is_repository_public(repo)
+ kind_name = RepositoryTable.kind.get_name(repo.kind_id)
+ base = RepositoryBaseElement(
+ namespace_name, repository_name, is_starred, is_public, kind_name, repo.description,
+ repo.namespace_user.organization, repo.namespace_user.removed_tag_expiration_s, None, None,
+ False, False, False, repo.namespace_user.stripe_id is None, repo.state)
+
+ if base.kind_name == 'application':
+ channels = channel_model.get_repo_channels(repo, appr_model.models_ref)
+ releases = release_model.get_release_objs(repo, appr_model.models_ref)
+ releases_channels_map = defaultdict(list)
+ return ApplicationRepository(
+ base, [_create_channel(channel, releases_channels_map) for channel in channels], [
+ Release(release.name, release.lifetime_start, releases_channels_map)
+ for release in releases
+ ], repo.state)
+
+ tags = None
+ repo_ref = RepositoryReference.for_repo_obj(repo)
+ if include_tags:
+ tags, _ = registry_model.list_repository_tag_history(repo_ref, page=1, size=max_tags,
+ active_tags_only=True)
+ tags = [
+ Tag(tag.name,
+ tag.legacy_image.docker_image_id if tag.legacy_image_if_present else None,
+ tag.legacy_image.aggregate_size if tag.legacy_image_if_present else None,
+ tag.lifetime_start_ts,
+ tag.manifest_digest,
+ tag.lifetime_end_ts) for tag in tags
+ ]
+
+ start_date = datetime.now() - timedelta(days=MAX_DAYS_IN_3_MONTHS)
+ counts = model.log.get_repository_action_counts(repo, start_date)
+
+ assert repo.state is not None
+ return ImageRepositoryRepository(base, tags,
+ [Count(count.date, count.count) for count in counts],
+ repo.badge_token, repo.trust_enabled, repo.state)
+
+
+pre_oci_model = PreOCIModel()
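# A minimal sketch of paging through repositories with the model above; `user` is a placeholder
# for an authenticated user object, and the arguments mirror get_repo_list's signature.
page_token = None
while True:
  repos, page_token = pre_oci_model.get_repo_list(
      False, user, 'image', None, user.username, True, page_token,
      last_modified=True, popularity=True)
  for repo in repos:
    print(repo)
  if page_token is None:
    break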
diff --git a/endpoints/api/repositorynotification.py b/endpoints/api/repositorynotification.py
new file mode 100644
index 000000000..c34cbc553
--- /dev/null
+++ b/endpoints/api/repositorynotification.py
@@ -0,0 +1,164 @@
+""" List, create and manage repository events/notifications. """
+
+import logging
+from flask import request
+
+from endpoints.api import (
+ RepositoryParamResource, nickname, resource, require_repo_admin, log_action,
+ validate_json_request, request_error, path_param, disallow_for_app_repositories, InvalidRequest)
+from endpoints.exception import NotFound
+from notifications.models_interface import Repository
+from notifications.notificationevent import NotificationEvent
+from notifications.notificationmethod import (
+ NotificationMethod, CannotValidateNotificationMethodException)
+from endpoints.api.repositorynotification_models_pre_oci import pre_oci_model as model
+
+logger = logging.getLogger(__name__)
+
+
+@resource('/v1/repository/<apirepopath:repository>/notification/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryNotificationList(RepositoryParamResource):
+ """ Resource for dealing with listing and creating notifications on a repository. """
+ schemas = {
+ 'NotificationCreateRequest': {
+ 'type': 'object',
+ 'description': 'Information for creating a notification on a repository',
+ 'required': [
+ 'event',
+ 'method',
+ 'config',
+ 'eventConfig',
+ ],
+ 'properties': {
+ 'event': {
+ 'type': 'string',
+ 'description': 'The event on which the notification will respond',
+ },
+ 'method': {
+ 'type': 'string',
+ 'description': 'The method of notification (such as email or web callback)',
+ },
+ 'config': {
+ 'type': 'object',
+ 'description': 'JSON config information for the specific method of notification'
+ },
+ 'eventConfig': {
+ 'type': 'object',
+ 'description': 'JSON config information for the specific event of notification',
+ },
+ 'title': {
+ 'type': 'string',
+ 'description': 'The human-readable title of the notification',
+ },
+ }
+ },
+ }
+
+ @require_repo_admin
+ @nickname('createRepoNotification')
+ @disallow_for_app_repositories
+ @validate_json_request('NotificationCreateRequest')
+ def post(self, namespace_name, repository_name):
+ parsed = request.get_json()
+
+ method_handler = NotificationMethod.get_method(parsed['method'])
+ try:
+ method_handler.validate(namespace_name, repository_name, parsed['config'])
+ except CannotValidateNotificationMethodException as ex:
+ raise request_error(message=ex.message)
+
+ new_notification = model.create_repo_notification(namespace_name, repository_name,
+ parsed['event'], parsed['method'],
+ parsed['config'], parsed['eventConfig'],
+ parsed.get('title'))
+
+ log_action('add_repo_notification', namespace_name, {
+ 'repo': repository_name,
+ 'namespace': namespace_name,
+ 'notification_id': new_notification.uuid,
+ 'event': new_notification.event_name,
+ 'method': new_notification.method_name}, repo_name=repository_name)
+ return new_notification.to_dict(), 201
+
+ @require_repo_admin
+ @nickname('listRepoNotifications')
+ @disallow_for_app_repositories
+ def get(self, namespace_name, repository_name):
+ """ List the notifications for the specified repository. """
+ notifications = model.list_repo_notifications(namespace_name, repository_name)
+ return {'notifications': [n.to_dict() for n in notifications]}
+
+
+@resource('/v1/repository/<apirepopath:repository>/notification/<uuid>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('uuid', 'The UUID of the notification')
+class RepositoryNotification(RepositoryParamResource):
+ """ Resource for dealing with specific notifications. """
+
+ @require_repo_admin
+ @nickname('getRepoNotification')
+ @disallow_for_app_repositories
+ def get(self, namespace_name, repository_name, uuid):
+ """ Get information for the specified notification. """
+ found = model.get_repo_notification(uuid)
+ if not found:
+ raise NotFound()
+ return found.to_dict()
+
+ @require_repo_admin
+ @nickname('deleteRepoNotification')
+ @disallow_for_app_repositories
+ def delete(self, namespace_name, repository_name, uuid):
+ """ Deletes the specified notification. """
+ deleted = model.delete_repo_notification(namespace_name, repository_name, uuid)
+ if not deleted:
+ raise InvalidRequest("No repository notification found for: %s, %s, %s" %
+ (namespace_name, repository_name, uuid))
+
+ log_action('delete_repo_notification', namespace_name, {
+ 'repo': repository_name,
+ 'namespace': namespace_name,
+ 'notification_id': uuid,
+ 'event': deleted.event_name,
+ 'method': deleted.method_name}, repo_name=repository_name)
+
+ return 'No Content', 204
+
+ @require_repo_admin
+ @nickname('resetRepositoryNotificationFailures')
+ @disallow_for_app_repositories
+ def post(self, namespace_name, repository_name, uuid):
+ """ Resets repository notification to 0 failures. """
+ reset = model.reset_notification_number_of_failures(namespace_name, repository_name, uuid)
+ if not reset:
+ raise InvalidRequest("No repository notification found for: %s, %s, %s" %
+ (namespace_name, repository_name, uuid))
+
+ log_action('reset_repo_notification', namespace_name, {
+ 'repo': repository_name,
+ 'namespace': namespace_name,
+ 'notification_id': uuid,
+ 'event': reset.event_name,
+ 'method': reset.method_name}, repo_name=repository_name)
+
+ return 'No Content', 204
+
+
+@resource('/v1/repository/<apirepopath:repository>/notification/<uuid>/test')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('uuid', 'The UUID of the notification')
+class TestRepositoryNotification(RepositoryParamResource):
+ """ Resource for queuing a test of a notification. """
+
+ @require_repo_admin
+ @nickname('testRepoNotification')
+ @disallow_for_app_repositories
+ def post(self, namespace_name, repository_name, uuid):
+ """ Queues a test notification for this repository. """
+ test_note = model.queue_test_notification(uuid)
+ if not test_note:
+ raise InvalidRequest("No repository notification found for: %s, %s, %s" %
+ (namespace_name, repository_name, uuid))
+
+ return {}, 200
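# A minimal sketch of creating a notification through the endpoint above, assuming a
# hypothetical Quay host, OAuth token and repository; the event and method names here are
# illustrative and follow the NotificationCreateRequest schema.
import requests

payload = {
    'event': 'repo_push',
    'method': 'webhook',
    'config': {'url': 'https://example.com/hook'},
    'eventConfig': {},
    'title': 'Notify CI on push',
}
resp = requests.post(
    'https://quay.example.com/api/v1/repository/devtable/simple/notification/',
    json=payload,
    headers={'Authorization': 'Bearer <oauth-token>'})
print(resp.status_code)  # 201 when the notification is created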
diff --git a/endpoints/api/repositorynotification_models_interface.py b/endpoints/api/repositorynotification_models_interface.py
new file mode 100644
index 000000000..ed0ebd2f7
--- /dev/null
+++ b/endpoints/api/repositorynotification_models_interface.py
@@ -0,0 +1,146 @@
+import json
+
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+
+from six import add_metaclass
+
+
+class RepositoryNotification(
+ namedtuple('RepositoryNotification', [
+ 'uuid',
+ 'title',
+ 'event_name',
+ 'method_name',
+ 'config_json',
+ 'event_config_json',
+ 'number_of_failures',
+ ])):
+ """
+ RepositoryNotification represents a notification for a repository.
+ :type uuid: string
+ :type event: string
+ :type method: string
+ :type config: string
+ :type title: string
+ :type event_config: string
+ :type number_of_failures: int
+ """
+
+ def to_dict(self):
+ try:
+ config = json.loads(self.config_json)
+ except ValueError:
+ config = {}
+
+ try:
+ event_config = json.loads(self.event_config_json)
+ except ValueError:
+ event_config = {}
+
+ return {
+ 'uuid': self.uuid,
+ 'title': self.title,
+ 'event': self.event_name,
+ 'method': self.method_name,
+ 'config': config,
+ 'event_config': event_config,
+ 'number_of_failures': self.number_of_failures,
+ }
+
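# A minimal sketch showing that to_dict tolerates malformed JSON in the stored config
# columns and falls back to empty dicts; the values are made up.
note = RepositoryNotification(uuid='uuid-1', title='push hook', event_name='repo_push',
                              method_name='webhook', config_json='not-json',
                              event_config_json='{"ref": "refs/heads/master"}',
                              number_of_failures=0)
print(note.to_dict()['config'])        # {} -- invalid JSON is swallowed
print(note.to_dict()['event_config'])  # {'ref': 'refs/heads/master'}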
+
+@add_metaclass(ABCMeta)
+class RepoNotificationInterface(object):
+ """
+ Interface that represents all data store interactions required by the RepositoryNotification API
+ """
+
+ @abstractmethod
+ def create_repo_notification(self, namespace_name, repository_name, event_name, method_name,
+ method_config, event_config, title=None):
+ """
+
+ Args:
+ namespace_name: namespace of repository
+ repository_name: name of repository
+ event_name: name of event
+ method_name: name of method
+ method_config: method config, json string
+ event_config: event config, json string
+ title: title of the notification
+
+ Returns:
+ RepositoryNotification object
+
+ """
+ pass
+
+ @abstractmethod
+ def list_repo_notifications(self, namespace_name, repository_name, event_name=None):
+ """
+
+ Args:
+ namespace_name: namespace of repository
+ repository_name: name of repository
+ event_name: name of event
+
+ Returns:
+ list(RepositoryNotification)
+ """
+ pass
+
+ @abstractmethod
+ def get_repo_notification(self, uuid):
+ """
+
+ Args:
+ uuid: uuid of notification
+
+ Returns:
+ RepositoryNotification or None
+
+ """
+ pass
+
+ @abstractmethod
+ def delete_repo_notification(self, namespace_name, repository_name, uuid):
+ """
+
+ Args:
+ namespace_name: namespace of repository
+ repository_name: name of repository
+ uuid: uuid of notification
+
+ Returns:
+ RepositoryNotification or None
+
+ """
+ pass
+
+ @abstractmethod
+ def reset_notification_number_of_failures(self, namespace_name, repository_name, uuid):
+ """
+
+ Args:
+ namespace_name: namespace of repository
+ repository_name: name of repository
+ uuid: uuid of notification
+
+ Returns:
+ RepositoryNotification
+
+ """
+ pass
+
+ @abstractmethod
+ def queue_test_notification(self, uuid):
+ """
+
+ Args:
+ uuid: uuid of notification
+
+ Returns:
+ RepositoryNotification or None
+
+ """
+ pass
diff --git a/endpoints/api/repositorynotification_models_pre_oci.py b/endpoints/api/repositorynotification_models_pre_oci.py
new file mode 100644
index 000000000..b3edf43ae
--- /dev/null
+++ b/endpoints/api/repositorynotification_models_pre_oci.py
@@ -0,0 +1,72 @@
+import json
+
+from app import notification_queue
+from data import model
+from data.model import InvalidNotificationException
+from endpoints.api.repositorynotification_models_interface import (RepoNotificationInterface,
+ RepositoryNotification)
+from notifications import build_notification_data
+from notifications.notificationevent import NotificationEvent
+
+
+class RepoNotificationPreOCIModel(RepoNotificationInterface):
+ def create_repo_notification(self, namespace_name, repository_name, event_name, method_name,
+ method_config, event_config, title=None):
+ repository = model.repository.get_repository(namespace_name, repository_name)
+ return self._notification(
+ model.notification.create_repo_notification(repository, event_name, method_name,
+ method_config, event_config, title))
+
+ def list_repo_notifications(self, namespace_name, repository_name, event_name=None):
+ return [
+ self._notification(n)
+ for n in model.notification.list_repo_notifications(namespace_name, repository_name,
+ event_name)]
+
+ def get_repo_notification(self, uuid):
+ try:
+ found = model.notification.get_repo_notification(uuid)
+ except InvalidNotificationException:
+ return None
+ return self._notification(found)
+
+ def delete_repo_notification(self, namespace_name, repository_name, uuid):
+ try:
+ found = model.notification.delete_repo_notification(namespace_name, repository_name, uuid)
+ except InvalidNotificationException:
+ return None
+ return self._notification(found)
+
+ def reset_notification_number_of_failures(self, namespace_name, repository_name, uuid):
+ return self._notification(
+ model.notification.reset_notification_number_of_failures(namespace_name, repository_name,
+ uuid))
+
+ def queue_test_notification(self, uuid):
+ try:
+ notification = model.notification.get_repo_notification(uuid)
+ except InvalidNotificationException:
+ return None
+
+ event_config = json.loads(notification.event_config_json or '{}')
+ event_info = NotificationEvent.get_event(notification.event.name)
+ sample_data = event_info.get_sample_data(notification.repository.namespace_user.username,
+ notification.repository.name, event_config)
+ notification_data = build_notification_data(notification, sample_data)
+ notification_queue.put([
+ notification.repository.namespace_user.username, notification.uuid, notification.event.name],
+ json.dumps(notification_data))
+ return self._notification(notification)
+
+ def _notification(self, notification):
+ if not notification:
+ return None
+
+ return RepositoryNotification(
+ uuid=notification.uuid, title=notification.title, event_name=notification.event.name,
+ method_name=notification.method.name, config_json=notification.config_json,
+ event_config_json=notification.event_config_json,
+ number_of_failures=notification.number_of_failures)
+
+
+pre_oci_model = RepoNotificationPreOCIModel()
diff --git a/endpoints/api/repotoken.py b/endpoints/api/repotoken.py
new file mode 100644
index 000000000..efa25a2fb
--- /dev/null
+++ b/endpoints/api/repotoken.py
@@ -0,0 +1,100 @@
+""" Manage repository access tokens (DEPRECATED). """
+
+import logging
+
+from endpoints.api import (resource, nickname, require_repo_admin, RepositoryParamResource,
+ validate_json_request, path_param)
+
+logger = logging.getLogger(__name__)
+
+@resource('/v1/repository/<apirepopath:repository>/tokens/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositoryTokenList(RepositoryParamResource):
+ """ Resource for creating and listing repository tokens. """
+ schemas = {
+ 'NewToken': {
+ 'type': 'object',
+ 'description': 'Description of a new token.',
+ 'required':[
+ 'friendlyName',
+ ],
+ 'properties': {
+ 'friendlyName': {
+ 'type': 'string',
+ 'description': 'Friendly name to help identify the token',
+ },
+ },
+ },
+ }
+
+ @require_repo_admin
+ @nickname('listRepoTokens')
+ def get(self, namespace_name, repo_name):
+ """ List the tokens for the specified repository. """
+ return {
+ 'message': 'Handling of access tokens is no longer supported',
+ }, 410
+
+
+ @require_repo_admin
+ @nickname('createToken')
+ @validate_json_request('NewToken')
+ def post(self, namespace_name, repo_name):
+ """ Create a new repository token. """
+ return {
+ 'message': 'Creation of access tokens is no longer supported',
+ }, 410
+
+
+@resource('/v1/repository/<apirepopath:repository>/tokens/<code>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('code', 'The token code')
+class RepositoryToken(RepositoryParamResource):
+ """ Resource for managing individual tokens. """
+ schemas = {
+ 'TokenPermission': {
+ 'type': 'object',
+ 'description': 'Description of a token permission',
+ 'required': [
+ 'role',
+ ],
+ 'properties': {
+ 'role': {
+ 'type': 'string',
+ 'description': 'Role to use for the token',
+ 'enum': [
+ 'read',
+ 'write',
+ 'admin',
+ ],
+ },
+ },
+ },
+ }
+
+ @require_repo_admin
+ @nickname('getTokens')
+ def get(self, namespace_name, repo_name, code):
+ """ Fetch the specified repository token information. """
+ return {
+ 'message': 'Handling of access tokens is no longer supported',
+ }, 410
+
+
+ @require_repo_admin
+ @nickname('changeToken')
+ @validate_json_request('TokenPermission')
+ def put(self, namespace_name, repo_name, code):
+ """ Update the permissions for the specified repository token. """
+ return {
+ 'message': 'Handling of access tokens is no longer supported',
+ }, 410
+
+
+ @require_repo_admin
+ @nickname('deleteToken')
+ def delete(self, namespace_name, repo_name, code):
+ """ Delete the repository token. """
+ return {
+ 'message': 'Handling of access tokens is no longer supported',
+ }, 410
diff --git a/endpoints/api/robot.py b/endpoints/api/robot.py
new file mode 100644
index 000000000..867329323
--- /dev/null
+++ b/endpoints/api/robot.py
@@ -0,0 +1,274 @@
+""" Manage user and organization robot accounts. """
+
+from endpoints.api import (resource, nickname, ApiResource, log_action, related_user_resource,
+ require_user_admin, require_scope, path_param, parse_args,
+ truthy_bool, query_param, validate_json_request, max_json_size)
+from endpoints.api.robot_models_pre_oci import pre_oci_model as model
+from endpoints.exception import Unauthorized
+from auth.permissions import AdministerOrganizationPermission, OrganizationMemberPermission
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from util.names import format_robot_username
+from flask import abort, request
+
+
+CREATE_ROBOT_SCHEMA = {
+ 'type': 'object',
+ 'description': 'Optional data for creating a robot',
+ 'properties': {
+ 'description': {
+ 'type': 'string',
+ 'description': 'Optional text description for the robot',
+ 'maxLength': 255,
+ },
+ 'unstructured_metadata': {
+ 'type': 'object',
+ 'description': 'Optional unstructured metadata for the robot',
+ },
+ },
+}
+
+ROBOT_MAX_SIZE = 1024 * 1024  # 1 MB.
+
+
+def robots_list(prefix, include_permissions=False, include_token=False, limit=None):
+ robots = model.list_entity_robot_permission_teams(prefix, limit=limit,
+ include_token=include_token,
+ include_permissions=include_permissions)
+ return {'robots': [robot.to_dict(include_token=include_token) for robot in robots]}
+
+
+@resource('/v1/user/robots')
+class UserRobotList(ApiResource):
+ """ Resource for listing user robots. """
+
+ @require_user_admin
+ @nickname('getUserRobots')
+ @parse_args()
+ @query_param('permissions',
+ 'Whether to include repositories and teams in which the robots have permission.',
+ type=truthy_bool, default=False)
+ @query_param('token',
+ 'If false, the robot\'s token is not returned.',
+ type=truthy_bool, default=True)
+ @query_param('limit',
+ 'If specified, the number of robots to return.',
+ type=int, default=None)
+ def get(self, parsed_args):
+ """ List the available robots for the user. """
+ user = get_authenticated_user()
+ return robots_list(user.username, include_token=parsed_args.get('token', True),
+ include_permissions=parsed_args.get('permissions', False),
+ limit=parsed_args.get('limit'))
+
+
+@resource('/v1/user/robots/<robot_shortname>')
+@path_param('robot_shortname',
+ 'The short name for the robot, without any user or organization prefix')
+class UserRobot(ApiResource):
+ """ Resource for managing a user's robots. """
+ schemas = {
+ 'CreateRobot': CREATE_ROBOT_SCHEMA,
+ }
+
+ @require_user_admin
+ @nickname('getUserRobot')
+ def get(self, robot_shortname):
+ """ Returns the user's robot with the specified name. """
+ parent = get_authenticated_user()
+ robot = model.get_user_robot(robot_shortname, parent)
+ return robot.to_dict(include_metadata=True, include_token=True)
+
+ @require_user_admin
+ @nickname('createUserRobot')
+ @max_json_size(ROBOT_MAX_SIZE)
+ @validate_json_request('CreateRobot', optional=True)
+ def put(self, robot_shortname):
+ """ Create a new user robot with the specified name. """
+ parent = get_authenticated_user()
+ create_data = request.get_json() or {}
+ robot = model.create_user_robot(robot_shortname, parent, create_data.get('description'),
+ create_data.get('unstructured_metadata'))
+ log_action('create_robot', parent.username, {
+ 'robot': robot_shortname,
+ 'description': create_data.get('description'),
+ 'unstructured_metadata': create_data.get('unstructured_metadata'),
+ })
+ return robot.to_dict(include_metadata=True, include_token=True), 201
+
+ @require_user_admin
+ @nickname('deleteUserRobot')
+ def delete(self, robot_shortname):
+ """ Delete an existing robot. """
+ parent = get_authenticated_user()
+ model.delete_robot(format_robot_username(parent.username, robot_shortname))
+ log_action('delete_robot', parent.username, {'robot': robot_shortname})
+ return '', 204
+
+
+@resource('/v1/organization/<orgname>/robots')
+@path_param('orgname', 'The name of the organization')
+@related_user_resource(UserRobotList)
+class OrgRobotList(ApiResource):
+ """ Resource for listing an organization's robots. """
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrgRobots')
+ @parse_args()
+ @query_param('permissions',
+                'Whether to include repositories and teams in which the robots have permission.',
+ type=truthy_bool, default=False)
+ @query_param('token',
+ 'If false, the robot\'s token is not returned.',
+ type=truthy_bool, default=True)
+ @query_param('limit',
+ 'If specified, the number of robots to return.',
+ type=int, default=None)
+ def get(self, orgname, parsed_args):
+ """ List the organization's robots. """
+ permission = OrganizationMemberPermission(orgname)
+ if permission.can():
+ include_token = (AdministerOrganizationPermission(orgname).can() and
+ parsed_args.get('token', True))
+ include_permissions = (AdministerOrganizationPermission(orgname).can() and
+ parsed_args.get('permissions', False))
+ return robots_list(orgname, include_permissions=include_permissions,
+ include_token=include_token,
+ limit=parsed_args.get('limit'))
+
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/robots/<robot_shortname>')
+@path_param('orgname', 'The name of the organization')
+@path_param('robot_shortname',
+ 'The short name for the robot, without any user or organization prefix')
+@related_user_resource(UserRobot)
+class OrgRobot(ApiResource):
+ """ Resource for managing an organization's robots. """
+ schemas = {
+ 'CreateRobot': CREATE_ROBOT_SCHEMA,
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('getOrgRobot')
+ def get(self, orgname, robot_shortname):
+ """ Returns the organization's robot with the specified name. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ robot = model.get_org_robot(robot_shortname, orgname)
+ return robot.to_dict(include_metadata=True, include_token=True)
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('createOrgRobot')
+ @max_json_size(ROBOT_MAX_SIZE)
+ @validate_json_request('CreateRobot', optional=True)
+ def put(self, orgname, robot_shortname):
+ """ Create a new robot in the organization. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ create_data = request.get_json() or {}
+ robot = model.create_org_robot(robot_shortname, orgname, create_data.get('description'),
+ create_data.get('unstructured_metadata'))
+ log_action('create_robot', orgname, {
+ 'robot': robot_shortname,
+ 'description': create_data.get('description'),
+ 'unstructured_metadata': create_data.get('unstructured_metadata'),
+ })
+ return robot.to_dict(include_metadata=True, include_token=True), 201
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('deleteOrgRobot')
+ def delete(self, orgname, robot_shortname):
+ """ Delete an existing organization robot. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ model.delete_robot(format_robot_username(orgname, robot_shortname))
+ log_action('delete_robot', orgname, {'robot': robot_shortname})
+ return '', 204
+
+ raise Unauthorized()
+
+
+@resource('/v1/user/robots/<robot_shortname>/permissions')
+@path_param('robot_shortname',
+ 'The short name for the robot, without any user or organization prefix')
+class UserRobotPermissions(ApiResource):
+ """ Resource for listing the permissions a user's robot has in the system. """
+
+ @require_user_admin
+ @nickname('getUserRobotPermissions')
+ def get(self, robot_shortname):
+ """ Returns the list of repository permissions for the user's robot. """
+ parent = get_authenticated_user()
+ robot = model.get_user_robot(robot_shortname, parent)
+ permissions = model.list_robot_permissions(robot.name)
+
+ return {
+ 'permissions': [permission.to_dict() for permission in permissions]
+ }
+
+
+@resource('/v1/organization/<orgname>/robots/<robot_shortname>/permissions')
+@path_param('orgname', 'The name of the organization')
+@path_param('robot_shortname',
+ 'The short name for the robot, without any user or organization prefix')
+@related_user_resource(UserRobotPermissions)
+class OrgRobotPermissions(ApiResource):
+ """ Resource for listing the permissions an org's robot has in the system. """
+
+ @require_user_admin
+ @nickname('getOrgRobotPermissions')
+ def get(self, orgname, robot_shortname):
+ """ Returns the list of repository permissions for the org's robot. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ robot = model.get_org_robot(robot_shortname, orgname)
+ permissions = model.list_robot_permissions(robot.name)
+
+ return {
+ 'permissions': [permission.to_dict() for permission in permissions]
+ }
+
+ abort(403)
+
+
+@resource('/v1/user/robots/<robot_shortname>/regenerate')
+@path_param('robot_shortname',
+ 'The short name for the robot, without any user or organization prefix')
+class RegenerateUserRobot(ApiResource):
+  """ Resource for regenerating a user robot's token. """
+
+ @require_user_admin
+ @nickname('regenerateUserRobotToken')
+ def post(self, robot_shortname):
+ """ Regenerates the token for a user's robot. """
+ parent = get_authenticated_user()
+ robot = model.regenerate_user_robot_token(robot_shortname, parent)
+ log_action('regenerate_robot_token', parent.username, {'robot': robot_shortname})
+ return robot.to_dict(include_token=True)
+
+
+@resource('/v1/organization/<orgname>/robots/<robot_shortname>/regenerate')
+@path_param('orgname', 'The name of the organization')
+@path_param('robot_shortname',
+ 'The short name for the robot, without any user or organization prefix')
+@related_user_resource(RegenerateUserRobot)
+class RegenerateOrgRobot(ApiResource):
+  """ Resource for regenerating an organization robot's token. """
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('regenerateOrgRobotToken')
+ def post(self, orgname, robot_shortname):
+ """ Regenerates the token for an organization robot. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ robot = model.regenerate_org_robot_token(robot_shortname, orgname)
+ log_action('regenerate_robot_token', orgname, {'robot': robot_shortname})
+ return robot.to_dict(include_token=True)
+
+ raise Unauthorized()
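# A minimal sketch of creating a robot through the PUT endpoints above, assuming a
# hypothetical Quay host and OAuth token; the robot name and metadata are placeholders.
import requests

resp = requests.put(
    'https://quay.example.com/api/v1/user/robots/ci_builder',
    json={'description': 'robot used by CI', 'unstructured_metadata': {'team': 'infra'}},
    headers={'Authorization': 'Bearer <oauth-token>'})
print(resp.status_code)          # 201 on success
print(resp.json().get('token'))  # the token is returned because include_token=True above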
diff --git a/endpoints/api/robot_models_interface.py b/endpoints/api/robot_models_interface.py
new file mode 100644
index 000000000..c4a07d304
--- /dev/null
+++ b/endpoints/api/robot_models_interface.py
@@ -0,0 +1,196 @@
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+
+from six import add_metaclass
+
+from endpoints.api import format_date
+
+
+class Permission(namedtuple('Permission', ['repository_name', 'repository_visibility_name', 'role_name'])):
+ """
+  Permission represents the relationship between a robot and a repository, including
+  whether that robot can see the repo.
+ """
+
+ def to_dict(self):
+ return {
+ 'repository': {
+ 'name': self.repository_name,
+ 'is_public': self.repository_visibility_name == 'public'
+ },
+ 'role': self.role_name
+ }
+
+
+class Team(namedtuple('Team', ['name', 'avatar'])):
+ """
+ Team represents a team entry for a robot list entry.
+ :type name: string
+ :type avatar: {string -> string}
+ """
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'avatar': self.avatar,
+ }
+
+
+class RobotWithPermissions(
+ namedtuple('RobotWithPermissions', [
+ 'name',
+ 'password',
+ 'created',
+ 'last_accessed',
+ 'teams',
+ 'repository_names',
+ 'description',
+ ])):
+ """
+  RobotWithPermissions represents a robot entry along with its teams and repository permissions.
+ :type name: string
+ :type password: string
+ :type created: datetime|None
+ :type last_accessed: datetime|None
+ :type teams: [Team]
+ :type repository_names: [string]
+ :type description: string
+ """
+
+ def to_dict(self, include_token=False):
+ data = {
+ 'name': self.name,
+ 'created': format_date(self.created) if self.created is not None else None,
+ 'last_accessed': format_date(self.last_accessed) if self.last_accessed is not None else None,
+ 'teams': [team.to_dict() for team in self.teams],
+ 'repositories': self.repository_names,
+ 'description': self.description,
+ }
+
+ if include_token:
+ data['token'] = self.password
+
+ return data
+
+
+class Robot(
+ namedtuple('Robot', [
+ 'name',
+ 'password',
+ 'created',
+ 'last_accessed',
+ 'description',
+ 'unstructured_metadata',
+ ])):
+ """
+ Robot represents a robot entity.
+ :type name: string
+ :type password: string
+ :type created: datetime|None
+ :type last_accessed: datetime|None
+ :type description: string
+ :type unstructured_metadata: dict
+ """
+
+ def to_dict(self, include_metadata=False, include_token=False):
+ data = {
+ 'name': self.name,
+ 'created': format_date(self.created) if self.created is not None else None,
+ 'last_accessed': format_date(self.last_accessed) if self.last_accessed is not None else None,
+ 'description': self.description,
+ }
+
+ if include_token:
+ data['token'] = self.password
+
+ if include_metadata:
+ data['unstructured_metadata'] = self.unstructured_metadata
+
+ return data
+
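# A minimal sketch of how the include_token / include_metadata flags shape the serialized
# robot; the values are made up.
robot = Robot(name='devtable+ci', password='generated-token', created=None,
              last_accessed=None, description='CI robot',
              unstructured_metadata={'team': 'infra'})
print(robot.to_dict())                                           # neither token nor metadata
print(robot.to_dict(include_token=True, include_metadata=True))  # adds 'token' and 'unstructured_metadata'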
+
+@add_metaclass(ABCMeta)
+class RobotInterface(object):
+ """
+ Interface that represents all data store interactions required by the Robot API
+ """
+
+ @abstractmethod
+ def get_org_robot(self, robot_shortname, orgname):
+ """
+
+ Returns:
+ Robot object
+
+ """
+
+ @abstractmethod
+ def get_user_robot(self, robot_shortname, owning_user):
+ """
+
+ Returns:
+ Robot object
+
+ """
+
+ @abstractmethod
+  def create_user_robot(self, robot_shortname, owning_user, description, unstructured_metadata):
+ """
+
+ Returns:
+ Robot object
+
+ """
+
+ @abstractmethod
+  def create_org_robot(self, robot_shortname, orgname, description, unstructured_metadata):
+ """
+
+ Returns:
+ Robot object
+
+ """
+
+ @abstractmethod
+ def delete_robot(self, robot_username):
+ """
+
+ Returns:
+ Robot object
+
+ """
+
+ @abstractmethod
+ def regenerate_user_robot_token(self, robot_shortname, owning_user):
+ """
+
+ Returns:
+ Robot object
+
+ """
+
+ @abstractmethod
+ def regenerate_org_robot_token(self, robot_shortname, orgname):
+ """
+
+ Returns:
+ Robot object
+
+ """
+
+ @abstractmethod
+ def list_entity_robot_permission_teams(self, prefix, include_permissions=False,
+ include_token=False, limit=None):
+ """
+
+ Returns:
+ list of RobotWithPermissions objects
+
+ """
+
+ @abstractmethod
+ def list_robot_permissions(self, username):
+ """
+
+ Returns:
+      list of Permission objects
+
+ """
diff --git a/endpoints/api/robot_models_pre_oci.py b/endpoints/api/robot_models_pre_oci.py
new file mode 100644
index 000000000..ad83decdf
--- /dev/null
+++ b/endpoints/api/robot_models_pre_oci.py
@@ -0,0 +1,123 @@
+import features
+
+from app import avatar
+from data import model
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from data.database import (User, FederatedLogin, RobotAccountToken, Team as TeamTable, Repository,
+ RobotAccountMetadata)
+from endpoints.api.robot_models_interface import (RobotInterface, Robot, RobotWithPermissions, Team,
+ Permission)
+
+
+class RobotPreOCIModel(RobotInterface):
+ def list_robot_permissions(self, username):
+ permissions = model.permission.list_robot_permissions(username)
+ return [Permission(permission.repository.name, permission.repository.visibility.name, permission.role.name) for
+ permission in permissions]
+
+ def list_entity_robot_permission_teams(self, prefix, include_token=False,
+ include_permissions=False, limit=None):
+ tuples = model.user.list_entity_robot_permission_teams(prefix, limit=limit,
+ include_permissions=include_permissions)
+ robots = {}
+ robot_teams = set()
+
+ for robot_tuple in tuples:
+ robot_name = robot_tuple.get(User.username)
+ if robot_name not in robots:
+ token = None
+ if include_token:
+ # TODO(remove-unenc): Remove branches once migrated.
+ if robot_tuple.get(RobotAccountToken.token):
+ token = robot_tuple.get(RobotAccountToken.token).decrypt()
+
+ if token is None and ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
+ token = robot_tuple.get(FederatedLogin.service_ident)
+ assert not token.startswith('robot:')
+
+ robot_dict = {
+ 'name': robot_name,
+ 'token': token,
+ 'created': robot_tuple.get(User.creation_date),
+ 'last_accessed': (robot_tuple.get(User.last_accessed)
+ if features.USER_LAST_ACCESSED else None),
+ 'description': robot_tuple.get(RobotAccountMetadata.description),
+ 'unstructured_metadata': robot_tuple.get(RobotAccountMetadata.unstructured_json),
+ }
+
+ if include_permissions:
+ robot_dict.update({
+ 'teams': [],
+ 'repositories': [],
+ })
+
+ robots[robot_name] = Robot(robot_dict['name'], robot_dict['token'], robot_dict['created'],
+ robot_dict['last_accessed'], robot_dict['description'],
+ robot_dict['unstructured_metadata'])
+ if include_permissions:
+ team_name = robot_tuple.get(TeamTable.name)
+ repository_name = robot_tuple.get(Repository.name)
+
+ if team_name is not None:
+ check_key = robot_name + ':' + team_name
+ if check_key not in robot_teams:
+ robot_teams.add(check_key)
+
+ robot_dict['teams'].append(Team(
+ team_name,
+ avatar.get_data(team_name, team_name, 'team')
+ ))
+
+ if repository_name is not None:
+ if repository_name not in robot_dict['repositories']:
+ robot_dict['repositories'].append(repository_name)
+ robots[robot_name] = RobotWithPermissions(robot_dict['name'], robot_dict['token'],
+ robot_dict['created'],
+ (robot_dict['last_accessed']
+ if features.USER_LAST_ACCESSED else None),
+ robot_dict['teams'],
+ robot_dict['repositories'],
+ robot_dict['description'])
+
+ return robots.values()
+
+ def regenerate_user_robot_token(self, robot_shortname, owning_user):
+ robot, password, metadata = model.user.regenerate_robot_token(robot_shortname, owning_user)
+ return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
+ metadata.description, metadata.unstructured_json)
+
+ def regenerate_org_robot_token(self, robot_shortname, orgname):
+ parent = model.organization.get_organization(orgname)
+ robot, password, metadata = model.user.regenerate_robot_token(robot_shortname, parent)
+ return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
+ metadata.description, metadata.unstructured_json)
+
+ def delete_robot(self, robot_username):
+ model.user.delete_robot(robot_username)
+
+ def create_user_robot(self, robot_shortname, owning_user, description, unstructured_metadata):
+ robot, password = model.user.create_robot(robot_shortname, owning_user, description or '',
+ unstructured_metadata)
+ return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
+ description or '', unstructured_metadata)
+
+ def create_org_robot(self, robot_shortname, orgname, description, unstructured_metadata):
+ parent = model.organization.get_organization(orgname)
+ robot, password = model.user.create_robot(robot_shortname, parent, description or '',
+ unstructured_metadata)
+ return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
+ description or '', unstructured_metadata)
+
+ def get_org_robot(self, robot_shortname, orgname):
+ parent = model.organization.get_organization(orgname)
+ robot, password, metadata = model.user.get_robot_and_metadata(robot_shortname, parent)
+ return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
+ metadata.description, metadata.unstructured_json)
+
+ def get_user_robot(self, robot_shortname, owning_user):
+ robot, password, metadata = model.user.get_robot_and_metadata(robot_shortname, owning_user)
+ return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
+ metadata.description, metadata.unstructured_json)
+
+
+pre_oci_model = RobotPreOCIModel()
diff --git a/endpoints/api/search.py b/endpoints/api/search.py
new file mode 100644
index 000000000..0ddbbc3fa
--- /dev/null
+++ b/endpoints/api/search.py
@@ -0,0 +1,382 @@
+""" Conduct searches against all registry context. """
+
+import features
+
+from endpoints.api import (ApiResource, parse_args, query_param, truthy_bool, nickname, resource,
+ require_scope, path_param, internal_only, Unauthorized, InvalidRequest,
+ show_if)
+from data.database import Repository
+from data import model
+from data.registry_model import registry_model
+from auth.permissions import (OrganizationMemberPermission, ReadRepositoryPermission,
+                              UserAdminPermission, AdministerOrganizationPermission)
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from app import app, avatar, authentication
+from flask import abort
+from operator import itemgetter
+from stringscore import liquidmetal
+from util.names import parse_robot_username
+
+import anunidecode # Don't listen to pylint's lies. This import is required.
+import math
+
+
+ENTITY_SEARCH_SCORE = 1
+TEAM_SEARCH_SCORE = 2
+REPOSITORY_SEARCH_SCORE = 4
+
+
+@resource('/v1/entities/link/<username>')
+@internal_only
+class LinkExternalEntity(ApiResource):
+ """ Resource for linking external entities to internal users. """
+ @nickname('linkExternalUser')
+ def post(self, username):
+ if not authentication.federated_service:
+ abort(404)
+
+ # Only allowed if there is a logged in user.
+ if not get_authenticated_user():
+ raise Unauthorized()
+
+ # Try to link the user with the given *external* username, to an internal record.
+ (user, err_msg) = authentication.link_user(username)
+ if user is None:
+ raise InvalidRequest(err_msg, payload={'username': username})
+
+ return {
+ 'entity': {
+ 'name': user.username,
+ 'kind': 'user',
+ 'is_robot': False,
+ 'avatar': avatar.get_data_for_user(user)
+ }
+ }
+
+
+@resource('/v1/entities/<prefix>')
+class EntitySearch(ApiResource):
+ """ Resource for searching entities. """
+ @path_param('prefix', 'The prefix of the entities being looked up')
+ @parse_args()
+ @query_param('namespace', 'Namespace to use when querying for org entities.', type=str,
+ default='')
+ @query_param('includeTeams', 'Whether to include team names.', type=truthy_bool, default=False)
+ @query_param('includeOrgs', 'Whether to include orgs names.', type=truthy_bool, default=False)
+ @nickname('getMatchingEntities')
+ def get(self, prefix, parsed_args):
+ """ Get a list of entities that match the specified prefix. """
+
+ # Ensure we don't have any unicode characters in the search, as it breaks the search. Nothing
+ # being searched can have unicode in it anyway, so this is a safe operation.
+ prefix = prefix.encode('unidecode', 'ignore').replace(' ', '').lower()
+
+ teams = []
+ org_data = []
+
+ namespace_name = parsed_args['namespace']
+ robot_namespace = None
+ organization = None
+
+ try:
+ organization = model.organization.get_organization(namespace_name)
+
+ # namespace name was an org
+ permission = OrganizationMemberPermission(namespace_name)
+ if permission.can():
+ robot_namespace = namespace_name
+
+ if parsed_args['includeTeams']:
+ teams = model.team.get_matching_teams(prefix, organization)
+
+      if (parsed_args['includeOrgs'] and AdministerOrganizationPermission(namespace_name).can() and
+ namespace_name.startswith(prefix)):
+ org_data = [{
+ 'name': namespace_name,
+ 'kind': 'org',
+ 'is_org_member': True,
+ 'avatar': avatar.get_data_for_org(organization),
+ }]
+
+ except model.organization.InvalidOrganizationException:
+ # namespace name was a user
+ user = get_authenticated_user()
+ if user and user.username == namespace_name:
+ # Check if there is admin user permissions (login only)
+ admin_permission = UserAdminPermission(user.username)
+ if admin_permission.can():
+ robot_namespace = namespace_name
+
+ # Lookup users in the database for the prefix query.
+ users = model.user.get_matching_users(prefix, robot_namespace, organization, limit=10,
+ exact_matches_only=not features.PARTIAL_USER_AUTOCOMPLETE)
+
+ # Lookup users via the user system for the prefix query. We'll filter out any users that
+ # already exist in the database.
+ external_users, federated_id, _ = authentication.query_users(prefix, limit=10)
+ filtered_external_users = []
+ if external_users and federated_id is not None:
+ users = list(users)
+ user_ids = [user.id for user in users]
+
+ # Filter the users if any are already found via the database. We do so by looking up all
+ # the found users in the federated user system.
+ federated_query = model.user.get_federated_logins(user_ids, federated_id)
+ found = {result.service_ident for result in federated_query}
+      filtered_external_users = [user for user in external_users if user.username not in found]
+
+ def entity_team_view(team):
+ result = {
+ 'name': team.name,
+ 'kind': 'team',
+ 'is_org_member': True,
+ 'avatar': avatar.get_data_for_team(team)
+ }
+ return result
+
+ def user_view(user):
+ user_json = {
+ 'name': user.username,
+ 'kind': 'user',
+ 'is_robot': user.robot,
+ 'avatar': avatar.get_data_for_user(user)
+ }
+
+ if organization is not None:
+ user_json['is_org_member'] = user.robot or user.is_org_member
+
+ return user_json
+
+ def external_view(user):
+ result = {
+ 'name': user.username,
+ 'kind': 'external',
+ 'title': user.email or '',
+ 'avatar': avatar.get_data_for_external_user(user)
+ }
+ return result
+
+ team_data = [entity_team_view(team) for team in teams]
+ user_data = [user_view(user) for user in users]
+ external_data = [external_view(user) for user in filtered_external_users]
+
+ return {
+ 'results': team_data + user_data + org_data + external_data
+ }
+
+
+def search_entity_view(username, entity, get_short_name=None):
+ kind = 'user'
+ title = 'user'
+ avatar_data = avatar.get_data_for_user(entity)
+ href = '/user/' + entity.username
+
+ if entity.organization:
+ kind = 'organization'
+ title = 'org'
+ avatar_data = avatar.get_data_for_org(entity)
+ href = '/organization/' + entity.username
+ elif entity.robot:
+ parts = parse_robot_username(entity.username)
+ if parts[0] == username:
+ href = '/user/' + username + '?tab=robots&showRobot=' + entity.username
+ else:
+ href = '/organization/' + parts[0] + '?tab=robots&showRobot=' + entity.username
+
+ kind = 'robot'
+ title = 'robot'
+ avatar_data = None
+
+ data = {
+ 'title': title,
+ 'kind': kind,
+ 'avatar': avatar_data,
+ 'name': entity.username,
+ 'score': ENTITY_SEARCH_SCORE,
+ 'href': href
+ }
+
+ if get_short_name:
+ data['short_name'] = get_short_name(entity.username)
+
+ return data
+
+
+def conduct_team_search(username, query, encountered_teams, results):
+ """ Finds the matching teams where the user is a member. """
+ matching_teams = model.team.get_matching_user_teams(query, get_authenticated_user(), limit=5)
+ for team in matching_teams:
+ if team.id in encountered_teams:
+ continue
+
+ encountered_teams.add(team.id)
+
+ results.append({
+ 'kind': 'team',
+ 'name': team.name,
+ 'organization': search_entity_view(username, team.organization),
+ 'avatar': avatar.get_data_for_team(team),
+ 'score': TEAM_SEARCH_SCORE,
+ 'href': '/organization/' + team.organization.username + '/teams/' + team.name
+ })
+
+
+def conduct_admined_team_search(username, query, encountered_teams, results):
+  """ Finds matching teams in organizations administered by the user. """
+ matching_teams = model.team.get_matching_admined_teams(query, get_authenticated_user(), limit=5)
+ for team in matching_teams:
+ if team.id in encountered_teams:
+ continue
+
+ encountered_teams.add(team.id)
+
+ results.append({
+ 'kind': 'team',
+ 'name': team.name,
+ 'organization': search_entity_view(username, team.organization),
+ 'avatar': avatar.get_data_for_team(team),
+ 'score': TEAM_SEARCH_SCORE,
+ 'href': '/organization/' + team.organization.username + '/teams/' + team.name
+ })
+
+
+def conduct_repo_search(username, query, results, offset=0, limit=5):
+ """ Finds matching repositories. """
+ matching_repos = model.repository.get_filtered_matching_repositories(query, username, limit=limit,
+ repo_kind=None,
+ offset=offset)
+
+ for repo in matching_repos:
+ # TODO: make sure the repo.kind.name doesn't cause extra queries
+ results.append(repo_result_view(repo, username))
+
+
+def conduct_namespace_search(username, query, results):
+ """ Finds matching users and organizations. """
+ matching_entities = model.user.get_matching_user_namespaces(query, username, limit=5)
+ for entity in matching_entities:
+ results.append(search_entity_view(username, entity))
+
+
+def conduct_robot_search(username, query, results):
+ """ Finds matching robot accounts. """
+ def get_short_name(name):
+ return parse_robot_username(name)[1]
+
+ matching_robots = model.user.get_matching_robots(query, username, limit=5)
+ for robot in matching_robots:
+ results.append(search_entity_view(username, robot, get_short_name))
+
+
+def repo_result_view(repo, username, last_modified=None, stars=None, popularity=None):
+ kind = 'application' if Repository.kind.get_name(repo.kind_id) == 'application' else 'repository'
+ view = {
+ 'kind': kind,
+ 'title': 'app' if kind == 'application' else 'repo',
+ 'namespace': search_entity_view(username, repo.namespace_user),
+ 'name': repo.name,
+ 'description': repo.description,
+ 'is_public': model.repository.is_repository_public(repo),
+ 'score': REPOSITORY_SEARCH_SCORE,
+ 'href': '/' + kind + '/' + repo.namespace_user.username + '/' + repo.name
+ }
+
+ if last_modified is not None:
+ view['last_modified'] = last_modified
+
+ if stars is not None:
+ view['stars'] = stars
+
+ if popularity is not None:
+ view['popularity'] = popularity
+
+ return view
+
+@resource('/v1/find/all')
+class ConductSearch(ApiResource):
+ """ Resource for finding users, repositories, teams, etc. """
+ @parse_args()
+ @query_param('query', 'The search query.', type=str, default='')
+ @require_scope(scopes.READ_REPO)
+ @nickname('conductSearch')
+ def get(self, parsed_args):
+ """ Get a list of entities and resources that match the specified query. """
+ query = parsed_args['query']
+ if not query:
+ return {'results': []}
+
+ username = None
+ results = []
+
+ if get_authenticated_user():
+ username = get_authenticated_user().username
+
+ # Search for teams.
+ encountered_teams = set()
+ conduct_team_search(username, query, encountered_teams, results)
+ conduct_admined_team_search(username, query, encountered_teams, results)
+
+ # Search for robot accounts.
+ conduct_robot_search(username, query, results)
+
+ # Search for repos.
+ conduct_repo_search(username, query, results)
+
+ # Search for users and orgs.
+ conduct_namespace_search(username, query, results)
+
+ # Modify the results' scores via how close the query term is to each result's name.
+ for result in results:
+ name = result.get('short_name', result['name'])
+ lm_score = liquidmetal.score(name, query) or 0.5
+ result['score'] = result['score'] * lm_score
+
+ return {'results': sorted(results, key=itemgetter('score'), reverse=True)}
+
+
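# A minimal sketch of the liquidmetal re-ranking above: each result's base score is scaled by
# how closely its name matches the query, with 0.5 used when there is no match at all.
from stringscore import liquidmetal

base_score = REPOSITORY_SEARCH_SCORE                 # 4 for repositories
lm_score = liquidmetal.score('quayadmin', 'quay') or 0.5
print(base_score * lm_score)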
+MAX_PER_PAGE = app.config.get('SEARCH_RESULTS_PER_PAGE', 10)
+MAX_RESULT_PAGE_COUNT = app.config.get('SEARCH_MAX_RESULT_PAGE_COUNT', 10)
+
+@resource('/v1/find/repositories')
+class ConductRepositorySearch(ApiResource):
+ """ Resource for finding repositories. """
+ @parse_args()
+ @query_param('query', 'The search query.', type=str, default='')
+ @query_param('page', 'The page.', type=int, default=1)
+ @nickname('conductRepoSearch')
+ def get(self, parsed_args):
+ """ Get a list of apps and repositories that match the specified query. """
+ query = parsed_args['query']
+ page = min(max(1, parsed_args['page']), MAX_RESULT_PAGE_COUNT)
+ offset = (page - 1) * MAX_PER_PAGE
+ limit = offset + MAX_PER_PAGE + 1
+
+ username = get_authenticated_user().username if get_authenticated_user() else None
+
+ # Lookup matching repositories.
+ matching_repos = list(model.repository.get_filtered_matching_repositories(query, username,
+ repo_kind=None,
+ limit=limit,
+ offset=offset))
+
+ # Load secondary information such as last modified time, star count and action count.
+ repository_ids = [repo.id for repo in matching_repos]
+ last_modified_map = registry_model.get_most_recent_tag_lifetime_start(matching_repos)
+ star_map = model.repository.get_stars(repository_ids)
+ action_sum_map = model.log.get_repositories_action_sums(repository_ids)
+
+ # Build the results list.
+ results = [repo_result_view(repo, username, last_modified_map.get(repo.id),
+ star_map.get(repo.id, 0),
+ float(action_sum_map.get(repo.id, 0)))
+ for repo in matching_repos]
+
+ return {
+ 'results': results[0:MAX_PER_PAGE],
+ 'has_additional': len(results) > MAX_PER_PAGE,
+ 'page': page,
+ 'page_size': MAX_PER_PAGE,
+ 'start_index': offset,
+ }
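# A minimal sketch of the page/offset/limit arithmetic above, with the default
# MAX_PER_PAGE of 10; fetching one extra row is what drives 'has_additional'.
page = 3
offset = (page - 1) * 10   # 20: skip the first two pages
limit = offset + 10 + 1    # 31: one row beyond the page signals that another page exists
print(offset, limit)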
diff --git a/endpoints/api/secscan.py b/endpoints/api/secscan.py
new file mode 100644
index 000000000..71422184f
--- /dev/null
+++ b/endpoints/api/secscan.py
@@ -0,0 +1,108 @@
+""" List and manage repository vulnerabilities and other security information. """
+
+import logging
+import features
+
+from app import app, secscan_api
+from auth.decorators import process_basic_auth_no_pass
+from data.registry_model import registry_model
+from data.registry_model.datatypes import SecurityScanStatus
+from endpoints.api import (require_repo_read, path_param,
+ RepositoryParamResource, resource, nickname, show_if, parse_args,
+ query_param, truthy_bool, disallow_for_app_repositories)
+from endpoints.exception import NotFound, DownstreamIssue
+from endpoints.api.manifest import MANIFEST_DIGEST_ROUTE
+from util.secscan.api import APIRequestFailure
+
+
+logger = logging.getLogger(__name__)
+
+def _security_info(manifest_or_legacy_image, include_vulnerabilities=True):
+ """ Returns a dict representing the result of a call to the security status API for the given
+ manifest or image.
+ """
+ status = registry_model.get_security_status(manifest_or_legacy_image)
+ if status is None:
+ raise NotFound()
+
+ if status != SecurityScanStatus.SCANNED:
+ return {
+ 'status': status.value,
+ }
+
+ try:
+ if include_vulnerabilities:
+ data = secscan_api.get_layer_data(manifest_or_legacy_image, include_vulnerabilities=True)
+ else:
+ data = secscan_api.get_layer_data(manifest_or_legacy_image, include_features=True)
+ except APIRequestFailure as arf:
+ raise DownstreamIssue(arf.message)
+
+ if data is None:
+ # If no data was found but we reached this point, then it indicates we have incorrect security
+ # status for the manifest or legacy image. Mark the manifest or legacy image as unindexed
+ # so it automatically gets re-indexed.
+ if app.config.get('REGISTRY_STATE', 'normal') == 'normal':
+ registry_model.reset_security_status(manifest_or_legacy_image)
+
+ return {
+ 'status': SecurityScanStatus.QUEUED.value,
+ }
+
+ return {
+ 'status': status.value,
+ 'data': data,
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/image/<imageid>/security')
+@show_if(features.SECURITY_SCANNER)
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('imageid', 'The image ID')
+class RepositoryImageSecurity(RepositoryParamResource):
+ """ Operations for managing the vulnerabilities in a repository image. """
+
+ @process_basic_auth_no_pass
+ @require_repo_read
+ @nickname('getRepoImageSecurity')
+ @disallow_for_app_repositories
+ @parse_args()
+  @query_param('vulnerabilities', 'Include vulnerability information', type=truthy_bool,
+ default=False)
+ def get(self, namespace, repository, imageid, parsed_args):
+ """ Fetches the features and vulnerabilities (if any) for a repository image. """
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ legacy_image = registry_model.get_legacy_image(repo_ref, imageid)
+ if legacy_image is None:
+ raise NotFound()
+
+ return _security_info(legacy_image, parsed_args.vulnerabilities)
+
+
+@resource(MANIFEST_DIGEST_ROUTE + '/security')
+@show_if(features.SECURITY_SCANNER)
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('manifestref', 'The digest of the manifest')
+class RepositoryManifestSecurity(RepositoryParamResource):
+ """ Operations for managing the vulnerabilities in a repository manifest. """
+
+ @process_basic_auth_no_pass
+ @require_repo_read
+ @nickname('getRepoManifestSecurity')
+ @disallow_for_app_repositories
+ @parse_args()
+  @query_param('vulnerabilities', 'Include vulnerability information', type=truthy_bool,
+ default=False)
+  def get(self, namespace, repository, manifestref, parsed_args):
+    """ Fetches the features and vulnerabilities (if any) for a repository manifest. """
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref, allow_dead=True)
+ if manifest is None:
+ raise NotFound()
+
+ return _security_info(manifest, parsed_args.vulnerabilities)
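For reference, a hedged client-side sketch of calling the manifest security endpoint with `requests`. The host, repository, digest, and token are placeholders, the `/api` prefix assumes the usual mounting of the API blueprint, and the `'scanned'` string assumes `SecurityScanStatus.SCANNED.value`; the response shapes follow `_security_info` above (`status` only until scanned, `status` plus `data` afterwards).

```python
# Illustrative client only; QUAY_HOST, REPO, DIGEST and TOKEN are placeholders.
import requests

QUAY_HOST = 'https://quay.example.com'    # assumption
REPO = 'myorg/myrepo'                     # assumption
DIGEST = 'sha256:...'                     # placeholder digest
TOKEN = 'oauth-access-token'              # assumption

def manifest_security(include_vulnerabilities=True):
    url = '%s/api/v1/repository/%s/manifest/%s/security' % (QUAY_HOST, REPO, DIGEST)
    resp = requests.get(url,
                        params={'vulnerabilities': 'true' if include_vulnerabilities else 'false'},
                        headers={'Authorization': 'Bearer %s' % TOKEN})
    resp.raise_for_status()
    payload = resp.json()
    if payload['status'] != 'scanned':    # assumed value of SecurityScanStatus.SCANNED
        return payload['status'], None
    return payload['status'], payload['data']
```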
diff --git a/endpoints/api/signing.py b/endpoints/api/signing.py
new file mode 100644
index 000000000..eb2e942ec
--- /dev/null
+++ b/endpoints/api/signing.py
@@ -0,0 +1,29 @@
+""" List and manage repository signing information """
+
+import logging
+import features
+
+from app import tuf_metadata_api
+from endpoints.api import (require_repo_read, path_param,
+ RepositoryParamResource, resource, nickname, show_if,
+ disallow_for_app_repositories, NotFound)
+from endpoints.api.signing_models_pre_oci import pre_oci_model as model
+
+logger = logging.getLogger(__name__)
+
+
+@resource('/v1/repository/<apirepopath:repository>/signatures')
+@show_if(features.SIGNING)
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class RepositorySignatures(RepositoryParamResource):
+ """ Operations for managing the signatures in a repository image. """
+
+ @require_repo_read
+ @nickname('getRepoSignatures')
+ @disallow_for_app_repositories
+ def get(self, namespace, repository):
+ """ Fetches the list of signed tags for the repository. """
+ if not model.is_trust_enabled(namespace, repository):
+ raise NotFound()
+
+ return {'delegations': tuf_metadata_api.get_all_tags_with_expiration(namespace, repository)}
diff --git a/endpoints/api/signing_models_interface.py b/endpoints/api/signing_models_interface.py
new file mode 100644
index 000000000..6e5ce4ca4
--- /dev/null
+++ b/endpoints/api/signing_models_interface.py
@@ -0,0 +1,14 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+@add_metaclass(ABCMeta)
+class SigningInterface(object):
+ """
+ Interface that represents all data store interactions required by the signing API endpoint.
+ """
+ @abstractmethod
+ def is_trust_enabled(self, namespace_name, repo_name):
+ """
+ Returns whether the repository with the given namespace name and repository name exists and
+ has trust enabled.
+ """
diff --git a/endpoints/api/signing_models_pre_oci.py b/endpoints/api/signing_models_pre_oci.py
new file mode 100644
index 000000000..03afb1104
--- /dev/null
+++ b/endpoints/api/signing_models_pre_oci.py
@@ -0,0 +1,18 @@
+from data import model
+from endpoints.api.signing_models_interface import SigningInterface
+
+
+class PreOCIModel(SigningInterface):
+ """
+ PreOCIModel implements the data model for signing using a database schema
+ before it was changed to support the OCI specification.
+ """
+ def is_trust_enabled(self, namespace_name, repo_name):
+ repo = model.repository.get_repository(namespace_name, repo_name)
+ if repo is None:
+ return False
+
+ return repo.trust_enabled
+
+
+pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/subscribe.py b/endpoints/api/subscribe.py
new file mode 100644
index 000000000..b526e25d2
--- /dev/null
+++ b/endpoints/api/subscribe.py
@@ -0,0 +1,135 @@
+""" Subscribe to plans. """
+import logging
+import stripe
+import features
+from app import billing
+from endpoints.api import request_error, log_action
+from data.billing import PLANS
+from endpoints.api.subscribe_models_pre_oci import data_model as model
+from endpoints.exception import NotFound
+
+
+logger = logging.getLogger(__name__)
+
+
+def check_repository_usage(user_or_org, plan_found):
+ private_repos = model.get_private_repo_count(user_or_org.username)
+ if plan_found is None:
+ repos_allowed = 0
+ else:
+ repos_allowed = plan_found['privateRepos']
+
+ if private_repos > repos_allowed:
+ model.create_unique_notification('over_private_usage', user_or_org.username, {'namespace': user_or_org.username})
+ else:
+ model.delete_notifications_by_kind(user_or_org.username, 'over_private_usage')
+
+
+def carderror_response(exc):
+ return {'carderror': exc.message}, 402
+
+def connection_response(exc):
+ return {'message': 'Could not contact Stripe. Please try again.'}, 503
+
+
+def subscription_view(stripe_subscription, used_repos):
+ view = {
+ 'hasSubscription': True,
+ 'isExistingCustomer': True,
+ 'currentPeriodStart': stripe_subscription.current_period_start,
+ 'currentPeriodEnd': stripe_subscription.current_period_end,
+ 'plan': stripe_subscription.plan.id,
+ 'usedPrivateRepos': used_repos,
+ 'trialStart': stripe_subscription.trial_start,
+ 'trialEnd': stripe_subscription.trial_end
+ }
+
+ return view
+
+
+def subscribe(user, plan, token, require_business_plan):
+ if not features.BILLING:
+ return
+
+ plan_found = None
+ for plan_obj in PLANS:
+ if plan_obj['stripeId'] == plan:
+ plan_found = plan_obj
+
+ if not plan_found or plan_found['deprecated']:
+ logger.warning('Plan not found or deprecated: %s', plan)
+ raise NotFound()
+
+ if (require_business_plan and not plan_found['bus_features'] and not
+ plan_found['price'] == 0):
+ logger.warning('Business attempting to subscribe to personal plan: %s',
+ user.username)
+ raise request_error(message='No matching plan found')
+
+ private_repos = model.get_private_repo_count(user.username)
+
+ # This is the default response
+ response_json = {
+ 'plan': plan,
+ 'usedPrivateRepos': private_repos,
+ }
+ status_code = 200
+
+ if not user.stripe_id:
+    # This user has no Stripe customer yet; check whether they are subscribing to a paid plan
+ if not plan_found['price'] == 0:
+ # They want a real paying plan, create the customer and plan
+ # simultaneously
+ card = token
+
+ try:
+ cus = billing.Customer.create(email=user.email, plan=plan, card=card)
+ user.stripe_id = cus.id
+ user.save()
+ check_repository_usage(user, plan_found)
+ log_action('account_change_plan', user.username, {'plan': plan})
+ except stripe.error.CardError as e:
+ return carderror_response(e)
+ except stripe.error.APIConnectionError as e:
+ return connection_response(e)
+
+ response_json = subscription_view(cus.subscription, private_repos)
+ status_code = 201
+
+ else:
+ # Change the plan
+ try:
+ cus = billing.Customer.retrieve(user.stripe_id)
+ except stripe.error.APIConnectionError as e:
+ return connection_response(e)
+
+ if plan_found['price'] == 0:
+ if cus.subscription is not None:
+ # We only have to cancel the subscription if they actually have one
+ try:
+ cus.subscription.delete()
+ except stripe.error.APIConnectionError as e:
+ return connection_response(e)
+
+ check_repository_usage(user, plan_found)
+ log_action('account_change_plan', user.username, {'plan': plan})
+
+ else:
+ # User may have been a previous customer who is resubscribing
+ if token:
+ cus.card = token
+
+ cus.plan = plan
+
+ try:
+ cus.save()
+ except stripe.error.CardError as e:
+ return carderror_response(e)
+ except stripe.error.APIConnectionError as e:
+ return connection_response(e)
+
+ response_json = subscription_view(cus.subscription, private_repos)
+ check_repository_usage(user, plan_found)
+ log_action('account_change_plan', user.username, {'plan': plan})
+
+ return response_json, status_code
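The two decisions `subscribe` and `check_repository_usage` make repeatedly are matching the requested Stripe plan id against `PLANS` and deciding whether a namespace exceeds its private-repository allowance. A self-contained sketch of both, using invented plan entries rather than the real `data.billing.PLANS` table:

```python
# Invented plan data for illustration; real entries live in data.billing.PLANS.
FAKE_PLANS = [
    {'stripeId': 'free', 'price': 0, 'privateRepos': 0, 'deprecated': False, 'bus_features': False},
    {'stripeId': 'bus-small', 'price': 2500, 'privateRepos': 10, 'deprecated': False, 'bus_features': True},
]

def find_plan(plan_id, plans=FAKE_PLANS):
    """ Mirrors the lookup loop in subscribe(): last match wins, deprecated plans are rejected. """
    plan_found = None
    for plan in plans:
        if plan['stripeId'] == plan_id:
            plan_found = plan
    if plan_found is None or plan_found['deprecated']:
        return None
    return plan_found

def over_private_usage(private_repo_count, plan):
    """ Mirrors check_repository_usage(): no plan means zero private repositories allowed. """
    allowed = plan['privateRepos'] if plan else 0
    return private_repo_count > allowed

assert find_plan('bus-small')['privateRepos'] == 10
assert over_private_usage(3, find_plan('free'))
assert not over_private_usage(3, find_plan('bus-small'))
```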
diff --git a/endpoints/api/subscribe_models_interface.py b/endpoints/api/subscribe_models_interface.py
new file mode 100644
index 000000000..fbc7a8a70
--- /dev/null
+++ b/endpoints/api/subscribe_models_interface.py
@@ -0,0 +1,26 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+
+@add_metaclass(ABCMeta)
+class SubscribeInterface(object):
+ """
+ Interface that represents all data store interactions required by the subscribe API endpoint.
+ """
+ @abstractmethod
+ def get_private_repo_count(self, username):
+ """
+ Returns the number of private repositories for a given username or namespace.
+ """
+
+ @abstractmethod
+ def create_unique_notification(self, kind_name, target_username, metadata={}):
+ """
+ Creates a notification using the given parameters.
+ """
+
+ @abstractmethod
+ def delete_notifications_by_kind(self, target_username, kind_name):
+ """
+ Remove notifications for a target based on given kind.
+ """
diff --git a/endpoints/api/subscribe_models_pre_oci.py b/endpoints/api/subscribe_models_pre_oci.py
new file mode 100644
index 000000000..a5ca83149
--- /dev/null
+++ b/endpoints/api/subscribe_models_pre_oci.py
@@ -0,0 +1,23 @@
+from data.model.notification import create_unique_notification, delete_notifications_by_kind
+from data.model.user import get_private_repo_count, get_user_or_org
+from endpoints.api.subscribe_models_interface import SubscribeInterface
+
+
+class PreOCIModel(SubscribeInterface):
+ """
+  PreOCIModel implements the data model for plan subscription using a database schema
+ before it was changed to support the OCI specification.
+ """
+ def get_private_repo_count(self, username):
+ return get_private_repo_count(username)
+
+ def create_unique_notification(self, kind_name, target_username, metadata={}):
+ target = get_user_or_org(target_username)
+ create_unique_notification(kind_name, target, metadata)
+
+ def delete_notifications_by_kind(self, target_username, kind_name):
+ target = get_user_or_org(target_username)
+ delete_notifications_by_kind(target, kind_name)
+
+
+data_model = PreOCIModel()
diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py
new file mode 100644
index 000000000..a96a7356b
--- /dev/null
+++ b/endpoints/api/suconfig.py
@@ -0,0 +1,104 @@
+""" Superuser Config API. """
+
+import logging
+import os
+import signal
+import subprocess
+
+from flask import abort
+
+from app import app, config_provider
+from auth.permissions import SuperUserPermission
+from endpoints.api.suconfig_models_pre_oci import pre_oci_model as model
+from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if, verify_not_prod)
+
+import features
+
+
+logger = logging.getLogger(__name__)
+
+
+def database_is_valid():
+ """ Returns whether the database, as configured, is valid. """
+ if app.config['TESTING']:
+ return False
+
+ return model.is_valid()
+
+
+def database_has_users():
+ """ Returns whether the database has any users defined. """
+ return model.has_users()
+
+
+@resource('/v1/superuser/registrystatus')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserRegistryStatus(ApiResource):
+ """ Resource for determining the status of the registry, such as if config exists,
+ if a database is configured, and if it has any defined users.
+ """
+ @nickname('scRegistryStatus')
+ @verify_not_prod
+ def get(self):
+ """ Returns the status of the registry. """
+ # If we have SETUP_COMPLETE, then we're ready to go!
+ if app.config.get('SETUP_COMPLETE', False):
+ return {
+ 'provider_id': config_provider.provider_id,
+ 'status': 'ready'
+ }
+
+ return {
+ 'status': 'setup-incomplete'
+ }
+
+
+class _AlembicLogHandler(logging.Handler):
+ def __init__(self):
+ super(_AlembicLogHandler, self).__init__()
+ self.records = []
+
+ def emit(self, record):
+ self.records.append({
+ 'level': record.levelname,
+ 'message': record.getMessage()
+ })
+
+# From: https://stackoverflow.com/a/44712205
+def get_process_id(name):
+ """Return process ids found by (partial) name or regex.
+
+ >>> get_process_id('kthreadd')
+ [2]
+ >>> get_process_id('watchdog')
+ [10, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61] # ymmv
+ >>> get_process_id('non-existent process')
+ []
+ """
+ child = subprocess.Popen(['pgrep', name], stdout=subprocess.PIPE, shell=False)
+ response = child.communicate()[0]
+ return [int(pid) for pid in response.split()]
+
+
+@resource('/v1/superuser/shutdown')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserShutdown(ApiResource):
+ """ Resource for sending a shutdown signal to the container. """
+
+ @verify_not_prod
+ @nickname('scShutdownContainer')
+ def post(self):
+ """ Sends a signal to the phusion init system to shut down the container. """
+    # Note: This method may be called before any super users exist (for example, while the
+    # database configuration is still being set up), so we also allow it when there is no
+    # valid registry configuration.
+ if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can():
+ # Note: We skip if debugging locally.
+ if app.config.get('DEBUGGING') == True:
+ return {}
+
+ os.kill(get_process_id('my_init')[0], signal.SIGINT)
+ return {}
+
+ abort(403)
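The shutdown endpoint relies on the `pgrep`-based helper above to locate the `my_init` supervisor before signalling it. A standalone usage sketch (it requires `pgrep` on the host, and only prints what it would do rather than sending the signal):

```python
# Standalone demonstration of the pgrep lookup used by SuperUserShutdown.
import signal
import subprocess

def get_process_id(name):
    """ Return the PIDs reported by pgrep for the given (partial) process name. """
    child = subprocess.Popen(['pgrep', name], stdout=subprocess.PIPE, shell=False)
    response = child.communicate()[0]
    return [int(pid) for pid in response.split()]

pids = get_process_id('my_init')   # the phusion init process inside the container
if pids:
    # The endpoint sends SIGINT to ask the init system to shut the container down.
    print('would send signal %d to pid %d' % (signal.SIGINT, pids[0]))
else:
    print('my_init is not running on this host')
```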
diff --git a/endpoints/api/suconfig_models_interface.py b/endpoints/api/suconfig_models_interface.py
new file mode 100644
index 000000000..9f8cbd0cb
--- /dev/null
+++ b/endpoints/api/suconfig_models_interface.py
@@ -0,0 +1,39 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+
+@add_metaclass(ABCMeta)
+class SuperuserConfigDataInterface(object):
+ """
+ Interface that represents all data store interactions required by the superuser config API.
+ """
+
+ @abstractmethod
+ def is_valid(self):
+ """
+ Returns true if the configured database is valid.
+ """
+
+ @abstractmethod
+ def has_users(self):
+ """
+ Returns true if there are any users defined.
+ """
+
+ @abstractmethod
+ def create_superuser(self, username, password, email):
+ """
+ Creates a new superuser with the given username, password and email. Returns the user's UUID.
+ """
+
+ @abstractmethod
+ def has_federated_login(self, username, service_name):
+ """
+ Returns true if the matching user has a federated login under the matching service.
+ """
+
+ @abstractmethod
+ def attach_federated_login(self, username, service_name, federated_username):
+ """
+    Attaches a federated login to the matching user, under the given service.
+ """
diff --git a/endpoints/api/suconfig_models_pre_oci.py b/endpoints/api/suconfig_models_pre_oci.py
new file mode 100644
index 000000000..9bcb40acd
--- /dev/null
+++ b/endpoints/api/suconfig_models_pre_oci.py
@@ -0,0 +1,33 @@
+from data import model
+from data.database import User
+from endpoints.api.suconfig_models_interface import SuperuserConfigDataInterface
+
+class PreOCIModel(SuperuserConfigDataInterface):
+ def is_valid(self):
+ try:
+ list(User.select().limit(1))
+ return True
+ except:
+ return False
+
+ def has_users(self):
+ return bool(list(User.select().limit(1)))
+
+ def create_superuser(self, username, password, email):
+ return model.user.create_user(username, password, email, auto_verify=True).uuid
+
+ def has_federated_login(self, username, service_name):
+ user = model.user.get_user(username)
+ if user is None:
+ return False
+
+ return bool(model.user.lookup_federated_login(user, service_name))
+
+ def attach_federated_login(self, username, service_name, federated_username):
+ user = model.user.get_user(username)
+ if user is None:
+ return False
+
+ model.user.attach_federated_login(user, service_name, federated_username)
+
+pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/superuser.py b/endpoints/api/superuser.py
new file mode 100644
index 000000000..ec1a4992f
--- /dev/null
+++ b/endpoints/api/superuser.py
@@ -0,0 +1,856 @@
+""" Superuser API. """
+import logging
+import os
+import string
+import socket
+
+from datetime import datetime
+from random import SystemRandom
+
+from flask import request, make_response, jsonify
+
+import features
+
+from app import app, avatar, superusers, authentication, config_provider
+from auth import scopes
+from auth.auth_context import get_authenticated_user
+from auth.permissions import SuperUserPermission
+from data.database import ServiceKeyApprovalType
+from data.logs_model import logs_model
+from endpoints.api import (ApiResource, nickname, resource, validate_json_request,
+ internal_only, require_scope, show_if, parse_args,
+ query_param, require_fresh_login, path_param, verify_not_prod,
+ page_support, log_action, format_date, truthy_bool,
+ InvalidRequest, NotFound, Unauthorized, InvalidResponse)
+from endpoints.api.build import get_logs_or_log_url
+from endpoints.api.superuser_models_pre_oci import (pre_oci_model, ServiceKeyDoesNotExist,
+ ServiceKeyAlreadyApproved,
+ InvalidRepositoryBuildException)
+from endpoints.api.logs import _validate_logs_arguments
+from util.request import get_request_ip
+from util.useremails import send_confirmation_email, send_recovery_email
+from util.validation import validate_service_key_name
+from _init import ROOT_DIR
+
+logger = logging.getLogger(__name__)
+
+
+def get_immediate_subdirectories(directory):
+ return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))]
+
+
+def get_services():
+ services = set(get_immediate_subdirectories(app.config['SYSTEM_SERVICES_PATH']))
+ services = services - set(app.config['SYSTEM_SERVICE_BLACKLIST'])
+ return services
+
+
+@resource('/v1/superuser/aggregatelogs')
+@internal_only
+class SuperUserAggregateLogs(ApiResource):
+ """ Resource for fetching aggregated logs for the current user. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('listAllAggregateLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
+ def get(self, parsed_args):
+ """ Returns the aggregated logs for the current system. """
+ if SuperUserPermission().can():
+ (start_time, end_time) = _validate_logs_arguments(parsed_args['starttime'],
+ parsed_args['endtime'])
+ aggregated_logs = logs_model.get_aggregated_log_counts(start_time, end_time)
+ return {
+ 'aggregated': [log.to_dict() for log in aggregated_logs]
+ }
+
+ raise Unauthorized()
+
+LOGS_PER_PAGE = 20
+
+@resource('/v1/superuser/logs')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserLogs(ApiResource):
+ """ Resource for fetching all logs in the system. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('listAllLogs')
+ @parse_args()
+ @query_param('starttime', 'Earliest time from which to get logs (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs (%m/%d/%Y %Z)', type=str)
+ @query_param('page', 'The page number for the logs', type=int, default=1)
+ @page_support()
+ @require_scope(scopes.SUPERUSER)
+ def get(self, parsed_args, page_token):
+ """ List the usage logs for the current system. """
+ if SuperUserPermission().can():
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
+
+ (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
+ log_entry_page = logs_model.lookup_logs(start_time, end_time, page_token=page_token)
+ return {
+ 'start_time': format_date(start_time),
+ 'end_time': format_date(end_time),
+ 'logs': [log.to_dict(avatar, include_namespace=True) for log in log_entry_page.logs],
+ }, log_entry_page.next_page_token
+
+ raise Unauthorized()
+
+
+def org_view(org):
+ return {
+ 'name': org.username,
+ 'email': org.email,
+ 'avatar': avatar.get_data_for_org(org),
+ }
+
+
+def user_view(user, password=None):
+ user_data = {
+ 'kind': 'user',
+ 'name': user.username,
+ 'username': user.username,
+ 'email': user.email,
+ 'verified': user.verified,
+ 'avatar': avatar.get_data_for_user(user),
+ 'super_user': superusers.is_superuser(user.username),
+ 'enabled': user.enabled,
+ }
+
+ if password is not None:
+ user_data['encrypted_password'] = authentication.encrypt_user_password(password)
+
+ return user_data
+
+
+@resource('/v1/superuser/changelog/')
+@internal_only
+@show_if(features.SUPER_USERS)
+class ChangeLog(ApiResource):
+ """ Resource for returning the change log for enterprise customers. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('getChangeLog')
+ @require_scope(scopes.SUPERUSER)
+ def get(self):
+ """ Returns the change log for this installation. """
+ if SuperUserPermission().can():
+ with open(os.path.join(ROOT_DIR, 'CHANGELOG.md'), 'r') as f:
+ return {
+ 'log': f.read()
+ }
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/organizations/')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserOrganizationList(ApiResource):
+ """ Resource for listing organizations in the system. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('listAllOrganizations')
+ @require_scope(scopes.SUPERUSER)
+ def get(self):
+ """ Returns a list of all organizations in the system. """
+ if SuperUserPermission().can():
+ return {
+ 'organizations': [org.to_dict() for org in pre_oci_model.get_organizations()]
+ }
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/users/')
+@show_if(features.SUPER_USERS)
+class SuperUserList(ApiResource):
+ """ Resource for listing users in the system. """
+ schemas = {
+ 'CreateInstallUser': {
+ 'id': 'CreateInstallUser',
+ 'description': 'Data for creating a user',
+ 'required': ['username'],
+ 'properties': {
+ 'username': {
+ 'type': 'string',
+ 'description': 'The username of the user being created'
+ },
+
+ 'email': {
+ 'type': 'string',
+ 'description': 'The email address of the user being created'
+ }
+ }
+ }
+ }
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('listAllUsers')
+ @parse_args()
+ @query_param('disabled', 'If false, only enabled users will be returned.', type=truthy_bool,
+ default=True)
+ @require_scope(scopes.SUPERUSER)
+ def get(self, parsed_args):
+ """ Returns a list of all users in the system. """
+ if SuperUserPermission().can():
+ users = pre_oci_model.get_active_users(disabled=parsed_args['disabled'])
+ return {
+ 'users': [user.to_dict() for user in users]
+ }
+
+ raise Unauthorized()
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('createInstallUser')
+ @validate_json_request('CreateInstallUser')
+ @require_scope(scopes.SUPERUSER)
+ def post(self):
+ """ Creates a new user. """
+ # Ensure that we are using database auth.
+ if app.config['AUTHENTICATION_TYPE'] != 'Database':
+ raise InvalidRequest('Cannot create a user in a non-database auth system')
+
+ user_information = request.get_json()
+ if SuperUserPermission().can():
+ # Generate a temporary password for the user.
+ random = SystemRandom()
+ password = ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(32)])
+
+ # Create the user.
+ username = user_information['username']
+ email = user_information.get('email')
+ install_user, confirmation_code = pre_oci_model.create_install_user(username, password, email)
+ if features.MAILING:
+ send_confirmation_email(install_user.username, install_user.email, confirmation_code)
+
+ return {
+ 'username': username,
+ 'email': email,
+ 'password': password,
+ 'encrypted_password': authentication.encrypt_user_password(password),
+ }
+
+ raise Unauthorized()
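User creation above issues a 32-character temporary password drawn from `SystemRandom`, i.e. the operating system's CSPRNG rather than the default Mersenne Twister. A standalone sketch of just that generation step:

```python
# Sketch of the temporary-password generation used by createInstallUser.
import string
from random import SystemRandom

def temporary_password(length=32):
    random = SystemRandom()                          # OS-backed CSPRNG
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))

print(temporary_password())                          # e.g. 'Q7K2...' (32 characters)
```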
+
+
+@resource('/v1/superusers/users/<username>/sendrecovery')
+@internal_only
+@show_if(features.SUPER_USERS)
+@show_if(features.MAILING)
+class SuperUserSendRecoveryEmail(ApiResource):
+ """ Resource for sending a recovery user on behalf of a user. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('sendInstallUserRecoveryEmail')
+ @require_scope(scopes.SUPERUSER)
+ def post(self, username):
+ # Ensure that we are using database auth.
+ if app.config['AUTHENTICATION_TYPE'] != 'Database':
+ raise InvalidRequest('Cannot send a recovery e-mail for non-database auth')
+
+ if SuperUserPermission().can():
+ user = pre_oci_model.get_nonrobot_user(username)
+ if user is None:
+ raise NotFound()
+
+ if superusers.is_superuser(username):
+ raise InvalidRequest('Cannot send a recovery email for a superuser')
+
+ code = pre_oci_model.create_reset_password_email_code(user.email)
+ send_recovery_email(user.email, code)
+ return {
+ 'email': user.email
+ }
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/users/<username>')
+@path_param('username', 'The username of the user being managed')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserManagement(ApiResource):
+ """ Resource for managing users in the system. """
+ schemas = {
+ 'UpdateUser': {
+ 'id': 'UpdateUser',
+ 'type': 'object',
+ 'description': 'Description of updates for a user',
+ 'properties': {
+ 'password': {
+ 'type': 'string',
+ 'description': 'The new password for the user',
+ },
+ 'email': {
+ 'type': 'string',
+ 'description': 'The new e-mail address for the user',
+ },
+ 'enabled': {
+ 'type': 'boolean',
+ 'description': 'Whether the user is enabled'
+ }
+ },
+ },
+ }
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('getInstallUser')
+ @require_scope(scopes.SUPERUSER)
+ def get(self, username):
+ """ Returns information about the specified user. """
+ if SuperUserPermission().can():
+ user = pre_oci_model.get_nonrobot_user(username)
+ if user is None:
+ raise NotFound()
+
+ return user.to_dict()
+
+ raise Unauthorized()
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('deleteInstallUser')
+ @require_scope(scopes.SUPERUSER)
+ def delete(self, username):
+ """ Deletes the specified user. """
+ if SuperUserPermission().can():
+ user = pre_oci_model.get_nonrobot_user(username)
+ if user is None:
+ raise NotFound()
+
+ if superusers.is_superuser(username):
+ raise InvalidRequest('Cannot delete a superuser')
+
+ pre_oci_model.mark_user_for_deletion(username)
+ return '', 204
+
+ raise Unauthorized()
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('changeInstallUser')
+ @validate_json_request('UpdateUser')
+ @require_scope(scopes.SUPERUSER)
+ def put(self, username):
+ """ Updates information about the specified user. """
+ if SuperUserPermission().can():
+ user = pre_oci_model.get_nonrobot_user(username)
+ if user is None:
+ raise NotFound()
+
+ if superusers.is_superuser(username):
+ raise InvalidRequest('Cannot update a superuser')
+
+ user_data = request.get_json()
+ if 'password' in user_data:
+ # Ensure that we are using database auth.
+ if app.config['AUTHENTICATION_TYPE'] != 'Database':
+ raise InvalidRequest('Cannot change password in non-database auth')
+
+ pre_oci_model.change_password(username, user_data['password'])
+
+ if 'email' in user_data:
+ # Ensure that we are using database auth.
+ if app.config['AUTHENTICATION_TYPE'] not in ['Database', 'AppToken']:
+ raise InvalidRequest('Cannot change e-mail in non-database auth')
+
+ pre_oci_model.update_email(username, user_data['email'], auto_verify=True)
+
+ if 'enabled' in user_data:
+ # Disable/enable the user.
+ pre_oci_model.update_enabled(username, bool(user_data['enabled']))
+
+ if 'superuser' in user_data:
+ config_object = config_provider.get_config()
+ superusers_set = set(config_object['SUPER_USERS'])
+
+ if user_data['superuser']:
+ superusers_set.add(username)
+ elif username in superusers_set:
+ superusers_set.remove(username)
+
+ config_object['SUPER_USERS'] = list(superusers_set)
+ config_provider.save_config(config_object)
+
+ return_value = user.to_dict()
+ if user_data.get('password') is not None:
+ password = user_data.get('password')
+ return_value['encrypted_password'] = authentication.encrypt_user_password(password)
+ if user_data.get('email') is not None:
+ return_value['email'] = user_data.get('email')
+
+ return return_value
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/takeownership/<namespace>')
+@path_param('namespace', 'The namespace of the user or organization being managed')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserTakeOwnership(ApiResource):
+ """ Resource for a superuser to take ownership of a namespace. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('takeOwnership')
+ @require_scope(scopes.SUPERUSER)
+ def post(self, namespace):
+ """ Takes ownership of the specified organization or user. """
+ if SuperUserPermission().can():
+ # Disallow for superusers.
+ if superusers.is_superuser(namespace):
+ raise InvalidRequest('Cannot take ownership of a superuser')
+
+ authed_user = get_authenticated_user()
+ entity_id, was_user = pre_oci_model.take_ownership(namespace, authed_user)
+ if entity_id is None:
+ raise NotFound()
+
+ # Log the change.
+ log_metadata = {
+ 'entity_id': entity_id,
+ 'namespace': namespace,
+ 'was_user': was_user,
+ 'superuser': authed_user.username,
+ }
+
+ log_action('take_ownership', authed_user.username, log_metadata)
+
+ return jsonify({
+ 'namespace': namespace
+ })
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/organizations/<name>')
+@path_param('name', 'The name of the organization being managed')
+@show_if(features.SUPER_USERS)
+class SuperUserOrganizationManagement(ApiResource):
+ """ Resource for managing organizations in the system. """
+ schemas = {
+ 'UpdateOrg': {
+ 'id': 'UpdateOrg',
+ 'type': 'object',
+ 'description': 'Description of updates for an organization',
+ 'properties': {
+ 'name': {
+ 'type': 'string',
+ 'description': 'The new name for the organization',
+ }
+ },
+ },
+ }
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('deleteOrganization')
+ @require_scope(scopes.SUPERUSER)
+ def delete(self, name):
+ """ Deletes the specified organization. """
+ if SuperUserPermission().can():
+ pre_oci_model.mark_organization_for_deletion(name)
+ return '', 204
+
+ raise Unauthorized()
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('changeOrganization')
+ @validate_json_request('UpdateOrg')
+ @require_scope(scopes.SUPERUSER)
+ def put(self, name):
+ """ Updates information about the specified user. """
+ if SuperUserPermission().can():
+ org_data = request.get_json()
+      new_name = org_data['name'] if 'name' in org_data else None
+      org = pre_oci_model.change_organization_name(name, new_name)
+ return org.to_dict()
+
+ raise Unauthorized()
+
+
+def key_view(key):
+ return {
+ 'name': key.name,
+ 'kid': key.kid,
+ 'service': key.service,
+ 'jwk': key.jwk,
+ 'metadata': key.metadata,
+ 'created_date': key.created_date,
+ 'expiration_date': key.expiration_date,
+ 'rotation_duration': key.rotation_duration,
+ 'approval': approval_view(key.approval) if key.approval is not None else None,
+ }
+
+
+def approval_view(approval):
+ return {
+ 'approver': user_view(approval.approver) if approval.approver else None,
+ 'approval_type': approval.approval_type,
+ 'approved_date': approval.approved_date,
+ 'notes': approval.notes,
+ }
+
+
+@resource('/v1/superuser/keys')
+@show_if(features.SUPER_USERS)
+class SuperUserServiceKeyManagement(ApiResource):
+ """ Resource for managing service keys."""
+ schemas = {
+ 'CreateServiceKey': {
+ 'id': 'CreateServiceKey',
+ 'type': 'object',
+ 'description': 'Description of creation of a service key',
+ 'required': ['service', 'expiration'],
+ 'properties': {
+ 'service': {
+ 'type': 'string',
+ 'description': 'The service authenticating with this key',
+ },
+ 'name': {
+ 'type': 'string',
+ 'description': 'The friendly name of a service key',
+ },
+ 'metadata': {
+ 'type': 'object',
+ 'description': 'The key/value pairs of this key\'s metadata',
+ },
+ 'notes': {
+ 'type': 'string',
+ 'description': 'If specified, the extra notes for the key',
+ },
+ 'expiration': {
+ 'description': 'The expiration date as a unix timestamp',
+ 'anyOf': [{'type': 'number'}, {'type': 'null'}],
+ },
+ },
+ },
+ }
+
+ @verify_not_prod
+ @nickname('listServiceKeys')
+ @require_scope(scopes.SUPERUSER)
+ def get(self):
+ if SuperUserPermission().can():
+ keys = pre_oci_model.list_all_service_keys()
+
+ return jsonify({
+ 'keys': [key.to_dict() for key in keys],
+ })
+
+ raise Unauthorized()
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('createServiceKey')
+ @require_scope(scopes.SUPERUSER)
+ @validate_json_request('CreateServiceKey')
+ def post(self):
+ if SuperUserPermission().can():
+ body = request.get_json()
+ key_name = body.get('name', '')
+ if not validate_service_key_name(key_name):
+ raise InvalidRequest('Invalid service key friendly name: %s' % key_name)
+
+ # Ensure we have a valid expiration date if specified.
+ expiration_date = body.get('expiration', None)
+ if expiration_date is not None:
+ try:
+ expiration_date = datetime.utcfromtimestamp(float(expiration_date))
+ except ValueError as ve:
+ raise InvalidRequest('Invalid expiration date: %s' % ve)
+
+ if expiration_date <= datetime.now():
+ raise InvalidRequest('Expiration date cannot be in the past')
+
+ # Create the metadata for the key.
+ user = get_authenticated_user()
+ metadata = body.get('metadata', {})
+ metadata.update({
+ 'created_by': 'Quay Superuser Panel',
+ 'creator': user.username,
+ 'ip': get_request_ip(),
+ })
+
+ # Generate a key with a private key that we *never save*.
+ (private_key, key_id) = pre_oci_model.generate_service_key(body['service'], expiration_date,
+ metadata=metadata,
+ name=key_name)
+ # Auto-approve the service key.
+ pre_oci_model.approve_service_key(key_id, user, ServiceKeyApprovalType.SUPERUSER,
+ notes=body.get('notes', ''))
+
+ # Log the creation and auto-approval of the service key.
+ key_log_metadata = {
+ 'kid': key_id,
+ 'preshared': True,
+ 'service': body['service'],
+ 'name': key_name,
+ 'expiration_date': expiration_date,
+ 'auto_approved': True,
+ }
+
+ log_action('service_key_create', None, key_log_metadata)
+ log_action('service_key_approve', None, key_log_metadata)
+
+ return jsonify({
+ 'kid': key_id,
+ 'name': key_name,
+ 'service': body['service'],
+ 'public_key': private_key.publickey().exportKey('PEM'),
+ 'private_key': private_key.exportKey('PEM'),
+ })
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/keys/<kid>')
+@path_param('kid', 'The unique identifier for a service key')
+@show_if(features.SUPER_USERS)
+class SuperUserServiceKey(ApiResource):
+ """ Resource for managing service keys. """
+ schemas = {
+ 'PutServiceKey': {
+ 'id': 'PutServiceKey',
+ 'type': 'object',
+ 'description': 'Description of updates for a service key',
+ 'properties': {
+ 'name': {
+ 'type': 'string',
+ 'description': 'The friendly name of a service key',
+ },
+ 'metadata': {
+ 'type': 'object',
+ 'description': 'The key/value pairs of this key\'s metadata',
+ },
+ 'expiration': {
+ 'description': 'The expiration date as a unix timestamp',
+ 'anyOf': [{'type': 'number'}, {'type': 'null'}],
+ },
+ },
+ },
+ }
+
+ @verify_not_prod
+ @nickname('getServiceKey')
+ @require_scope(scopes.SUPERUSER)
+ def get(self, kid):
+ if SuperUserPermission().can():
+ try:
+ key = pre_oci_model.get_service_key(kid, approved_only=False, alive_only=False)
+ return jsonify(key.to_dict())
+ except ServiceKeyDoesNotExist:
+ raise NotFound()
+
+ raise Unauthorized()
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('updateServiceKey')
+ @require_scope(scopes.SUPERUSER)
+ @validate_json_request('PutServiceKey')
+ def put(self, kid):
+ if SuperUserPermission().can():
+ body = request.get_json()
+ try:
+ key = pre_oci_model.get_service_key(kid, approved_only=False, alive_only=False)
+ except ServiceKeyDoesNotExist:
+ raise NotFound()
+
+ key_log_metadata = {
+ 'kid': key.kid,
+ 'service': key.service,
+ 'name': body.get('name', key.name),
+ 'expiration_date': key.expiration_date,
+ }
+
+ if 'expiration' in body:
+ expiration_date = body['expiration']
+ if expiration_date is not None and expiration_date != '':
+ try:
+ expiration_date = datetime.utcfromtimestamp(float(expiration_date))
+ except ValueError as ve:
+ raise InvalidRequest('Invalid expiration date: %s' % ve)
+
+ if expiration_date <= datetime.now():
+ raise InvalidRequest('Cannot have an expiration date in the past')
+
+ key_log_metadata.update({
+ 'old_expiration_date': key.expiration_date,
+ 'expiration_date': expiration_date,
+ })
+
+ log_action('service_key_extend', None, key_log_metadata)
+ pre_oci_model.set_key_expiration(kid, expiration_date)
+
+ if 'name' in body or 'metadata' in body:
+ key_name = body.get('name')
+ if not validate_service_key_name(key_name):
+ raise InvalidRequest('Invalid service key friendly name: %s' % key_name)
+
+ pre_oci_model.update_service_key(kid, key_name, body.get('metadata'))
+ log_action('service_key_modify', None, key_log_metadata)
+
+ updated_key = pre_oci_model.get_service_key(kid, approved_only=False, alive_only=False)
+ return jsonify(updated_key.to_dict())
+
+ raise Unauthorized()
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('deleteServiceKey')
+ @require_scope(scopes.SUPERUSER)
+ def delete(self, kid):
+ if SuperUserPermission().can():
+ try:
+ key = pre_oci_model.delete_service_key(kid)
+ except ServiceKeyDoesNotExist:
+ raise NotFound()
+
+ key_log_metadata = {
+ 'kid': kid,
+ 'service': key.service,
+ 'name': key.name,
+ 'created_date': key.created_date,
+ 'expiration_date': key.expiration_date,
+ }
+
+ log_action('service_key_delete', None, key_log_metadata)
+ return make_response('', 204)
+
+ raise Unauthorized()
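Both the create handler above (createServiceKey) and the update handler above (updateServiceKey) accept `expiration` as a unix timestamp and reject values in the past. A small standalone sketch of that validation; `parse_expiration` is an illustrative helper, not a function in this change, and like the handlers it compares naive datetimes, so the host clock conventions matter:

```python
# Illustrative expiration validation, mirroring the service key handlers above.
from datetime import datetime

def parse_expiration(raw):
    """ Returns a datetime for a unix-timestamp expiration, or None for no expiration. """
    if raw is None or raw == '':
        return None
    expiration_date = datetime.utcfromtimestamp(float(raw))   # may raise ValueError
    if expiration_date <= datetime.now():
        raise ValueError('Expiration date cannot be in the past')
    return expiration_date

print(parse_expiration(None))           # None
print(parse_expiration(4102444800))     # 2100-01-01 00:00:00
```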
+
+
+@resource('/v1/superuser/approvedkeys/<kid>')
+@path_param('kid', 'The unique identifier for a service key')
+@show_if(features.SUPER_USERS)
+class SuperUserServiceKeyApproval(ApiResource):
+ """ Resource for approving service keys. """
+
+ schemas = {
+ 'ApproveServiceKey': {
+ 'id': 'ApproveServiceKey',
+ 'type': 'object',
+ 'description': 'Information for approving service keys',
+ 'properties': {
+ 'notes': {
+ 'type': 'string',
+ 'description': 'Optional approval notes',
+ },
+ },
+ },
+ }
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('approveServiceKey')
+ @require_scope(scopes.SUPERUSER)
+ @validate_json_request('ApproveServiceKey')
+ def post(self, kid):
+ if SuperUserPermission().can():
+ notes = request.get_json().get('notes', '')
+ approver = get_authenticated_user()
+ try:
+ key = pre_oci_model.approve_service_key(kid, approver, ServiceKeyApprovalType.SUPERUSER,
+ notes=notes)
+
+ # Log the approval of the service key.
+ key_log_metadata = {
+ 'kid': kid,
+ 'service': key.service,
+ 'name': key.name,
+ 'expiration_date': key.expiration_date,
+ }
+
+ log_action('service_key_approve', None, key_log_metadata)
+ except ServiceKeyDoesNotExist:
+ raise NotFound()
+ except ServiceKeyAlreadyApproved:
+ pass
+
+ return make_response('', 201)
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/<build_uuid>/logs')
+@path_param('build_uuid', 'The UUID of the build')
+@show_if(features.SUPER_USERS)
+class SuperUserRepositoryBuildLogs(ApiResource):
+ """ Resource for loading repository build logs for the superuser. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('getRepoBuildLogsSuperUser')
+ @require_scope(scopes.SUPERUSER)
+ def get(self, build_uuid):
+ """ Return the build logs for the build specified by the build uuid. """
+ if SuperUserPermission().can():
+ try:
+ repo_build = pre_oci_model.get_repository_build(build_uuid)
+ return get_logs_or_log_url(repo_build)
+ except InvalidRepositoryBuildException as e:
+ raise InvalidResponse(str(e))
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/<build_uuid>/status')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('build_uuid', 'The UUID of the build')
+@show_if(features.SUPER_USERS)
+class SuperUserRepositoryBuildStatus(ApiResource):
+ """ Resource for dealing with repository build status. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('getRepoBuildStatusSuperUser')
+ @require_scope(scopes.SUPERUSER)
+ def get(self, build_uuid):
+ """ Return the status for the builds specified by the build uuids. """
+ if SuperUserPermission().can():
+ try:
+ build = pre_oci_model.get_repository_build(build_uuid)
+ except InvalidRepositoryBuildException as e:
+ raise InvalidResponse(str(e))
+ return build.to_dict()
+
+ raise Unauthorized()
+
+
+@resource('/v1/superuser/<build_uuid>/build')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('build_uuid', 'The UUID of the build')
+@show_if(features.SUPER_USERS)
+class SuperUserRepositoryBuildResource(ApiResource):
+ """ Resource for dealing with repository builds as a super user. """
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('getRepoBuildSuperUser')
+ @require_scope(scopes.SUPERUSER)
+ def get(self, build_uuid):
+ """ Returns information about a build. """
+ if SuperUserPermission().can():
+ try:
+ build = pre_oci_model.get_repository_build(build_uuid)
+ except InvalidRepositoryBuildException:
+ raise NotFound()
+
+ return build.to_dict()
+
+ raise Unauthorized()
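Every handler in this module repeats the `if SuperUserPermission().can(): ... raise Unauthorized()` guard. Purely as an illustration of how that boilerplate could be factored out (this decorator is hypothetical and not part of the change; `SuperUserPermission` and `Unauthorized` are the names already imported at the top of this module):

```python
# Hypothetical refactor sketch only; not used by the code in this change.
from functools import wraps

def require_superuser_permission(handler):
    """ Runs the wrapped handler only when the caller holds superuser permission. """
    @wraps(handler)
    def wrapper(*args, **kwargs):
        if SuperUserPermission().can():
            return handler(*args, **kwargs)
        raise Unauthorized()
    return wrapper
```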
diff --git a/endpoints/api/superuser_models_interface.py b/endpoints/api/superuser_models_interface.py
new file mode 100644
index 000000000..e03d98e8c
--- /dev/null
+++ b/endpoints/api/superuser_models_interface.py
@@ -0,0 +1,335 @@
+import json
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+from datetime import datetime
+
+from dateutil.relativedelta import relativedelta
+from six import add_metaclass
+from tzlocal import get_localzone
+
+from app import avatar, superusers
+from buildtrigger.basehandler import BuildTriggerHandler
+from data import model
+from endpoints.api import format_date
+from util.morecollections import AttrDict
+
+
+def user_view(user):
+ return {
+ 'name': user.username,
+ 'kind': 'user',
+ 'is_robot': user.robot,
+ }
+
+
+class BuildTrigger(
+ namedtuple('BuildTrigger', ['uuid', 'service_name', 'pull_robot', 'can_read', 'can_admin', 'for_build'])):
+ """
+  BuildTrigger represents a trigger that is associated with a build
+ :type uuid: string
+ :type service_name: string
+ :type pull_robot: User
+ :type can_read: boolean
+ :type can_admin: boolean
+ :type for_build: boolean
+ """
+
+ def to_dict(self):
+ if not self.uuid:
+ return None
+
+ build_trigger = BuildTriggerHandler.get_handler(self)
+ build_source = build_trigger.config.get('build_source')
+
+ repo_url = build_trigger.get_repository_url() if build_source else None
+ can_read = self.can_read or self.can_admin
+
+ trigger_data = {
+ 'id': self.uuid,
+ 'service': self.service_name,
+ 'is_active': build_trigger.is_active(),
+
+ 'build_source': build_source if can_read else None,
+ 'repository_url': repo_url if can_read else None,
+
+ 'config': build_trigger.config if self.can_admin else {},
+ 'can_invoke': self.can_admin,
+ }
+
+ if not self.for_build and self.can_admin and self.pull_robot:
+ trigger_data['pull_robot'] = user_view(self.pull_robot)
+
+ return trigger_data
+
+
+class RepositoryBuild(namedtuple('RepositoryBuild',
+ ['uuid', 'logs_archived', 'repository_namespace_user_username', 'repository_name',
+ 'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', 'display_name',
+ 'started', 'job_config', 'phase', 'status', 'error', 'archive_url'])):
+ """
+  RepositoryBuild represents a build associated with a repository
+ :type uuid: string
+ :type logs_archived: boolean
+ :type repository_namespace_user_username: string
+ :type repository_name: string
+ :type can_write: boolean
+  :type can_read: boolean
+ :type pull_robot: User
+ :type resource_key: string
+ :type trigger: Trigger
+ :type display_name: string
+  :type started: Date
+ :type job_config: {Any -> Any}
+ :type phase: string
+ :type status: string
+ :type error: string
+ :type archive_url: string
+ """
+
+ def to_dict(self):
+
+ resp = {
+ 'id': self.uuid,
+ 'phase': self.phase,
+ 'started': format_date(self.started),
+ 'display_name': self.display_name,
+ 'status': self.status or {},
+ 'subdirectory': self.job_config.get('build_subdir', ''),
+ 'dockerfile_path': self.job_config.get('build_subdir', ''),
+ 'context': self.job_config.get('context', ''),
+ 'tags': self.job_config.get('docker_tags', []),
+ 'manual_user': self.job_config.get('manual_user', None),
+ 'is_writer': self.can_write,
+ 'trigger': self.trigger.to_dict(),
+ 'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None,
+ 'resource_key': self.resource_key,
+ 'pull_robot': user_view(self.pull_robot) if self.pull_robot else None,
+ 'repository': {
+ 'namespace': self.repository_namespace_user_username,
+ 'name': self.repository_name
+ },
+ 'error': self.error,
+ }
+
+ if self.can_write:
+ if self.resource_key is not None:
+ resp['archive_url'] = self.archive_url
+ elif self.job_config.get('archive_url', None):
+ resp['archive_url'] = self.job_config['archive_url']
+
+ return resp
+
+
+class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_date', 'notes'])):
+ """
+ Approval represents whether a key has been approved or not
+ :type approver: User
+ :type approval_type: string
+ :type approved_date: Date
+ :type notes: string
+ """
+
+ def to_dict(self):
+ return {
+ 'approver': self.approver.to_dict() if self.approver else None,
+ 'approval_type': self.approval_type,
+ 'approved_date': self.approved_date,
+ 'notes': self.notes,
+ }
+
+
+class ServiceKey(namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date',
+ 'expiration_date', 'rotation_duration', 'approval'])):
+ """
+ ServiceKey is an apostille signing key
+ :type name: string
+ :type kid: int
+ :type service: string
+ :type jwk: string
+ :type metadata: string
+ :type created_date: Date
+ :type expiration_date: Date
+ :type rotation_duration: Date
+ :type approval: Approval
+
+ """
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'kid': self.kid,
+ 'service': self.service,
+ 'jwk': self.jwk,
+ 'metadata': self.metadata,
+ 'created_date': self.created_date,
+ 'expiration_date': self.expiration_date,
+ 'rotation_duration': self.rotation_duration,
+ 'approval': self.approval.to_dict() if self.approval is not None else None,
+ }
+
+
+class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robot'])):
+ """
+ User represents a single user.
+ :type username: string
+ :type email: string
+ :type verified: boolean
+ :type enabled: boolean
+  :type robot: boolean
+ """
+
+ def to_dict(self):
+ user_data = {
+ 'kind': 'user',
+ 'name': self.username,
+ 'username': self.username,
+ 'email': self.email,
+ 'verified': self.verified,
+ 'avatar': avatar.get_data_for_user(self),
+ 'super_user': superusers.is_superuser(self.username),
+ 'enabled': self.enabled,
+ }
+
+ return user_data
+
+
+class Organization(namedtuple('Organization', ['username', 'email'])):
+ """
+ Organization represents a single org.
+ :type username: string
+ :type email: string
+ """
+
+ def to_dict(self):
+ return {
+ 'name': self.username,
+ 'email': self.email,
+ 'avatar': avatar.get_data_for_org(self),
+ }
+
+
+@add_metaclass(ABCMeta)
+class SuperuserDataInterface(object):
+ """
+  Interface that represents all data store interactions required by the superuser API.
+ """
+
+ @abstractmethod
+ def get_organizations(self):
+ """
+ Returns a list of Organization
+ """
+
+ @abstractmethod
+ def get_active_users(self):
+ """
+ Returns a list of User
+ """
+
+ @abstractmethod
+ def create_install_user(self, username, password, email):
+ """
+ Returns the created user and confirmation code for email confirmation
+ """
+
+ @abstractmethod
+ def get_nonrobot_user(self, username):
+ """
+ Returns a User
+ """
+
+ @abstractmethod
+ def create_reset_password_email_code(self, email):
+ """
+ Returns a recover password code
+ """
+
+ @abstractmethod
+ def mark_user_for_deletion(self, username):
+ """
+ Returns None
+ """
+
+ @abstractmethod
+ def change_password(self, username, password):
+ """
+ Returns None
+ """
+
+ @abstractmethod
+ def update_email(self, username, email, auto_verify):
+ """
+ Returns None
+ """
+
+ @abstractmethod
+ def update_enabled(self, username, enabled):
+ """
+ Returns None
+ """
+
+ @abstractmethod
+ def take_ownership(self, namespace, authed_user):
+ """
+ Returns id of entity and whether the entity was a user
+ """
+
+ @abstractmethod
+ def mark_organization_for_deletion(self, name):
+ """
+ Returns None
+ """
+
+ @abstractmethod
+ def change_organization_name(self, old_org_name, new_org_name):
+ """
+ Returns updated Organization
+ """
+
+ @abstractmethod
+ def list_all_service_keys(self):
+ """
+ Returns a list of service keys
+ """
+
+ @abstractmethod
+ def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
+ """
+ Returns a tuple of private key and public key id
+ """
+
+ @abstractmethod
+ def approve_service_key(self, kid, approver, approval_type, notes=''):
+ """
+ Returns the approved Key
+ """
+
+ @abstractmethod
+ def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
+ """
+ Returns ServiceKey
+ """
+
+ @abstractmethod
+ def set_key_expiration(self, kid, expiration_date):
+ """
+ Returns None
+ """
+
+ @abstractmethod
+ def update_service_key(self, kid, name=None, metadata=None):
+ """
+ Returns None
+ """
+
+ @abstractmethod
+ def delete_service_key(self, kid):
+ """
+ Returns deleted ServiceKey
+ """
+
+ @abstractmethod
+ def get_repository_build(self, uuid):
+ """
+ Returns RepositoryBuild
+ """
diff --git a/endpoints/api/superuser_models_pre_oci.py b/endpoints/api/superuser_models_pre_oci.py
new file mode 100644
index 000000000..0458f9226
--- /dev/null
+++ b/endpoints/api/superuser_models_pre_oci.py
@@ -0,0 +1,182 @@
+import features
+
+from flask import request
+
+from app import all_queues, userfiles, namespace_gc_queue
+from auth.permissions import ReadRepositoryPermission, ModifyRepositoryPermission, AdministerRepositoryPermission
+from data import model, database
+from endpoints.api.build import get_job_config, _get_build_status
+from endpoints.api.superuser_models_interface import BuildTrigger
+from endpoints.api.superuser_models_interface import SuperuserDataInterface, Organization, User, \
+ ServiceKey, Approval, RepositoryBuild
+from util.request import get_request_ip
+
+
+def _create_user(user):
+ if user is None:
+ return None
+ return User(user.username, user.email, user.verified, user.enabled, user.robot)
+
+
+def _create_key(key):
+ approval = None
+ if key.approval is not None:
+ approval = Approval(_create_user(key.approval.approver), key.approval.approval_type, key.approval.approved_date,
+ key.approval.notes)
+
+ return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, key.expiration_date,
+ key.rotation_duration, approval)
+
+
+class ServiceKeyDoesNotExist(Exception):
+ pass
+
+
+class ServiceKeyAlreadyApproved(Exception):
+ pass
+
+
+class InvalidRepositoryBuildException(Exception):
+ pass
+
+
+class PreOCIModel(SuperuserDataInterface):
+ """
+ PreOCIModel implements the data model for the SuperUser using a database schema
+ before it was changed to support the OCI specification.
+ """
+
+ def get_repository_build(self, uuid):
+ try:
+ build = model.build.get_repository_build(uuid)
+ except model.InvalidRepositoryBuildException as e:
+ raise InvalidRepositoryBuildException(str(e))
+
+ repo_namespace = build.repository_namespace_user_username
+ repo_name = build.repository_name
+
+ can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
+ can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
+ can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
+ job_config = get_job_config(build.job_config)
+ phase, status, error = _get_build_status(build)
+    url = userfiles.get_file_url(build.resource_key, get_request_ip(), requires_cors=True)
+
+ return RepositoryBuild(build.uuid, build.logs_archived, repo_namespace, repo_name, can_write, can_read,
+ _create_user(build.pull_robot), build.resource_key,
+ BuildTrigger(build.trigger.uuid, build.trigger.service.name,
+ _create_user(build.trigger.pull_robot), can_read, can_admin, True),
+                           build.display_name, build.started, job_config, phase, status, error, url)
+
+ def delete_service_key(self, kid):
+ try:
+ key = model.service_keys.delete_service_key(kid)
+ except model.ServiceKeyDoesNotExist:
+ raise ServiceKeyDoesNotExist
+ return _create_key(key)
+
+ def update_service_key(self, kid, name=None, metadata=None):
+ model.service_keys.update_service_key(kid, name, metadata)
+
+ def set_key_expiration(self, kid, expiration_date):
+ model.service_keys.set_key_expiration(kid, expiration_date)
+
+ def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
+ try:
+ key = model.service_keys.get_service_key(kid, approved_only=approved_only, alive_only=alive_only)
+ return _create_key(key)
+ except model.ServiceKeyDoesNotExist:
+ raise ServiceKeyDoesNotExist
+
+ def approve_service_key(self, kid, approver, approval_type, notes=''):
+ try:
+ key = model.service_keys.approve_service_key(kid, approval_type, approver=approver, notes=notes)
+ return _create_key(key)
+ except model.ServiceKeyDoesNotExist:
+ raise ServiceKeyDoesNotExist
+ except model.ServiceKeyAlreadyApproved:
+ raise ServiceKeyAlreadyApproved
+
+ def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
+ (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, metadata=metadata, name=name)
+
+ return private_key, key.kid
+
+ def list_all_service_keys(self):
+ keys = model.service_keys.list_all_keys()
+ return [_create_key(key) for key in keys]
+
+ def change_organization_name(self, old_org_name, new_org_name):
+ org = model.organization.get_organization(old_org_name)
+ if new_org_name is not None:
+ org = model.user.change_username(org.id, new_org_name)
+
+ return Organization(org.username, org.email)
+
+ def mark_organization_for_deletion(self, name):
+ org = model.organization.get_organization(name)
+ model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue, force=True)
+
+ def take_ownership(self, namespace, authed_user):
+ entity = model.user.get_user_or_org(namespace)
+ if entity is None:
+ return None, False
+
+ was_user = not entity.organization
+ if entity.organization:
+ # Add the superuser as an admin to the owners team of the org.
+ model.organization.add_user_as_admin(authed_user, entity)
+ else:
+ # If the entity is a user, convert it to an organization and add the current superuser
+ # as the admin.
+ model.organization.convert_user_to_organization(entity, authed_user)
+ return entity.id, was_user
+
+ def update_enabled(self, username, enabled):
+ user = model.user.get_nonrobot_user(username)
+ model.user.update_enabled(user, bool(enabled))
+
+ def update_email(self, username, email, auto_verify):
+ user = model.user.get_nonrobot_user(username)
+ model.user.update_email(user, email, auto_verify)
+
+ def change_password(self, username, password):
+ user = model.user.get_nonrobot_user(username)
+ model.user.change_password(user, password)
+
+ def mark_user_for_deletion(self, username):
+ user = model.user.get_nonrobot_user(username)
+ model.user.mark_namespace_for_deletion(user, all_queues, namespace_gc_queue, force=True)
+
+ def create_reset_password_email_code(self, email):
+ code = model.user.create_reset_password_email_code(email)
+ return code
+
+ def get_nonrobot_user(self, username):
+ user = model.user.get_nonrobot_user(username)
+ if user is None:
+ return None
+ return _create_user(user)
+
+ def create_install_user(self, username, password, email):
+ prompts = model.user.get_default_user_prompts(features)
+ user = model.user.create_user(username, password, email, auto_verify=not features.MAILING,
+ email_required=features.MAILING, prompts=prompts)
+
+ return_user = _create_user(user)
+ # If mailing is turned on, send the user a verification email.
+ if features.MAILING:
+ confirmation_code = model.user.create_confirm_email_code(user)
+ return return_user, confirmation_code
+
+ return return_user, ''
+
+ def get_active_users(self, disabled=True):
+ users = model.user.get_active_users(disabled=disabled)
+ return [_create_user(user) for user in users]
+
+ def get_organizations(self):
+ return [Organization(org.username, org.email) for org in model.organization.get_organizations()]
+
+
+pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/tag.py b/endpoints/api/tag.py
new file mode 100644
index 000000000..573f0fc97
--- /dev/null
+++ b/endpoints/api/tag.py
@@ -0,0 +1,336 @@
+""" Manage the tags of a repository. """
+from datetime import datetime
+from flask import request, abort
+
+from app import storage, docker_v2_signing_key
+from auth.auth_context import get_authenticated_user
+from data.registry_model import registry_model
+from endpoints.api import (resource, nickname, require_repo_read, require_repo_write,
+ RepositoryParamResource, log_action, validate_json_request, path_param,
+ parse_args, query_param, truthy_bool, disallow_for_app_repositories,
+ format_date, disallow_for_non_normal_repositories)
+from endpoints.api.image import image_dict
+from endpoints.exception import NotFound, InvalidRequest
+from util.names import TAG_ERROR, TAG_REGEX
+
+
+def _tag_dict(tag):
+ tag_info = {
+ 'name': tag.name,
+ 'reversion': tag.reversion,
+ }
+
+ if tag.lifetime_start_ts > 0:
+ tag_info['start_ts'] = tag.lifetime_start_ts
+
+ if tag.lifetime_end_ts > 0:
+ tag_info['end_ts'] = tag.lifetime_end_ts
+
+ # TODO: Remove this once fully on OCI data model.
+ if tag.legacy_image_if_present:
+ tag_info['docker_image_id'] = tag.legacy_image.docker_image_id
+ tag_info['image_id'] = tag.legacy_image.docker_image_id
+ tag_info['size'] = tag.legacy_image.aggregate_size
+
+ # TODO: Remove this check once fully on OCI data model.
+ if tag.manifest_digest:
+ tag_info['manifest_digest'] = tag.manifest_digest
+
+ if tag.manifest:
+ tag_info['is_manifest_list'] = tag.manifest.is_manifest_list
+
+ if tag.lifetime_start_ts > 0:
+ last_modified = format_date(datetime.utcfromtimestamp(tag.lifetime_start_ts))
+ tag_info['last_modified'] = last_modified
+
+ if tag.lifetime_end_ts is not None:
+ expiration = format_date(datetime.utcfromtimestamp(tag.lifetime_end_ts))
+ tag_info['expiration'] = expiration
+
+ return tag_info
+
+
+@resource('/v1/repository/<apirepopath:repository>/tag/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class ListRepositoryTags(RepositoryParamResource):
+ """ Resource for listing full repository tag history, alive *and dead*. """
+
+ @require_repo_read
+ @disallow_for_app_repositories
+ @parse_args()
+ @query_param('specificTag', 'Filters the tags to the specific tag.', type=str, default='')
+ @query_param('limit', 'Limit to the number of results to return per page. Max 100.', type=int,
+ default=50)
+ @query_param('page', 'Page index for the results. Default 1.', type=int, default=1)
+ @query_param('onlyActiveTags', 'Filter to only active tags.', type=truthy_bool, default=False)
+ @nickname('listRepoTags')
+ def get(self, namespace, repository, parsed_args):
+ specific_tag = parsed_args.get('specificTag') or None
+ page = max(1, parsed_args.get('page', 1))
+ limit = min(100, max(1, parsed_args.get('limit', 50)))
+ active_tags_only = parsed_args.get('onlyActiveTags')
+
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ history, has_more = registry_model.list_repository_tag_history(repo_ref, page=page,
+ size=limit,
+ specific_tag_name=specific_tag,
+ active_tags_only=active_tags_only)
+ return {
+ 'tags': [_tag_dict(tag) for tag in history],
+ 'page': page,
+ 'has_additional': has_more,
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/tag/<tag>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('tag', 'The name of the tag')
+class RepositoryTag(RepositoryParamResource):
+ """ Resource for managing repository tags. """
+ schemas = {
+ 'ChangeTag': {
+ 'type': 'object',
+ 'description': 'Makes changes to a specific tag',
+ 'properties': {
+ 'image': {
+ 'type': ['string', 'null'],
+ 'description': '(Deprecated: Use `manifest_digest`) Image to which the tag should point.',
+ },
+ 'manifest_digest': {
+ 'type': ['string', 'null'],
+ 'description': '(If specified) The manifest digest to which the tag should point',
+ },
+ 'expiration': {
+ 'type': ['number', 'null'],
+ 'description': '(If specified) The expiration for the image',
+ },
+ },
+ },
+ }
+
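+  # Illustrative request bodies for `changeTag` (values below are placeholders):
+  #   {"manifest_digest": "sha256:..."}  - retarget the tag to an existing manifest
+  #   {"image": "<docker image id>"}     - deprecated form, by legacy image ID
+  #   {"expiration": 1735689600}         - set the tag expiration (epoch seconds); null clears it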
+ @require_repo_write
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('changeTag')
+ @validate_json_request('ChangeTag')
+ def put(self, namespace, repository, tag):
+ """ Change which image a tag points to or create a new tag."""
+ if not TAG_REGEX.match(tag):
+ abort(400, TAG_ERROR)
+
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ if 'expiration' in request.get_json():
+ tag_ref = registry_model.get_repo_tag(repo_ref, tag)
+ if tag_ref is None:
+ raise NotFound()
+
+ expiration = request.get_json().get('expiration')
+ expiration_date = None
+ if expiration is not None:
+ try:
+ expiration_date = datetime.utcfromtimestamp(float(expiration))
+ except ValueError:
+ abort(400)
+
+ if expiration_date <= datetime.now():
+ abort(400)
+
+ existing_end_ts, ok = registry_model.change_repository_tag_expiration(tag_ref,
+ expiration_date)
+ if ok:
+ if not (existing_end_ts is None and expiration_date is None):
+ log_action('change_tag_expiration', namespace, {
+ 'username': get_authenticated_user().username,
+ 'repo': repository,
+ 'tag': tag,
+ 'namespace': namespace,
+ 'expiration_date': expiration_date,
+ 'old_expiration_date': existing_end_ts
+ }, repo_name=repository)
+ else:
+ raise InvalidRequest('Could not update tag expiration; Tag has probably changed')
+
+ if 'image' in request.get_json() or 'manifest_digest' in request.get_json():
+ existing_tag = registry_model.get_repo_tag(repo_ref, tag, include_legacy_image=True)
+
+ manifest_or_image = None
+ image_id = None
+ manifest_digest = None
+
+ if 'image' in request.get_json():
+ image_id = request.get_json()['image']
+ manifest_or_image = registry_model.get_legacy_image(repo_ref, image_id)
+ else:
+ manifest_digest = request.get_json()['manifest_digest']
+ manifest_or_image = registry_model.lookup_manifest_by_digest(repo_ref, manifest_digest,
+ require_available=True)
+
+ if manifest_or_image is None:
+ raise NotFound()
+
+ # TODO: Remove this check once fully on V22
+ existing_manifest_digest = None
+ if existing_tag:
+ existing_manifest = registry_model.get_manifest_for_tag(existing_tag)
+ existing_manifest_digest = existing_manifest.digest if existing_manifest else None
+
+ if not registry_model.retarget_tag(repo_ref, tag, manifest_or_image, storage,
+ docker_v2_signing_key):
+ raise InvalidRequest('Could not move tag')
+
+ username = get_authenticated_user().username
+
+ log_action('move_tag' if existing_tag else 'create_tag', namespace, {
+ 'username': username,
+ 'repo': repository,
+ 'tag': tag,
+ 'namespace': namespace,
+ 'image': image_id,
+ 'manifest_digest': manifest_digest,
+ 'original_image': (existing_tag.legacy_image.docker_image_id
+ if existing_tag and existing_tag.legacy_image_if_present
+ else None),
+ 'original_manifest_digest': existing_manifest_digest,
+ }, repo_name=repository)
+
+ return 'Updated', 201
+
+ @require_repo_write
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('deleteFullTag')
+ def delete(self, namespace, repository, tag):
+ """ Delete the specified repository tag. """
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ registry_model.delete_tag(repo_ref, tag)
+
+ username = get_authenticated_user().username
+ log_action('delete_tag', namespace,
+ {'username': username,
+ 'repo': repository,
+ 'namespace': namespace,
+ 'tag': tag}, repo_name=repository)
+
+ return '', 204
+
+
+@resource('/v1/repository/<apirepopath:repository>/tag/<tag>/images')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('tag', 'The name of the tag')
+class RepositoryTagImages(RepositoryParamResource):
+ """ Resource for listing the images in a specific repository tag. """
+
+ @require_repo_read
+ @nickname('listTagImages')
+ @disallow_for_app_repositories
+ @parse_args()
+  @query_param('owned', 'If specified, only images wholly owned by this tag are returned.',
+ type=truthy_bool, default=False)
+ def get(self, namespace, repository, tag, parsed_args):
+ """ List the images for the specified repository tag. """
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ tag_ref = registry_model.get_repo_tag(repo_ref, tag, include_legacy_image=True)
+ if tag_ref is None:
+ raise NotFound()
+
+ if tag_ref.legacy_image_if_present is None:
+ return {'images': []}
+
+ image_id = tag_ref.legacy_image.docker_image_id
+
+ all_images = None
+ if parsed_args['owned']:
+ # TODO: Remove the `owned` image concept once we are fully on V2_2.
+ all_images = registry_model.get_legacy_images_owned_by_tag(tag_ref)
+ else:
+ image_with_parents = registry_model.get_legacy_image(repo_ref, image_id, include_parents=True)
+ if image_with_parents is None:
+ raise NotFound()
+
+ all_images = [image_with_parents] + image_with_parents.parents
+
+ return {
+ 'images': [image_dict(image) for image in all_images],
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/tag/<tag>/restore')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('tag', 'The name of the tag')
+class RestoreTag(RepositoryParamResource):
+ """ Resource for restoring a repository tag back to a previous image. """
+ schemas = {
+ 'RestoreTag': {
+ 'type': 'object',
+ 'description': 'Restores a tag to a specific image',
+ 'properties': {
+ 'image': {
+ 'type': 'string',
+ 'description': '(Deprecated: use `manifest_digest`) Image to which the tag should point',
+ },
+ 'manifest_digest': {
+ 'type': 'string',
+ 'description': 'If specified, the manifest digest that should be used',
+ },
+ },
+ },
+ }
+
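+  # Illustrative request body for `restoreTag` (digest is a placeholder):
+  #   {"manifest_digest": "sha256:..."}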
+ @require_repo_write
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('restoreTag')
+ @validate_json_request('RestoreTag')
+ def post(self, namespace, repository, tag):
+ """ Restores a repository tag back to a previous image in the repository. """
+ repo_ref = registry_model.lookup_repository(namespace, repository)
+ if repo_ref is None:
+ raise NotFound()
+
+ # Restore the tag back to the previous image.
+ image_id = request.get_json().get('image', None)
+ manifest_digest = request.get_json().get('manifest_digest', None)
+
+ if image_id is None and manifest_digest is None:
+ raise InvalidRequest('Missing manifest_digest')
+
+ # Data for logging the reversion/restoration.
+ username = get_authenticated_user().username
+ log_data = {
+ 'username': username,
+ 'repo': repository,
+ 'tag': tag,
+ 'image': image_id,
+ 'manifest_digest': manifest_digest,
+ }
+
+ manifest_or_legacy_image = None
+ if manifest_digest is not None:
+ manifest_or_legacy_image = registry_model.lookup_manifest_by_digest(repo_ref, manifest_digest,
+ allow_dead=True,
+ require_available=True)
+ elif image_id is not None:
+ manifest_or_legacy_image = registry_model.get_legacy_image(repo_ref, image_id)
+
+ if manifest_or_legacy_image is None:
+ raise NotFound()
+
+ if not registry_model.retarget_tag(repo_ref, tag, manifest_or_legacy_image, storage,
+ docker_v2_signing_key, is_reversion=True):
+ raise InvalidRequest('Could not restore tag')
+
+ log_action('revert_tag', namespace, log_data, repo_name=repository)
+
+ return {}
diff --git a/endpoints/api/team.py b/endpoints/api/team.py
new file mode 100644
index 000000000..b00a14393
--- /dev/null
+++ b/endpoints/api/team.py
@@ -0,0 +1,534 @@
+""" Create, list and manage an organization's teams. """
+
+import json
+
+from functools import wraps
+
+from flask import request
+
+import features
+
+from app import avatar, authentication
+from auth.permissions import (AdministerOrganizationPermission, ViewTeamPermission,
+ SuperUserPermission)
+
+from auth.auth_context import get_authenticated_user
+from auth import scopes
+from data import model
+from data.database import Team
+from endpoints.api import (resource, nickname, ApiResource, validate_json_request, request_error,
+ log_action, internal_only, require_scope, path_param, query_param,
+ truthy_bool, parse_args, require_user_admin, show_if, format_date,
+ verify_not_prod, require_fresh_login)
+from endpoints.exception import Unauthorized, NotFound, InvalidRequest
+from util.useremails import send_org_invite_email
+from util.names import parse_robot_username
+
+def permission_view(permission):
+ return {
+ 'repository': {
+ 'name': permission.repository.name,
+ 'is_public': permission.repository.visibility.name == 'public'
+ },
+ 'role': permission.role.name
+ }
+
+def try_accept_invite(code, user):
+ (team, inviter) = model.team.confirm_team_invite(code, user)
+
+ model.notification.delete_matching_notifications(user, 'org_team_invite',
+ org=team.organization.username)
+
+ orgname = team.organization.username
+ log_action('org_team_member_invite_accepted', orgname, {
+ 'member': user.username,
+ 'team': team.name,
+ 'inviter': inviter.username
+ })
+
+ return team
+
+def handle_addinvite_team(inviter, team, user=None, email=None):
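+  # Returns the created invite if an invitation was required, or None if the member was added
+  # to the team directly.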
+ requires_invite = features.MAILING and features.REQUIRE_TEAM_INVITE
+ invite = model.team.add_or_invite_to_team(inviter, team, user, email,
+ requires_invite=requires_invite)
+ if not invite:
+ # User was added to the team directly.
+ return
+
+ orgname = team.organization.username
+ if user:
+ model.notification.create_notification('org_team_invite', user, metadata={
+ 'code': invite.invite_token,
+ 'inviter': inviter.username,
+ 'org': orgname,
+ 'team': team.name
+ })
+
+ send_org_invite_email(user.username if user else email, user.email if user else email,
+ orgname, team.name, inviter.username, invite.invite_token)
+ return invite
+
+def team_view(orgname, team, is_new_team=False):
+ view_permission = ViewTeamPermission(orgname, team.name)
+ return {
+ 'name': team.name,
+ 'description': team.description,
+ 'can_view': view_permission.can(),
+ 'role': Team.role.get_name(team.role_id),
+ 'avatar': avatar.get_data_for_team(team),
+ 'new_team': is_new_team,
+ }
+
+def member_view(member, invited=False):
+ return {
+ 'name': member.username,
+ 'kind': 'user',
+ 'is_robot': member.robot,
+ 'avatar': avatar.get_data_for_user(member),
+ 'invited': invited,
+ }
+
+def invite_view(invite):
+ if invite.user:
+ return member_view(invite.user, invited=True)
+ else:
+ return {
+ 'email': invite.email,
+ 'kind': 'invite',
+ 'avatar': avatar.get_data(invite.email, invite.email, 'user'),
+ 'invited': True
+ }
+
+def disallow_for_synced_team(except_robots=False):
+ """ Disallows the decorated operation for a team that is marked as being synced from an internal
+ auth provider such as LDAP. If except_robots is True, then the operation is allowed if the
+ member specified on the operation is a robot account.
+ """
+ def inner(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ # Team syncing can only be enabled if we have a federated service.
+ if features.TEAM_SYNCING and authentication.federated_service:
+ orgname = kwargs['orgname']
+ teamname = kwargs['teamname']
+ if model.team.get_team_sync_information(orgname, teamname):
+ if not except_robots or not parse_robot_username(kwargs.get('membername', '')):
+ raise InvalidRequest('Cannot call this method on an auth-synced team')
+
+ return func(self, *args, **kwargs)
+ return wrapper
+ return inner
+
+
+disallow_nonrobots_for_synced_team = disallow_for_synced_team(except_robots=True)
+disallow_all_for_synced_team = disallow_for_synced_team(except_robots=False)
+
+
+@resource('/v1/organization/<orgname>/team/<teamname>')
+@path_param('orgname', 'The name of the organization')
+@path_param('teamname', 'The name of the team')
+class OrganizationTeam(ApiResource):
+ """ Resource for manging an organization's teams. """
+ schemas = {
+ 'TeamDescription': {
+ 'type': 'object',
+ 'description': 'Description of a team',
+ 'required': [
+ 'role',
+ ],
+ 'properties': {
+ 'role': {
+ 'type': 'string',
+ 'description': 'Org wide permissions that should apply to the team',
+ 'enum': [
+ 'member',
+ 'creator',
+ 'admin',
+ ],
+ },
+ 'description': {
+ 'type': 'string',
+ 'description': 'Markdown description for the team',
+ },
+ },
+ },
+ }
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('updateOrganizationTeam')
+ @validate_json_request('TeamDescription')
+ def put(self, orgname, teamname):
+ """ Update the org-wide permission for the specified team. """
+ edit_permission = AdministerOrganizationPermission(orgname)
+ if edit_permission.can():
+ team = None
+
+ details = request.get_json()
+ is_existing = False
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ is_existing = True
+ except model.InvalidTeamException:
+ # Create the new team.
+ description = details['description'] if 'description' in details else ''
+ role = details['role'] if 'role' in details else 'member'
+
+ org = model.organization.get_organization(orgname)
+ team = model.team.create_team(teamname, org, role, description)
+ log_action('org_create_team', orgname, {'team': teamname})
+
+ if is_existing:
+ if ('description' in details and
+ team.description != details['description']):
+ team.description = details['description']
+ team.save()
+ log_action('org_set_team_description', orgname,
+ {'team': teamname, 'description': team.description})
+
+ if 'role' in details:
+ role = Team.role.get_name(team.role_id)
+ if role != details['role']:
+ team = model.team.set_team_org_permission(team, details['role'],
+ get_authenticated_user().username)
+ log_action('org_set_team_role', orgname, {'team': teamname, 'role': details['role']})
+
+ return team_view(orgname, team, is_new_team=not is_existing), 200
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('deleteOrganizationTeam')
+ def delete(self, orgname, teamname):
+ """ Delete the specified team. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ model.team.remove_team(orgname, teamname, get_authenticated_user().username)
+ log_action('org_delete_team', orgname, {'team': teamname})
+ return '', 204
+
+ raise Unauthorized()
+
+
+def _syncing_setup_allowed(orgname):
+ """ Returns whether syncing setup is allowed for the current user over the matching org. """
+ if not features.NONSUPERUSER_TEAM_SYNCING_SETUP and not SuperUserPermission().can():
+ return False
+
+ return AdministerOrganizationPermission(orgname).can()
+
+
+@resource('/v1/organization/<orgname>/team/<teamname>/syncing')
+@path_param('orgname', 'The name of the organization')
+@path_param('teamname', 'The name of the team')
+@show_if(features.TEAM_SYNCING)
+class OrganizationTeamSyncing(ApiResource):
+ """ Resource for managing syncing of a team by a backing group. """
+ @require_scope(scopes.ORG_ADMIN)
+ @require_scope(scopes.SUPERUSER)
+ @nickname('enableOrganizationTeamSync')
+ @verify_not_prod
+ @require_fresh_login
+ def post(self, orgname, teamname):
+ if _syncing_setup_allowed(orgname):
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ config = request.get_json()
+
+ # Ensure that the specified config points to a valid group.
+ status, err = authentication.check_group_lookup_args(config)
+ if not status:
+ raise InvalidRequest('Could not sync to group: %s' % err)
+
+ # Set the team's syncing config.
+ model.team.set_team_syncing(team, authentication.federated_service, config)
+
+ return team_view(orgname, team)
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @require_scope(scopes.SUPERUSER)
+ @nickname('disableOrganizationTeamSync')
+ @verify_not_prod
+ @require_fresh_login
+ def delete(self, orgname, teamname):
+ if _syncing_setup_allowed(orgname):
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ model.team.remove_team_syncing(orgname, teamname)
+ return team_view(orgname, team)
+
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/team/<teamname>/members')
+@path_param('orgname', 'The name of the organization')
+@path_param('teamname', 'The name of the team')
+class TeamMemberList(ApiResource):
+ """ Resource for managing the list of members for a team. """
+ @require_scope(scopes.ORG_ADMIN)
+ @parse_args()
+ @query_param('includePending', 'Whether to include pending members', type=truthy_bool,
+ default=False)
+ @nickname('getOrganizationTeamMembers')
+ def get(self, orgname, teamname, parsed_args):
+ """ Retrieve the list of members for the specified team. """
+ view_permission = ViewTeamPermission(orgname, teamname)
+ edit_permission = AdministerOrganizationPermission(orgname)
+
+ if view_permission.can():
+ team = None
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ members = model.organization.get_organization_team_members(team.id)
+ invites = []
+
+ if parsed_args['includePending'] and edit_permission.can():
+ invites = model.team.get_organization_team_member_invites(team.id)
+
+ data = {
+ 'name': teamname,
+ 'members': [member_view(m) for m in members] + [invite_view(i) for i in invites],
+ 'can_edit': edit_permission.can(),
+ }
+
+ if features.TEAM_SYNCING and authentication.federated_service:
+ if _syncing_setup_allowed(orgname):
+ data['can_sync'] = {
+ 'service': authentication.federated_service,
+ }
+
+ data['can_sync'].update(authentication.service_metadata())
+
+ sync_info = model.team.get_team_sync_information(orgname, teamname)
+ if sync_info is not None:
+ data['synced'] = {
+ 'service': sync_info.service.name,
+ }
+
+ if SuperUserPermission().can():
+ data['synced'].update({
+ 'last_updated': format_date(sync_info.last_updated),
+ 'config': json.loads(sync_info.config),
+ })
+
+ return data
+
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/team/<teamname>/members/<membername>')
+@path_param('orgname', 'The name of the organization')
+@path_param('teamname', 'The name of the team')
+@path_param('membername', 'The username of the team member')
+class TeamMember(ApiResource):
+ """ Resource for managing individual members of a team. """
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('updateOrganizationTeamMember')
+ @disallow_nonrobots_for_synced_team
+ def put(self, orgname, teamname, membername):
+ """ Adds or invites a member to an existing team. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ team = None
+ user = None
+
+ # Find the team.
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ # Find the user.
+ user = model.user.get_user(membername)
+ if not user:
+ raise request_error(message='Unknown user')
+
+ # Add or invite the user to the team.
+ inviter = get_authenticated_user()
+ invite = handle_addinvite_team(inviter, team, user=user)
+ if not invite:
+ log_action('org_add_team_member', orgname, {'member': membername, 'team': teamname})
+ return member_view(user, invited=False)
+
+ # User was invited.
+ log_action('org_invite_team_member', orgname, {
+ 'user': membername,
+ 'member': membername,
+ 'team': teamname
+ })
+ return member_view(user, invited=True)
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('deleteOrganizationTeamMember')
+ @disallow_nonrobots_for_synced_team
+ def delete(self, orgname, teamname, membername):
+ """ Delete a member of a team. If the user is merely invited to join
+ the team, then the invite is removed instead.
+ """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+      # Remove the user from the team.
+ invoking_user = get_authenticated_user().username
+
+ # Find the team.
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ # Find the member.
+ member = model.user.get_user(membername)
+ if not member:
+ raise NotFound()
+
+ # First attempt to delete an invite for the user to this team. If none found,
+ # then we try to remove the user directly.
+ if model.team.delete_team_user_invite(team, member):
+ log_action('org_delete_team_member_invite', orgname, {
+ 'user': membername,
+ 'team': teamname,
+ 'member': membername
+ })
+ return '', 204
+
+ model.team.remove_user_from_team(orgname, teamname, membername, invoking_user)
+ log_action('org_remove_team_member', orgname, {'member': membername, 'team': teamname})
+ return '', 204
+
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/team/<teamname>/invite/<email>')
+@show_if(features.MAILING)
+class InviteTeamMember(ApiResource):
+ """ Resource for inviting a team member via email address. """
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('inviteTeamMemberEmail')
+ @disallow_all_for_synced_team
+ def put(self, orgname, teamname, email):
+ """ Invites an email address to an existing team. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ team = None
+
+ # Find the team.
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ # Invite the email to the team.
+ inviter = get_authenticated_user()
+ invite = handle_addinvite_team(inviter, team, email=email)
+ log_action('org_invite_team_member', orgname, {
+ 'email': email,
+ 'team': teamname,
+ 'member': email
+ })
+ return invite_view(invite)
+
+ raise Unauthorized()
+
+ @require_scope(scopes.ORG_ADMIN)
+ @nickname('deleteTeamMemberEmailInvite')
+ def delete(self, orgname, teamname, email):
+ """ Delete an invite of an email address to join a team. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ team = None
+
+ # Find the team.
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ # Delete the invite.
+ if not model.team.delete_team_email_invite(team, email):
+ raise NotFound()
+
+ log_action('org_delete_team_member_invite', orgname, {
+ 'email': email,
+ 'team': teamname,
+ 'member': email
+ })
+ return '', 204
+
+ raise Unauthorized()
+
+
+@resource('/v1/organization/<orgname>/team/<teamname>/permissions')
+@path_param('orgname', 'The name of the organization')
+@path_param('teamname', 'The name of the team')
+class TeamPermissions(ApiResource):
+ """ Resource for listing the permissions an org's team has in the system. """
+ @nickname('getOrganizationTeamPermissions')
+ def get(self, orgname, teamname):
+ """ Returns the list of repository permissions for the org's team. """
+ permission = AdministerOrganizationPermission(orgname)
+ if permission.can():
+ try:
+ team = model.team.get_organization_team(orgname, teamname)
+ except model.InvalidTeamException:
+ raise NotFound()
+
+ permissions = model.permission.list_team_permissions(team)
+
+ return {
+ 'permissions': [permission_view(permission) for permission in permissions]
+ }
+
+ raise Unauthorized()
+
+
+@resource('/v1/teaminvite/<code>')
+@internal_only
+@show_if(features.MAILING)
+class TeamMemberInvite(ApiResource):
+ """ Resource for managing invites to join a team. """
+ @require_user_admin
+ @nickname('acceptOrganizationTeamInvite')
+ def put(self, code):
+ """ Accepts an invite to join a team in an organization. """
+ # Accept the invite for the current user.
+ team = try_accept_invite(code, get_authenticated_user())
+ if not team:
+ raise NotFound()
+
+ orgname = team.organization.username
+ return {
+ 'org': orgname,
+ 'team': team.name
+ }
+
+ @nickname('declineOrganizationTeamInvite')
+ @require_user_admin
+ def delete(self, code):
+ """ Delete an existing invitation to join a team. """
+ (team, inviter) = model.team.delete_team_invite(code, user_obj=get_authenticated_user())
+
+ model.notification.delete_matching_notifications(get_authenticated_user(), 'org_team_invite',
+ code=code)
+
+ orgname = team.organization.username
+ log_action('org_team_member_invite_declined', orgname, {
+ 'member': get_authenticated_user().username,
+ 'team': team.name,
+ 'inviter': inviter.username
+ })
+
+ return '', 204
diff --git a/endpoints/api/test/__init__.py b/endpoints/api/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/endpoints/api/test/shared.py b/endpoints/api/test/shared.py
new file mode 100644
index 000000000..c5a553f09
--- /dev/null
+++ b/endpoints/api/test/shared.py
@@ -0,0 +1,11 @@
+from endpoints.test.shared import conduct_call
+from endpoints.api import api
+
+def conduct_api_call(client, resource, method, params, body=None, expected_code=200, headers=None):
+ """ Conducts an API call to the given resource via the given client, and ensures its returned
+ status matches the code given.
+
+ Returns the response.
+ """
+ return conduct_call(client, resource, api.url_for, method, params, body, expected_code,
+ headers=headers)
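+
+# Illustrative usage from a test (SomeResource and the params dict are placeholders):
+#
+#   with client_with_identity('devtable', client) as cl:
+#     conduct_api_call(cl, SomeResource, 'GET', {'repository': 'devtable/simple'}, None, 200)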
diff --git a/endpoints/api/test/test_appspecifictoken.py b/endpoints/api/test/test_appspecifictoken.py
new file mode 100644
index 000000000..28e2bcd00
--- /dev/null
+++ b/endpoints/api/test/test_appspecifictoken.py
@@ -0,0 +1,50 @@
+from datetime import datetime, timedelta
+
+from data import model
+from endpoints.api.appspecifictokens import AppTokens, AppToken
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
+def test_app_specific_tokens(app, client):
+ with client_with_identity('devtable', client) as cl:
+ # Add an app specific token.
+ token_data = {'title': 'Testing 123'}
+ resp = conduct_api_call(cl, AppTokens, 'POST', None, token_data, 200).json
+ token_uuid = resp['token']['uuid']
+ assert 'token_code' in resp['token']
+
+ # List the tokens and ensure we have the one added.
+ resp = conduct_api_call(cl, AppTokens, 'GET', None, None, 200).json
+ assert len(resp['tokens'])
+ assert token_uuid in set([token['uuid'] for token in resp['tokens']])
+ assert not set([token['token_code'] for token in resp['tokens'] if 'token_code' in token])
+
+ # List the tokens expiring soon and ensure the one added is not present.
+ resp = conduct_api_call(cl, AppTokens, 'GET', {'expiring': True}, None, 200).json
+ assert token_uuid not in set([token['uuid'] for token in resp['tokens']])
+
+ # Get the token and ensure we have its code.
+ resp = conduct_api_call(cl, AppToken, 'GET', {'token_uuid': token_uuid}, None, 200).json
+ assert resp['token']['uuid'] == token_uuid
+ assert 'token_code' in resp['token']
+
+ # Delete the token.
+ conduct_api_call(cl, AppToken, 'DELETE', {'token_uuid': token_uuid}, None, 204)
+
+ # Ensure the token no longer exists.
+ resp = conduct_api_call(cl, AppTokens, 'GET', None, None, 200).json
+ assert len(resp['tokens'])
+ assert token_uuid not in set([token['uuid'] for token in resp['tokens']])
+
+ conduct_api_call(cl, AppToken, 'GET', {'token_uuid': token_uuid}, None, 404)
+
+
+def test_delete_expired_app_token(app, client):
+ user = model.user.get_user('devtable')
+ expiration = datetime.now() - timedelta(seconds=10)
+ token = model.appspecifictoken.create_token(user, 'some token', expiration)
+
+ with client_with_identity('devtable', client) as cl:
+ # Delete the token.
+ conduct_api_call(cl, AppToken, 'DELETE', {'token_uuid': token.uuid}, None, 204)
diff --git a/endpoints/api/test/test_build.py b/endpoints/api/test/test_build.py
new file mode 100644
index 000000000..bf98ad4eb
--- /dev/null
+++ b/endpoints/api/test/test_build.py
@@ -0,0 +1,20 @@
+import pytest
+
+from endpoints.api.build import RepositoryBuildList
+
+
+@pytest.mark.parametrize('request_json,subdir,context', [
+ ({}, '/Dockerfile', '/'),
+ ({'context': '/some_context'}, '/some_context/Dockerfile', '/some_context'),
+ ({'subdirectory': 'some_context'}, 'some_context/Dockerfile', 'some_context'),
+ ({'subdirectory': 'some_context/'}, 'some_context/Dockerfile', 'some_context/'),
+ ({'dockerfile_path': 'some_context/Dockerfile'}, 'some_context/Dockerfile', 'some_context'),
+ ({'dockerfile_path': 'some_context/Dockerfile', 'context': '/'}, 'some_context/Dockerfile', '/'),
+ ({'dockerfile_path': 'some_context/Dockerfile',
+ 'context': '/',
+ 'subdirectory': 'slime'}, 'some_context/Dockerfile', '/'),
+])
+def test_extract_dockerfile_args(request_json, subdir, context):
+ actual_context, actual_subdir = RepositoryBuildList.get_dockerfile_context(request_json)
+ assert subdir == actual_subdir
+ assert context == actual_context
diff --git a/endpoints/api/test/test_disallow_for_apps.py b/endpoints/api/test/test_disallow_for_apps.py
new file mode 100644
index 000000000..b9112c291
--- /dev/null
+++ b/endpoints/api/test/test_disallow_for_apps.py
@@ -0,0 +1,83 @@
+import pytest
+
+from data import model
+from endpoints.api.repository import Repository
+from endpoints.api.build import (RepositoryBuildList, RepositoryBuildResource,
+ RepositoryBuildStatus, RepositoryBuildLogs)
+from endpoints.api.image import RepositoryImageList, RepositoryImage
+from endpoints.api.manifest import RepositoryManifestLabels, ManageRepositoryManifestLabel
+from endpoints.api.repositorynotification import (RepositoryNotification,
+ RepositoryNotificationList,
+ TestRepositoryNotification)
+from endpoints.api.secscan import RepositoryImageSecurity, RepositoryManifestSecurity
+from endpoints.api.signing import RepositorySignatures
+from endpoints.api.tag import ListRepositoryTags, RepositoryTag, RepositoryTagImages, RestoreTag
+from endpoints.api.trigger import (BuildTriggerList, BuildTrigger, BuildTriggerSubdirs,
+ BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger,
+ TriggerBuildList, BuildTriggerFieldValues, BuildTriggerSources,
+ BuildTriggerSourceNamespaces)
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
+BUILD_ARGS = {'build_uuid': '1234'}
+IMAGE_ARGS = {'imageid': '1234', 'image_id': 1234}
+MANIFEST_ARGS = {'manifestref': 'sha256:abcd1234'}
+LABEL_ARGS = {'manifestref': 'sha256:abcd1234', 'labelid': '1234'}
+NOTIFICATION_ARGS = {'uuid': '1234'}
+TAG_ARGS = {'tag': 'foobar'}
+TRIGGER_ARGS = {'trigger_uuid': '1234'}
+FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'}
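+
+# The *_ARGS dicts above are placeholder route parameters; each endpoint is expected to reject
+# the call with a 501 for application repositories before these values are ever resolved.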
+
+@pytest.mark.parametrize('resource, method, params', [
+ (RepositoryBuildList, 'get', None),
+ (RepositoryBuildList, 'post', None),
+ (RepositoryBuildResource, 'get', BUILD_ARGS),
+ (RepositoryBuildResource, 'delete', BUILD_ARGS),
+ (RepositoryBuildStatus, 'get', BUILD_ARGS),
+ (RepositoryBuildLogs, 'get', BUILD_ARGS),
+ (RepositoryImageList, 'get', None),
+ (RepositoryImage, 'get', IMAGE_ARGS),
+ (RepositoryManifestLabels, 'get', MANIFEST_ARGS),
+ (RepositoryManifestLabels, 'post', MANIFEST_ARGS),
+ (ManageRepositoryManifestLabel, 'get', LABEL_ARGS),
+ (ManageRepositoryManifestLabel, 'delete', LABEL_ARGS),
+ (RepositoryNotificationList, 'get', None),
+ (RepositoryNotificationList, 'post', None),
+ (RepositoryNotification, 'get', NOTIFICATION_ARGS),
+ (RepositoryNotification, 'delete', NOTIFICATION_ARGS),
+ (RepositoryNotification, 'post', NOTIFICATION_ARGS),
+ (TestRepositoryNotification, 'post', NOTIFICATION_ARGS),
+ (RepositoryImageSecurity, 'get', IMAGE_ARGS),
+ (RepositoryManifestSecurity, 'get', MANIFEST_ARGS),
+ (RepositorySignatures, 'get', None),
+ (ListRepositoryTags, 'get', None),
+ (RepositoryTag, 'put', TAG_ARGS),
+ (RepositoryTag, 'delete', TAG_ARGS),
+ (RepositoryTagImages, 'get', TAG_ARGS),
+ (RestoreTag, 'post', TAG_ARGS),
+ (BuildTriggerList, 'get', None),
+ (BuildTrigger, 'get', TRIGGER_ARGS),
+ (BuildTrigger, 'delete', TRIGGER_ARGS),
+ (BuildTriggerSubdirs, 'post', TRIGGER_ARGS),
+ (BuildTriggerActivate, 'post', TRIGGER_ARGS),
+ (BuildTriggerAnalyze, 'post', TRIGGER_ARGS),
+ (ActivateBuildTrigger, 'post', TRIGGER_ARGS),
+ (TriggerBuildList, 'get', TRIGGER_ARGS),
+ (BuildTriggerFieldValues, 'post', FIELD_ARGS),
+ (BuildTriggerSources, 'post', TRIGGER_ARGS),
+ (BuildTriggerSourceNamespaces, 'get', TRIGGER_ARGS),
+])
+def test_disallowed_for_apps(resource, method, params, client):
+ namespace = 'devtable'
+ repository = 'someapprepo'
+
+ devtable = model.user.get_user('devtable')
+ model.repository.create_repository(namespace, repository, devtable, repo_kind='application')
+
+ params = params or {}
+ params['repository'] = '%s/%s' % (namespace, repository)
+
+ with client_with_identity('devtable', client) as cl:
+ conduct_api_call(cl, resource, method, params, None, 501)
+
diff --git a/endpoints/api/test/test_disallow_for_nonnormal.py b/endpoints/api/test/test_disallow_for_nonnormal.py
new file mode 100644
index 000000000..7d8ace845
--- /dev/null
+++ b/endpoints/api/test/test_disallow_for_nonnormal.py
@@ -0,0 +1,64 @@
+import pytest
+
+from data import model
+from data.database import RepositoryState
+from endpoints.api.build import RepositoryBuildList, RepositoryBuildResource
+from endpoints.api.manifest import RepositoryManifestLabels, ManageRepositoryManifestLabel
+from endpoints.api.tag import RepositoryTag, RestoreTag
+from endpoints.api.trigger import (BuildTrigger, BuildTriggerSubdirs,
+ BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger,
+ BuildTriggerFieldValues, BuildTriggerSources)
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
+BUILD_ARGS = {'build_uuid': '1234'}
+IMAGE_ARGS = {'imageid': '1234', 'image_id': 1234}
+MANIFEST_ARGS = {'manifestref': 'sha256:abcd1234'}
+LABEL_ARGS = {'manifestref': 'sha256:abcd1234', 'labelid': '1234'}
+NOTIFICATION_ARGS = {'uuid': '1234'}
+TAG_ARGS = {'tag': 'foobar'}
+TRIGGER_ARGS = {'trigger_uuid': '1234'}
+FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'}
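+
+# As in test_disallow_for_apps, these are placeholder parameters; the endpoints should return
+# a 503 for repositories in a non-NORMAL state (mirror or read-only) before resolving them.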
+
+
+@pytest.mark.parametrize('state', [
+ RepositoryState.MIRROR,
+ RepositoryState.READ_ONLY,
+])
+@pytest.mark.parametrize('resource, method, params', [
+ (RepositoryBuildList, 'post', None),
+ (RepositoryBuildResource, 'delete', BUILD_ARGS),
+
+ (RepositoryManifestLabels, 'post', MANIFEST_ARGS),
+ (ManageRepositoryManifestLabel, 'delete', LABEL_ARGS),
+
+ (RepositoryTag, 'put', TAG_ARGS),
+ (RepositoryTag, 'delete', TAG_ARGS),
+
+ (RestoreTag, 'post', TAG_ARGS),
+
+ (BuildTrigger, 'delete', TRIGGER_ARGS),
+ (BuildTriggerSubdirs, 'post', TRIGGER_ARGS),
+ (BuildTriggerActivate, 'post', TRIGGER_ARGS),
+ (BuildTriggerAnalyze, 'post', TRIGGER_ARGS),
+ (ActivateBuildTrigger, 'post', TRIGGER_ARGS),
+
+ (BuildTriggerFieldValues, 'post', FIELD_ARGS),
+ (BuildTriggerSources, 'post', TRIGGER_ARGS),
+
+])
+def test_disallowed_for_nonnormal(state, resource, method, params, client):
+ namespace = 'devtable'
+ repository = 'somenewstaterepo'
+
+ devtable = model.user.get_user('devtable')
+ repo = model.repository.create_repository(namespace, repository, devtable)
+ repo.state = state
+ repo.save()
+
+ params = params or {}
+ params['repository'] = '%s/%s' % (namespace, repository)
+
+ with client_with_identity('devtable', client) as cl:
+ conduct_api_call(cl, resource, method, params, None, 503)
diff --git a/endpoints/api/test/test_endtoend_auth.py b/endpoints/api/test/test_endtoend_auth.py
new file mode 100644
index 000000000..0bcf9c7e4
--- /dev/null
+++ b/endpoints/api/test/test_endtoend_auth.py
@@ -0,0 +1,63 @@
+import pytest
+
+from mock import patch
+
+from endpoints.api.search import EntitySearch, LinkExternalEntity
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+
+from test.test_ldap import mock_ldap
+from test.test_external_jwt_authn import fake_jwt
+from test.test_keystone_auth import fake_keystone
+
+from test.fixtures import *
+
+
+@pytest.fixture(params=[
+ mock_ldap,
+ fake_jwt,
+ fake_keystone,
+])
+def auth_engine(request):
+ return request.param
+
+
+@pytest.fixture(params=[
+ False,
+ True,
+])
+def requires_email(request):
+ return request.param
+
+
+def test_entity_search(auth_engine, requires_email, client):
+ with auth_engine(requires_email=requires_email) as auth:
+ with patch('endpoints.api.search.authentication', auth):
+ # Try an unknown prefix.
+ response = conduct_api_call(client, EntitySearch, 'GET', params=dict(prefix='unknown'))
+ results = response.json['results']
+ assert len(results) == 0
+
+ # Try a known prefix.
+ response = conduct_api_call(client, EntitySearch, 'GET', params=dict(prefix='cool'))
+ results = response.json['results']
+ entity = results[0]
+ assert entity['name'] == 'cool.user'
+ assert entity['kind'] == 'external'
+
+
+def test_link_external_entity(auth_engine, requires_email, client):
+ with auth_engine(requires_email=requires_email) as auth:
+ with patch('endpoints.api.search.authentication', auth):
+ with client_with_identity('devtable', client) as cl:
+ # Try an unknown user.
+ conduct_api_call(cl, LinkExternalEntity, 'POST', params=dict(username='unknownuser'),
+ expected_code=400)
+
+ # Try a known user.
+ response = conduct_api_call(cl, LinkExternalEntity, 'POST',
+ params=dict(username='cool.user'))
+
+ entity = response.json['entity']
+ assert entity['name'] == 'cool_user'
+ assert entity['kind'] == 'user'
diff --git a/endpoints/api/test/test_logs.py b/endpoints/api/test/test_logs.py
new file mode 100644
index 000000000..a73561bfa
--- /dev/null
+++ b/endpoints/api/test/test_logs.py
@@ -0,0 +1,34 @@
+import os
+import time
+
+import pytest
+
+from mock import patch
+
+from app import export_action_logs_queue
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.logs import ExportOrgLogs
+from endpoints.test.shared import client_with_identity
+
+from test.fixtures import *
+
+@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
+ reason="Queue code is very sensitive to times on MySQL, making this flaky")
+def test_export_logs(client):
+ with client_with_identity('devtable', client) as cl:
+ assert export_action_logs_queue.get() is None
+
+ timecode = time.time()
+ def get_time():
+ return timecode - 2
+
+ with patch('time.time', get_time):
+ # Call to export logs.
+ body = {
+ 'callback_url': 'http://some/url',
+ 'callback_email': 'a@b.com',
+ }
+
+ conduct_api_call(cl, ExportOrgLogs, 'POST', {'orgname': 'buynlarge'},
+ body, expected_code=200)
+
+ # Ensure the request was queued.
+ assert export_action_logs_queue.get() is not None
diff --git a/endpoints/api/test/test_manifest.py b/endpoints/api/test/test_manifest.py
new file mode 100644
index 000000000..164c26061
--- /dev/null
+++ b/endpoints/api/test/test_manifest.py
@@ -0,0 +1,24 @@
+from data.registry_model import registry_model
+from endpoints.api.manifest import RepositoryManifest
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+
+from test.fixtures import *
+
+def test_repository_manifest(client):
+ with client_with_identity('devtable', client) as cl:
+ repo_ref = registry_model.lookup_repository('devtable', 'simple')
+ tags = registry_model.list_all_active_repository_tags(repo_ref)
+ for tag in tags:
+ manifest_digest = tag.manifest_digest
+ if manifest_digest is None:
+ continue
+
+ params = {
+ 'repository': 'devtable/simple',
+ 'manifestref': manifest_digest,
+ }
+ result = conduct_api_call(cl, RepositoryManifest, 'GET', params, None, 200).json
+ assert result['digest'] == manifest_digest
+ assert result['manifest_data']
+ assert result['image']
diff --git a/endpoints/api/test/test_mirror.py b/endpoints/api/test/test_mirror.py
new file mode 100644
index 000000000..292c9b00e
--- /dev/null
+++ b/endpoints/api/test/test_mirror.py
@@ -0,0 +1,230 @@
+from datetime import datetime
+
+import pytest
+
+from data import model
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.mirror import RepoMirrorResource
+from endpoints.test.shared import client_with_identity
+
+from test.fixtures import *
+
+def _setup_mirror():
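+  # Enables mirroring on devtable/simple with a tag-glob root rule and the devtable+dtrobot
+  # robot account, returning the resulting mirror configuration row.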
+ repo = model.repository.get_repository('devtable', 'simple')
+ assert repo
+ robot = model.user.lookup_robot('devtable+dtrobot')
+ assert robot
+ rule = model.repo_mirror.create_rule(repo, ['latest', '3.3*', 'foo'])
+ assert rule
+ mirror_kwargs = {
+ 'is_enabled': True,
+ 'external_reference': 'quay.io/redhat/quay',
+ 'sync_interval': 5000,
+    'sync_start_date': datetime(2020, 1, 2, 6, 30, 0),
+ 'external_registry_username': 'fakeUsername',
+ 'external_registry_password': 'fakePassword',
+ 'external_registry_config': {
+ 'verify_tls': True,
+ 'proxy': {
+ 'http_proxy': 'http://insecure.proxy.corp',
+ 'https_proxy': 'https://secure.proxy.corp',
+ 'no_proxy': 'mylocalhost'
+ }
+ }
+ }
+ mirror = model.repo_mirror.enable_mirroring_for_repository(repo, root_rule=rule,
+ internal_robot=robot, **mirror_kwargs)
+ assert mirror
+ return mirror
+
+
+@pytest.mark.parametrize('existing_robot_permission, expected_permission', [
+ (None, 'write'),
+ ('read', 'write'),
+ ('write', 'write'),
+ ('admin', 'admin'),
+])
+def test_create_mirror_sets_permissions(existing_robot_permission, expected_permission, client):
+ mirror_bot, _ = model.user.create_robot('newmirrorbot', model.user.get_namespace_user('devtable'))
+
+ if existing_robot_permission:
+ model.permission.set_user_repo_permission(mirror_bot.username, 'devtable', 'simple',
+ existing_robot_permission)
+
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ request_body = {
+ 'external_reference': 'quay.io/foobar/barbaz',
+ 'sync_interval': 100,
+ 'sync_start_date': '2019-08-20T17:51:00Z',
+ 'root_rule': {
+ 'rule_type': 'TAG_GLOB_CSV',
+ 'rule_value': ['latest','foo', 'bar']
+ },
+ 'robot_username': 'devtable+newmirrorbot',
+ }
+ conduct_api_call(cl, RepoMirrorResource, 'POST', params, request_body, 201)
+
+ # Check the status of the robot.
+ permissions = model.permission.get_user_repository_permissions(mirror_bot, 'devtable', 'simple')
+ assert permissions[0].role.name == expected_permission
+
+ config = model.repo_mirror.get_mirror(model.repository.get_repository('devtable', 'simple'))
+ assert config.root_rule.rule_value == ['latest', 'foo', 'bar']
+
+
+def test_get_mirror_does_not_exist(client):
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 404)
+
+
+def test_get_repo_does_not_exist(client):
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/unicorn'}
+ resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 404)
+
+
+def test_get_mirror(client):
+ """ Verify that performing a `GET` request returns expected and accurate data. """
+ mirror = _setup_mirror()
+
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 200).json
+
+ assert resp['is_enabled'] == True
+ assert resp['external_reference'] == 'quay.io/redhat/quay'
+ assert resp['sync_interval'] == 5000
+ assert resp['sync_start_date'] == '2020-01-02T06:30:00Z'
+ assert resp['external_registry_username'] == 'fakeUsername'
+ assert 'external_registry_password' not in resp
+ assert 'external_registry_config' in resp
+ assert resp['external_registry_config']['verify_tls'] == True
+ assert 'proxy' in resp['external_registry_config']
+ assert resp['external_registry_config']['proxy']['http_proxy'] == 'http://insecure.proxy.corp'
+ assert resp['external_registry_config']['proxy']['https_proxy'] == 'https://secure.proxy.corp'
+ assert resp['external_registry_config']['proxy']['no_proxy'] == 'mylocalhost'
+
+
+@pytest.mark.parametrize('key, value, expected_status', [
+
+ ('is_enabled', True, 201),
+ ('is_enabled', False, 201),
+ ('is_enabled', None, 400),
+ ('is_enabled', 'foo', 400),
+
+ ('external_reference', 'example.com/foo/bar', 201),
+ ('external_reference', 'example.com/foo', 201),
+ ('external_reference', 'example.com', 201),
+
+ ('external_registry_username', 'newTestUsername', 201),
+ ('external_registry_username', None, 201),
+ ('external_registry_username', 123, 400),
+
+ ('external_registry_password', 'newTestPassword', 400),
+ ('external_registry_password', None, 400),
+ ('external_registry_password', 41, 400),
+
+ ('robot_username', 'devtable+dtrobot', 201),
+ ('robot_username', 'devtable+doesntExist', 400),
+
+ ('sync_start_date', '2020-01-01T00:00:00Z', 201),
+ ('sync_start_date', 'January 1 2020', 400),
+ ('sync_start_date', '2020-01-01T00:00:00.00Z', 400),
+ ('sync_start_date', 'Wed, 01 Jan 2020 00:00:00 -0000', 400),
+ ('sync_start_date', 'Wed, 02 Oct 2002 08:00:00 EST', 400),
+
+ ('sync_interval', 2000, 201),
+ ('sync_interval', -5, 400),
+
+ ('https_proxy', 'https://proxy.corp.example.com', 201),
+ ('https_proxy', None, 201),
+ ('https_proxy', 'proxy.example.com; rm -rf /', 201), # Safe; values only set in env, not eval'ed
+
+ ('http_proxy', 'http://proxy.corp.example.com', 201),
+ ('http_proxy', None, 201),
+ ('http_proxy', 'proxy.example.com; rm -rf /', 201), # Safe; values only set in env, not eval'ed
+
+ ('no_proxy', 'quay.io', 201),
+ ('no_proxy', None, 201),
+ ('no_proxy', 'quay.io; rm -rf /', 201), # Safe because proxy values are not eval'ed
+
+ ('verify_tls', True, 201),
+ ('verify_tls', False, 201),
+ ('verify_tls', None, 400),
+ ('verify_tls', 'abc', 400),
+
+])
+def test_change_config(key, value, expected_status, client):
+ """ Verify that changing each attribute works as expected. """
+ mirror = _setup_mirror()
+
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ if key in ('http_proxy', 'https_proxy', 'no_proxy'):
+ request_body = {'external_registry_config': {'proxy': {key: value}}}
+ elif key == 'verify_tls':
+ request_body = {'external_registry_config': {key: value}}
+ else:
+ request_body = {key: value}
+ conduct_api_call(cl, RepoMirrorResource, 'PUT', params, request_body, expected_status)
+
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 200)
+
+ if expected_status < 400:
+ if key == 'external_registry_password':
+ assert key not in resp.json
+ elif key == 'verify_tls':
+ assert resp.json['external_registry_config']['verify_tls'] == value
+ elif key in ('http_proxy', 'https_proxy', 'no_proxy'):
+ assert resp.json['external_registry_config']['proxy'][key] == value
+ else:
+ assert resp.json[key] == value
+ else:
+ if key == 'external_registry_password':
+ assert key not in resp.json
+ elif key == 'verify_tls':
+ assert resp.json['external_registry_config'][key] != value
+ elif key in ('http_proxy', 'https_proxy', 'no_proxy'):
+ assert resp.json['external_registry_config']['proxy'][key] != value
+ else:
+ assert resp.json[key] != value
+
+
+@pytest.mark.parametrize('request_body, expected_status', [
+
+ # Set a new password and username => Success
+ ({ 'external_registry_username': 'newUsername',
+ 'external_registry_password': 'newPassword'}, 201 ),
+
+ # Set password and username to None => Success
+ ({ 'external_registry_username': None,
+ 'external_registry_password': None}, 201 ),
+
+  # Set username to value but password None => Success
+ ({ 'external_registry_username': 'myUsername',
+ 'external_registry_password': None}, 201 ),
+
+ # Set only new Username => Success
+ ({'external_registry_username': 'myNewUsername'}, 201),
+ ({'external_registry_username': None}, 201),
+
+ # Set only new Password => Failure
+ ({'external_registry_password': 'myNewPassword'}, 400),
+ ({'external_registry_password': None}, 400),
+
+ # Set username and password to empty string => Success?
+ ({'external_registry_username': '',
+ 'external_registry_password': ''}, 201),
+
+])
+def test_change_credentials(request_body, expected_status, client):
+ """ Verify credentials can only be modified as a pair. """
+ mirror = _setup_mirror()
+
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ conduct_api_call(cl, RepoMirrorResource, 'PUT', params, request_body, expected_status)
diff --git a/endpoints/api/test/test_organization.py b/endpoints/api/test/test_organization.py
new file mode 100644
index 000000000..4341e1125
--- /dev/null
+++ b/endpoints/api/test/test_organization.py
@@ -0,0 +1,38 @@
+import pytest
+
+from data import model
+from endpoints.api import api
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.organization import (Organization,
+ OrganizationCollaboratorList)
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
+
+@pytest.mark.parametrize('expiration, expected_code', [
+ (0, 200),
+ (100, 400),
+ (100000000000000000000, 400),
+])
+def test_change_tag_expiration(expiration, expected_code, client):
+ with client_with_identity('devtable', client) as cl:
+ conduct_api_call(cl, Organization, 'PUT', {'orgname': 'buynlarge'},
+ body={'tag_expiration_s': expiration},
+ expected_code=expected_code)
+
+
+def test_get_organization_collaborators(client):
+ params = {'orgname': 'buynlarge'}
+
+ with client_with_identity('devtable', client) as cl:
+ resp = conduct_api_call(cl, OrganizationCollaboratorList, 'GET', params)
+
+ collaborator_names = [c['name'] for c in resp.json['collaborators']]
+ assert 'outsideorg' in collaborator_names
+ assert 'devtable' not in collaborator_names
+ assert 'reader' not in collaborator_names
+
+ for collaborator in resp.json['collaborators']:
+ if collaborator['name'] == 'outsideorg':
+ assert 'orgrepo' in collaborator['repositories']
+ assert 'anotherorgrepo' not in collaborator['repositories']
diff --git a/endpoints/api/test/test_permission.py b/endpoints/api/test/test_permission.py
new file mode 100644
index 000000000..1182f1071
--- /dev/null
+++ b/endpoints/api/test/test_permission.py
@@ -0,0 +1,23 @@
+import pytest
+
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.permission import RepositoryUserPermission
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
+@pytest.mark.parametrize('repository, username, expected_code', [
+ pytest.param('devtable/simple', 'public', 200, id='valid user under user'),
+ pytest.param('devtable/simple', 'devtable+dtrobot', 200, id='valid robot under user'),
+ pytest.param('devtable/simple', 'buynlarge+coolrobot', 400, id='invalid robot under user'),
+ pytest.param('buynlarge/orgrepo', 'devtable', 200, id='valid user under org'),
+ pytest.param('buynlarge/orgrepo', 'devtable+dtrobot', 400, id='invalid robot under org'),
+ pytest.param('buynlarge/orgrepo', 'buynlarge+coolrobot', 200, id='valid robot under org'),
+])
+def test_robot_permission(repository, username, expected_code, client):
+ with client_with_identity('devtable', client) as cl:
+ conduct_api_call(cl, RepositoryUserPermission, 'PUT',
+ {'repository': repository, 'username': username},
+ body={
+ 'role': 'read',
+ },
+ expected_code=expected_code)
diff --git a/endpoints/api/test/test_repoemail_models_pre_oci.py b/endpoints/api/test/test_repoemail_models_pre_oci.py
new file mode 100644
index 000000000..7c8de8226
--- /dev/null
+++ b/endpoints/api/test/test_repoemail_models_pre_oci.py
@@ -0,0 +1,89 @@
+import pytest
+from mock import Mock
+
+import util
+from data import model
+from endpoints.api.repoemail_models_interface import RepositoryAuthorizedEmail
+from endpoints.api.repoemail_models_pre_oci import pre_oci_model
+
+
+@pytest.fixture
+def get_monkeypatch(monkeypatch):
+ return monkeypatch
+
+
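+# Simple stand-ins used with monkeypatch below: return_none simulates a missing record, while
+# get_return_mock builds a replacement that always returns the supplied mock.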
+def return_none(name, repo, email):
+ return None
+
+
+def get_return_mock(mock):
+ def return_mock(name, repo, email):
+ return mock
+
+ return return_mock
+
+
+def test_get_email_authorized_for_repo(get_monkeypatch):
+ mock = Mock()
+
+ get_monkeypatch.setattr(model.repository, 'get_email_authorized_for_repo', mock)
+
+ pre_oci_model.get_email_authorized_for_repo('namespace_name', 'repository_name', 'email')
+
+ mock.assert_called_once_with('namespace_name', 'repository_name', 'email')
+
+
+def test_get_email_authorized_for_repo_return_none(get_monkeypatch):
+ get_monkeypatch.setattr(model.repository, 'get_email_authorized_for_repo', return_none)
+
+ repo = pre_oci_model.get_email_authorized_for_repo('namespace_name', 'repository_name', 'email')
+
+ assert repo is None
+
+
+def test_get_email_authorized_for_repo_return_repo(get_monkeypatch):
+ mock = Mock(confirmed=True, code='code')
+ get_monkeypatch.setattr(model.repository, 'get_email_authorized_for_repo', get_return_mock(mock))
+
+ actual = pre_oci_model.get_email_authorized_for_repo('namespace_name', 'repository_name',
+ 'email')
+
+ assert actual == RepositoryAuthorizedEmail('email', 'repository_name', 'namespace_name', True,
+ 'code')
+
+
+def test_create_email_authorization_for_repo(get_monkeypatch):
+ mock = Mock()
+ get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo', mock)
+
+ pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name', 'email')
+
+ mock.assert_called_once_with('namespace_name', 'repository_name', 'email')
+
+
+def test_create_email_authorization_for_repo_return_none(get_monkeypatch):
+ get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo', return_none)
+
+ assert pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name',
+ 'email') is None
+
+
+def test_create_email_authorization_for_repo_return_mock(get_monkeypatch):
+ mock = Mock()
+ get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo',
+ get_return_mock(mock))
+
+ assert pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name',
+ 'email') is not None
+
+
+def test_create_email_authorization_for_repo_return_value(get_monkeypatch):
+ mock = Mock(confirmed=False, code='code')
+
+ get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo',
+ get_return_mock(mock))
+
+ actual = pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name',
+ 'email')
+ assert actual == RepositoryAuthorizedEmail('email', 'repository_name', 'namespace_name', False,
+ 'code')
diff --git a/endpoints/api/test/test_repository.py b/endpoints/api/test/test_repository.py
new file mode 100644
index 000000000..4edca0e35
--- /dev/null
+++ b/endpoints/api/test/test_repository.py
@@ -0,0 +1,166 @@
+import pytest
+
+from mock import patch, ANY, MagicMock
+
+from data import model, database
+from data.appr_model import release, channel, blob
+from endpoints.appr.models_cnr import model as appr_model
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.repository import RepositoryTrust, Repository, RepositoryList
+from endpoints.test.shared import client_with_identity
+from features import FeatureNameValue
+
+from test.fixtures import *
+
+
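+# The TUF metadata API and the repository lookup are patched out so only the endpoint's request
+# validation and status handling are exercised.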
+@pytest.mark.parametrize('trust_enabled,repo_found,expected_status', [
+ (True, True, 200),
+ (False, True, 200),
+ (False, False, 404),
+ ('invalid_req', False, 400),
+])
+def test_post_changetrust(trust_enabled, repo_found, expected_status, client):
+ with patch('endpoints.api.repository.tuf_metadata_api') as mock_tuf:
+ with patch(
+ 'endpoints.api.repository_models_pre_oci.model.repository.get_repository') as mock_model:
+ mock_model.return_value = MagicMock() if repo_found else None
+ mock_tuf.get_default_tags_with_expiration.return_value = ['tags', 'expiration']
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/repo'}
+ request_body = {'trust_enabled': trust_enabled}
+ conduct_api_call(cl, RepositoryTrust, 'POST', params, request_body, expected_status)
+
+
+def test_signing_disabled(client):
+ with patch('features.SIGNING', FeatureNameValue('SIGNING', False)):
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ response = conduct_api_call(cl, Repository, 'GET', params).json
+ assert not response['trust_enabled']
+
+
+def test_list_starred_repos(client):
+ with client_with_identity('devtable', client) as cl:
+ params = {
+ 'starred': 'true',
+ }
+
+ response = conduct_api_call(cl, RepositoryList, 'GET', params).json
+ repos = {r['namespace'] + '/' + r['name'] for r in response['repositories']}
+ assert 'devtable/simple' in repos
+ assert 'public/publicrepo' not in repos
+
+ # Add a star on publicrepo.
+ publicrepo = model.repository.get_repository('public', 'publicrepo')
+ model.repository.star_repository(model.user.get_user('devtable'), publicrepo)
+
+ # Ensure publicrepo shows up.
+ response = conduct_api_call(cl, RepositoryList, 'GET', params).json
+ repos = {r['namespace'] + '/' + r['name'] for r in response['repositories']}
+ assert 'devtable/simple' in repos
+ assert 'public/publicrepo' in repos
+
+ # Make publicrepo private and ensure it disappears.
+ model.repository.set_repository_visibility(publicrepo, 'private')
+
+ response = conduct_api_call(cl, RepositoryList, 'GET', params).json
+ repos = {r['namespace'] + '/' + r['name'] for r in response['repositories']}
+ assert 'devtable/simple' in repos
+ assert 'public/publicrepo' not in repos
+
+
+def test_list_repositories_last_modified(client):
+ with client_with_identity('devtable', client) as cl:
+ params = {
+ 'namespace': 'devtable',
+ 'last_modified': 'true',
+ }
+
+ response = conduct_api_call(cl, RepositoryList, 'GET', params).json
+
+ for repo in response['repositories']:
+ if repo['name'] != 'building':
+ assert repo['last_modified'] is not None
+
+
+@pytest.mark.parametrize('repo_name, expected_status', [
+ pytest.param('x' * 255, 201, id='Maximum allowed length'),
+ pytest.param('x' * 256, 400, id='Over allowed length'),
+ pytest.param('a|b', 400, id='Invalid name'),
+])
+def test_create_repository(repo_name, expected_status, client):
+ with client_with_identity('devtable', client) as cl:
+ body = {
+ 'namespace': 'devtable',
+ 'repository': repo_name,
+ 'visibility': 'public',
+ 'description': 'foo',
+ }
+
+    result = conduct_api_call(cl, RepositoryList, 'POST', None, body,
+                              expected_code=expected_status).json
+ if expected_status == 201:
+ assert result['name'] == repo_name
+ assert model.repository.get_repository('devtable', repo_name).name == repo_name
+
+
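+# When has_tag_manifest is False, the legacy TagManifest rows are deleted so the GET path is
+# exercised against a repository without any tag manifest records.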
+@pytest.mark.parametrize('has_tag_manifest', [
+ True,
+ False,
+])
+def test_get_repo(has_tag_manifest, client, initialized_db):
+ with client_with_identity('devtable', client) as cl:
+ if not has_tag_manifest:
+ database.TagManifestLabelMap.delete().execute()
+ database.TagManifestToManifest.delete().execute()
+ database.TagManifestLabel.delete().execute()
+ database.TagManifest.delete().execute()
+
+ params = {'repository': 'devtable/simple'}
+ response = conduct_api_call(cl, Repository, 'GET', params).json
+ assert response['kind'] == 'image'
+
+
+def test_get_app_repo(client, initialized_db):
+ with client_with_identity('devtable', client) as cl:
+ devtable = model.user.get_user('devtable')
+ repo = model.repository.create_repository('devtable', 'someappr', devtable,
+ repo_kind='application')
+
+ models_ref = appr_model.models_ref
+ blob.get_or_create_blob('sha256:somedigest', 0, 'application/vnd.cnr.blob.v0.tar+gzip',
+ ['local_us'], models_ref)
+
+ release.create_app_release(repo, 'test',
+ dict(mediaType='application/vnd.cnr.package-manifest.helm.v0.json'),
+ 'sha256:somedigest', models_ref, False)
+
+ channel.create_or_update_channel(repo, 'somechannel', 'test', models_ref)
+
+ params = {'repository': 'devtable/someappr'}
+ response = conduct_api_call(cl, Repository, 'GET', params).json
+ assert response['kind'] == 'application'
+ assert response['channels']
+ assert response['releases']
+
+
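+# READ_ONLY and MIRROR repositories should report can_write as False through the API.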
+@pytest.mark.parametrize('state, can_write', [
+ (database.RepositoryState.NORMAL, True),
+ (database.RepositoryState.READ_ONLY, False),
+ (database.RepositoryState.MIRROR, False),
+])
+def test_get_repo_state_can_write(state, can_write, client, initialized_db):
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ response = conduct_api_call(cl, Repository, 'GET', params).json
+ assert response['can_write']
+
+ repo = model.repository.get_repository('devtable', 'simple')
+ repo.state = state
+ repo.save()
+
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/simple'}
+ response = conduct_api_call(cl, Repository, 'GET', params).json
+ assert response['can_write'] == can_write
diff --git a/endpoints/api/test/test_repositorynotification.py b/endpoints/api/test/test_repositorynotification.py
new file mode 100644
index 000000000..06d65e2f0
--- /dev/null
+++ b/endpoints/api/test/test_repositorynotification.py
@@ -0,0 +1,90 @@
+import pytest
+
+from mock import Mock, MagicMock
+
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.repositorynotification import RepositoryNotificationList, RepositoryNotification, TestRepositoryNotification
+from endpoints.test.shared import client_with_identity
+import endpoints.api.repositorynotification_models_interface as iface
+from test.fixtures import *
+
+@pytest.fixture()
+def authd_client(client):
+ with client_with_identity('devtable', client) as cl:
+ yield cl
+
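+# Builds a MagicMock standing in for the notification model call: it returns a populated
+# RepositoryNotification when the uuid is 'exists' and None otherwise.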
+def mock_get_notification(uuid):
+ mock_notification = MagicMock(iface.RepositoryNotification)
+ if uuid == 'exists':
+ mock_notification.return_value = iface.RepositoryNotification(
+ 'exists',
+ 'title',
+ 'event_name',
+ 'method_name',
+ 'config_json',
+ 'event_config_json',
+ 2,
+ )
+ else:
+ mock_notification.return_value = None
+ return mock_notification
+
+@pytest.mark.parametrize('namespace,repository,body,expected_code',[
+ ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_push',
+ method='webhook', eventConfig={}, title='test'), 201),
+ ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_mirror_sync_started',
+ method='webhook', eventConfig={}, title='test'), 201),
+ ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_mirror_sync_success',
+ method='webhook', eventConfig={}, title='test'), 201),
+ ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_mirror_sync_failed',
+ method='webhook', eventConfig={}, title='test'), 201)
+])
+def test_create_repo_notification(namespace, repository, body, expected_code, authd_client):
+ params = {'repository': namespace + '/' + repository}
+ conduct_api_call(authd_client, RepositoryNotificationList, 'POST', params, body, expected_code=expected_code)
+
+@pytest.mark.parametrize('namespace,repository,expected_code',[
+ ('devtable', 'simple', 200)
+])
+def test_list_repo_notifications(namespace, repository, expected_code, authd_client):
+ params = {'repository': namespace + '/' + repository}
+ resp = conduct_api_call(authd_client, RepositoryNotificationList, 'GET', params, expected_code=expected_code).json
+ assert len(resp['notifications']) > 0
+
+@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
+ ('devtable', 'simple', 'exists', 200),
+ ('devtable', 'simple', 'not found', 404),
+])
+def test_get_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
+ monkeypatch.setattr('endpoints.api.repositorynotification.model.get_repo_notification', mock_get_notification(uuid))
+ params = {'repository': namespace + '/' + repository, 'uuid': uuid}
+ conduct_api_call(authd_client, RepositoryNotification, 'GET', params, expected_code=expected_code)
+
+@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
+ ('devtable', 'simple', 'exists', 204),
+ ('devtable', 'simple', 'not found', 400),
+])
+def test_delete_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
+ monkeypatch.setattr('endpoints.api.repositorynotification.model.delete_repo_notification', mock_get_notification(uuid))
+ params = {'repository': namespace + '/' + repository, 'uuid': uuid}
+ conduct_api_call(authd_client, RepositoryNotification, 'DELETE', params, expected_code=expected_code)
+
+
+@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
+ ('devtable', 'simple', 'exists', 204),
+ ('devtable', 'simple', 'not found', 400),
+])
+def test_reset_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
+ monkeypatch.setattr('endpoints.api.repositorynotification.model.reset_notification_number_of_failures', mock_get_notification(uuid))
+ params = {'repository': namespace + '/' + repository, 'uuid': uuid}
+ conduct_api_call(authd_client, RepositoryNotification, 'POST', params, expected_code=expected_code)
+
+
+@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
+ ('devtable', 'simple', 'exists', 200),
+ ('devtable', 'simple', 'not found', 400),
+])
+def test_test_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
+ monkeypatch.setattr('endpoints.api.repositorynotification.model.queue_test_notification', mock_get_notification(uuid))
+ params = {'repository': namespace + '/' + repository, 'uuid': uuid}
+ conduct_api_call(authd_client, TestRepositoryNotification, 'POST', params, expected_code=expected_code)
diff --git a/endpoints/api/test/test_robot.py b/endpoints/api/test/test_robot.py
new file mode 100644
index 000000000..7c5349549
--- /dev/null
+++ b/endpoints/api/test/test_robot.py
@@ -0,0 +1,104 @@
+import pytest
+import json
+
+from data import model
+from endpoints.api import api
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.robot import UserRobot, OrgRobot, UserRobotList, OrgRobotList
+from endpoints.test.shared import client_with_identity
+from util.names import parse_robot_username
+
+from test.test_ldap import mock_ldap
+
+from test.fixtures import *
+
+@pytest.mark.parametrize('endpoint', [
+ UserRobot,
+ OrgRobot,
+])
+@pytest.mark.parametrize('body', [
+ {},
+ {'description': 'this is a description'},
+ {'unstructured_metadata': {'foo': 'bar'}},
+ {'description': 'this is a description', 'unstructured_metadata': {'foo': 'bar'}},
+])
+def test_create_robot_with_metadata(endpoint, body, client):
+ with client_with_identity('devtable', client) as cl:
+ # Create the robot with the specified body.
+ conduct_api_call(cl, endpoint, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'somebot'},
+ body, expected_code=201)
+
+ # Ensure the create succeeded.
+ resp = conduct_api_call(cl, endpoint, 'GET', {
+ 'orgname': 'buynlarge',
+ 'robot_shortname': 'somebot',
+ })
+
+ body = body or {}
+ assert resp.json['description'] == (body.get('description') or '')
+ assert resp.json['unstructured_metadata'] == (body.get('unstructured_metadata') or {})
+
+
+@pytest.mark.parametrize('endpoint, params', [
+ (UserRobot, {'robot_shortname': 'dtrobot'}),
+ (OrgRobot, {'orgname': 'buynlarge', 'robot_shortname': 'coolrobot'}),
+])
+def test_retrieve_robot(endpoint, params, app, client):
+ with client_with_identity('devtable', client) as cl:
+ result = conduct_api_call(cl, endpoint, 'GET', params, None)
+ assert result.json['token'] is not None
+
+
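+# List robots across the token/limit parameter grid; when tokens are requested, each robot's
+# token is cross-checked against the corresponding single-robot endpoint.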
+@pytest.mark.parametrize('endpoint, params, bot_endpoint', [
+ (UserRobotList, {}, UserRobot),
+ (OrgRobotList, {'orgname': 'buynlarge'}, OrgRobot),
+])
+@pytest.mark.parametrize('include_token', [
+ True,
+ False,
+])
+@pytest.mark.parametrize('limit', [
+ None,
+ 1,
+ 5,
+])
+def test_retrieve_robots(endpoint, params, bot_endpoint, include_token, limit, app, client):
+ params['token'] = 'true' if include_token else 'false'
+
+ if limit is not None:
+ params['limit'] = limit
+
+ with client_with_identity('devtable', client) as cl:
+ result = conduct_api_call(cl, endpoint, 'GET', params, None)
+
+ if limit is not None:
+ assert len(result.json['robots']) <= limit
+
+ for robot in result.json['robots']:
+ assert (robot.get('token') is not None) == include_token
+ if include_token:
+ bot_params = dict(params)
+ bot_params['robot_shortname'] = parse_robot_username(robot['name'])[1]
+ result = conduct_api_call(cl, bot_endpoint, 'GET', bot_params, None)
+ assert robot.get('token') == result.json['token']
+
+
+@pytest.mark.parametrize('username, is_admin', [
+ ('devtable', True),
+ ('reader', False),
+])
+@pytest.mark.parametrize('with_permissions', [
+ True,
+ False,
+])
+def test_retrieve_robots_token_permission(username, is_admin, with_permissions, app, client):
+ with client_with_identity(username, client) as cl:
+ params = {'orgname': 'buynlarge', 'token': 'true'}
+ if with_permissions:
+ params['permissions'] = 'true'
+
+ result = conduct_api_call(cl, OrgRobotList, 'GET', params, None)
+ assert result.json['robots']
+ for robot in result.json['robots']:
+ assert (robot.get('token') is not None) == is_admin
+ assert (robot.get('repositories') is not None) == (is_admin and with_permissions)
diff --git a/endpoints/api/test/test_search.py b/endpoints/api/test/test_search.py
new file mode 100644
index 000000000..5e034934c
--- /dev/null
+++ b/endpoints/api/test/test_search.py
@@ -0,0 +1,41 @@
+import pytest
+
+from playhouse.test_utils import assert_query_count
+
+from data import model, database
+from endpoints.api.search import ConductRepositorySearch, ConductSearch
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
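+# assert_query_count pins the number of database queries issued per search, guarding against
+# query-count regressions.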
+@pytest.mark.parametrize('query', [
+ (''),
+ ('simple'),
+ ('public'),
+ ('repository'),
+])
+def test_repository_search(query, client):
+ # Prime the caches.
+ database.Repository.kind.get_id('image')
+ database.Repository.kind.get_name(1)
+
+ with client_with_identity('devtable', client) as cl:
+ params = {'query': query}
+ with assert_query_count(7):
+ result = conduct_api_call(cl, ConductRepositorySearch, 'GET', params, None, 200).json
+ assert result['start_index'] == 0
+ assert result['page'] == 1
+ assert len(result['results'])
+
+
+@pytest.mark.parametrize('query', [
+ ('simple'),
+ ('public'),
+ ('repository'),
+])
+def test_search_query_count(query, client):
+ with client_with_identity('devtable', client) as cl:
+ params = {'query': query}
+ with assert_query_count(10):
+ result = conduct_api_call(cl, ConductSearch, 'GET', params, None, 200).json
+ assert len(result['results'])
diff --git a/endpoints/api/test/test_secscan.py b/endpoints/api/test/test_secscan.py
new file mode 100644
index 000000000..40afa6ac3
--- /dev/null
+++ b/endpoints/api/test/test_secscan.py
@@ -0,0 +1,30 @@
+import base64
+
+import pytest
+
+from data.registry_model import registry_model
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.secscan import RepositoryImageSecurity, RepositoryManifestSecurity
+
+from test.fixtures import *
+
+@pytest.mark.parametrize('endpoint', [
+ RepositoryImageSecurity,
+ RepositoryManifestSecurity,
+])
+def test_get_security_info_with_pull_secret(endpoint, client):
+ repository_ref = registry_model.lookup_repository('devtable', 'simple')
+ tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
+ manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
+
+ params = {
+ 'repository': 'devtable/simple',
+ 'imageid': tag.legacy_image.docker_image_id,
+ 'manifestref': manifest.digest,
+ }
+
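+  # Authenticate with HTTP Basic credentials rather than a session identity.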
+ headers = {
+ 'Authorization': 'Basic %s' % base64.b64encode('devtable:password'),
+ }
+
+ conduct_api_call(client, endpoint, 'GET', params, None, headers=headers, expected_code=200)
diff --git a/endpoints/api/test/test_security.py b/endpoints/api/test/test_security.py
new file mode 100644
index 000000000..9f93413a7
--- /dev/null
+++ b/endpoints/api/test/test_security.py
@@ -0,0 +1,1485 @@
+from mock import patch
+
+import pytest
+from flask_principal import AnonymousIdentity
+
+from endpoints.api import api
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity, toggle_feature
+
+from endpoints.api.appspecifictokens import *
+from endpoints.api.billing import *
+from endpoints.api.build import *
+from endpoints.api.discovery import *
+from endpoints.api.globalmessages import *
+from endpoints.api.image import *
+from endpoints.api.logs import *
+from endpoints.api.manifest import *
+from endpoints.api.organization import *
+from endpoints.api.permission import *
+from endpoints.api.prototype import *
+from endpoints.api.repoemail import *
+from endpoints.api.repository import *
+from endpoints.api.repositorynotification import *
+from endpoints.api.repotoken import *
+from endpoints.api.robot import *
+from endpoints.api.search import *
+from endpoints.api.secscan import *
+from endpoints.api.signing import *
+from endpoints.api.subscribe import *
+from endpoints.api.suconfig import *
+from endpoints.api.superuser import *
+from endpoints.api.tag import *
+from endpoints.api.team import *
+from endpoints.api.trigger import *
+from endpoints.api.user import *
+from endpoints.api.mirror import *
+
+from endpoints.api.repository import Repository
+
+from test.fixtures import *
+
+ORG_PARAMS = {'orgname': 'buynlarge'}
+TEAM_PARAMS = {'orgname': 'buynlarge', 'teamname': 'owners'}
+BUILD_PARAMS = {'build_uuid': 'test-1234'}
+REPO_PARAMS = {'repository': 'devtable/someapp'}
+SEARCH_PARAMS = {'query': ''}
+NOTIFICATION_PARAMS = {'namespace': 'devtable', 'repository': 'devtable/simple', 'uuid': 'some uuid'}
+TOKEN_PARAMS = {'token_uuid': 'someuuid'}
+TRIGGER_PARAMS = {'repository': 'devtable/simple', 'trigger_uuid': 'someuuid'}
+MANIFEST_PARAMS = {'repository': 'devtable/simple', 'manifestref': 'sha256:deadbeef'}
+EXPORTLOGS_PARAMS = {'callback_url': 'http://foo'}
+
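+# Each entry is (resource class, HTTP method, view params, request body, acting identity
+# (None means anonymous), expected HTTP status).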
+SECURITY_TESTS = [
+ (AppTokens, 'GET', {}, {}, None, 401),
+ (AppTokens, 'GET', {}, {}, 'freshuser', 200),
+ (AppTokens, 'GET', {}, {}, 'reader', 200),
+ (AppTokens, 'GET', {}, {}, 'devtable', 200),
+
+ (AppTokens, 'POST', {}, {}, None, 401),
+ (AppTokens, 'POST', {}, {}, 'freshuser', 400),
+ (AppTokens, 'POST', {}, {}, 'reader', 400),
+ (AppTokens, 'POST', {}, {}, 'devtable', 400),
+
+ (AppToken, 'GET', TOKEN_PARAMS, {}, None, 401),
+ (AppToken, 'GET', TOKEN_PARAMS, {}, 'freshuser', 404),
+ (AppToken, 'GET', TOKEN_PARAMS, {}, 'reader', 404),
+ (AppToken, 'GET', TOKEN_PARAMS, {}, 'devtable', 404),
+
+ (AppToken, 'DELETE', TOKEN_PARAMS, {}, None, 401),
+ (AppToken, 'DELETE', TOKEN_PARAMS, {}, 'freshuser', 404),
+ (AppToken, 'DELETE', TOKEN_PARAMS, {}, 'reader', 404),
+ (AppToken, 'DELETE', TOKEN_PARAMS, {}, 'devtable', 404),
+
+ (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, None, 401),
+ (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, 'freshuser', 403),
+ (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, 'reader', 403),
+ (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, 'devtable', 404),
+
+ (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, None, 401),
+ (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, 'freshuser', 403),
+ (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, 'reader', 403),
+ (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, 'devtable', 200),
+
+ (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, None, 401),
+ (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, 'freshuser', 403),
+ (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, 'reader', 403),
+ (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, 'devtable', 400),
+
+ (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, None, 401),
+ (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, 'freshuser', 403),
+ (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, 'reader', 403),
+ (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, 'devtable', 200),
+
+ (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, None, 200),
+ (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, 'freshuser', 200),
+ (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, 'reader', 200),
+ (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, 'devtable', 200),
+
+ (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, None, 401),
+ (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, 'freshuser', 403),
+ (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, 'reader', 403),
+ (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, 'devtable', 400),
+
+ (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, None, 401),
+ (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, 'freshuser', 403),
+ (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, 'reader', 403),
+ (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, 'devtable', 400),
+
+ (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, None, 401),
+ (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, 'freshuser', 403),
+ (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, 'reader', 403),
+ (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, 'devtable', 404),
+
+ (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'freshuser', 403),
+ (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'reader', 403),
+ (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'devtable', 404),
+
+ (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, None, 401),
+ (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'freshuser', 403),
+ (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'reader', 403),
+ (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'devtable', 400),
+
+ (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, None, 401),
+ (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'freshuser', 403),
+ (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'reader', 403),
+ (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'devtable', 404),
+
+ (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, None, 401),
+ (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, 'freshuser', 403),
+ (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, 'reader', 403),
+ (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, 'devtable', 404),
+
+ (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, None, 401),
+ (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, 'freshuser', 403),
+ (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, 'reader', 403),
+ (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, 'devtable', 404),
+
+ (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, None, 401),
+ (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, 'freshuser', 403),
+ (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, 'reader', 403),
+ (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, 'devtable', 400),
+
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'devtable/shared'}, None, 'devtable', 404),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9','repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'devtable','repository': 'devtable/shared'}, None, 'devtable', 200),
+ (RepositoryUserTransitivePermission, 'GET', {'username': 'devtable','repository': 'devtable/nope'}, None, 'devtable', 404),
+
+ (StarredRepositoryList, 'GET', None, None, None, 401),
+ (StarredRepositoryList, 'GET', None, None, 'devtable', 200),
+ (StarredRepositoryList, 'GET', None, None, 'freshuser', 200),
+ (StarredRepositoryList, 'GET', None, None, 'reader', 200),
+ (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, None, 401),
+ (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, 'devtable', 201),
+ (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, 'freshuser', 201),
+ (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, 'reader', 201),
+
+ (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, None, 401),
+ (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'devtable', 204),
+ (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'freshuser', 204),
+ (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'reader', 204),
+
+ (UserNotification, 'GET', {'uuid': 'someuuid'}, None, None, 401),
+ (UserNotification, 'GET', {'uuid': 'someuuid'}, None, 'devtable', 404),
+ (UserNotification, 'GET', {'uuid': 'someuuid'}, None, 'freshuser', 404),
+ (UserNotification, 'GET', {'uuid': 'someuuid'}, None, 'reader', 404),
+ (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, None, 401),
+ (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, 'devtable', 404),
+ (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, 'freshuser', 404),
+ (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, 'reader', 404),
+
+ (UserInvoiceList, 'GET', None, None, None, 401),
+ (UserInvoiceList, 'GET', None, None, 'devtable', 200),
+ (UserInvoiceList, 'GET', None, None, 'freshuser', 404),
+ (UserInvoiceList, 'GET', None, None, 'reader', 404),
+
+ (PrivateRepositories, 'GET', None, None, None, 401),
+ (PrivateRepositories, 'GET', None, None, 'devtable', 200),
+ (PrivateRepositories, 'GET', None, None, 'freshuser', 200),
+ (PrivateRepositories, 'GET', None, None, 'reader', 200),
+
+ (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, None, 401),
+ (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, 'devtable', 400),
+ (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, 'freshuser', 400),
+ (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, 'reader', 400),
+
+ (UserRobotList, 'GET', None, None, None, 401),
+ (UserRobotList, 'GET', None, None, 'devtable', 200),
+ (UserRobotList, 'GET', None, None, 'freshuser', 200),
+ (UserRobotList, 'GET', None, None, 'reader', 200),
+
+ (UserCard, 'GET', None, None, None, 401),
+ (UserCard, 'GET', None, None, 'devtable', 200),
+ (UserCard, 'GET', None, None, 'freshuser', 200),
+ (UserCard, 'GET', None, None, 'reader', 200),
+ (UserCard, 'POST', None, {u'token': 'ORH4'}, None, 401),
+
+ (UserPlan, 'GET', None, None, None, 401),
+ (UserPlan, 'GET', None, None, 'devtable', 200),
+ (UserPlan, 'GET', None, None, 'freshuser', 200),
+ (UserPlan, 'GET', None, None, 'reader', 200),
+ (UserPlan, 'PUT', None, {u'plan': '1QIK'}, None, 401),
+
+ (UserLogs, 'GET', None, None, None, 401),
+ (UserLogs, 'GET', None, None, 'devtable', 200),
+ (UserLogs, 'GET', None, None, 'freshuser', 200),
+ (UserLogs, 'GET', None, None, 'reader', 200),
+
+ (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, None, 401),
+ (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, 'devtable', 400),
+ (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, 'freshuser', 400),
+ (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, 'reader', 400),
+
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
+
+ (RepositoryList, 'GET', None, None, None, 400),
+ (RepositoryList, 'GET', None, None, 'devtable', 400),
+ (RepositoryList, 'GET', None, None, 'freshuser', 400),
+ (RepositoryList, 'GET', None, None, 'reader', 400),
+ (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, None, 400),
+ (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, 'devtable', 201),
+ (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, 'freshuser', 201),
+ (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, 'reader', 201),
+
+ (DiscoveryResource, 'GET', None, None, None, 200),
+ (DiscoveryResource, 'GET', None, None, 'devtable', 200),
+ (DiscoveryResource, 'GET', None, None, 'freshuser', 200),
+ (DiscoveryResource, 'GET', None, None, 'reader', 200),
+
+ (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, None, 200),
+ (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, 'devtable', 200),
+ (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, 'freshuser', 200),
+ (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, 'reader', 200),
+
+ (Recovery, 'POST', None, {u'email': '826S'}, None, 200),
+ (Recovery, 'POST', None, {u'email': '826S'}, 'devtable', 200),
+ (Recovery, 'POST', None, {u'email': '826S'}, 'freshuser', 200),
+ (Recovery, 'POST', None, {u'email': '826S'}, 'reader', 200),
+
+ (Signout, 'POST', None, None, None, 200),
+ (Signout, 'POST', None, None, 'devtable', 200),
+ (Signout, 'POST', None, None, 'freshuser', 200),
+ (Signout, 'POST', None, None, 'reader', 200),
+
+ (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, None, 403),
+ (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, 'devtable', 403),
+ (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, 'freshuser', 403),
+ (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, 'reader', 403),
+
+ (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, None, 400),
+ (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, 'devtable', 400),
+ (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, 'freshuser', 400),
+ (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, 'reader', 400),
+
+ (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, None, 401),
+ (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, 'devtable', 200),
+ (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, 'freshuser', 200),
+ (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, 'reader', 200),
+
+ (VerifyUser, 'POST', None, {u'password': 'LQ0N'}, None, 401),
+ (VerifyUser, 'POST', None, {u'password': 'password'}, 'devtable', 200),
+ (VerifyUser, 'POST', None, {u'password': 'LQ0N'}, 'freshuser', 403),
+ (VerifyUser, 'POST', None, {u'password': 'LQ0N'}, 'reader', 403),
+
+ (ClientKey, 'POST', None, {u'password': 'LQ0N'}, None, 401),
+ (ClientKey, 'POST', None, {u'password': 'password'}, 'devtable', 200),
+ (ClientKey, 'POST', None, {u'password': 'LQ0N'}, 'freshuser', 400),
+ (ClientKey, 'POST', None, {u'password': 'password'}, 'reader', 200),
+
+ (ListPlans, 'GET', None, None, None, 200),
+ (ListPlans, 'GET', None, None, 'devtable', 200),
+ (ListPlans, 'GET', None, None, 'freshuser', 200),
+ (ListPlans, 'GET', None, None, 'reader', 200),
+
+ (User, 'GET', None, None, None, 401),
+ (User, 'GET', None, None, 'devtable', 200),
+ (User, 'GET', None, None, 'freshuser', 200),
+ (User, 'GET', None, None, 'reader', 200),
+ (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, None, 400),
+ (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, 'devtable', 400),
+ (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, 'freshuser', 400),
+ (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, 'reader', 400),
+ (User, 'PUT', None, {}, None, 401),
+ (User, 'PUT', None, {}, 'devtable', 200),
+ (User, 'PUT', None, {}, 'freshuser', 200),
+ (User, 'PUT', None, {}, 'reader', 200),
+ (User, 'DELETE', None, {}, None, 401),
+ (User, 'DELETE', None, {}, 'devtable', 400),
+ (User, 'DELETE', None, {}, 'freshuser', 204),
+ (User, 'DELETE', None, {}, 'reader', 204),
+
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, None, 401),
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'devtable', 400),
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'reader', 403),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, None, 401),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'devtable', 200),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'reader', 403),
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, None, 401),
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'devtable', 400),
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'reader', 403),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, None, 401),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'devtable', 400),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'reader', 403),
+
+ (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, None, 401),
+ (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'devtable', 200),
+ (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'reader', 403),
+
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, None, 401),
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'devtable', 200),
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'reader', 200),
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, None, 401),
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'devtable', 200),
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'reader', 403),
+
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, None, 401),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'devtable', 403),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'devtable', 400),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'devtable', 400),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, None, 401),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'devtable', 400),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, None, 401),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'devtable', 400),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'reader', 403),
+
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, None, 401),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'devtable', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, None, 401),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'devtable', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, None, 401),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'devtable', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, None, 401),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'devtable', 400),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, None, 401),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'devtable', 400),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, None, 401),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, 'devtable', 400),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, None, 401),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'devtable', 204),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, None, 401),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'devtable', 200),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, None, 401),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'devtable', 200),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, None, 401),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'devtable', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, None, 401),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'devtable', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, None, 401),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'devtable', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, None, 401),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'devtable', 400),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, None, 401),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'devtable', 400),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, None, 401),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, 'devtable', 400),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, None, 401),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'devtable', 400),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, None, 401),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'devtable', 400),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'reader', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, None, 401),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'devtable', 200),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'reader', 403),
+
+ (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
+ (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'devtable', 403),
+ (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
+ (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
+ (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, None, 401),
+ (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
+ (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
+ (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
+ (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
+ (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
+ (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
+ (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
+
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'devtable', 403),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, None, 401),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
+ (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
+
+ (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, None, None, 401),
+ (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, {'namespace': 'foo'}, 'devtable', 403),
+ (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, None, 'freshuser', 403),
+ (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, None, 'reader', 403),
+ (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, None, None, 401),
+ (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, {'namespace': 'foo'}, 'devtable', 404),
+ (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, None, 'freshuser', 403),
+ (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, None, 'reader', 403),
+ (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, None, None, 401),
+ (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, {'namespace': 'foo'}, 'devtable', 404),
+ (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, None, 'freshuser', 403),
+ (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, None, 'reader', 403),
+
+ (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, None, 401),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, 'devtable', 403),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, 'freshuser', 403),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, 'reader', 403),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, None, 401),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, 'devtable', 404),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, 'freshuser', 403),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, 'reader', 403),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, None, 401),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, 'devtable', 404),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, 'freshuser', 403),
+ (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, 'reader', 403),
+
+ (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, None, 401),
+ (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, 'devtable', 403),
+ (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, 'freshuser', 403),
+ (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, 'reader', 403),
+ (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, None, 401),
+ (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, 'devtable', 200),
+ (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, 'freshuser', 403),
+ (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, 'reader', 403),
+ (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, None, 401),
+ (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, 'devtable', 200),
+ (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, 'freshuser', 403),
+ (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, 'reader', 403),
+
+ (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
+ (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'devtable', 403),
+ (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
+ (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
+ (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, None, 401),
+ (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, {}, 'devtable', 404),
+ (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
+ (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
+ (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
+ (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, {}, 'devtable', 404),
+ (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
+ (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
+
+ (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, {'config': {}}, 'devtable', 403),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, None, 401),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, {'config': {}}, 'devtable', 404),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, {'config': {}}, 'devtable', 404),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
+ (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
+
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, None, 400),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'devtable', 400),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'freshuser', 400),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'reader', 400),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'devtable', 400),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'reader', 400),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 400),
+
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, None, 404),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'devtable', 404),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'freshuser', 404),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'reader', 404),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'devtable', 404),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'reader', 404),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 404),
+
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'devtable', 404),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, 'devtable', 400),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, None, 200),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'devtable', 200),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'freshuser', 200),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'reader', 200),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, None, 401),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'reader', 200),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
+
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, None, 404),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'devtable', 404),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'freshuser', 404),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'reader', 404),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'devtable', 404),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'reader', 404),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 404),
+
+ (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, None, 401),
+ (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, 'devtable', 404),
+ (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, 'freshuser', 403),
+ (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, 'reader', 403),
+ (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, None, 401),
+ (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, 'devtable', 404),
+ (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, 'freshuser', 403),
+ (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, 'reader', 403),
+
+ (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, None, 401),
+ (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'devtable', 404),
+ (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'freshuser', 403),
+ (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'reader', 403),
+ (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, None, 401),
+ (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'devtable', 404),
+ (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'freshuser', 403),
+ (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'reader', 403),
+
+ (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, None, 401),
+ (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'devtable', 400),
+ (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'freshuser', 403),
+ (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'reader', 403),
+ (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, None, 401),
+ (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'devtable', 400),
+ (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'freshuser', 403),
+ (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'reader', 403),
+ (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, None, 401),
+ (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, 'devtable', 400),
+ (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, 'freshuser', 403),
+ (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, 'reader', 403),
+
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, None, 401),
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'devtable', 204),
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'freshuser', 403),
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'reader', 403),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, None, 401),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, 'devtable', 200),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, 'freshuser', 403),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, 'reader', 403),
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, None, 401),
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'devtable', 400),
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'freshuser', 403),
+ (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'reader', 403),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, None, 401),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, 'devtable', 400),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, 'freshuser', 403),
+ (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, 'reader', 403),
+
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+
+ (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+
+ (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
+ (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 403),
+ (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
+ (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
+ (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
+ (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 403),
+ (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
+ (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
+ (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, None, 401),
+ (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
+ (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
+ (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
+ (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, None, 401),
+ (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
+ (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
+ (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
+ (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
+ (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
+ (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
+ (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
+ (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
+ (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
+ (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
+ (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
+
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'devtable', 400),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'devtable', 404),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+
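+ # RepositoryToken cases: anonymous requests expect 401, unauthorized users 403, and authorized (devtable) requests 410 Gone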
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, None, 401),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'devtable', 403),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'devtable', 410),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'devtable', 410),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, None, 401),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'devtable', 410),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'reader', 403),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 410),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 410),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, None, 401),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'devtable', 410),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'freshuser', 403),
+ (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'reader', 403),
+
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, None, 404),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, 'devtable', 404),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, 'freshuser', 404),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, 'reader', 404),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, 'devtable', 404),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, 'reader', 404),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 404),
+
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, None, 401),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'devtable', 403),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'reader', 403),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, None, 401),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'devtable', 404),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'freshuser', 403),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'reader', 403),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, None, 401),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'devtable', 404),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
+ (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'reader', 403),
+
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, None, 401),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'devtable', 403),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'reader', 403),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, 'devtable', 400),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, None, 401),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'devtable', 404),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'freshuser', 403),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'reader', 403),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, None, 401),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'devtable', 404),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
+ (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'reader', 403),
+
+ (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+ (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, None, 401),
+ (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, 'devtable', 400),
+ (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, 'freshuser', 403),
+ (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, 'reader', 403),
+
+ (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+
+ (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+
+ (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+
+ (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 200),
+
+ (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+ (OrganizationCard, 'POST', {'orgname': 'buynlarge'}, {u'token': '4VFR'}, None, 401),
+ (OrganizationCard, 'POST', {'orgname': 'buynlarge'}, {u'token': '4VFR'}, 'freshuser', 403),
+ (OrganizationCard, 'POST', {'orgname': 'buynlarge'}, {u'token': '4VFR'}, 'reader', 403),
+
+ (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+ (OrganizationPlan, 'PUT', {'orgname': 'buynlarge'}, {u'plan': 'WWEI'}, None, 401),
+ (OrganizationPlan, 'PUT', {'orgname': 'buynlarge'}, {u'plan': 'WWEI'}, 'freshuser', 403),
+ (OrganizationPlan, 'PUT', {'orgname': 'buynlarge'}, {u'plan': 'WWEI'}, 'reader', 403),
+
+ (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+
+ (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, None, 401),
+ (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, 'devtable', 403),
+ (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, 'freshuser', 403),
+ (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, 'reader', 403),
+ (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, None, 401),
+ (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, 'devtable', 200),
+ (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, 'freshuser', 403),
+ (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, 'reader', 403),
+ (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, None, 401),
+ (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, 'devtable', 200),
+ (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, 'freshuser', 403),
+ (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, 'reader', 403),
+
+ (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
+ (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
+ (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+
+ (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, None, 401),
+ (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, 'devtable', 403),
+ (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, 'freshuser', 403),
+ (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, 'reader', 403),
+ (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {}, None, 401),
+ (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {'config': {'email': 'a@b.com'}, 'event': 'repo_push', 'method': 'email'}, 'devtable', 400),
+ (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {}, 'freshuser', 403),
+ (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {}, 'reader', 403),
+ (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {}, None, 401),
+ (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {'config': {'email': 'a@b.com'}, 'event': 'repo_push', 'method': 'email'}, 'devtable', 400),
+ (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {}, 'freshuser', 403),
+ (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {}, 'reader', 403),
+
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, None, 401),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, 'devtable', 403),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, 'freshuser', 403),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, 'reader', 403),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, 'devtable', 404),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, None, 401),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, 'devtable', 200),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, 'freshuser', 403),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, 'reader', 403),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, None, 401),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, 'devtable', 200),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, 'freshuser', 403),
+ (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, 'reader', 403),
+
+ (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, None, 401),
+ (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, 'devtable', 403),
+ (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, 'freshuser', 403),
+ (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, 'reader', 403),
+ (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 410),
+ (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, None, 401),
+ (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, 'devtable', 410),
+ (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, 'freshuser', 403),
+ (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, 'reader', 403),
+ (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 410),
+ (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, None, 401),
+ (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, 'devtable', 410),
+ (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, 'freshuser', 403),
+ (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, 'reader', 403),
+
+ (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
+ (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
+ (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
+ (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
+ (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, None, 401),
+ (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, 'devtable', 403),
+ (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, 'freshuser', 403),
+ (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, 'reader', 403),
+ (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 200),
+ (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, None, 401),
+ (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, 'devtable', 201),
+ (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, 'freshuser', 403),
+ (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, 'reader', 403),
+ (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
+ (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, None, 401),
+ (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, 'devtable', 201),
+ (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, 'freshuser', 403),
+ (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, 'reader', 403),
+
+ (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
+ (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
+ (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
+ (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
+ (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 200),
+ (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
+
+ (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
+ (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
+ (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+
+ (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, None, 401),
+ (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
+ (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
+ (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, 'reader', 400),
+ (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, None, 401),
+ (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
+ (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
+ (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, 'reader', 400),
+ (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, None, 401),
+ (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, 'devtable', 201),
+ (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, 'freshuser', 201),
+ (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, 'reader', 201),
+
+ (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, None, 401),
+ (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
+ (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
+ (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, 'reader', 400),
+
+ (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, None, 401),
+ (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'devtable', 400),
+ (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'freshuser', 403),
+ (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'reader', 403),
+
+ (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, None, 401),
+ (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
+ (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
+ (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, 'reader', 400),
+
+ (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, None, 401),
+ (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'devtable', 400),
+ (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'freshuser', 403),
+ (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'reader', 403),
+
+ (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, None, 401),
+ (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, 'devtable', 204),
+ (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, 'freshuser', 403),
+ (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, 'reader', 403),
+ (Organization, 'GET', {'orgname': 'buynlarge'}, None, None, 200),
+ (Organization, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (Organization, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 200),
+ (Organization, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 200),
+ (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, None, 401),
+ (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, 'devtable', 200),
+ (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, 'freshuser', 403),
+ (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, 'reader', 403),
+
+ (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, None, 401),
+ (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
+ (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
+ (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'reader', 403),
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
+ (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
+ (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, None, 401),
+ (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, 'devtable', 403),
+ (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, 'freshuser', 403),
+ (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, 'reader', 403),
+ (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, None, 401),
+ (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, 'devtable', 204),
+ (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, 'reader', 403),
+ (Repository, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
+ (Repository, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
+ (Repository, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
+ (Repository, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 200),
+ (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, None, 401),
+ (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, 'devtable', 200),
+ (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, 'freshuser', 403),
+ (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, 'reader', 403),
+ (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 204),
+ (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
+ (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
+ (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
+ (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
+ (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
+ (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, None, 401),
+ (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, 'devtable', 200),
+ (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, 'freshuser', 403),
+ (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, 'reader', 403),
+
+ (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, None, 200),
+ (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, 'devtable', 200),
+ (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, 'freshuser', 200),
+ (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, 'reader', 200),
+
+ (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, None, 404),
+ (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, 'devtable', 404),
+ (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, 'freshuser', 404),
+ (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, 'reader', 404),
+
+ (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+ (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, None, 401),
+ (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, 'devtable', 200),
+ (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, 'freshuser', 403),
+ (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, 'reader', 403),
+
+ (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, None, 401),
+ (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'devtable', 204),
+ (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'freshuser', 403),
+ (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'reader', 403),
+ (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, None, 401),
+ (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'devtable', 200),
+ (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'freshuser', 403),
+ (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'reader', 403),
+ (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, None, 401),
+ (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, 'devtable', 200),
+ (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, 'freshuser', 403),
+ (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, 'reader', 403),
+
+ (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, None, 401),
+ (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'devtable', 200),
+ (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'freshuser', 403),
+ (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'reader', 403),
+
+ (Users, 'GET', {'username': 'devtable'}, None, None, 200),
+
+ (UserNotificationList, 'GET', None, None, None, 401),
+ (UserNotificationList, 'GET', None, None, 'devtable', 200),
+ (UserNotificationList, 'GET', None, None, 'freshuser', 200),
+ (UserNotificationList, 'GET', None, None, 'reader', 200),
+
+ (UserAuthorizationList, 'GET', None, None, None, 401),
+ (UserAuthorizationList, 'GET', None, None, 'devtable', 200),
+ (UserAuthorizationList, 'GET', None, None, 'freshuser', 200),
+ (UserAuthorizationList, 'GET', None, None, 'reader', 200),
+
+ (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, None, 401),
+ (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, 'devtable', 404),
+ (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, 'freshuser', 404),
+ (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, 'reader', 404),
+ (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, None, 401),
+ (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, 'devtable', 404),
+ (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, 'freshuser', 404),
+ (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, 'reader', 404),
+
+ (UserAggregateLogs, 'GET', None, None, None, 401),
+ (UserAggregateLogs, 'GET', None, None, 'devtable', 200),
+ (UserAggregateLogs, 'GET', None, None, 'freshuser', 200),
+ (UserAggregateLogs, 'GET', None, None, 'reader', 200),
+
+ (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
+ (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+
+ (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, None, 401),
+ (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, 'devtable', 200),
+ (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, None, 401),
+ (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, 'devtable', 200),
+ (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, 'freshuser', 200),
+ (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, 'reader', 200),
+
+ (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, None, 401),
+ (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, 'devtable', 200),
+ (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, 'freshuser', 403),
+ (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, 'reader', 403),
+
+ (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, None, 401),
+ (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, 'devtable', 200),
+ (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, 'freshuser', 403),
+ (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, 'reader', 403),
+
+ (SuperUserAggregateLogs, 'GET', None, None, None, 401),
+ (SuperUserAggregateLogs, 'GET', None, None, 'devtable', 200),
+ (SuperUserAggregateLogs, 'GET', None, None, 'freshuser', 403),
+ (SuperUserAggregateLogs, 'GET', None, None, 'reader', 403),
+
+ (SuperUserLogs, 'GET', None, None, None, 401),
+ (SuperUserLogs, 'GET', None, None, 'devtable', 200),
+ (SuperUserLogs, 'GET', None, None, 'freshuser', 403),
+ (SuperUserLogs, 'GET', None, None, 'reader', 403),
+
+ (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, None, 401),
+ (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, 'devtable', 404),
+ (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, 'freshuser', 403),
+ (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, 'reader', 403),
+
+ (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, None, 401),
+ (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, 'devtable', 404),
+ (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, 'freshuser', 403),
+ (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, 'reader', 403),
+
+ (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, None, 401),
+ (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, 'devtable', 404),
+ (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, 'freshuser', 403),
+ (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, 'reader', 403),
+
+ (SuperUserServiceKeyManagement, 'GET', None, None, None, 401),
+ (SuperUserServiceKeyManagement, 'GET', None, None, 'devtable', 200),
+ (SuperUserServiceKeyManagement, 'GET', None, None, 'freshuser', 403),
+ (SuperUserServiceKeyManagement, 'GET', None, None, 'reader', 403),
+ (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, None, 401),
+ (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, 'devtable', 200),
+ (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, 'freshuser', 403),
+ (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, 'reader', 403),
+
+ (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, None, 401),
+ (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, 'devtable', 404),
+ (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, 'freshuser', 403),
+ (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, 'reader', 403),
+ (SuperUserServiceKey, 'GET', {'kid': 1234}, None, None, 401),
+ (SuperUserServiceKey, 'GET', {'kid': 1234}, None, 'devtable', 404),
+ (SuperUserServiceKey, 'GET', {'kid': 1234}, None, 'freshuser', 403),
+ (SuperUserServiceKey, 'GET', {'kid': 1234}, None, 'reader', 403),
+ (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, None, 401),
+ (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, 'devtable', 404),
+ (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, 'freshuser', 403),
+ (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, 'reader', 403),
+
+ (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, None, 401),
+ (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, 'devtable', 400),
+ (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, 'freshuser', 400),
+ (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, 'reader', 400),
+ (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, None, 401),
+ (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, 'devtable', 400),
+ (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, 'freshuser', 400),
+ (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, 'reader', 400),
+
+ (ConductSearch, 'GET', None, None, None, 200),
+ (ConductSearch, 'GET', None, None, 'devtable', 200),
+
+ (ChangeLog, 'GET', None, None, None, 401),
+ (ChangeLog, 'GET', None, None, 'devtable', 200),
+ (ChangeLog, 'GET', None, None, 'freshuser', 403),
+ (ChangeLog, 'GET', None, None, 'reader', 403),
+
+ (SuperUserOrganizationList, 'GET', None, None, None, 401),
+ (SuperUserOrganizationList, 'GET', None, None, 'devtable', 200),
+ (SuperUserOrganizationList, 'GET', None, None, 'freshuser', 403),
+ (SuperUserOrganizationList, 'GET', None, None, 'reader', 403),
+
+ (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, None, 401),
+ (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, 'devtable', 204),
+ (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, 'freshuser', 403),
+ (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, 'reader', 403),
+ (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, None, 401),
+ (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, 'devtable', 200),
+ (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, 'freshuser', 403),
+ (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, 'reader', 403),
+
+ (SuperUserList, 'GET', None, None, None, 401),
+ (SuperUserList, 'GET', None, None, 'devtable', 200),
+ (SuperUserList, 'GET', None, None, 'freshuser', 403),
+ (SuperUserList, 'GET', None, None, 'reader', 403),
+
+ (SuperUserList, 'POST', None, {'username': 'foo'}, None, 401),
+ (SuperUserList, 'POST', None, {'username': 'foo'}, 'devtable', 400),
+ (SuperUserList, 'POST', None, {'username': 'foo'}, 'freshuser', 403),
+ (SuperUserList, 'POST', None, {'username': 'foo'}, 'reader', 403),
+
+ (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, None, 401),
+ (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, 'devtable', 204),
+ (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, 'freshuser', 403),
+ (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, 'reader', 403),
+ (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, None, 401),
+ (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, 'devtable', 200),
+ (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, 'freshuser', 403),
+ (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, 'reader', 403),
+ (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, None, 401),
+ (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, 'devtable', 200),
+ (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, 'freshuser', 403),
+ (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, 'reader', 403),
+
+ (GlobalUserMessages, 'GET', None, None, None, 200),
+
+ (GlobalUserMessages, 'POST', None, None, None, 401),
+ (GlobalUserMessages, 'POST', None, {'message': {'content': 'msg', 'media_type': 'text/plain', 'severity': 'info'}}, 'devtable', 201),
+ (GlobalUserMessages, 'POST', None, {'message': {'content': 'msg', 'media_type': 'text/plain', 'severity': 'info'}}, 'freshuser', 403),
+ (GlobalUserMessages, 'POST', None, {'message': {'content': 'msg', 'media_type': 'text/plain', 'severity': 'info'}}, 'reader', 403),
+
+ (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, None, 401),
+ (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, 'devtable', 204),
+ (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, 'freshuser', 403),
+ (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, 'reader', 403),
+
+ (UserInvoiceFieldList, 'GET', None, None, None, 401),
+ (UserInvoiceFieldList, 'GET', None, None, 'devtable', 200),
+ (UserInvoiceFieldList, 'GET', None, None, 'freshuser', 404),
+ (UserInvoiceFieldList, 'GET', None, None, 'reader', 404),
+ (UserInvoiceFieldList, 'POST', None, None, None, 401),
+ (UserInvoiceFieldList, 'POST', None, {'value': 'bar', 'title': 'foo'}, 'devtable', 200),
+ (UserInvoiceFieldList, 'POST', None, {'value': 'bar', 'title': 'foo'}, 'freshuser', 404),
+ (UserInvoiceFieldList, 'POST', None, {'value': 'bar', 'title': 'foo'}, 'reader', 404),
+
+ (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, None, 401),
+ (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, 'devtable', 201),
+ (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, 'freshuser', 404),
+ (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, 'reader', 404),
+
+ (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, None, 403),
+ (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
+ (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
+ (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
+ (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, None, 403),
+ (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, 'devtable', 200),
+ (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, 'freshuser', 403),
+ (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, 'reader', 403),
+
+ (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, None, 403),
+ (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, 'devtable', 201),
+ (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, 'freshuser', 403),
+ (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, 'reader', 403),
+
+ (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, None, 401),
+ (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, 'devtable', 404),
+ (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, 'freshuser', 403),
+ (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, 'reader', 403),
+
+ (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
+ (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
+ (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
+ (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
+ (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
+ (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, None, 401),
+ (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, 'devtable', 404),
+ (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, 'freshuser', 403),
+ (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, 'reader', 403),
+
+ (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
+ (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
+ (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
+ (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
+ (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, None, 401),
+ (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'devtable', 200),
+ (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'freshuser', 403),
+ (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'reader', 403),
+
+ (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, None, 401),
+ (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'devtable', 404),
+ (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'freshuser', 403),
+ (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'reader', 403),
+
+ (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, None, 401),
+ (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, 'devtable', 400),
+ (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, 'freshuser', 403),
+ (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, 'reader', 403),
+
+ (LinkExternalEntity, 'POST', {'username': 'foo'}, None, None, 404),
+
+ (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, None, 401),
+ (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, 'devtable', 404),
+ (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, 'freshuser', 403),
+ (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, 'reader', 403),
+
+ (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, None, 401),
+ (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, 'devtable', 404),
+ (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, None, 401),
+ (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, 'devtable', 400),
+ (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, None, 401),
+ (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, 'devtable', 400),
+ (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, None, 401),
+ (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, 'devtable', 404),
+ (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, None, 401),
+ (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, 'devtable', 404),
+ (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, None, 401),
+ (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'devtable', 400),
+ (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'reader', 403),
+
+ (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, None, 401),
+ (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, 'devtable', 400),
+ (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
+ (ManageRepoMirrorRule, 'PUT', {'repository': 'devtable/simple'}, None, 'reader', 403),
+]
+
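+# Each SECURITY_TESTS entry is (resource class, HTTP method, URL params, request body,
+# identity to authenticate as (None means anonymous), expected HTTP status).
+# conduct_api_call (from endpoints.api.test.shared) is assumed to resolve the resource's
+# URL from the params, issue the request with the given body, and assert that the
+# response status matches the expected code before returning the response, e.g.:
+#
+#   with client_with_identity('devtable', client) as cl:
+#     conduct_api_call(cl, Repository, 'GET', {'repository': 'devtable/shared'}, None, 200)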
+@pytest.mark.parametrize('resource,method,params,body,identity,expected', SECURITY_TESTS)
+def test_api_security(resource, method, params, body, identity, expected, client):
+ with client_with_identity(identity, client) as cl:
+ conduct_api_call(cl, resource, method, params, body, expected)
+
+
+ALLOWED_MISSING_MODULES = {'endpoints.api.suconfig', 'endpoints.api.error', 'data.userfiles'}
+
+def test_all_apis_tested(app):
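+  # Build the set of '<module>.<Class>::<METHOD>' identifiers for every API view
+  # registered on the Flask app, then check below that each one is covered by an
+  # entry in SECURITY_TESTS (modules in ALLOWED_MISSING_MODULES are exempt).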
+ required_tests = set()
+
+ for rule in app.url_map.iter_rules():
+ endpoint_method = app.view_functions[rule.endpoint]
+
+ # Verify that we have a view class for this API method.
+    if 'view_class' not in dir(endpoint_method):
+ continue
+
+ view_class = endpoint_method.view_class
+ if view_class.__module__ in ALLOWED_MISSING_MODULES:
+ continue
+
+ method_names = list(rule.methods.difference(['HEAD', 'OPTIONS']))
+ full_name = '%s.%s' % (view_class.__module__, view_class.__name__)
+ for method_name in method_names:
+ required_tests.add('%s::%s' % (full_name, method_name.upper()))
+
+ assert required_tests
+
+ for test in SECURITY_TESTS:
+ view_class = test[0]
+ required_tests.discard('%s.%s::%s' % (view_class.__module__, view_class.__name__,
+ test[1].upper()))
+
+ assert not required_tests, "API security tests missing for: %s" % required_tests
+
+
+@pytest.mark.parametrize('is_superuser', [
+ (True),
+ (False),
+])
+@pytest.mark.parametrize('allow_nonsuperuser', [
+ (True),
+ (False),
+])
+@pytest.mark.parametrize('method, expected', [
+ ('POST', 400),
+ ('DELETE', 200),
+])
+def test_team_sync_security(is_superuser, allow_nonsuperuser, method, expected, client):
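+  # Team-sync setup/teardown is expected to succeed only for superusers, unless the
+  # NONSUPERUSER_TEAM_SYNCING_SETUP feature flag is enabled; everyone else gets a 403.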
+ def is_superuser_method(_):
+ return is_superuser
+
+ with patch('auth.permissions.superusers.is_superuser', is_superuser_method):
+ with toggle_feature('NONSUPERUSER_TEAM_SYNCING_SETUP', allow_nonsuperuser):
+ with client_with_identity('devtable', client) as cl:
+ expect_success = is_superuser or allow_nonsuperuser
+ expected_status = expected if expect_success else 403
+ conduct_api_call(cl, OrganizationTeamSyncing, method, TEAM_PARAMS, {}, expected_status)
diff --git a/endpoints/api/test/test_signing.py b/endpoints/api/test/test_signing.py
new file mode 100644
index 000000000..e941cee56
--- /dev/null
+++ b/endpoints/api/test/test_signing.py
@@ -0,0 +1,55 @@
+import pytest
+
+from collections import Counter
+from mock import patch
+
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.signing import RepositorySignatures
+from endpoints.test.shared import client_with_identity
+
+from test.fixtures import *
+
+VALID_TARGETS_MAP = {
+ "targets/ci": {
+ "targets": {
+ "latest": {
+ "hashes": {
+ "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ="
+ },
+ "length": 2111
+ }
+ },
+ "expiration": "2020-05-22T10:26:46.618176424-04:00"
+ },
+ "targets": {
+ "targets": {
+ "latest": {
+ "hashes": {
+ "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ="
+ },
+ "length": 2111
+ }
+ },
+ "expiration": "2020-05-22T10:26:01.953414888-04:00"}
+ }
+
+
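+# Compare the 'delegations' payloads order-insensitively; fall back to strict equality
+# when either side carries no delegations.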
+def tags_equal(expected, actual):
+ expected_tags = expected.get('delegations')
+ actual_tags = actual.get('delegations')
+ if expected_tags and actual_tags:
+ return Counter(expected_tags) == Counter(actual_tags)
+ return expected == actual
+
+@pytest.mark.parametrize('targets_map,expected', [
+ (VALID_TARGETS_MAP, {'delegations': VALID_TARGETS_MAP}),
+ ({'bad': 'tags'}, {'delegations': {'bad': 'tags'}}),
+ ({}, {'delegations': {}}),
+ (None, {'delegations': None}), # API returns None on exceptions
+])
+def test_get_signatures(targets_map, expected, client):
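+  # The endpoint is expected to wrap whatever tuf_metadata_api returns (including None
+  # on an upstream error) in a {'delegations': ...} envelope.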
+ with patch('endpoints.api.signing.tuf_metadata_api') as mock_tuf:
+ mock_tuf.get_all_tags_with_expiration.return_value = targets_map
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': 'devtable/trusted'}
+ assert tags_equal(expected, conduct_api_call(cl, RepositorySignatures, 'GET', params, None, 200).json)
diff --git a/endpoints/api/test/test_subscribe_models_pre_oci.py b/endpoints/api/test/test_subscribe_models_pre_oci.py
new file mode 100644
index 000000000..8810e36f5
--- /dev/null
+++ b/endpoints/api/test/test_subscribe_models_pre_oci.py
@@ -0,0 +1,43 @@
+import pytest
+from mock import patch
+
+from endpoints.api.subscribe_models_pre_oci import data_model
+
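+# These tests treat the pre-OCI data model as a thin delegation layer: each data_model
+# method should call the corresponding data-layer function exactly once with the
+# arguments it was given, which is what the mocks below assert.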
+
+@pytest.mark.parametrize('username,repo_count', [
+ ('devtable', 3)
+])
+def test_get_private_repo_count(username, repo_count):
+  with patch('endpoints.api.subscribe_models_pre_oci.get_private_repo_count') as mock_get_private_repo_count:
+    mock_get_private_repo_count.return_value = repo_count
+    count = data_model.get_private_repo_count(username)
+
+    mock_get_private_repo_count.assert_called_once_with(username)
+ assert count == repo_count
+
+
+@pytest.mark.parametrize('kind_name,target_username,metadata', [
+ ('over_private_usage', 'devtable', {'namespace': 'devtable'})
+])
+def test_create_unique_notification(kind_name, target_username, metadata):
+ with patch('endpoints.api.subscribe_models_pre_oci.get_user_or_org') as mock_get_user_or_org:
+ mock_get_user_or_org.return_value = {'username': target_username}
+ with patch('endpoints.api.subscribe_models_pre_oci.create_unique_notification') as mock_create_unique_notification:
+ data_model.create_unique_notification(kind_name, target_username, metadata)
+
+ mock_get_user_or_org.assert_called_once_with(target_username)
+ mock_create_unique_notification.assert_called_once_with(kind_name, mock_get_user_or_org.return_value, metadata)
+
+
+@pytest.mark.parametrize('target_username,kind_name', [
+ ('devtable', 'over_private_usage')
+])
+def test_delete_notifications_by_kind(target_username, kind_name):
+ with patch('endpoints.api.subscribe_models_pre_oci.get_user_or_org') as mock_get_user_or_org:
+ mock_get_user_or_org.return_value = {'username': target_username}
+ with patch('endpoints.api.subscribe_models_pre_oci.delete_notifications_by_kind') as mock_delete_notifications_by_kind:
+ data_model.delete_notifications_by_kind(target_username, kind_name)
+
+ mock_get_user_or_org.assert_called_once_with(target_username)
+ mock_delete_notifications_by_kind.assert_called_once_with(mock_get_user_or_org.return_value, kind_name)
+
diff --git a/endpoints/api/test/test_superuser.py b/endpoints/api/test/test_superuser.py
new file mode 100644
index 000000000..46e4bacf3
--- /dev/null
+++ b/endpoints/api/test/test_superuser.py
@@ -0,0 +1,28 @@
+import pytest
+
+from endpoints.api.superuser import SuperUserList, SuperUserManagement
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
+@pytest.mark.parametrize('disabled', [
+ (True),
+ (False),
+])
+def test_list_all_users(disabled, client):
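+  # The 'disabled' parameter is assumed to control whether disabled accounts are
+  # included in the listing; when it is false, every returned user must be enabled.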
+ with client_with_identity('devtable', client) as cl:
+ params = {'disabled': disabled}
+ result = conduct_api_call(cl, SuperUserList, 'GET', params, None, 200).json
+ assert len(result['users'])
+ for user in result['users']:
+ if not disabled:
+ assert user['enabled']
+
+
+def test_change_install_user(client):
+ with client_with_identity('devtable', client) as cl:
+ params = {'username': 'randomuser'}
+ body = {'email': 'new_email123@test.com'}
+ result = conduct_api_call(cl, SuperUserManagement, 'PUT', params, body, 200).json
+
+ assert result['email'] == body['email']
diff --git a/endpoints/api/test/test_tag.py b/endpoints/api/test/test_tag.py
new file mode 100644
index 000000000..54f6df599
--- /dev/null
+++ b/endpoints/api/test/test_tag.py
@@ -0,0 +1,116 @@
+import pytest
+
+from playhouse.test_utils import assert_query_count
+
+from data import model
+from data.registry_model import registry_model
+from data.database import Manifest
+
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+from endpoints.api.tag import RepositoryTag, RestoreTag, ListRepositoryTags, RepositoryTagImages
+
+from test.fixtures import *
+
+@pytest.mark.parametrize('expiration_time, expected_status', [
+ (None, 201),
+ ('aksdjhasd', 400),
+])
+def test_change_tag_expiration_default(expiration_time, expected_status, client, app):
+ with client_with_identity('devtable', client) as cl:
+ params = {
+ 'repository': 'devtable/simple',
+ 'tag': 'latest',
+ }
+
+ request_body = {
+ 'expiration': expiration_time,
+ }
+
+ conduct_api_call(cl, RepositoryTag, 'put', params, request_body, expected_status)
+
+
+def test_change_tag_expiration(client, app):
+ with client_with_identity('devtable', client) as cl:
+ params = {
+ 'repository': 'devtable/simple',
+ 'tag': 'latest',
+ }
+
+ tag = model.tag.get_active_tag('devtable', 'simple', 'latest')
+ updated_expiration = tag.lifetime_start_ts + 60*60*24
+
+ request_body = {
+ 'expiration': updated_expiration,
+ }
+
+ conduct_api_call(cl, RepositoryTag, 'put', params, request_body, 201)
+ tag = model.tag.get_active_tag('devtable', 'simple', 'latest')
+ assert tag.lifetime_end_ts == updated_expiration
+
+
+@pytest.mark.parametrize('image_exists,test_tag,expected_status', [
+ (True, '-INVALID-TAG-NAME', 400),
+ (True, '.INVALID-TAG-NAME', 400),
+ (True,
+ 'INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG',
+ 400),
+ (False, 'newtag', 404),
+ (True, 'generatemanifestfail', None),
+ (True, 'latest', 201),
+ (True, 'newtag', 201),
+])
+def test_move_tag(image_exists, test_tag, expected_status, client, app):
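+  # An expected_status of None (the 'generatemanifestfail' case) means the call is
+  # expected to raise instead of returning an HTTP status, hence the pytest.raises path.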
+ with client_with_identity('devtable', client) as cl:
+ test_image = 'unknown'
+ if image_exists:
+ repo_ref = registry_model.lookup_repository('devtable', 'simple')
+ tag_ref = registry_model.get_repo_tag(repo_ref, 'latest', include_legacy_image=True)
+ assert tag_ref
+
+ test_image = tag_ref.legacy_image.docker_image_id
+
+ params = {'repository': 'devtable/simple', 'tag': test_tag}
+ request_body = {'image': test_image}
+ if expected_status is None:
+ with pytest.raises(Exception):
+ conduct_api_call(cl, RepositoryTag, 'put', params, request_body, expected_status)
+ else:
+ conduct_api_call(cl, RepositoryTag, 'put', params, request_body, expected_status)
+
+
+@pytest.mark.parametrize('repo_namespace, repo_name, query_count', [
+ ('devtable', 'simple', 5),
+ ('devtable', 'history', 5),
+ ('devtable', 'complex', 5),
+ ('devtable', 'gargantuan', 5),
+ ('buynlarge', 'orgrepo', 7), # +2 for permissions checks.
+ ('buynlarge', 'anotherorgrepo', 7), # +2 for permissions checks.
+])
+def test_list_repo_tags(repo_namespace, repo_name, client, query_count, app):
+ # Pre-cache media type loads to ensure consistent query count.
+ Manifest.media_type.get_name(1)
+
+ params = {'repository': repo_namespace + '/' + repo_name}
+ with client_with_identity('devtable', client) as cl:
+ with assert_query_count(query_count):
+ tags = conduct_api_call(cl, ListRepositoryTags, 'get', params).json['tags']
+
+ repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
+ history, _ = registry_model.list_repository_tag_history(repo_ref)
+ assert len(tags) == len(history)
+
+
+@pytest.mark.parametrize('repository, tag, owned, expect_images', [
+ ('devtable/simple', 'prod', False, True),
+ ('devtable/simple', 'prod', True, False),
+ ('devtable/simple', 'latest', False, True),
+ ('devtable/simple', 'latest', True, False),
+
+ ('devtable/complex', 'prod', False, True),
+ ('devtable/complex', 'prod', True, True),
+])
+def test_list_tag_images(repository, tag, owned, expect_images, client, app):
+ with client_with_identity('devtable', client) as cl:
+ params = {'repository': repository, 'tag': tag, 'owned': owned}
+ result = conduct_api_call(cl, RepositoryTagImages, 'get', params, None, 200).json
+ assert bool(result['images']) == expect_images
diff --git a/endpoints/api/test/test_team.py b/endpoints/api/test/test_team.py
new file mode 100644
index 000000000..9a17a36e4
--- /dev/null
+++ b/endpoints/api/test/test_team.py
@@ -0,0 +1,90 @@
+import json
+
+from mock import patch
+
+from data import model
+from endpoints.api import api
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.team import OrganizationTeamSyncing, TeamMemberList
+from endpoints.api.organization import Organization
+from endpoints.test.shared import client_with_identity
+
+from test.test_ldap import mock_ldap
+
+from test.fixtures import *
+
+SYNCED_TEAM_PARAMS = {'orgname': 'sellnsmall', 'teamname': 'synced'}
+UNSYNCED_TEAM_PARAMS = {'orgname': 'sellnsmall', 'teamname': 'owners'}
+
+def test_team_syncing(client):
+ with mock_ldap() as ldap:
+ with patch('endpoints.api.team.authentication', ldap):
+ with client_with_identity('devtable', client) as cl:
+ config = {
+ 'group_dn': 'cn=AwesomeFolk',
+ }
+
+ conduct_api_call(cl, OrganizationTeamSyncing, 'POST', UNSYNCED_TEAM_PARAMS, config)
+
+ # Ensure the team is now synced.
+ sync_info = model.team.get_team_sync_information(UNSYNCED_TEAM_PARAMS['orgname'],
+ UNSYNCED_TEAM_PARAMS['teamname'])
+ assert sync_info is not None
+ assert json.loads(sync_info.config) == config
+
+ # Remove the syncing.
+ conduct_api_call(cl, OrganizationTeamSyncing, 'DELETE', UNSYNCED_TEAM_PARAMS, None)
+
+ # Ensure the team is no longer synced.
+ sync_info = model.team.get_team_sync_information(UNSYNCED_TEAM_PARAMS['orgname'],
+ UNSYNCED_TEAM_PARAMS['teamname'])
+ assert sync_info is None
+
+
+def test_team_member_sync_info(client):
+ with mock_ldap() as ldap:
+ with patch('endpoints.api.team.authentication', ldap):
+ # Check for an unsynced team, with superuser.
+ with client_with_identity('devtable', client) as cl:
+ resp = conduct_api_call(cl, TeamMemberList, 'GET', UNSYNCED_TEAM_PARAMS)
+ assert 'can_sync' in resp.json
+ assert resp.json['can_sync']['service'] == 'ldap'
+
+ assert 'synced' not in resp.json
+
+ # Check for an unsynced team, with non-superuser.
+ with client_with_identity('randomuser', client) as cl:
+ resp = conduct_api_call(cl, TeamMemberList, 'GET', UNSYNCED_TEAM_PARAMS)
+ assert 'can_sync' not in resp.json
+ assert 'synced' not in resp.json
+
+ # Check for a synced team, with superuser.
+ with client_with_identity('devtable', client) as cl:
+ resp = conduct_api_call(cl, TeamMemberList, 'GET', SYNCED_TEAM_PARAMS)
+ assert 'can_sync' in resp.json
+ assert resp.json['can_sync']['service'] == 'ldap'
+
+ assert 'synced' in resp.json
+ assert 'last_updated' in resp.json['synced']
+ assert 'group_dn' in resp.json['synced']['config']
+
+ # Check for a synced team, with non-superuser.
+ with client_with_identity('randomuser', client) as cl:
+ resp = conduct_api_call(cl, TeamMemberList, 'GET', SYNCED_TEAM_PARAMS)
+ assert 'can_sync' not in resp.json
+
+ assert 'synced' in resp.json
+ assert 'last_updated' not in resp.json['synced']
+ assert 'config' not in resp.json['synced']
+
+
+def test_organization_teams_sync_bool(client):
+ with mock_ldap() as ldap:
+ with patch('endpoints.api.organization.authentication', ldap):
+ # Ensure synced teams are marked as such in the organization teams list.
+ with client_with_identity('devtable', client) as cl:
+ resp = conduct_api_call(cl, Organization, 'GET', {'orgname': 'sellnsmall'})
+
+ assert not resp.json['teams']['owners']['is_synced']
+
+ assert resp.json['teams']['synced']['is_synced']
diff --git a/endpoints/api/test/test_trigger.py b/endpoints/api/test/test_trigger.py
new file mode 100644
index 000000000..946b34431
--- /dev/null
+++ b/endpoints/api/test/test_trigger.py
@@ -0,0 +1,55 @@
+import pytest
+import json
+
+from data import model
+from endpoints.api.trigger_analyzer import is_parent
+from endpoints.api.trigger import BuildTrigger
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.test.shared import client_with_identity
+from test.fixtures import *
+
+
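+# is_parent(context, dockerfile_path) should be True only when the Dockerfile path lies
+# strictly below the build context directory; identical or unrelated paths are not
+# parent/child.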
+@pytest.mark.parametrize('context,dockerfile_path,expected', [
+ ("/", "/a/b", True),
+ ("/a", "/a/b", True),
+ ("/a/b", "/a/b", False),
+ ("/a//", "/a/b", True),
+ ("/a", "/a//b/c", True),
+ ("/a//", "a/b", True),
+ ("/a/b", "a/bc/d", False),
+ ("/d", "/a/b", False),
+ ("/a/b", "/a/b.c", False),
+ ("/a/b", "/a/b/b.c", True),
+ ("", "/a/b.c", False),
+ ("/a/b", "", False),
+ ("", "", False),
+])
+def test_super_user_build_endpoints(context, dockerfile_path, expected):
+ assert is_parent(context, dockerfile_path) == expected
+
+
+def test_enabled_disabled_trigger(app, client):
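+  # Toggling 'enabled' on an activated trigger should round-trip through the API and be
+  # reflected in the trigger view returned by the PUT call.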
+ trigger = model.build.list_build_triggers('devtable', 'building')[0]
+ trigger.config = json.dumps({'hook_id': 'someid'})
+ trigger.save()
+
+ params = {
+ 'repository': 'devtable/building',
+ 'trigger_uuid': trigger.uuid,
+ }
+
+ body = {
+ 'enabled': False,
+ }
+
+ with client_with_identity('devtable', client) as cl:
+ result = conduct_api_call(cl, BuildTrigger, 'PUT', params, body, 200).json
+ assert not result['enabled']
+
+ body = {
+ 'enabled': True,
+ }
+
+ with client_with_identity('devtable', client) as cl:
+ result = conduct_api_call(cl, BuildTrigger, 'PUT', params, body, 200).json
+ assert result['enabled']
diff --git a/endpoints/api/test/test_trigger_analyzer.py b/endpoints/api/test/test_trigger_analyzer.py
new file mode 100644
index 000000000..881bad8a3
--- /dev/null
+++ b/endpoints/api/test/test_trigger_analyzer.py
@@ -0,0 +1,152 @@
+import pytest
+from mock import Mock
+
+from auth import permissions
+from data import model
+from endpoints.api.trigger_analyzer import TriggerAnalyzer
+from util import dockerfileparse
+
+BAD_PATH = "\"server_hostname/\" is not a valid Quay repository path"
+
+EMPTY_CONF = {}
+
+GOOD_CONF = {'context': '/', 'dockerfile_path': '/file'}
+
+BAD_CONF = {'context': 'context', 'dockerfile_path': 'dockerfile_path'}
+
+ONE_ROBOT = {'can_read': False, 'is_robot': True, 'kind': 'user', 'name': 'name'}
+
+DOCKERFILE_NOT_CHILD = 'Dockerfile, context, is not a child of the context, dockerfile_path.'
+
+THE_DOCKERFILE_SPECIFIED = 'Could not parse the Dockerfile specified'
+
+DOCKERFILE_PATH_NOT_FOUND = 'Specified Dockerfile path for the trigger was not found on the main branch. This trigger may fail.'
+
+NO_FROM_LINE = 'No FROM line found in the Dockerfile'
+
+REPO_NOT_FOUND = 'Repository "server_hostname/path/file" referenced by the Dockerfile was not found'
+
+
+@pytest.fixture
+def get_monkeypatch(monkeypatch):
+ return monkeypatch
+
+
+def patch_permissions(monkeypatch, can_read=False):
+ def can_read_fn(base_namespace, base_repository):
+ return can_read
+
+ monkeypatch.setattr(permissions, 'ReadRepositoryPermission', can_read_fn)
+
+
+def patch_list_namespace_robots(monkeypatch):
+ my_mock = Mock()
+ my_mock.configure_mock(**{'username': 'name'})
+ return_value = [my_mock]
+
+  def return_list_mocks(namespace):
+ return return_value
+
+ monkeypatch.setattr(model.user, 'list_namespace_robots', return_list_mocks)
+ return return_value
+
+
+def patch_get_all_repo_users_transitive(monkeypatch):
+ my_mock = Mock()
+ my_mock.configure_mock(**{'username': 'name'})
+ return_value = [my_mock]
+
+  def return_get_mocks(namespace, image_repository):
+ return return_value
+
+ monkeypatch.setattr(model.user, 'get_all_repo_users_transitive', return_get_mocks)
+ return return_value
+
+
+def patch_parse_dockerfile(monkeypatch, get_base_image):
+ if get_base_image is not None:
+ def return_return_value(content):
+ parse_mock = Mock()
+ parse_mock.configure_mock(**{'get_base_image': get_base_image})
+ return parse_mock
+
+ monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)
+ else:
+ def return_return_value(content):
+ return get_base_image
+
+ monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)
+
+
+def patch_model_repository_get_repository(monkeypatch, get_repository):
+ if get_repository is not None:
+
+ def mock_get_repository(base_namespace, base_repository):
+ vis_mock = Mock()
+ vis_mock.name = get_repository
+      get_repo_mock = Mock(visibility=vis_mock)
+      return get_repo_mock
+
+ else:
+ def mock_get_repository(base_namespace, base_repository):
+ return None
+
+ monkeypatch.setattr(model.repository, "get_repository", mock_get_repository)
+
+
+def return_none():
+ return None
+
+
+def return_content():
+ return Mock()
+
+
+def return_server_hostname():
+ return "server_hostname/"
+
+
+def return_non_server_hostname():
+ return "slime"
+
+
+def return_path():
+ return "server_hostname/path/file"
+
+
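+# Each case drives TriggerAnalyzer to one outcome: the handler's load_dockerfile_contents
+# result, the trigger config, the parsed base image, and the looked-up repository together
+# determine the expected status/message pair in the analysis result.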
+@pytest.mark.parametrize(
+ 'handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots, server_hostname, get_repository, can_read, namespace, name', [
+ (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [], None, None, False, "namespace", None),
+ (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [ONE_ROBOT], None, None, False, "namespace", None),
+ (return_content, BAD_CONF, False, "error", THE_DOCKERFILE_SPECIFIED, None, [], None, None, False, "namespace", None),
+ (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [], None, None, False, "namespace", None),
+ (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [ONE_ROBOT], None, None, False, "namespace", None),
+ (return_content, BAD_CONF, False, "error", DOCKERFILE_NOT_CHILD, return_none, [], None, None, False, "namespace", None),
+ (return_content, GOOD_CONF, False, "warning", NO_FROM_LINE, return_none, [], None, None, False, "namespace", None),
+ (return_content, GOOD_CONF, False, "publicbase", None, return_non_server_hostname, [], "server_hostname", None, False, "namespace", None),
+ (return_content, GOOD_CONF, False, "warning", BAD_PATH, return_server_hostname, [], "server_hostname", None, False, "namespace", None),
+ (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", None, False, "namespace", None),
+ (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", "nonpublic", False, "namespace", None),
+ (return_content, GOOD_CONF, False, "requiresrobot", None, return_path, [], "server_hostname", "nonpublic", True, "path", "file"),
+ (return_content, GOOD_CONF, False, "publicbase", None, return_path, [], "server_hostname", "public", True, "path", "file"),
+
+ ])
+def test_trigger_analyzer(handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots,
+ server_hostname, get_repository, can_read, namespace, name,
+ get_monkeypatch):
+ patch_list_namespace_robots(get_monkeypatch)
+ patch_get_all_repo_users_transitive(get_monkeypatch)
+ patch_parse_dockerfile(get_monkeypatch, get_base_image)
+ patch_model_repository_get_repository(get_monkeypatch, get_repository)
+ patch_permissions(get_monkeypatch, can_read)
+ handler_mock = Mock()
+ handler_mock.configure_mock(**{'load_dockerfile_contents': handler_fn})
+ trigger_analyzer = TriggerAnalyzer(handler_mock, 'namespace', server_hostname, config_dict, admin_org_permission)
+ assert trigger_analyzer.analyze_trigger() == {'namespace': namespace,
+ 'name': name,
+ 'robots': robots,
+ 'status': status,
+ 'message': message,
+ 'is_admin': admin_org_permission}
diff --git a/endpoints/api/test/test_user.py b/endpoints/api/test/test_user.py
new file mode 100644
index 000000000..bf31b0b6d
--- /dev/null
+++ b/endpoints/api/test/test_user.py
@@ -0,0 +1,42 @@
+import pytest
+
+from mock import patch
+
+from endpoints.api.test.shared import conduct_api_call
+from endpoints.api.user import User
+from endpoints.test.shared import client_with_identity
+from features import FeatureNameValue
+
+from test.fixtures import *
+
+
+def test_user_metadata_update(client):
+ with patch('features.USER_METADATA', FeatureNameValue('USER_METADATA', True)):
+ with client_with_identity('devtable', client) as cl:
+ metadata = {
+ 'given_name': 'Quay',
+ 'family_name': 'User',
+ 'location': 'NYC',
+ 'company': 'Red Hat',
+ }
+
+ # Update all user metadata fields.
+ conduct_api_call(cl, User, 'PUT', None, body=metadata)
+
+ # Test that they were successfully updated.
+ user = conduct_api_call(cl, User, 'GET', None).json
+ for field in metadata:
+ assert user.get(field) == metadata.get(field)
+
+ # Now nullify one of the fields, and remove another.
+ metadata['company'] = None
+ location = metadata.pop('location')
+
+ conduct_api_call(cl, User, 'PUT', None, body=metadata)
+
+ user = conduct_api_call(cl, User, 'GET', None).json
+ for field in metadata:
+ assert user.get(field) == metadata.get(field)
+
+ # The location field should be unchanged.
+ assert user.get('location') == location
diff --git a/endpoints/api/trigger.py b/endpoints/api/trigger.py
new file mode 100644
index 000000000..fb9f72a48
--- /dev/null
+++ b/endpoints/api/trigger.py
@@ -0,0 +1,539 @@
+""" Create, list and manage build triggers. """
+
+import logging
+from urlparse import urlunparse
+
+from flask import request, url_for
+
+from active_migration import ActiveDataMigration, ERTMigrationFlags
+from app import app
+from auth.permissions import (UserAdminPermission, AdministerOrganizationPermission,
+ AdministerRepositoryPermission)
+from buildtrigger.basehandler import BuildTriggerHandler
+from buildtrigger.triggerutil import TriggerException, EmptyRepositoryException
+from data import model
+from data.fields import DecryptedValue
+from data.model.build import update_build_trigger
+from endpoints.api import (RepositoryParamResource, nickname, resource, require_repo_admin,
+ log_action, request_error, query_param, parse_args, internal_only,
+ validate_json_request, api, path_param, abort,
+ disallow_for_app_repositories, disallow_for_non_normal_repositories)
+from endpoints.api.build import build_status_view, trigger_view, RepositoryBuildStatus
+from endpoints.api.trigger_analyzer import TriggerAnalyzer
+from endpoints.building import (start_build, MaximumBuildsQueuedException,
+ BuildTriggerDisabledException)
+from endpoints.exception import NotFound, Unauthorized, InvalidRequest
+from util.names import parse_robot_username
+
+logger = logging.getLogger(__name__)
+
+
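+# Builds a URL of the form scheme://username:password@hostname/path, i.e. a webhook
+# endpoint with HTTP basic-auth credentials embedded in the authority component.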
+def _prepare_webhook_url(scheme, username, password, hostname, path):
+ auth_hostname = '%s:%s@%s' % (username, password, hostname)
+ return urlunparse((scheme, auth_hostname, path, '', '', ''))
+
+
+def get_trigger(trigger_uuid):
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
+ return trigger
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class BuildTriggerList(RepositoryParamResource):
+ """ Resource for listing repository build triggers. """
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @nickname('listBuildTriggers')
+ def get(self, namespace_name, repo_name):
+ """ List the triggers for the specified repository. """
+ triggers = model.build.list_build_triggers(namespace_name, repo_name)
+ return {
+ 'triggers': [trigger_view(trigger, can_admin=True) for trigger in triggers]
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+class BuildTrigger(RepositoryParamResource):
+ """ Resource for managing specific build triggers. """
+ schemas = {
+ 'UpdateTrigger': {
+ 'type': 'object',
+ 'description': 'Options for updating a build trigger',
+ 'required': [
+ 'enabled',
+ ],
+ 'properties': {
+ 'enabled': {
+ 'type': 'boolean',
+ 'description': 'Whether the build trigger is enabled',
+ },
+ }
+ },
+ }
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @nickname('getBuildTrigger')
+ def get(self, namespace_name, repo_name, trigger_uuid):
+ """ Get information for the specified build trigger. """
+ return trigger_view(get_trigger(trigger_uuid), can_admin=True)
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('updateBuildTrigger')
+ @validate_json_request('UpdateTrigger')
+ def put(self, namespace_name, repo_name, trigger_uuid):
+ """ Updates the specified build trigger. """
+ trigger = get_trigger(trigger_uuid)
+
+ handler = BuildTriggerHandler.get_handler(trigger)
+ if not handler.is_active():
+ raise InvalidRequest('Cannot update an unactivated trigger')
+
+ enable = request.get_json()['enabled']
+ model.build.toggle_build_trigger(trigger, enable)
+ log_action('toggle_repo_trigger', namespace_name,
+ {'repo': repo_name, 'trigger_id': trigger_uuid,
+ 'service': trigger.service.name, 'enabled': enable},
+ repo=model.repository.get_repository(namespace_name, repo_name))
+
+ return trigger_view(trigger)
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('deleteBuildTrigger')
+ def delete(self, namespace_name, repo_name, trigger_uuid):
+ """ Delete the specified build trigger. """
+ trigger = get_trigger(trigger_uuid)
+
+ handler = BuildTriggerHandler.get_handler(trigger)
+ if handler.is_active():
+ try:
+ handler.deactivate()
+ except TriggerException as ex:
+ # We are just going to eat this error
+ logger.warning('Trigger deactivation problem: %s', ex)
+
+ log_action('delete_repo_trigger', namespace_name,
+ {'repo': repo_name, 'trigger_id': trigger_uuid,
+ 'service': trigger.service.name},
+ repo=model.repository.get_repository(namespace_name, repo_name))
+
+ trigger.delete_instance(recursive=True)
+
+ if trigger.write_token is not None:
+ trigger.write_token.delete_instance()
+
+ return 'No Content', 204
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/subdir')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+@internal_only
+class BuildTriggerSubdirs(RepositoryParamResource):
+ """ Custom verb for fetching the subdirs which are buildable for a trigger. """
+ schemas = {
+ 'BuildTriggerSubdirRequest': {
+ 'type': 'object',
+ 'description': 'Arbitrary json.',
+ },
+ }
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('listBuildTriggerSubdirs')
+ @validate_json_request('BuildTriggerSubdirRequest')
+ def post(self, namespace_name, repo_name, trigger_uuid):
+ """ List the subdirectories available for the specified build trigger and source. """
+ trigger = get_trigger(trigger_uuid)
+
+ user_permission = UserAdminPermission(trigger.connected_user.username)
+ if user_permission.can():
+ new_config_dict = request.get_json()
+ handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
+
+ try:
+ subdirs = handler.list_build_subdirs()
+ context_map = {}
+ for file in subdirs:
+ context_map = handler.get_parent_directory_mappings(file, context_map)
+
+ return {
+ 'dockerfile_paths': ['/' + subdir for subdir in subdirs],
+ 'contextMap': context_map,
+ 'status': 'success',
+ }
+ except EmptyRepositoryException as exc:
+ return {
+ 'status': 'success',
+ 'contextMap': {},
+ 'dockerfile_paths': [],
+ }
+ except TriggerException as exc:
+ return {
+ 'status': 'error',
+ 'message': exc.message,
+ }
+ else:
+ raise Unauthorized()
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/activate')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+class BuildTriggerActivate(RepositoryParamResource):
+ """ Custom verb for activating a build trigger once all required information has been collected.
+ """
+ schemas = {
+ 'BuildTriggerActivateRequest': {
+ 'type': 'object',
+ 'required': [
+ 'config'
+ ],
+ 'properties': {
+ 'config': {
+ 'type': 'object',
+ 'description': 'Arbitrary json.',
+ },
+ 'pull_robot': {
+ 'type': 'string',
+ 'description': 'The name of the robot that will be used to pull images.'
+ }
+ }
+ },
+ }
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('activateBuildTrigger')
+ @validate_json_request('BuildTriggerActivateRequest')
+ def post(self, namespace_name, repo_name, trigger_uuid):
+ """ Activate the specified build trigger. """
+ trigger = get_trigger(trigger_uuid)
+ handler = BuildTriggerHandler.get_handler(trigger)
+ if handler.is_active():
+      raise InvalidRequest('Trigger has already been activated.')
+
+ user_permission = UserAdminPermission(trigger.connected_user.username)
+ if user_permission.can():
+ # Update the pull robot (if any).
+ pull_robot_name = request.get_json().get('pull_robot', None)
+ if pull_robot_name:
+ try:
+ pull_robot = model.user.lookup_robot(pull_robot_name)
+ except model.InvalidRobotException:
+ raise NotFound()
+
+ # Make sure the user has administer permissions for the robot's namespace.
+ (robot_namespace, _) = parse_robot_username(pull_robot_name)
+ if not AdministerOrganizationPermission(robot_namespace).can():
+ raise Unauthorized()
+
+ # Make sure the namespace matches that of the trigger.
+ if robot_namespace != namespace_name:
+ raise Unauthorized()
+
+ # Set the pull robot.
+ trigger.pull_robot = pull_robot
+
+ # Update the config.
+ new_config_dict = request.get_json()['config']
+
+ write_token_name = 'Build Trigger: %s' % trigger.service.name
+ write_token = model.token.create_delegate_token(namespace_name, repo_name, write_token_name,
+ 'write')
+
+ try:
+ path = url_for('webhooks.build_trigger_webhook', trigger_uuid=trigger.uuid)
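+        # Embed the repository-scoped delegate token in the webhook URL ('$token' as
+        # the username, the token code as the password) so calls from the external
+        # service into this webhook are authorized for this repository only.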
+ authed_url = _prepare_webhook_url(app.config['PREFERRED_URL_SCHEME'],
+ '$token', write_token.get_code(),
+ app.config['SERVER_HOSTNAME'], path)
+
+ handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
+ final_config, private_config = handler.activate(authed_url)
+
+ if 'private_key' in private_config:
+ trigger.secure_private_key = DecryptedValue(private_config['private_key'])
+
+ # TODO(remove-unenc): Remove legacy field.
+ if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
+ trigger.private_key = private_config['private_key']
+
+ except TriggerException as exc:
+ write_token.delete_instance()
+ raise request_error(message=exc.message)
+
+ # Save the updated config.
+ update_build_trigger(trigger, final_config, write_token=write_token)
+
+ # Log the trigger setup.
+ repo = model.repository.get_repository(namespace_name, repo_name)
+ log_action('setup_repo_trigger', namespace_name,
+ {'repo': repo_name, 'namespace': namespace_name,
+ 'trigger_id': trigger.uuid, 'service': trigger.service.name,
+ 'pull_robot': trigger.pull_robot.username if trigger.pull_robot else None,
+ 'config': final_config},
+ repo=repo)
+
+ return trigger_view(trigger, can_admin=True)
+ else:
+ raise Unauthorized()
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/analyze')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+@internal_only
+class BuildTriggerAnalyze(RepositoryParamResource):
+ """ Custom verb for analyzing the config for a build trigger and suggesting various changes
+ (such as a robot account to use for pulling)
+ """
+ schemas = {
+ 'BuildTriggerAnalyzeRequest': {
+ 'type': 'object',
+ 'required': [
+ 'config'
+ ],
+ 'properties': {
+ 'config': {
+ 'type': 'object',
+ 'description': 'Arbitrary json.',
+ }
+ }
+ },
+ }
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('analyzeBuildTrigger')
+ @validate_json_request('BuildTriggerAnalyzeRequest')
+ def post(self, namespace_name, repo_name, trigger_uuid):
+ """ Analyze the specified build trigger configuration. """
+ trigger = get_trigger(trigger_uuid)
+
+ if trigger.repository.namespace_user.username != namespace_name:
+ raise NotFound()
+
+ if trigger.repository.name != repo_name:
+ raise NotFound()
+
+ new_config_dict = request.get_json()['config']
+ handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
+ server_hostname = app.config['SERVER_HOSTNAME']
+ try:
+ trigger_analyzer = TriggerAnalyzer(handler,
+ namespace_name,
+ server_hostname,
+ new_config_dict,
+ AdministerOrganizationPermission(namespace_name).can())
+ return trigger_analyzer.analyze_trigger()
+ except TriggerException as rre:
+ return {
+ 'status': 'error',
+ 'message': 'Could not analyze the repository: %s' % rre.message,
+ }
+ except NotImplementedError:
+ return {
+ 'status': 'notimplemented',
+ }
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/start')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+class ActivateBuildTrigger(RepositoryParamResource):
+ """ Custom verb to manually activate a build trigger. """
+ schemas = {
+ 'RunParameters': {
+ 'type': 'object',
+ 'description': 'Optional run parameters for activating the build trigger',
+ 'properties': {
+ 'branch_name': {
+ 'type': 'string',
+ 'description': '(SCM only) If specified, the name of the branch to build.'
+ },
+ 'commit_sha': {
+ 'type': 'string',
+          'description': '(Custom only) If specified, the ref/SHA1 used to check out a git repository.'
+ },
+ 'refs': {
+ 'type': ['object', 'null'],
+          'description': '(SCM only) If specified, the ref to build.'
+ }
+ },
+ 'additionalProperties': False
+ }
+ }
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('manuallyStartBuildTrigger')
+ @validate_json_request('RunParameters')
+ def post(self, namespace_name, repo_name, trigger_uuid):
+ """ Manually start a build from the specified trigger. """
+ trigger = get_trigger(trigger_uuid)
+ if not trigger.enabled:
+ raise InvalidRequest('Trigger is not enabled.')
+
+ handler = BuildTriggerHandler.get_handler(trigger)
+ if not handler.is_active():
+ raise InvalidRequest('Trigger is not active.')
+
+ try:
+ repo = model.repository.get_repository(namespace_name, repo_name)
+ pull_robot_name = model.build.get_pull_robot_name(trigger)
+
+ run_parameters = request.get_json()
+ prepared = handler.manual_start(run_parameters=run_parameters)
+ build_request = start_build(repo, prepared, pull_robot_name=pull_robot_name)
+ except TriggerException as tse:
+ raise InvalidRequest(tse.message)
+ except MaximumBuildsQueuedException:
+ abort(429, message='Maximum queued build rate exceeded.')
+ except BuildTriggerDisabledException:
+ abort(400, message='Build trigger is disabled')
+
+ resp = build_status_view(build_request)
+ repo_string = '%s/%s' % (namespace_name, repo_name)
+ headers = {
+ 'Location': api.url_for(RepositoryBuildStatus, repository=repo_string,
+ build_uuid=build_request.uuid),
+ }
+ return resp, 201, headers
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/builds')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+class TriggerBuildList(RepositoryParamResource):
+ """ Resource to represent builds that were activated from the specified trigger. """
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @parse_args()
+ @query_param('limit', 'The maximum number of builds to return', type=int, default=5)
+ @nickname('listTriggerRecentBuilds')
+ def get(self, namespace_name, repo_name, trigger_uuid, parsed_args):
+ """ List the builds started by the specified trigger. """
+ limit = parsed_args['limit']
+ builds = model.build.list_trigger_builds(namespace_name, repo_name, trigger_uuid, limit)
+ return {
+ 'builds': [build_status_view(bld) for bld in builds]
+ }
+
+
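+# Maximum number of suggested values returned for a single trigger field.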
+FIELD_VALUE_LIMIT = 30
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/fields/<field_name>')
+@internal_only
+class BuildTriggerFieldValues(RepositoryParamResource):
+ """ Custom verb to fetch a values list for a particular field name. """
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('listTriggerFieldValues')
+ def post(self, namespace_name, repo_name, trigger_uuid, field_name):
+ """ List the field values for a custom run field. """
+ trigger = get_trigger(trigger_uuid)
+
+ config = request.get_json() or None
+ if AdministerRepositoryPermission(namespace_name, repo_name).can():
+ handler = BuildTriggerHandler.get_handler(trigger, config)
+ values = handler.list_field_values(field_name, limit=FIELD_VALUE_LIMIT)
+
+ if values is None:
+ raise NotFound()
+
+ return {
+ 'values': values
+ }
+ else:
+ raise Unauthorized()
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/sources')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+@internal_only
+class BuildTriggerSources(RepositoryParamResource):
+ """ Custom verb to fetch the list of build sources for the trigger config. """
+ schemas = {
+ 'BuildTriggerSourcesRequest': {
+ 'type': 'object',
+ 'description': 'Specifies the namespace under which to fetch sources',
+ 'properties': {
+ 'namespace': {
+ 'type': 'string',
+ 'description': 'The namespace for which to fetch sources'
+ },
+ },
+ }
+ }
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @disallow_for_non_normal_repositories
+ @nickname('listTriggerBuildSources')
+ @validate_json_request('BuildTriggerSourcesRequest')
+ def post(self, namespace_name, repo_name, trigger_uuid):
+ """ List the build sources for the trigger configuration thus far. """
+ namespace = request.get_json()['namespace']
+
+ trigger = get_trigger(trigger_uuid)
+
+ user_permission = UserAdminPermission(trigger.connected_user.username)
+ if user_permission.can():
+ handler = BuildTriggerHandler.get_handler(trigger)
+
+ try:
+ return {
+ 'sources': handler.list_build_sources_for_namespace(namespace)
+ }
+ except TriggerException as rre:
+ raise InvalidRequest(rre.message)
+ else:
+ raise Unauthorized()
+
+
+@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/namespaces')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+@path_param('trigger_uuid', 'The UUID of the build trigger')
+@internal_only
+class BuildTriggerSourceNamespaces(RepositoryParamResource):
+ """ Custom verb to fetch the list of namespaces (orgs, projects, etc) for the trigger config. """
+
+ @require_repo_admin
+ @disallow_for_app_repositories
+ @nickname('listTriggerBuildSourceNamespaces')
+ def get(self, namespace_name, repo_name, trigger_uuid):
+    """ List the source namespaces for the trigger configuration thus far. """
+ trigger = get_trigger(trigger_uuid)
+
+ user_permission = UserAdminPermission(trigger.connected_user.username)
+ if user_permission.can():
+ handler = BuildTriggerHandler.get_handler(trigger)
+
+ try:
+ return {
+ 'namespaces': handler.list_build_source_namespaces()
+ }
+ except TriggerException as rre:
+ raise InvalidRequest(rre.message)
+ else:
+ raise Unauthorized()
+
diff --git a/endpoints/api/trigger_analyzer.py b/endpoints/api/trigger_analyzer.py
new file mode 100644
index 000000000..2a29e502e
--- /dev/null
+++ b/endpoints/api/trigger_analyzer.py
@@ -0,0 +1,122 @@
+from os import path
+
+from auth import permissions
+from data import model
+from util import dockerfileparse
+
+
+def is_parent(context, dockerfile_path):
+  """ Returns True if the given context directory is a parent of the dockerfile_path. """
+ if context == "" or dockerfile_path == "":
+ return False
+
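+  # Normalize both paths to an absolute, trailing-slash form so that containment
+  # can be decided with a simple string prefix check.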
+ normalized_context = path.normpath(context)
+ if normalized_context[len(normalized_context) - 1] != path.sep:
+ normalized_context += path.sep
+
+ if normalized_context[0] != path.sep:
+ normalized_context = path.sep + normalized_context
+
+ normalized_subdir = path.normpath(path.dirname(dockerfile_path))
+ if normalized_subdir[0] != path.sep:
+ normalized_subdir = path.sep + normalized_subdir
+
+ if normalized_subdir[len(normalized_subdir) - 1] != path.sep:
+ normalized_subdir += path.sep
+
+ return normalized_subdir.startswith(normalized_context)
+
+
+class TriggerAnalyzer:
+  """ Analyzes a build trigger and returns the appropriate trigger and robot view to the frontend. """
+
+ def __init__(self, handler, namespace_name, server_hostname, new_config_dict, admin_org_permission):
+ self.handler = handler
+ self.namespace_name = namespace_name
+ self.server_hostname = server_hostname
+ self.new_config_dict = new_config_dict
+ self.admin_org_permission = admin_org_permission
+
+ def analyze_trigger(self):
+ # Load the contents of the Dockerfile.
+ contents = self.handler.load_dockerfile_contents()
+ if not contents:
+ return self.analyze_view(self.namespace_name, None, 'warning',
+ message='Specified Dockerfile path for the trigger was not found on the main ' +
+ 'branch. This trigger may fail.')
+
+ # Parse the contents of the Dockerfile.
+ parsed = dockerfileparse.parse_dockerfile(contents)
+ if not parsed:
+ return self.analyze_view(self.namespace_name, None, 'error', message='Could not parse the Dockerfile specified')
+
+ # Check whether the dockerfile_path is correct
+ if self.new_config_dict.get('context') and not is_parent(self.new_config_dict.get('context'),
+ self.new_config_dict.get('dockerfile_path')):
+ return self.analyze_view(self.namespace_name, None, 'error',
+ message='Dockerfile, %s, is not a child of the context, %s.' %
+                               (self.new_config_dict.get('dockerfile_path'),
+                                self.new_config_dict.get('context')))
+
+ # Determine the base image (i.e. the FROM) for the Dockerfile.
+ base_image = parsed.get_base_image()
+ if not base_image:
+ return self.analyze_view(self.namespace_name, None, 'warning', message='No FROM line found in the Dockerfile')
+
+ # Check to see if the base image lives in Quay.
+ quay_registry_prefix = '%s/' % self.server_hostname
+ if not base_image.startswith(quay_registry_prefix):
+ return self.analyze_view(self.namespace_name, None, 'publicbase')
+
+ # Lookup the repository in Quay.
+ result = str(base_image)[len(quay_registry_prefix):].split('/', 2)
+ if len(result) != 2:
+ msg = '"%s" is not a valid Quay repository path' % base_image
+ return self.analyze_view(self.namespace_name, None, 'warning', message=msg)
+
+ (base_namespace, base_repository) = result
+ found_repository = model.repository.get_repository(base_namespace, base_repository)
+ if not found_repository:
+ return self.analyze_view(self.namespace_name, None, 'error',
+ message='Repository "%s" referenced by the Dockerfile was not found' % base_image)
+
+ # If the repository is private and the user cannot see that repo, then
+ # mark it as not found.
+ can_read = permissions.ReadRepositoryPermission(base_namespace, base_repository)
+    if found_repository.visibility.name != 'public' and not can_read.can():
+ return self.analyze_view(self.namespace_name, None, 'error',
+ message='Repository "%s" referenced by the Dockerfile was not found' % base_image)
+
+ if found_repository.visibility.name == 'public':
+ return self.analyze_view(base_namespace, base_repository, 'publicbase')
+
+ return self.analyze_view(base_namespace, base_repository, 'requiresrobot')
+
+ def analyze_view(self, image_namespace, image_repository, status, message=None):
+ # Retrieve the list of robots and mark whether they have read access already.
+ robots = []
+ if self.admin_org_permission:
+ if image_repository is not None:
+ perm_query = model.user.get_all_repo_users_transitive(image_namespace, image_repository)
+ user_ids_with_permission = set([user.id for user in perm_query])
+ else:
+ user_ids_with_permission = set()
+
+ def robot_view(robot):
+ return {
+ 'name': robot.username,
+ 'kind': 'user',
+ 'is_robot': True,
+ 'can_read': robot.id in user_ids_with_permission,
+ }
+
+ robots = [robot_view(robot) for robot in model.user.list_namespace_robots(image_namespace)]
+
+ return {
+ 'namespace': image_namespace,
+ 'name': image_repository,
+ 'robots': robots,
+ 'status': status,
+ 'message': message,
+ 'is_admin': self.admin_org_permission,
+ }
diff --git a/endpoints/api/user.py b/endpoints/api/user.py
new file mode 100644
index 000000000..4eabe1088
--- /dev/null
+++ b/endpoints/api/user.py
@@ -0,0 +1,1120 @@
+""" Manage the current user. """
+
+import logging
+import json
+import recaptcha2
+
+from flask import request, abort
+from flask_login import logout_user
+from flask_principal import identity_changed, AnonymousIdentity
+from peewee import IntegrityError
+
+import features
+
+from app import (app, billing as stripe, authentication, avatar, user_analytics, all_queues,
+ oauth_login, namespace_gc_queue, ip_resolver, url_scheme_and_hostname)
+
+from auth import scopes
+from auth.auth_context import get_authenticated_user
+from auth.permissions import (AdministerOrganizationPermission, CreateRepositoryPermission,
+ UserAdminPermission, UserReadPermission, SuperUserPermission)
+from data import model
+from data.billing import get_plan
+from data.database import Repository as RepositoryTable
+from data.users.shared import can_create_user
+from endpoints.api import (ApiResource, nickname, resource, validate_json_request, request_error,
+ log_action, internal_only, require_user_admin, parse_args,
+ query_param, require_scope, format_date, show_if,
+ require_fresh_login, path_param, define_json_response,
+ RepositoryParamResource, page_support)
+from endpoints.exception import NotFound, InvalidToken, InvalidRequest, DownstreamIssue
+from endpoints.api.subscribe import subscribe
+from endpoints.common import common_login
+from endpoints.csrf import generate_csrf_token, OAUTH_CSRF_TOKEN_NAME
+from endpoints.decorators import anon_allowed, readonly_call_allowed
+from oauth.oidc import DiscoveryFailureException
+from util.useremails import (send_confirmation_email, send_recovery_email, send_change_email,
+ send_password_changed, send_org_recovery_email)
+from util.names import parse_single_urn
+from util.saas.useranalytics import build_error_callback
+from util.request import get_request_ip
+
+
+REPOS_PER_PAGE = 100
+
+
+logger = logging.getLogger(__name__)
+
+
+def handle_invite_code(invite_code, user):
+ """ Checks that the given invite code matches the specified user's e-mail address. If so, the
+ user is marked as having a verified e-mail address and this method returns True.
+ """
+ parsed_invite = parse_single_urn(invite_code)
+ if parsed_invite is None:
+ return False
+
+ if parsed_invite[0] != 'teaminvite':
+ return False
+
+ # Check to see if the team invite is valid. If so, then we know the user has
+ # a possible matching email address.
+ try:
+ found = model.team.find_matching_team_invite(invite_code, user)
+ except model.DataModelException:
+ return False
+
+ # Since we sent the invite code via email, mark the user as having a verified
+ # email address.
+ if found.email != user.email:
+ return False
+
+ user.verified = True
+ user.save()
+ return True
+
+
+def user_view(user, previous_username=None):
+ def org_view(o, user_admin=True):
+ admin_org = AdministerOrganizationPermission(o.username)
+ org_response = {
+ 'name': o.username,
+ 'avatar': avatar.get_data_for_org(o),
+ 'can_create_repo': CreateRepositoryPermission(o.username).can(),
+ 'public': o.username in app.config.get('PUBLIC_NAMESPACES', []),
+ }
+
+ if user_admin:
+ org_response.update({
+ 'is_org_admin': admin_org.can(),
+ 'preferred_namespace': not (o.stripe_id is None),
+ })
+
+ return org_response
+
+ # Retrieve the organizations for the user.
+ organizations = {o.username: o for o in model.organization.get_user_organizations(user.username)}
+
+ # Add any public namespaces.
+ public_namespaces = app.config.get('PUBLIC_NAMESPACES', [])
+ if public_namespaces:
+ organizations.update({ns: model.user.get_namespace_user(ns) for ns in public_namespaces})
+
+ def login_view(login):
+ try:
+ metadata = json.loads(login.metadata_json)
+ except:
+ metadata = {}
+
+ return {
+ 'service': login.service.name,
+ 'service_identifier': login.service_ident,
+ 'metadata': metadata
+ }
+
+ logins = model.user.list_federated_logins(user)
+
+ user_response = {
+ 'anonymous': False,
+ 'username': user.username,
+ 'avatar': avatar.get_data_for_user(user),
+ }
+
+ user_admin = UserAdminPermission(previous_username if previous_username else user.username)
+ if user_admin.can():
+ user_response.update({
+ 'can_create_repo': True,
+ 'is_me': True,
+ 'verified': user.verified,
+ 'email': user.email,
+ 'logins': [login_view(login) for login in logins],
+ 'invoice_email': user.invoice_email,
+ 'invoice_email_address': user.invoice_email_address,
+ 'preferred_namespace': not (user.stripe_id is None),
+ 'tag_expiration_s': user.removed_tag_expiration_s,
+ 'prompts': model.user.get_user_prompts(user),
+ 'company': user.company,
+ 'family_name': user.family_name,
+ 'given_name': user.given_name,
+ 'location': user.location,
+ 'is_free_account': user.stripe_id is None,
+ 'has_password_set': authentication.has_password_set(user.username),
+ })
+
+ analytics_metadata = user_analytics.get_user_analytics_metadata(user)
+
+ # This is a sync call, but goes through the async wrapper interface and
+ # returns a Future. By calling with timeout 0 immediately after the method
+ # call, we ensure that if it ever accidentally becomes async it will raise
+ # a TimeoutError.
+ user_response.update(analytics_metadata.result(timeout=0))
+
+ user_view_perm = UserReadPermission(user.username)
+ if user_view_perm.can():
+ user_response.update({
+ 'organizations': [org_view(o, user_admin=user_admin.can()) for o in organizations.values()],
+ })
+
+
+ if features.SUPER_USERS and SuperUserPermission().can():
+ user_response.update({
+ 'super_user': user and user == get_authenticated_user() and SuperUserPermission().can()
+ })
+
+ return user_response
+
+
+def notification_view(note):
+ return {
+ 'id': note.uuid,
+ 'organization': note.target.username if note.target.organization else None,
+ 'kind': note.kind.name,
+ 'created': format_date(note.created),
+ 'metadata': json.loads(note.metadata_json),
+ 'dismissed': note.dismissed
+ }
+
+
+@resource('/v1/user/')
+class User(ApiResource):
+ """ Operations related to users. """
+ schemas = {
+ 'NewUser': {
+ 'type': 'object',
+ 'description': 'Fields which must be specified for a new user.',
+ 'required': [
+ 'username',
+ 'password',
+ ],
+ 'properties': {
+ 'username': {
+ 'type': 'string',
+ 'description': 'The user\'s username',
+ },
+ 'password': {
+ 'type': 'string',
+ 'description': 'The user\'s password',
+ },
+ 'email': {
+ 'type': 'string',
+ 'description': 'The user\'s email address',
+ },
+ 'invite_code': {
+ 'type': 'string',
+ 'description': 'The optional invite code',
+ },
+ 'recaptcha_response': {
+ 'type': 'string',
+          'description': 'The recaptcha response code, used for verification when recaptcha is enabled',
+ },
+ }
+ },
+ 'UpdateUser': {
+ 'type': 'object',
+ 'description': 'Fields which can be updated in a user.',
+ 'properties': {
+ 'password': {
+ 'type': 'string',
+ 'description': 'The user\'s password',
+ },
+ 'invoice_email': {
+ 'type': 'boolean',
+ 'description': 'Whether the user desires to receive an invoice email.',
+ },
+ 'email': {
+ 'type': 'string',
+ 'description': 'The user\'s email address',
+ },
+ 'tag_expiration_s': {
+ 'type': 'integer',
+ 'minimum': 0,
+ 'description': 'The number of seconds for tag expiration',
+ },
+ 'username': {
+ 'type': 'string',
+ 'description': 'The user\'s username',
+ },
+ 'invoice_email_address': {
+ 'type': ['string', 'null'],
+ 'description': 'Custom email address for receiving invoices',
+ },
+ 'given_name': {
+ 'type': ['string', 'null'],
+ 'description': 'The optional entered given name for the user',
+ },
+ 'family_name': {
+ 'type': ['string', 'null'],
+ 'description': 'The optional entered family name for the user',
+ },
+ 'company': {
+ 'type': ['string', 'null'],
+ 'description': 'The optional entered company for the user',
+ },
+ 'location': {
+ 'type': ['string', 'null'],
+ 'description': 'The optional entered location for the user',
+ },
+ },
+ },
+ 'UserView': {
+ 'type': 'object',
+ 'description': 'Describes a user',
+ 'required': ['anonymous', 'avatar'],
+ 'properties': {
+ 'verified': {
+ 'type': 'boolean',
+ 'description': 'Whether the user\'s email address has been verified'
+ },
+ 'anonymous': {
+ 'type': 'boolean',
+ 'description': 'true if this user data represents a guest user'
+ },
+ 'email': {
+ 'type': 'string',
+ 'description': 'The user\'s email address',
+ },
+ 'avatar': {
+ 'type': 'object',
+ 'description': 'Avatar data representing the user\'s icon'
+ },
+ 'organizations': {
+ 'type': 'array',
+ 'description': 'Information about the organizations in which the user is a member',
+ 'items': {
+ 'type': 'object'
+ }
+ },
+ 'logins': {
+ 'type': 'array',
+ 'description': 'The list of external login providers against which the user has authenticated',
+ 'items': {
+ 'type': 'object'
+ }
+ },
+ 'can_create_repo': {
+ 'type': 'boolean',
+ 'description': 'Whether the user has permission to create repositories'
+ },
+ 'preferred_namespace': {
+ 'type': 'boolean',
+ 'description': 'If true, the user\'s namespace is the preferred namespace to display'
+ }
+ }
+ },
+ }
+
+ @require_scope(scopes.READ_USER)
+ @nickname('getLoggedInUser')
+ @define_json_response('UserView')
+ @anon_allowed
+ def get(self):
+ """ Get user information for the authenticated user. """
+ user = get_authenticated_user()
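+    # Organizations are rows in the same table as users; reject them here, along
+    # with anonymous callers and users the caller cannot read.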
+ if user is None or user.organization or not UserReadPermission(user.username).can():
+ raise InvalidToken("Requires authentication", payload={'session_required': False})
+
+ return user_view(user)
+
+ @require_user_admin
+ @require_fresh_login
+ @nickname('changeUserDetails')
+ @internal_only
+ @validate_json_request('UpdateUser')
+ def put(self):
+    """ Update a user's details, such as password or email. """
+ user = get_authenticated_user()
+ user_data = request.get_json()
+ previous_username = None
+ headers = None
+
+ try:
+ if 'password' in user_data:
+ logger.debug('Changing password for user: %s', user.username)
+ log_action('account_change_password', user.username)
+
+ # Change the user's password.
+ model.user.change_password(user, user_data['password'])
+
+ # Login again to reset their session cookie.
+ success, headers = common_login(user.uuid)
+ if not success:
+ raise request_error(message='Could not perform login action')
+
+ if features.MAILING:
+ send_password_changed(user.username, user.email)
+
+ if 'invoice_email' in user_data:
+ logger.debug('Changing invoice_email for user: %s', user.username)
+ model.user.change_send_invoice_email(user, user_data['invoice_email'])
+
+ if features.CHANGE_TAG_EXPIRATION and 'tag_expiration_s' in user_data:
+ logger.debug('Changing user tag expiration to: %ss', user_data['tag_expiration_s'])
+ model.user.change_user_tag_expiration(user, user_data['tag_expiration_s'])
+
+ if ('invoice_email_address' in user_data and
+ user_data['invoice_email_address'] != user.invoice_email_address):
+ model.user.change_invoice_email_address(user, user_data['invoice_email_address'])
+
+ if 'email' in user_data and user_data['email'] != user.email:
+ new_email = user_data['email']
+ if model.user.find_user_by_email(new_email):
+ # Email already used.
+ raise request_error(message='E-mail address already used')
+
+ if features.MAILING:
+ logger.debug('Sending email to change email address for user: %s',
+ user.username)
+ confirmation_code = model.user.create_confirm_email_code(user, new_email=new_email)
+ send_change_email(user.username, user_data['email'], confirmation_code)
+ else:
+ ua_future = user_analytics.change_email(user.email, new_email)
+ ua_future.add_done_callback(build_error_callback('Change email failed'))
+ model.user.update_email(user, new_email, auto_verify=not features.MAILING)
+
+ if features.USER_METADATA:
+ metadata = {}
+
+ for field in ('given_name', 'family_name', 'company', 'location'):
+ if field in user_data:
+ metadata[field] = user_data.get(field)
+
+ if len(metadata) > 0:
+ model.user.update_user_metadata(user, metadata)
+
+ ua_mdata_future = user_analytics.change_metadata(user.email, **metadata)
+ ua_mdata_future.add_done_callback(build_error_callback('Change metadata failed'))
+
+ # Check for username rename. A username can be renamed if the feature is enabled OR the user
+ # currently has a confirm_username prompt.
+ if 'username' in user_data:
+ confirm_username = model.user.has_user_prompt(user, 'confirm_username')
+ new_username = user_data.get('username')
+ previous_username = user.username
+
+ rename_allowed = (features.USER_RENAME or
+ (confirm_username and features.USERNAME_CONFIRMATION))
+ username_changing = new_username and new_username != previous_username
+
+ if rename_allowed and username_changing:
+ if model.user.get_user_or_org(new_username) is not None:
+ # Username already used.
+ raise request_error(message='Username is already in use')
+
+ user = model.user.change_username(user.id, new_username)
+ username_future = user_analytics.change_username(user.email, new_username)
+ username_future.add_done_callback(build_error_callback('Change username failed'))
+
+ elif confirm_username:
+ model.user.remove_user_prompt(user, 'confirm_username')
+
+    except model.user.InvalidPasswordException as ex:
+ raise request_error(exception=ex)
+
+ return user_view(user, previous_username=previous_username), 200, headers
+
+ @show_if(features.USER_CREATION)
+ @show_if(features.DIRECT_LOGIN)
+ @nickname('createNewUser')
+ @internal_only
+ @validate_json_request('NewUser')
+ def post(self):
+ """ Create a new user. """
+ if app.config['AUTHENTICATION_TYPE'] != 'Database':
+ abort(404)
+
+ user_data = request.get_json()
+
+ invite_code = user_data.get('invite_code', '')
+ existing_user = model.user.get_nonrobot_user(user_data['username'])
+ if existing_user:
+ raise request_error(message='The username already exists')
+
+ # Ensure an e-mail address was specified if required.
+ if features.MAILING and not user_data.get('email'):
+ raise request_error(message='Email address is required')
+
+ # If invite-only user creation is turned on and no invite code was sent, return an error.
+ # Technically, this is handled by the can_create_user call below as well, but it makes
+ # a nicer error.
+ if features.INVITE_ONLY_USER_CREATION and not invite_code:
+ raise request_error(message='Cannot create non-invited user')
+
+ # Ensure that this user can be created.
+ blacklisted_domains = app.config.get('BLACKLISTED_EMAIL_DOMAINS', [])
+ if not can_create_user(user_data.get('email'), blacklisted_domains=blacklisted_domains):
+ raise request_error(message='Creation of a user account for this e-mail is disabled; please contact an administrator')
+
+ # If recaptcha is enabled, then verify the user is a human.
+ if features.RECAPTCHA:
+ recaptcha_response = user_data.get('recaptcha_response', '')
+ result = recaptcha2.verify(app.config['RECAPTCHA_SECRET_KEY'],
+ recaptcha_response,
+ get_request_ip())
+
+ if not result['success']:
+ return {
+ 'message': 'Are you a bot? If not, please revalidate the captcha.'
+ }, 400
+
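+    # Record whether the request comes from an IP address flagged as a possible
+    # threat; the flag is passed through to user creation below.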
+ is_possible_abuser = ip_resolver.is_ip_possible_threat(get_request_ip())
+ try:
+ prompts = model.user.get_default_user_prompts(features)
+ new_user = model.user.create_user(user_data['username'], user_data['password'],
+ user_data.get('email'),
+ auto_verify=not features.MAILING,
+ email_required=features.MAILING,
+ is_possible_abuser=is_possible_abuser,
+ prompts=prompts)
+
+ email_address_confirmed = handle_invite_code(invite_code, new_user)
+ if features.MAILING and not email_address_confirmed:
+ confirmation_code = model.user.create_confirm_email_code(new_user)
+ send_confirmation_email(new_user.username, new_user.email, confirmation_code)
+ return {
+ 'awaiting_verification': True
+ }
+ else:
+ success, headers = common_login(new_user.uuid)
+ if not success:
+ return {
+          'message': 'Could not log in. Is your account inactive?'
+ }, 403
+
+ return user_view(new_user), 200, headers
+ except model.user.DataModelException as ex:
+ raise request_error(exception=ex)
+
+ @require_user_admin
+ @require_fresh_login
+ @nickname('deleteCurrentUser')
+ @internal_only
+ def delete(self):
+ """ Deletes the current user. """
+ if app.config['AUTHENTICATION_TYPE'] != 'Database':
+ abort(404)
+
+ model.user.mark_namespace_for_deletion(get_authenticated_user(), all_queues, namespace_gc_queue)
+ return '', 204
+
+
+@resource('/v1/user/private')
+@internal_only
+@show_if(features.BILLING)
+class PrivateRepositories(ApiResource):
+ """ Operations dealing with the available count of private repositories. """
+ @require_user_admin
+ @nickname('getUserPrivateAllowed')
+ def get(self):
+ """ Get the number of private repos this user has, and whether they are allowed to create more.
+ """
+ user = get_authenticated_user()
+ private_repos = model.user.get_private_repo_count(user.username)
+ repos_allowed = 0
+
+ if user.stripe_id:
+ cus = stripe.Customer.retrieve(user.stripe_id)
+ if cus.subscription:
+ plan = get_plan(cus.subscription.plan.id)
+ if plan:
+ repos_allowed = plan['privateRepos']
+
+ return {
+ 'privateCount': private_repos,
+ 'privateAllowed': (private_repos < repos_allowed)
+ }
+
+
+@resource('/v1/user/clientkey')
+@internal_only
+class ClientKey(ApiResource):
+ """ Operations for returning an encrypted key which can be used in place of a password
+ for the Docker client. """
+ schemas = {
+ 'GenerateClientKey': {
+ 'type': 'object',
+ 'required': [
+ 'password',
+ ],
+ 'properties': {
+ 'password': {
+ 'type': 'string',
+ 'description': 'The user\'s password',
+ },
+ }
+ }
+ }
+
+ @require_user_admin
+ @nickname('generateUserClientKey')
+ @validate_json_request('GenerateClientKey')
+ def post(self):
+    """ Returns the user's private client key. """
+ if not authentication.supports_encrypted_credentials:
+ raise NotFound()
+
+ username = get_authenticated_user().username
+ password = request.get_json()['password']
+ (result, error_message) = authentication.confirm_existing_user(username, password)
+ if not result:
+ raise request_error(message=error_message)
+
+ return {
+ 'key': authentication.encrypt_user_password(password)
+ }
+
+
+def conduct_signin(username_or_email, password, invite_code=None):
+ needs_email_verification = False
+ invalid_credentials = False
+
+ (found_user, error_message) = authentication.verify_and_link_user(username_or_email, password)
+ if found_user:
+ # If there is an attached invitation code, handle it here. This will mark the
+ # user as verified if the code is valid.
+ if invite_code:
+ handle_invite_code(invite_code, found_user)
+
+ success, headers = common_login(found_user.uuid)
+ if success:
+ return {'success': True}, 200, headers
+ else:
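+      # Credentials verified but login failed: treat this as an account that still
+      # needs e-mail verification.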
+ needs_email_verification = True
+
+ else:
+ invalid_credentials = True
+
+ return {
+ 'needsEmailVerification': needs_email_verification,
+ 'invalidCredentials': invalid_credentials,
+ 'message': error_message
+ }, 403, None
+
+
+@resource('/v1/user/convert')
+@internal_only
+@show_if(app.config['AUTHENTICATION_TYPE'] == 'Database')
+class ConvertToOrganization(ApiResource):
+ """ Operations for converting a user to an organization. """
+ schemas = {
+ 'ConvertUser': {
+ 'type': 'object',
+ 'description': 'Information required to convert a user to an organization.',
+ 'required': [
+ 'adminUser',
+ 'adminPassword'
+ ],
+ 'properties': {
+ 'adminUser': {
+ 'type': 'string',
+          'description': 'The username of the user who will become the org admin',
+ },
+ 'adminPassword': {
+ 'type': 'string',
+          'description': 'The password of the user who will become the org admin',
+ },
+ 'plan': {
+ 'type': 'string',
+ 'description': 'The plan to which the organization should be subscribed',
+ },
+ },
+ },
+ }
+
+ @require_user_admin
+ @nickname('convertUserToOrganization')
+ @validate_json_request('ConvertUser')
+ def post(self):
+ """ Convert the user to an organization. """
+ user = get_authenticated_user()
+ convert_data = request.get_json()
+
+ # Ensure that the sign in credentials work.
+ admin_username = convert_data['adminUser']
+ admin_password = convert_data['adminPassword']
+ (admin_user, _) = authentication.verify_and_link_user(admin_username, admin_password)
+ if not admin_user:
+ raise request_error(reason='invaliduser',
+ message='The admin user credentials are not valid')
+
+    # Ensure that the new admin user is not the user being converted.
+ if admin_user.id == user.id:
+ raise request_error(reason='invaliduser', message='The admin user is not valid')
+
+ # Subscribe the organization to the new plan.
+ if features.BILLING:
+ plan = convert_data.get('plan', 'free')
+ subscribe(user, plan, None, True) # Require business plans
+
+ # Convert the user to an organization.
+ model.organization.convert_user_to_organization(user, admin_user)
+ log_action('account_convert', user.username)
+
+ # And finally login with the admin credentials.
+ return conduct_signin(admin_username, admin_password)
+
+
+@resource('/v1/signin')
+@show_if(features.DIRECT_LOGIN)
+@internal_only
+class Signin(ApiResource):
+ """ Operations for signing in the user. """
+ schemas = {
+ 'SigninUser': {
+ 'type': 'object',
+ 'description': 'Information required to sign in a user.',
+ 'required': [
+ 'username',
+ 'password',
+ ],
+ 'properties': {
+ 'username': {
+ 'type': 'string',
+ 'description': 'The user\'s username',
+ },
+ 'password': {
+ 'type': 'string',
+ 'description': 'The user\'s password',
+ },
+ 'invite_code': {
+ 'type': 'string',
+ 'description': 'The optional invite code'
+ },
+ },
+ },
+ }
+
+ @nickname('signinUser')
+ @validate_json_request('SigninUser')
+ @anon_allowed
+ @readonly_call_allowed
+ def post(self):
+ """ Sign in the user with the specified credentials. """
+ signin_data = request.get_json()
+ if not signin_data:
+ raise NotFound()
+
+ username = signin_data['username']
+ password = signin_data['password']
+ invite_code = signin_data.get('invite_code', '')
+ return conduct_signin(username, password, invite_code=invite_code)
+
+
+@resource('/v1/signin/verify')
+@internal_only
+class VerifyUser(ApiResource):
+ """ Operations for verifying the existing user. """
+ schemas = {
+ 'VerifyUser': {
+ 'id': 'VerifyUser',
+ 'type': 'object',
+ 'description': 'Information required to verify the signed in user.',
+ 'required': [
+ 'password',
+ ],
+ 'properties': {
+ 'password': {
+ 'type': 'string',
+ 'description': 'The user\'s password',
+ },
+ },
+ },
+ }
+
+ @require_user_admin
+ @nickname('verifyUser')
+ @validate_json_request('VerifyUser')
+ @readonly_call_allowed
+ def post(self):
+    """ Verifies the signed-in user with the specified credentials. """
+ signin_data = request.get_json()
+ password = signin_data['password']
+
+ username = get_authenticated_user().username
+ (result, error_message) = authentication.confirm_existing_user(username, password)
+ if not result:
+ return {
+ 'message': error_message,
+ 'invalidCredentials': True,
+ }, 403
+
+ success, headers = common_login(result.uuid)
+ if not success:
+ return {
+ 'message': 'Could not verify user.',
+ }, 403
+
+ return {'success': True}, 200, headers
+
+
+@resource('/v1/signout')
+@internal_only
+class Signout(ApiResource):
+ """ Resource for signing out users. """
+ @nickname('logout')
+ def post(self):
+ """ Request that the current user be signed out. """
+ # Invalidate all sessions for the user.
+ model.user.invalidate_all_sessions(get_authenticated_user())
+
+ # Clear out the user's identity.
+ identity_changed.send(app, identity=AnonymousIdentity())
+
+ # Remove the user's session cookie.
+ logout_user()
+
+ return {'success': True}
+
+
+@resource('/v1/externallogin/<service_id>')
+@internal_only
+class ExternalLoginInformation(ApiResource):
+ """ Resource for both setting a token for external login and returning its authorization
+ url.
+ """
+ schemas = {
+ 'GetLogin': {
+ 'type': 'object',
+      'description': 'Information required to retrieve an external login URL.',
+ 'required': [
+ 'kind',
+ ],
+ 'properties': {
+ 'kind': {
+ 'type': 'string',
+ 'description': 'The kind of URL',
+ 'enum': ['login', 'attach', 'cli'],
+ },
+ },
+ },
+ }
+
+
+ @nickname('retrieveExternalLoginAuthorizationUrl')
+ @anon_allowed
+ @readonly_call_allowed
+ @validate_json_request('GetLogin')
+ def post(self, service_id):
+ """ Generates the auth URL and CSRF token explicitly for OIDC/OAuth-associated login. """
+ login_service = oauth_login.get_service(service_id)
+ if login_service is None:
+ raise InvalidRequest()
+
+ csrf_token = generate_csrf_token(OAUTH_CSRF_TOKEN_NAME)
+ kind = request.get_json()['kind']
+ redirect_suffix = '' if kind == 'login' else '/' + kind
+
+ try:
+ login_scopes = login_service.get_login_scopes()
+ auth_url = login_service.get_auth_url(url_scheme_and_hostname, redirect_suffix, csrf_token, login_scopes)
+ return {'auth_url': auth_url}
+ except DiscoveryFailureException as dfe:
+      logger.exception('Could not discover OAuth endpoint information')
+ raise DownstreamIssue(dfe.message)
+
+
+@resource('/v1/detachexternal/<service_id>')
+@show_if(features.DIRECT_LOGIN)
+@internal_only
+class DetachExternal(ApiResource):
+ """ Resource for detaching an external login. """
+ @require_user_admin
+ @nickname('detachExternalLogin')
+ def post(self, service_id):
+ """ Request that the current user be detached from the external login service. """
+ model.user.detach_external_login(get_authenticated_user(), service_id)
+ return {'success': True}
+
+
+@resource("/v1/recovery")
+@show_if(features.MAILING)
+@internal_only
+class Recovery(ApiResource):
+ """ Resource for requesting a password recovery email. """
+ schemas = {
+ 'RequestRecovery': {
+ 'type': 'object',
+      'description': 'Information required to request a password recovery email.',
+ 'required': [
+ 'email',
+ ],
+ 'properties': {
+ 'email': {
+ 'type': 'string',
+ 'description': 'The user\'s email address',
+ },
+ 'recaptcha_response': {
+ 'type': 'string',
+          'description': 'The recaptcha response code, used for verification when recaptcha is enabled',
+ },
+ },
+ },
+ }
+
+ @nickname('requestRecoveryEmail')
+ @anon_allowed
+ @validate_json_request('RequestRecovery')
+ def post(self):
+ """ Request a password recovery email."""
+ def redact(value):
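+      # Keep roughly the first and last third of the characters and replace the
+      # middle with bullet glyphs, so the organization name is only hinted at.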
+ threshold = max((len(value) / 3) - 1, 1)
+ v = ''
+ for i in range(0, len(value)):
+ if i < threshold or i >= len(value) - threshold:
+ v = v + value[i]
+ else:
+ v = v + u'\u2022'
+
+ return v
+
+ recovery_data = request.get_json()
+
+ # If recaptcha is enabled, then verify the user is a human.
+ if features.RECAPTCHA:
+ recaptcha_response = recovery_data.get('recaptcha_response', '')
+ result = recaptcha2.verify(app.config['RECAPTCHA_SECRET_KEY'],
+ recaptcha_response,
+ get_request_ip())
+
+ if not result['success']:
+ return {
+ 'message': 'Are you a bot? If not, please revalidate the captcha.'
+ }, 400
+
+ email = recovery_data['email']
+ user = model.user.find_user_by_email(email)
+ if not user:
+ return {
+ 'status': 'sent',
+ }
+
+ if user.organization:
+ send_org_recovery_email(user, model.organization.get_admin_users(user))
+ return {
+ 'status': 'org',
+ 'orgemail': email,
+ 'orgname': redact(user.username),
+ }
+
+ confirmation_code = model.user.create_reset_password_email_code(email)
+ send_recovery_email(email, confirmation_code)
+ return {
+ 'status': 'sent',
+ }
+
+
+@resource('/v1/user/notifications')
+@internal_only
+class UserNotificationList(ApiResource):
+ @require_user_admin
+ @parse_args()
+ @query_param('page', 'Offset page number. (int)', type=int, default=0)
+ @query_param('limit', 'Limit on the number of results (int)', type=int, default=5)
+ @nickname('listUserNotifications')
+ def get(self, parsed_args):
+ page = parsed_args['page']
+ limit = parsed_args['limit']
+
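+    # Fetch one more row than requested so we can tell whether additional
+    # notifications exist beyond this page.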
+ notifications = list(model.notification.list_notifications(get_authenticated_user(), page=page,
+ limit=limit + 1))
+ has_more = False
+
+ if len(notifications) > limit:
+ has_more = True
+ notifications = notifications[0:limit]
+
+ return {
+ 'notifications': [notification_view(note) for note in notifications],
+ 'additional': has_more
+ }
+
+
+@resource('/v1/user/notifications/<uuid>')
+@path_param('uuid', 'The uuid of the user notification')
+@internal_only
+class UserNotification(ApiResource):
+ schemas = {
+ 'UpdateNotification': {
+ 'type': 'object',
+ 'description': 'Information for updating a notification',
+ 'properties': {
+ 'dismissed': {
+ 'type': 'boolean',
+ 'description': 'Whether the notification is dismissed by the user',
+ },
+ },
+ },
+ }
+
+ @require_user_admin
+ @nickname('getUserNotification')
+ def get(self, uuid):
+ note = model.notification.lookup_notification(get_authenticated_user(), uuid)
+ if not note:
+ raise NotFound()
+
+ return notification_view(note)
+
+ @require_user_admin
+ @nickname('updateUserNotification')
+ @validate_json_request('UpdateNotification')
+ def put(self, uuid):
+ note = model.notification.lookup_notification(get_authenticated_user(), uuid)
+ if not note:
+ raise NotFound()
+
+ note.dismissed = request.get_json().get('dismissed', False)
+ note.save()
+
+ return notification_view(note)
+
+
+def authorization_view(access_token):
+ oauth_app = access_token.application
+ app_email = oauth_app.avatar_email or oauth_app.organization.email
+ return {
+ 'application': {
+ 'name': oauth_app.name,
+ 'description': oauth_app.description,
+ 'url': oauth_app.application_uri,
+ 'avatar': avatar.get_data(oauth_app.name, app_email, 'app'),
+ 'organization': {
+ 'name': oauth_app.organization.username,
+ 'avatar': avatar.get_data_for_org(oauth_app.organization)
+ }
+ },
+ 'scopes': scopes.get_scope_information(access_token.scope),
+ 'uuid': access_token.uuid
+ }
+
+@resource('/v1/user/authorizations')
+@internal_only
+class UserAuthorizationList(ApiResource):
+ @require_user_admin
+ @nickname('listUserAuthorizations')
+ def get(self):
+ access_tokens = model.oauth.list_access_tokens_for_user(get_authenticated_user())
+
+ return {
+ 'authorizations': [authorization_view(token) for token in access_tokens]
+ }
+
+
+@resource('/v1/user/authorizations/<access_token_uuid>')
+@path_param('access_token_uuid', 'The uuid of the access token')
+@internal_only
+class UserAuthorization(ApiResource):
+ @require_user_admin
+ @nickname('getUserAuthorization')
+ def get(self, access_token_uuid):
+ access_token = model.oauth.lookup_access_token_for_user(get_authenticated_user(),
+ access_token_uuid)
+ if not access_token:
+ raise NotFound()
+
+ return authorization_view(access_token)
+
+ @require_user_admin
+ @nickname('deleteUserAuthorization')
+ def delete(self, access_token_uuid):
+ access_token = model.oauth.lookup_access_token_for_user(get_authenticated_user(), access_token_uuid)
+ if not access_token:
+ raise NotFound()
+
+ access_token.delete_instance(recursive=True, delete_nullable=True)
+ return '', 204
+
+@resource('/v1/user/starred')
+class StarredRepositoryList(ApiResource):
+ """ Operations for creating and listing starred repositories. """
+ schemas = {
+ 'NewStarredRepository': {
+ 'type': 'object',
+ 'required': [
+ 'namespace',
+ 'repository',
+ ],
+ 'properties': {
+ 'namespace': {
+ 'type': 'string',
+ 'description': 'Namespace in which the repository belongs',
+ },
+ 'repository': {
+ 'type': 'string',
+ 'description': 'Repository name'
+ }
+ }
+ }
+ }
+
+ @nickname('listStarredRepos')
+ @parse_args()
+ @require_user_admin
+ @page_support()
+ def get(self, page_token, parsed_args):
+ """ List all starred repositories. """
+ repo_query = model.repository.get_user_starred_repositories(get_authenticated_user())
+
+ repos, next_page_token = model.modelutil.paginate(repo_query, RepositoryTable,
+ page_token=page_token, limit=REPOS_PER_PAGE)
+
+ def repo_view(repo_obj):
+ return {
+ 'namespace': repo_obj.namespace_user.username,
+ 'name': repo_obj.name,
+ 'description': repo_obj.description,
+ 'is_public': repo_obj.visibility.name == 'public',
+ }
+
+ return {'repositories': [repo_view(repo) for repo in repos]}, next_page_token
+
+ @require_scope(scopes.READ_REPO)
+ @nickname('createStar')
+ @validate_json_request('NewStarredRepository')
+ @require_user_admin
+ def post(self):
+ """ Star a repository. """
+ user = get_authenticated_user()
+ req = request.get_json()
+ namespace = req['namespace']
+ repository = req['repository']
+ repo = model.repository.get_repository(namespace, repository)
+
+ if repo:
+ try:
+ model.repository.star_repository(user, repo)
+ except IntegrityError:
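+        # The repository is already starred by this user; ignore the duplicate.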
+ pass
+
+ return {
+ 'namespace': namespace,
+ 'repository': repository,
+ }, 201
+
+
+@resource('/v1/user/starred/<apirepopath:repository>')
+@path_param('repository', 'The full path of the repository. e.g. namespace/name')
+class StarredRepository(RepositoryParamResource):
+ """ Operations for managing a specific starred repository. """
+ @nickname('deleteStar')
+ @require_user_admin
+ def delete(self, namespace, repository):
+ """ Removes a star from a repository. """
+ user = get_authenticated_user()
+ repo = model.repository.get_repository(namespace, repository)
+
+ if repo:
+ model.repository.unstar_repository(user, repo)
+ return '', 204
+
+
+@resource('/v1/users/<username>')
+class Users(ApiResource):
+ """ Operations related to retrieving information about other users. """
+ @nickname('getUserInformation')
+ def get(self, username):
+ """ Get user information for the specified user. """
+ user = model.user.get_nonrobot_user(username)
+ if user is None:
+ abort(404)
+
+ return user_view(user)
diff --git a/endpoints/appr/__init__.py b/endpoints/appr/__init__.py
new file mode 100644
index 000000000..c998d8a95
--- /dev/null
+++ b/endpoints/appr/__init__.py
@@ -0,0 +1,43 @@
+import logging
+
+from functools import wraps
+
+from cnr.exception import Forbidden
+from flask import Blueprint
+
+from app import metric_queue
+from auth.permissions import (AdministerRepositoryPermission, ReadRepositoryPermission,
+ ModifyRepositoryPermission)
+from endpoints.appr.decorators import require_repo_permission
+from util.metrics.metricqueue import time_blueprint
+
+
+appr_bp = Blueprint('appr', __name__)
+time_blueprint(appr_bp, metric_queue)
+logger = logging.getLogger(__name__)
+
+
+def _raise_method(repository, scopes):
+ raise Forbidden("Unauthorized access for: %s" % repository,
+ {"package": repository, "scopes": scopes})
+
+
+def _get_reponame_kwargs(*args, **kwargs):
+ return [kwargs['namespace'], kwargs['package_name']]
+
+
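+# Permission decorators for the CNR/appr endpoints: read access may fall back to
+# public visibility, while write and admin access always require explicit permission.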
+require_app_repo_read = require_repo_permission(ReadRepositoryPermission,
+ scopes=['pull'],
+ allow_public=True,
+ raise_method=_raise_method,
+ get_reponame_method=_get_reponame_kwargs)
+
+require_app_repo_write = require_repo_permission(ModifyRepositoryPermission,
+ scopes=['pull', 'push'],
+ raise_method=_raise_method,
+ get_reponame_method=_get_reponame_kwargs)
+
+require_app_repo_admin = require_repo_permission(AdministerRepositoryPermission,
+ scopes=['pull', 'push'],
+ raise_method=_raise_method,
+ get_reponame_method=_get_reponame_kwargs)
diff --git a/endpoints/appr/cnr_backend.py b/endpoints/appr/cnr_backend.py
new file mode 100644
index 000000000..a9e1b2539
--- /dev/null
+++ b/endpoints/appr/cnr_backend.py
@@ -0,0 +1,177 @@
+import base64
+
+from cnr.exception import raise_package_not_found
+from cnr.models.blob_base import BlobBase
+from cnr.models.channel_base import ChannelBase
+from cnr.models.db_base import CnrDB
+from cnr.models.package_base import PackageBase, manifest_media_type
+
+from flask import request
+from app import storage
+from endpoints.appr.models_cnr import model
+from util.request import get_request_ip
+
+
+class Blob(BlobBase):
+ @classmethod
+ def upload_url(cls, digest):
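+    # Blobs are stored content-addressed, sharded by the first two hex characters
+    # of their digest.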
+ return "cnr/blobs/sha256/%s/%s" % (digest[0:2], digest)
+
+ def save(self, content_media_type):
+ model.store_blob(self, content_media_type)
+
+ @classmethod
+ def delete(cls, package_name, digest):
+ pass
+
+ @classmethod
+ def _fetch_b64blob(cls, package_name, digest):
+ blobpath = cls.upload_url(digest)
+ locations = model.get_blob_locations(digest)
+ if not locations:
+ raise_package_not_found(package_name, digest)
+ return base64.b64encode(storage.get_content(locations, blobpath))
+
+ @classmethod
+ def download_url(cls, package_name, digest):
+ blobpath = cls.upload_url(digest)
+ locations = model.get_blob_locations(digest)
+ if not locations:
+ raise_package_not_found(package_name, digest)
+ return storage.get_direct_download_url(locations, blobpath, get_request_ip())
+
+
+class Channel(ChannelBase):
+ """ CNR Channel model implemented against the Quay data model. """
+
+ def __init__(self, name, package, current=None):
+ super(Channel, self).__init__(name, package, current=current)
+ self._channel_data = None
+
+ def _exists(self):
+ """ Check if the channel is saved already """
+ return model.channel_exists(self.package, self.name)
+
+ @classmethod
+ def get(cls, name, package):
+ chanview = model.fetch_channel(package, name, with_releases=False)
+ return cls(name, package, chanview.current)
+
+ def save(self):
+ model.update_channel(self.package, self.name, self.current)
+
+ def delete(self):
+ model.delete_channel(self.package, self.name)
+
+ @classmethod
+ def all(cls, package_name):
+ return [
+ Channel(c.name, package_name, c.current) for c in model.list_channels(package_name)
+ ]
+
+ @property
+ def _channel(self):
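+    # Lazily fetch and cache the full channel record (including its releases) on
+    # first access.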
+ if self._channel_data is None:
+ self._channel_data = model.fetch_channel(self.package, self.name)
+ return self._channel_data
+
+ def releases(self):
+ """ Returns the list of versions """
+ return self._channel.releases
+
+ def _add_release(self, release):
+ return model.update_channel(self.package, self.name, release)._asdict
+
+ def _remove_release(self, release):
+ model.delete_channel(self.package, self.name)
+
+
+class User(object):
+ """ User in CNR models """
+
+ @classmethod
+ def get_user(cls, username, password):
+    """ Returns True if the given user credentials are valid. """
+ return model.get_user(username, password)
+
+
+class Package(PackageBase):
+ """ CNR Package model implemented against the Quay data model. """
+
+ @classmethod
+ def _apptuple_to_dict(cls, apptuple):
+ return {
+ 'release': apptuple.release,
+ 'created_at': apptuple.created_at,
+ 'digest': apptuple.manifest.digest,
+ 'mediaType': apptuple.manifest.mediaType,
+ 'package': apptuple.name,
+ 'content': apptuple.manifest.content._asdict()
+ }
+
+ @classmethod
+ def create_repository(cls, package_name, visibility, owner):
+ model.create_application(package_name, visibility, owner)
+
+ @classmethod
+ def exists(cls, package_name):
+ return model.application_exists(package_name)
+
+ @classmethod
+ def all(cls, organization=None, media_type=None, search=None, username=None, **kwargs):
+ return [
+ dict(x._asdict())
+ for x in model.list_applications(namespace=organization, media_type=media_type,
+ search=search, username=username)
+ ]
+
+ @classmethod
+ def _fetch(cls, package_name, release, media_type):
+ data = model.fetch_release(package_name, release, manifest_media_type(media_type))
+ return cls._apptuple_to_dict(data)
+
+ @classmethod
+ def all_releases(cls, package_name, media_type=None):
+ return model.list_releases(package_name, media_type)
+
+ @classmethod
+ def search(cls, query, username=None):
+ return model.basic_search(query, username=username)
+
+ def _save(self, force=False, **kwargs):
+ user = kwargs['user']
+ visibility = kwargs['visibility']
+ model.create_release(self, user, visibility, force)
+
+ @classmethod
+ def _delete(cls, package_name, release, media_type):
+ model.delete_release(package_name, release, manifest_media_type(media_type))
+
+ @classmethod
+ def isdeleted_release(cls, package, release):
+ return model.release_exists(package, release)
+
+ def channels(self, channel_class, iscurrent=True):
+ return [
+ c.name
+ for c in model.list_release_channels(self.package, self.release, active=iscurrent)
+ ]
+
+ @classmethod
+ def manifests(cls, package, release=None):
+ return model.list_manifests(package, release)
+
+ @classmethod
+ def dump_all(cls, blob_cls):
+ raise NotImplementedError
+
+
+class QuayDB(CnrDB):
+ """ Wrapper Class to embed all CNR Models """
+ Channel = Channel
+ Package = Package
+ Blob = Blob
+
+ @classmethod
+ def reset_db(cls, force=False):
+ pass
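+
+
+# NOTE (illustrative sketch, not part of the original change): QuayDB bundles the
+# Channel, Package and Blob implementations behind cnr's CnrDB interface, e.g.
+#
+#   >>> QuayDB.Package.exists('quay/myapp')   # hypothetical repository name
+#   False
+#
+# The registry endpoints instead pass these classes to the cnr library one at a time
+# via the *_class keyword arguments (package_class=Package, blob_class=Blob, ...).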
diff --git a/endpoints/appr/decorators.py b/endpoints/appr/decorators.py
new file mode 100644
index 000000000..8df6a46a9
--- /dev/null
+++ b/endpoints/appr/decorators.py
@@ -0,0 +1,52 @@
+import logging
+
+from functools import wraps
+
+from data import model
+from util.http import abort
+
+
+logger = logging.getLogger(__name__)
+
+
+def _raise_unauthorized(repository, scopes):
+ raise StandardError("Unauthorized access to %s" % repository)
+
+
+def _get_reponame_kwargs(*args, **kwargs):
+ return [kwargs['namespace'], kwargs['package_name']]
+
+
+def disallow_for_image_repository(get_reponame_method=_get_reponame_kwargs):
+ def wrapper(func):
+ @wraps(func)
+ def wrapped(*args, **kwargs):
+ namespace_name, repo_name = get_reponame_method(*args, **kwargs)
+ image_repo = model.repository.get_repository(namespace_name, repo_name, kind_filter='image')
+ if image_repo is not None:
+ logger.debug('Tried to invoke a CNR method on an image repository')
+ abort(405, message='Cannot push an application to an image repository with the same name')
+ return func(*args, **kwargs)
+ return wrapped
+ return wrapper
+
+
+def require_repo_permission(permission_class, scopes=None, allow_public=False,
+ raise_method=_raise_unauthorized,
+ get_reponame_method=_get_reponame_kwargs):
+ def wrapper(func):
+ @wraps(func)
+ @disallow_for_image_repository(get_reponame_method=get_reponame_method)
+ def wrapped(*args, **kwargs):
+ namespace_name, repo_name = get_reponame_method(*args, **kwargs)
+ logger.debug('Checking permission %s for repo: %s/%s', permission_class,
+ namespace_name, repo_name)
+ permission = permission_class(namespace_name, repo_name)
+ if (permission.can() or
+ (allow_public and
+ model.repository.repository_is_public(namespace_name, repo_name))):
+ return func(*args, **kwargs)
+ repository = namespace_name + '/' + repo_name
+ raise_method(repository, scopes)
+ return wrapped
+ return wrapper
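+
+
+# NOTE (illustrative, not part of the original change): this factory is bound to
+# concrete permission classes elsewhere (e.g. in endpoints/appr/__init__.py); the
+# permission classes and scopes below are assumptions shown only to make the usage
+# concrete:
+#
+#   require_app_repo_read = require_repo_permission(ReadRepositoryPermission,
+#                                                   scopes=['pull'], allow_public=True)
+#   require_app_repo_write = require_repo_permission(ModifyRepositoryPermission,
+#                                                    scopes=['pull', 'push'])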
diff --git a/endpoints/appr/models_cnr.py b/endpoints/appr/models_cnr.py
new file mode 100644
index 000000000..89216127c
--- /dev/null
+++ b/endpoints/appr/models_cnr.py
@@ -0,0 +1,316 @@
+from datetime import datetime
+
+import cnr.semver
+
+from cnr.exception import raise_package_not_found, raise_channel_not_found, CnrException
+
+import features
+import data.model
+
+from app import storage, authentication
+from data import appr_model
+from data.database import Repository, MediaType, db_transaction
+from data.appr_model.models import NEW_MODELS
+from endpoints.appr.models_interface import (
+ ApplicationManifest, ApplicationRelease, ApplicationSummaryView, AppRegistryDataInterface,
+ BlobDescriptor, ChannelView, ChannelReleasesView)
+from util.audit import track_and_log
+from util.morecollections import AttrDict
+from util.names import parse_robot_username
+
+
+
+class ReadOnlyException(CnrException):
+ status_code = 405
+ errorcode = "read-only"
+
+
+def _strip_sha256_header(digest):
+ if digest.startswith('sha256:'):
+ return digest.split('sha256:')[1]
+ return digest
+
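+# e.g. (illustrative):
+#   >>> _strip_sha256_header('sha256:abcd')
+#   'abcd'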
+
+def _split_package_name(package):
+ """ Returns the namespace and package-name """
+ return package.split("/")
+
+
+def _join_package_name(ns, name):
+ """ Returns an app-name in the 'namespace/name' format """
+ return "%s/%s" % (ns, name)
+
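+# e.g. (illustrative, hypothetical names):
+#   >>> _split_package_name('quay/myapp')
+#   ['quay', 'myapp']
+#   >>> _join_package_name('quay', 'myapp')
+#   'quay/myapp'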
+
+def _timestamp_to_iso(timestamp, in_ms=True):
+ if in_ms:
+ timestamp = timestamp / 1000
+ return datetime.fromtimestamp(timestamp).isoformat()
+
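+# e.g. (illustrative; fromtimestamp() uses the local timezone, output shown for UTC):
+#   >>> _timestamp_to_iso(1500000000000)
+#   '2017-07-14T02:40:00'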
+
+def _application(package):
+ ns, name = _split_package_name(package)
+ repo = data.model.repository.get_app_repository(ns, name)
+ if repo is None:
+ raise_package_not_found(package)
+ return repo
+
+
+class CNRAppModel(AppRegistryDataInterface):
+ def __init__(self, models_ref, is_readonly):
+ self.models_ref = models_ref
+ self.is_readonly = is_readonly
+
+ def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None,
+ analytics_sample=1, metadata=None):
+ metadata = {} if metadata is None else metadata
+
+ repo = None
+ if repo_name is not None:
+ db_repo = data.model.repository.get_repository(namespace_name, repo_name,
+ kind_filter='application')
+ repo = AttrDict({
+ 'id': db_repo.id,
+ 'name': db_repo.name,
+ 'namespace_name': db_repo.namespace_user.username,
+ 'is_free_namespace': db_repo.namespace_user.stripe_id is None,
+ })
+ track_and_log(event_name, repo, analytics_name=analytics_name,
+ analytics_sample=analytics_sample, **metadata)
+
+ def list_applications(self, namespace=None, media_type=None, search=None, username=None,
+ with_channels=False):
+ """ Lists all repositories that contain applications, with optional filtering to a specific
+ namespace and/or to those visible to a specific user.
+ """
+
+ views = []
+ for repo in appr_model.package.list_packages_query(self.models_ref, namespace, media_type,
+ search, username=username):
+ tag_set_prefetch = getattr(repo, self.models_ref.tag_set_prefetch_name)
+ releases = [t.name for t in tag_set_prefetch]
+ if not releases:
+ continue
+ available_releases = [
+ str(x) for x in sorted(cnr.semver.versions(releases, False), reverse=True)]
+ channels = None
+ if with_channels:
+ channels = [
+ ChannelView(name=chan.name, current=chan.linked_tag.name)
+ for chan in appr_model.channel.get_repo_channels(repo, self.models_ref)]
+
+ app_name = _join_package_name(repo.namespace_user.username, repo.name)
+ manifests = self.list_manifests(app_name, available_releases[0])
+ view = ApplicationSummaryView(
+ namespace=repo.namespace_user.username,
+ name=app_name,
+ visibility=repo.visibility.name,
+ default=available_releases[0],
+ channels=channels,
+ manifests=manifests,
+ releases=available_releases,
+ updated_at=_timestamp_to_iso(tag_set_prefetch[-1].lifetime_start),
+ created_at=_timestamp_to_iso(tag_set_prefetch[0].lifetime_start),)
+ views.append(view)
+ return views
+
+ def application_is_public(self, package_name):
+ """
+ Returns:
+ * True if the repository is public
+ """
+ namespace, name = _split_package_name(package_name)
+ return data.model.repository.repository_is_public(namespace, name)
+
+ def create_application(self, package_name, visibility, owner):
+ """ Create a new app repository, owner is the user who creates it """
+ if self.is_readonly:
+ raise ReadOnlyException('Currently in read-only mode')
+
+ ns, name = _split_package_name(package_name)
+ data.model.repository.create_repository(ns, name, owner, visibility, 'application')
+
+ def application_exists(self, package_name):
+ """ Returns True if the application exists """
+ ns, name = _split_package_name(package_name)
+ return data.model.repository.get_repository(ns, name, kind_filter='application') is not None
+
+ def basic_search(self, query, username=None):
+ """ Returns an array of matching AppRepositories in the format: 'namespace/name'
+ Note:
+ * Only 'public' repositories are returned
+
+ Todo:
+ * Filter results to repositories readable by the user (including visibilities)
+ """
+ return [
+ _join_package_name(r.namespace_user.username, r.name)
+ for r in data.model.repository.get_app_search(lookup=query, username=username, limit=50)]
+
+ def list_releases(self, package_name, media_type=None):
+ """ Return the list of all releases of an Application
+ Example:
+ >>> get_app_releases('ant31/rocketchat')
+ ['1.7.1', '1.7.0', '1.7.2']
+
+ Todo:
+ * Paginate
+ """
+ return appr_model.release.get_releases(_application(package_name), self.models_ref, media_type)
+
+ def list_manifests(self, package_name, release=None):
+ """ Returns the list of all manifests of an Application.
+
+ Todo:
+ * Paginate
+ """
+ try:
+ repo = _application(package_name)
+ return list(appr_model.manifest.get_manifest_types(repo, self.models_ref, release))
+ except (Repository.DoesNotExist, self.models_ref.Tag.DoesNotExist):
+ raise_package_not_found(package_name, release)
+
+ def fetch_release(self, package_name, release, media_type):
+ """
+ Retrieves an AppRelease from its repository-name and release-name
+ """
+ repo = _application(package_name)
+ try:
+ tag, manifest, blob = appr_model.release.get_app_release(repo, release, media_type,
+ self.models_ref)
+ created_at = _timestamp_to_iso(tag.lifetime_start)
+
+ blob_descriptor = BlobDescriptor(digest=_strip_sha256_header(blob.digest),
+ mediaType=blob.media_type.name, size=blob.size, urls=[])
+
+ app_manifest = ApplicationManifest(
+ digest=manifest.digest, mediaType=manifest.media_type.name, content=blob_descriptor)
+
+ app_release = ApplicationRelease(release=tag.name, created_at=created_at, name=package_name,
+ manifest=app_manifest)
+ return app_release
+ except (self.models_ref.Tag.DoesNotExist,
+ self.models_ref.Manifest.DoesNotExist,
+ self.models_ref.Blob.DoesNotExist,
+ Repository.DoesNotExist,
+ MediaType.DoesNotExist):
+ raise_package_not_found(package_name, release, media_type)
+
+ def store_blob(self, cnrblob, content_media_type):
+ if self.is_readonly:
+ raise ReadOnlyException('Currently in read-only mode')
+
+ fp = cnrblob.packager.io_file
+ path = cnrblob.upload_url(cnrblob.digest)
+ locations = storage.preferred_locations
+ storage.stream_write(locations, path, fp, 'application/x-gzip')
+ db_blob = appr_model.blob.get_or_create_blob(cnrblob.digest, cnrblob.size, content_media_type,
+ locations, self.models_ref)
+ return BlobDescriptor(mediaType=content_media_type,
+ digest=_strip_sha256_header(db_blob.digest), size=db_blob.size, urls=[])
+
+ def create_release(self, package, user, visibility, force=False):
+ """ Add an app-release to a repository
+ package is an instance of data.cnr.package.Package
+ """
+ if self.is_readonly:
+ raise ReadOnlyException('Currently in read-only mode')
+
+ manifest = package.manifest()
+ ns, name = package.namespace, package.name
+ repo = data.model.repository.get_or_create_repository(ns, name, user, visibility=visibility,
+ repo_kind='application')
+ tag_name = package.release
+ appr_model.release.create_app_release(repo, tag_name, package.manifest(),
+ manifest['content']['digest'], self.models_ref, force)
+
+ def delete_release(self, package_name, release, media_type):
+ """ Remove/Delete an app-release from an app-repository.
+ It does not delete the entire app-repository, only a single release
+ """
+ if self.is_readonly:
+ raise ReadOnlyException('Currently in read-only mode')
+
+ repo = _application(package_name)
+ try:
+ appr_model.release.delete_app_release(repo, release, media_type, self.models_ref)
+ except (self.models_ref.Channel.DoesNotExist,
+ self.models_ref.Tag.DoesNotExist,
+ MediaType.DoesNotExist):
+ raise_package_not_found(package_name, release, media_type)
+
+ def release_exists(self, package, release):
+ """ Returns True if a release with that name already exists or
+ has existed (including deleted ones) """
+ # TODO: Figure out why this isn't implemented.
+
+ def channel_exists(self, package_name, channel_name):
+ """ Returns true if channel exists """
+ repo = _application(package_name)
+ return appr_model.tag.tag_exists(repo, channel_name, self.models_ref, "channel")
+
+ def delete_channel(self, package_name, channel_name):
+ """ Delete an AppChannel
+ Note:
+ It doesn't delete the AppReleases
+ """
+ if self.is_readonly:
+ raise ReadOnlyException('Currently in read-only mode')
+
+ repo = _application(package_name)
+ try:
+ appr_model.channel.delete_channel(repo, channel_name, self.models_ref)
+ except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
+ raise_channel_not_found(package_name, channel_name)
+
+ def list_channels(self, package_name):
+ """ Returns all AppChannel for a package """
+ repo = _application(package_name)
+ channels = appr_model.channel.get_repo_channels(repo, self.models_ref)
+ return [ChannelView(name=chan.name, current=chan.linked_tag.name) for chan in channels]
+
+ def fetch_channel(self, package_name, channel_name, with_releases=True):
+ """ Returns an AppChannel """
+ repo = _application(package_name)
+
+ try:
+ channel = appr_model.channel.get_channel(repo, channel_name, self.models_ref)
+ except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
+ raise_channel_not_found(package_name, channel_name)
+
+ if with_releases:
+ releases = appr_model.channel.get_channel_releases(repo, channel, self.models_ref)
+ chanview = ChannelReleasesView(
+ current=channel.linked_tag.name, name=channel.name,
+ releases=[channel.linked_tag.name] + [c.name for c in releases])
+ else:
+ chanview = ChannelView(current=channel.linked_tag.name, name=channel.name)
+
+ return chanview
+
+ def list_release_channels(self, package_name, release, active=True):
+ repo = _application(package_name)
+ try:
+ channels = appr_model.channel.get_tag_channels(repo, release, self.models_ref, active=active)
+ return [ChannelView(name=c.name, current=c.linked_tag.name) for c in channels]
+ except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
+ raise_package_not_found(package_name, release)
+
+ def update_channel(self, package_name, channel_name, release):
+ """ Append a new release to the AppChannel
+ Returns:
+ A new AppChannel with the release
+ """
+ if self.is_readonly:
+ raise ReadOnlyException('Currently in read-only mode')
+
+ repo = _application(package_name)
+ channel = appr_model.channel.create_or_update_channel(repo, channel_name, release,
+ self.models_ref)
+ return ChannelView(current=channel.linked_tag.name, name=channel.name)
+
+ def get_blob_locations(self, digest):
+ return appr_model.blob.get_blob_locations(digest, self.models_ref)
+
+
+# Phase 3: Read and write from new tables.
+model = CNRAppModel(NEW_MODELS, features.READONLY_APP_REGISTRY)
diff --git a/endpoints/appr/models_interface.py b/endpoints/appr/models_interface.py
new file mode 100644
index 000000000..6ebf949ac
--- /dev/null
+++ b/endpoints/appr/models_interface.py
@@ -0,0 +1,191 @@
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+
+from six import add_metaclass
+
+
+class BlobDescriptor(namedtuple('Blob', ['mediaType', 'size', 'digest', 'urls'])):
+ """ BlobDescriptor describes a blob with its mediatype, size and digest.
+ A BlobDescriptor is used to retrieve the actual blob.
+ """
+
+
+class ChannelReleasesView(namedtuple('ChannelReleasesView', ['name', 'current', 'releases'])):
+ """ A channel is a pointer to a Release (current).
+ Releases are the tags the channel previously pointed to (its history).
+ """
+
+
+class ChannelView(namedtuple('ChannelView', ['name', 'current'])):
+ """ A channel is a pointer to a Release (current).
+ """
+
+
+class ApplicationSummaryView(
+ namedtuple('ApplicationSummaryView', [
+ 'name', 'namespace', 'visibility', 'default', 'manifests', 'channels', 'releases',
+ 'updated_at', 'created_at'
+ ])):
+ """ ApplicationSummaryView is an aggregated view of an application repository.
+ """
+
+
+class ApplicationManifest(namedtuple('ApplicationManifest', ['mediaType', 'digest', 'content'])):
+ """ ApplicationManifest embeds the BlobDescriptor and some metadata around it.
+ An ApplicationManifest is content-addressable.
+ """
+
+
+class ApplicationRelease(
+ namedtuple('ApplicationRelease', ['release', 'name', 'created_at', 'manifest'])):
+ """ The ApplicationRelease associates an ApplicationManifest to a repository and release.
+ """
+
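+# NOTE (illustrative, not part of the original change): the views above nest as
+# follows (all field values are placeholders):
+#
+#   ApplicationRelease(release='1.0.0', name='quay/myapp', created_at='2018-01-01T00:00:00',
+#                      manifest=ApplicationManifest(
+#                          mediaType='helm', digest='abcd...',
+#                          content=BlobDescriptor(mediaType='helm', size=1024,
+#                                                 digest='abcd...', urls=[])))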
+
+@add_metaclass(ABCMeta)
+class AppRegistryDataInterface(object):
+ """ Interface that represents all data store interactions required by a App Registry.
+ """
+
+ @abstractmethod
+ def list_applications(self, namespace=None, media_type=None, search=None, username=None,
+ with_channels=False):
+ """ Lists all repositories that contain applications, with optional filtering to a specific
+ namespace and/or to those visible to a specific user.
+
+ Returns: list of ApplicationSummaryView
+ """
+ pass
+
+ @abstractmethod
+ def application_is_public(self, package_name):
+ """
+ Returns true if the application is public
+ """
+ pass
+
+ @abstractmethod
+ def create_application(self, package_name, visibility, owner):
+ """ Create a new app repository, owner is the user who creates it """
+ pass
+
+ @abstractmethod
+ def application_exists(self, package_name):
+ """ Returns true if the application exists """
+ pass
+
+ @abstractmethod
+ def basic_search(self, query, username=None):
+ """ Returns an array of matching applications in the format: 'namespace/name'
+ Note:
+ * Only 'public' repositories are returned
+ """
+ pass
+
+ # @TODO: Paginate
+ @abstractmethod
+ def list_releases(self, package_name, media_type=None):
+ """ Returns the list of all releases(names) of an AppRepository
+ Example:
+ >>> get_app_releases('ant31/rocketchat')
+ ['1.7.1', '1.7.0', '1.7.2']
+ """
+ pass
+
+ # @TODO: Paginate
+ @abstractmethod
+ def list_manifests(self, package_name, release=None):
+ """ Returns the list of all available manifest types of an Application, across all releases or
+ for a specific one.
+
+ Example:
+ >>> list_manifests('ant31/rocketchat')
+ ['helm']
+ """
+ pass
+
+ @abstractmethod
+ def fetch_release(self, package_name, release, media_type):
+ """
+ Returns an ApplicationRelease
+ """
+ pass
+
+ @abstractmethod
+ def store_blob(self, cnrblob, content_media_type):
+ """
+ Upload the blob content to a storage location and creates a Blob entry in the DB.
+
+ Returns a BlobDescriptor
+ """
+ pass
+
+ @abstractmethod
+ def create_release(self, package, user, visibility, force=False):
+ """ Creates and returns an ApplicationRelease
+ - package is a data.model.Package object
+ - user is the owner of the package
+ - visibility is a string: 'public' or 'private'
+ """
+ pass
+
+ @abstractmethod
+ def release_exists(self, package, release):
+ """ Returns True if a release with that name already exists or
+ has existed (including deleted ones)
+ """
+ pass
+
+ @abstractmethod
+ def delete_release(self, package_name, release, media_type):
+ """ Remove/Delete an app-release from an app-repository.
+ It does not delete the entire app-repository, only a single release
+ """
+ pass
+
+ @abstractmethod
+ def list_release_channels(self, package_name, release, active=True):
+ """ Returns a list of Channels that are or were pointing to a release.
+ If active is True, returns only the active Channels (lifetime_end not null)
+ """
+ pass
+
+ @abstractmethod
+ def channel_exists(self, package_name, channel_name):
+ """ Returns true if the channel with the given name exists under the matching package """
+ pass
+
+ @abstractmethod
+ def update_channel(self, package_name, channel_name, release):
+ """ Append a new release to the Channel
+ Returns a new Channel with the release as current
+ """
+ pass
+
+ @abstractmethod
+ def delete_channel(self, package_name, channel_name):
+ """ Deletes a Channel; it doesn't delete/touch the ApplicationRelease the channel points to """
+
+ # @TODO: Paginate
+ @abstractmethod
+ def list_channels(self, package_name):
+ """ Returns all AppChannel for a package """
+ pass
+
+ @abstractmethod
+ def fetch_channel(self, package_name, channel_name, with_releases=True):
+ """ Returns a Channel
+ Raises: ChannelNotFound, PackageNotFound
+ """
+ pass
+
+ @abstractmethod
+ def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None,
+ analytics_sample=1, **kwargs):
+ """ Logs an action to the audit log. """
+ pass
+
+ @abstractmethod
+ def get_blob_locations(self, digest):
+ """ Returns a list of strings for the locations in which a Blob is present. """
+ pass
diff --git a/endpoints/appr/registry.py b/endpoints/appr/registry.py
new file mode 100644
index 000000000..0b470f878
--- /dev/null
+++ b/endpoints/appr/registry.py
@@ -0,0 +1,318 @@
+import logging
+from base64 import b64encode
+
+import cnr
+from cnr.api.impl import registry as cnr_registry
+from cnr.api.registry import _pull, repo_name
+from cnr.exception import (
+ ChannelNotFound, CnrException, Forbidden, InvalidParams, InvalidRelease, InvalidUsage,
+ PackageAlreadyExists, PackageNotFound, PackageReleaseNotFound, UnableToLockResource,
+ UnauthorizedAccess, Unsupported)
+from flask import jsonify, request
+
+from auth.auth_context import get_authenticated_user
+from auth.credentials import validate_credentials
+from auth.decorators import process_auth
+from auth.permissions import CreateRepositoryPermission, ModifyRepositoryPermission
+from data.logs_model import logs_model
+from endpoints.appr import appr_bp, require_app_repo_read, require_app_repo_write
+from endpoints.appr.cnr_backend import Blob, Channel, Package, User
+from endpoints.appr.decorators import disallow_for_image_repository
+from endpoints.appr.models_cnr import model
+from endpoints.decorators import anon_allowed, anon_protect, check_region_blacklisted
+from util.names import REPOSITORY_NAME_REGEX, TAG_REGEX
+
+logger = logging.getLogger(__name__)
+
+
+@appr_bp.errorhandler(Unsupported)
+@appr_bp.errorhandler(PackageAlreadyExists)
+@appr_bp.errorhandler(InvalidRelease)
+@appr_bp.errorhandler(Forbidden)
+@appr_bp.errorhandler(UnableToLockResource)
+@appr_bp.errorhandler(UnauthorizedAccess)
+@appr_bp.errorhandler(PackageNotFound)
+@appr_bp.errorhandler(PackageReleaseNotFound)
+@appr_bp.errorhandler(CnrException)
+@appr_bp.errorhandler(InvalidUsage)
+@appr_bp.errorhandler(InvalidParams)
+@appr_bp.errorhandler(ChannelNotFound)
+def render_error(error):
+ response = jsonify({"error": error.to_dict()})
+ response.status_code = error.status_code
+ return response
+
+
+@appr_bp.route("/version")
+@anon_allowed
+def version():
+ return jsonify({"cnr-api": cnr.__version__})
+
+
+@appr_bp.route("/api/v1/users/login", methods=['POST'])
+@anon_allowed
+def login():
+ values = request.get_json(force=True, silent=True) or {}
+ username = values.get('user', {}).get('username')
+ password = values.get('user', {}).get('password')
+ if not username or not password:
+ raise InvalidUsage('Missing username or password')
+
+ result, _ = validate_credentials(username, password)
+ if not result.auth_valid:
+ raise UnauthorizedAccess(result.error_message)
+
+ return jsonify({'token': "basic " + b64encode("%s:%s" % (username, password))})
+
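+# NOTE (illustrative, not part of the original change): a successful login returns a
+# body such as (credentials are placeholders):
+#
+#   {"token": "basic dXNlcm5hbWU6cGFzc3dvcmQ="}
+#
+# i.e. "basic " + base64("username:password"), which the client sends back in the
+# Authorization header on subsequent requests.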
+
+# @TODO: Redirect to S3 url
+@appr_bp.route(
+ "/api/v1/packages/<string:namespace>/<string:package_name>/blobs/sha256/<string:digest>",
+ methods=['GET'],
+ strict_slashes=False,)
+@process_auth
+@require_app_repo_read
+@check_region_blacklisted(namespace_name_kwarg='namespace')
+@anon_protect
+def blobs(namespace, package_name, digest):
+ reponame = repo_name(namespace, package_name)
+ data = cnr_registry.pull_blob(reponame, digest, blob_class=Blob)
+ json_format = request.args.get('format', None) == 'json'
+ return _pull(data, json_format=json_format)
+
+
+@appr_bp.route("/api/v1/packages", methods=['GET'], strict_slashes=False)
+@process_auth
+@anon_protect
+def list_packages():
+ namespace = request.args.get('namespace', None)
+ media_type = request.args.get('media_type', None)
+ query = request.args.get('query', None)
+ user = get_authenticated_user()
+ username = None
+ if user:
+ username = user.username
+ result_data = cnr_registry.list_packages(namespace, package_class=Package, search=query,
+ media_type=media_type, username=username)
+ return jsonify(result_data)
+
+
+@appr_bp.route(
+ "/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>",
+ methods=['DELETE'], strict_slashes=False)
+@process_auth
+@require_app_repo_write
+@anon_protect
+def delete_package(namespace, package_name, release, media_type):
+ reponame = repo_name(namespace, package_name)
+ result = cnr_registry.delete_package(reponame, release, media_type, package_class=Package)
+ logs_model.log_action('delete_tag', namespace, repository_name=package_name,
+ metadata={'release': release, 'mediatype': media_type})
+ return jsonify(result)
+
+
+@appr_bp.route(
+ "/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>",
+ methods=['GET'], strict_slashes=False)
+@process_auth
+@require_app_repo_read
+@check_region_blacklisted(namespace_name_kwarg='namespace')
+@anon_protect
+def show_package(namespace, package_name, release, media_type):
+ reponame = repo_name(namespace, package_name)
+ result = cnr_registry.show_package(reponame, release, media_type, channel_class=Channel,
+ package_class=Package)
+ return jsonify(result)
+
+
+@appr_bp.route("/api/v1/packages/<string:namespace>/<string:package_name>", methods=['GET'],
+ strict_slashes=False)
+@process_auth
+@require_app_repo_read
+@anon_protect
+def show_package_releases(namespace, package_name):
+ reponame = repo_name(namespace, package_name)
+ media_type = request.args.get('media_type', None)
+ result = cnr_registry.show_package_releases(reponame, media_type=media_type,
+ package_class=Package)
+ return jsonify(result)
+
+
+@appr_bp.route("/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>",
+ methods=['GET'], strict_slashes=False)
+@process_auth
+@require_app_repo_read
+@anon_protect
+def show_package_release_manifests(namespace, package_name, release):
+ reponame = repo_name(namespace, package_name)
+ result = cnr_registry.show_package_manifests(reponame, release, package_class=Package)
+ return jsonify(result)
+
+
+@appr_bp.route(
+ "/api/v1/packages/<string:namespace>/<string:package_name>/<string:release>/<string:media_type>/pull",
+ methods=['GET'],
+ strict_slashes=False,)
+@process_auth
+@require_app_repo_read
+@check_region_blacklisted(namespace_name_kwarg='namespace')
+@anon_protect
+def pull(namespace, package_name, release, media_type):
+ logger.debug('Pull of release %s of app repository %s/%s', release, namespace, package_name)
+ reponame = repo_name(namespace, package_name)
+ data = cnr_registry.pull(reponame, release, media_type, Package, blob_class=Blob)
+ logs_model.log_action('pull_repo', namespace, repository_name=package_name,
+ metadata={'release': release, 'mediatype': media_type})
+ json_format = request.args.get('format', None) == 'json'
+ return _pull(data, json_format)
+
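+# NOTE (illustrative, not part of the original change; assumes the blueprint is mounted
+# under /cnr): a pull of a single release looks like (names are placeholders):
+#
+#   GET /cnr/api/v1/packages/quay/myapp/1.0.0/helm/pull?format=json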
+
+@appr_bp.route("/api/v1/packages/<string:namespace>/<string:package_name>", methods=['POST'],
+ strict_slashes=False)
+@disallow_for_image_repository()
+@process_auth
+@anon_protect
+def push(namespace, package_name):
+ reponame = repo_name(namespace, package_name)
+
+ if not REPOSITORY_NAME_REGEX.match(package_name):
+ logger.debug('Found invalid repository name in CNR push: %s', reponame)
+ raise InvalidUsage('invalid repository name: %s' % reponame)
+
+ values = request.get_json(force=True, silent=True) or {}
+ private = values.get('visibility', 'private')
+
+ owner = get_authenticated_user()
+ if not Package.exists(reponame):
+ if not CreateRepositoryPermission(namespace).can():
+ raise Forbidden("Unauthorized access for: %s" % reponame,
+ {"package": reponame,
+ "scopes": ['create']})
+ Package.create_repository(reponame, private, owner)
+ logs_model.log_action('create_repo', namespace, repository_name=package_name)
+
+ if not ModifyRepositoryPermission(namespace, package_name).can():
+ raise Forbidden("Unauthorized access for: %s" % reponame,
+ {"package": reponame,
+ "scopes": ['push']})
+
+ if 'release' not in values:
+ raise InvalidUsage('Missing release')
+
+ if 'media_type' not in values:
+ raise InvalidUsage('Missing media_type')
+
+ if 'blob' not in values:
+ raise InvalidUsage('Missing blob')
+
+ release_version = str(values['release'])
+ media_type = values['media_type']
+ force = request.args.get('force', 'false') == 'true'
+
+ blob = Blob(reponame, values['blob'])
+ app_release = cnr_registry.push(reponame, release_version, media_type, blob, force,
+ package_class=Package, user=owner, visibility=private)
+ logs_model.log_action('push_repo', namespace, repository_name=package_name,
+ metadata={'release': release_version})
+ return jsonify(app_release)
+
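+# NOTE (illustrative, not part of the original change): the JSON body accepted by the
+# push handler above has this shape (values are placeholders):
+#
+#   {"release": "1.0.0", "media_type": "helm",
+#    "blob": "<base64-encoded gzipped package>", "visibility": "public"}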
+
+@appr_bp.route("/api/v1/packages/search", methods=['GET'], strict_slashes=False)
+@process_auth
+@anon_protect
+def search_packages():
+ query = request.args.get("q")
+ user = get_authenticated_user()
+ username = None
+ if user:
+ username = user.username
+
+ search_results = cnr_registry.search(query, Package, username=username)
+ return jsonify(search_results)
+
+
+# CHANNELS
+@appr_bp.route("/api/v1/packages/<string:namespace>/<string:package_name>/channels",
+ methods=['GET'], strict_slashes=False)
+@process_auth
+@require_app_repo_read
+@anon_protect
+def list_channels(namespace, package_name):
+ reponame = repo_name(namespace, package_name)
+ return jsonify(cnr_registry.list_channels(reponame, channel_class=Channel))
+
+
+@appr_bp.route(
+ "/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>",
+ methods=['GET'], strict_slashes=False)
+@process_auth
+@require_app_repo_read
+@anon_protect
+def show_channel(namespace, package_name, channel_name):
+ reponame = repo_name(namespace, package_name)
+ channel = cnr_registry.show_channel(reponame, channel_name, channel_class=Channel)
+ return jsonify(channel)
+
+
+@appr_bp.route(
+ "/api/v1/packages/<string:namespace>/<string:package_name>/channels/<string:channel_name>/<string:release>",
+ methods=['POST'],
+ strict_slashes=False,)
+@process_auth
+@require_app_repo_write
+@anon_protect
+def add_channel_release(namespace, package_name, channel_name, release):
+ _check_channel_name(channel_name, release)
+ reponame = repo_name(namespace, package_name)
+ result = cnr_registry.add_channel_release(reponame, channel_name, release, channel_class=Channel,
+ package_class=Package)
+ logs_model.log_action('create_tag', namespace, repository_name=package_name,
+ metadata={'channel': channel_name, 'release': release})
+ return jsonify(result)
+
+
+def _check_channel_name(channel_name, release=None):
+ if not TAG_REGEX.match(channel_name):
+ logger.debug('Found invalid channel name in CNR add channel release: %s', channel_name)
+ raise InvalidUsage("Found invalid channel name %s" % channel_name,
+ {'name': channel_name,
+ 'release': release})
+
+ if release is not None and not TAG_REGEX.match(release):
+ logger.debug('Found invalid release name in CNR add channel release: %s', release)
+ raise InvalidUsage('Found invalid channel release name %s' % release,
+ {'name': channel_name,
+ 'release': release})
+
+
+@appr_bp.route(
+ "/api/v1/packages/