mirror of https://github.com/quay/quay.git (synced 2025-07-30 07:43:13 +03:00)
local-dev: implement local development environment (#610)
* local-dev: implement local development environment. This commit copies the files in /init into /local-dev, edits those files to support hot-reload features for local development, and introduces docker-compose/makefile targets in order to support local dev. Signed-off-by: ldelossa <ldelossa@redhat.com>
* local-dev: hop quay workers to gunicorn. This commit adds a uwsgi worker application delegate and a factory function allowing each worker to be run by gunicorn. Each worker now supports hot-reload and will reload itself when its code is updated. This change only affects the local dev env. Signed-off-by: ldelossa <ldelossa@redhat.com>
* local-dev: add docs. Signed-off-by: ldelossa <ldelossa@redhat.com>
This commit is contained in: committed by GitHub · parent c9ac4aac1f · commit 113ccebbbf
17
.gitignore
vendored
@ -28,3 +28,20 @@ Dockerfile-e
.pytest_cache/*
test/dockerclients/Vagrantfile
test/dockerclients/.*

# files generated by local dev,
# do not need to check in and can be deleted
# if not running local dev
util/ipresolver/aws-ip-ranges.json
/local-dev/*.sock
revision_head
local-dev/jwtproxy_conf.yaml
local-dev/mitm.cert
local-dev/mitm.key
local-dev/quay.kid
local-dev/quay.pem
local-dev/supervisord.conf
local-dev/__pycache__
static/webfonts/
supervisord.log
supervisord.pid
37
Makefile
@ -178,3 +178,40 @@ generate-proto-py:
black:
	black --line-length=100 --target-version=py38 --exclude "/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/" .

#################################
# Local Development Environment #
#################################

.PHONY: local-dev-clean
local-dev-clean:
	sudo ./local-dev/scripts/clean.sh

.PHONY: local-dev-build
local-dev-build:
	make local-dev-clean
	docker-compose build

.PHONY: local-dev-up
local-dev-up:
	make local-dev-clean
	docker-compose up -d redis
	docker-compose up -d quay-db
	docker exec -it quay-db bash -c 'while ! pg_isready; do echo "waiting for postgres"; sleep 2; done'
	docker-compose up -d quay

.PHONY: local-dev-up-with-clair
local-dev-up-with-clair:
	make local-dev-clean
	docker-compose up -d redis
	docker-compose up -d quay-db
	docker exec -it quay-db bash -c 'while ! pg_isready; do echo "waiting for postgres"; sleep 2; done'
	docker-compose up -d quay
	docker-compose up -d clair-db
	docker exec -it clair-db bash -c 'while ! pg_isready; do echo "waiting for postgres"; sleep 2; done'
	docker-compose up -d clair

.PHONY: local-dev-down
local-dev-down:
	docker-compose down
	make local-dev-clean
73
docker-compose.yaml
Normal file
@ -0,0 +1,73 @@
version: "3.7"
services:

  quay-db:
    container_name: quay-db
    image: postgres:12.1
    environment:
      POSTGRES_USER: "quay"
      POSTGRES_PASSWORD: "quay"
      POSTGRES_DB: "quay"
    volumes:
      - "./local-dev/init/pg_bootstrap.sql:/docker-entrypoint-initdb.d/pg_bootstrap.sql"
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U quay -d quay"]
      interval: 10s
      timeout: 9s
      retries: 3
      start_period: 10s

  clair-db:
    container_name: clair-db
    image: postgres:12.1
    environment:
      POSTGRES_USER: "clair"
      POSTGRES_DB: "clair"
    ports:
      - "5433:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U clair -d clair"]
      interval: 10s
      timeout: 9s
      retries: 3
      start_period: 10s

  redis:
    container_name: quay-redis
    image: redis:latest
    ports:
      - "6379:6379"

  quay:
    container_name: quay-quay
    build:
      context: .
      dockerfile: ./local-dev/Dockerfile
    image: quay-local:local
    volumes:
      - ".:/quay-registry"
    ports:
      - "8080:8080"
    environment:
      DEBUGLOG: "true"
      IGNORE_VALIDATION: "true"

  # clair is configured to share its network
  # namespace with quay. this allows quay to serve
  # layers to clair over localhost.
  clair:
    container_name: quay-clair
    image: quay.io/projectquay/clair:4.0.0-rc.22
    volumes:
      - "./local-dev/clair:/src/clair/"
    environment:
      CLAIR_CONF: "/src/clair/config.yaml"
      CLAIR_MODE: "combo"
    network_mode: "service:quay"
    command:
      ["bash", "-c", "cd /src/clair/cmd/clair; go run -mod vendor ."]
    depends_on:
      - quay
137
docs/docker-local-dev.md
Normal file
@ -0,0 +1,137 @@
# Docker Local Development Environment

The Quay team develops and supports a Docker-based local development environment.
This environment can quickly set up Quay and Clair for developing and testing changes.
When using the local development environment, changes to your source code are automatically hot-reloaded, taking effect in real time.

## Usage

The local development environment is driven by the following `makefile` targets:

### make local-dev-up

Deploys Quay and its necessary dependencies.
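For example, a typical session looks like the following (the container names come from `docker-compose.yaml`; the `docker ps` check is only a suggested sanity check):

``` shell
make local-dev-up
# confirm the containers are running; expect quay-quay, quay-db, and quay-redis
docker ps --format '{{.Names}}'
```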
#### Details

| Infra | Local Address | Docker Network Address |
| --- | --- | --- |
| Quay | `localhost:8080` | `quay-quay:8080` |
| Quay Database | `localhost:5432` | `quay-db:5432` |
| Quay Redis | `localhost:6379` | `quay-redis:6379` |

### make local-dev-up-with-clair

Deploys Quay and its necessary dependencies along with Clair.
The version of Clair deployed is determined by the image tag in the docker compose file at `/docker-compose.yaml`.
Updating this field and restarting the local development environment will deploy the specified Clair version.
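The field in question is the Clair service's image tag; a quick way to locate it (output shown for the version pinned by this commit):

``` shell
grep 'projectquay/clair' docker-compose.yaml
# image: quay.io/projectquay/clair:4.0.0-rc.22
```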
#### Details

| Infra | Local Address | Docker Network Address |
| --- | --- | --- |
| Quay | `localhost:8080` | `quay-quay:8080` |
| Quay Database | `localhost:5432` | `quay-db:5432` |
| Quay Redis | `localhost:6379` | `quay-redis:6379` |
| Clair | (not exposed) | `localhost:6000` from the Quay container |
| Clair Database | `localhost:5433` | `clair-db:5432` |

*Note: Clair is deployed in the network namespace of the Quay container.
This allows Quay to provide Clair layer location information over `localhost`.*
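This is done with `network_mode: "service:quay"` in `docker-compose.yaml`. As a sanity check (not required), `docker inspect` should show that the Clair container has no network of its own:

``` shell
docker inspect -f '{{.HostConfig.NetworkMode}}' quay-clair
# container:<id of the quay-quay container>
```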
### make local-dev-down

Tears down the local development environment.

#### Details

This make target stops and removes any containers currently running, regardless of whether `make local-dev-up` or `make local-dev-up-with-clair` was run.
After the containers are cleanly removed, the script at `./local-dev/scripts/clean.sh` is run to remove any files created or modified by the local development environment.

### make local-dev-clean

Cleans any files in the repository created or modified as a result of a local development deployment.
This target is run explicitly by all of the targets listed above.

#### Details

Typically this make target will not be run manually; however, it is available to do so.
It's possible your repository may get into an odd state due to Docker crashing or some other unexpected issue.
If that occurs, running this target will restore your repository to a good state (sans any changes you made to source code).

## Pushing to Quay

Container images can be tagged and pushed to your local Quay instance.

The following steps outline how to accomplish this:

* Visit `http://localhost:8080` and create an account. (Using the username 'admin' will create a superuser account.)

* Create an organization and repository; we will refer to these as {org} and {repo}.

* Use podman or docker to log in (we will use podman, as pushing to http is far easier).
  * `$ podman login --tls-verify=false localhost:8080`

* Tag any image with the local development repository hostname, org, repo, and tag.
  * `$ podman tag ubuntu:latest localhost:8080/{org}/{repo}:{tag}`

* Push the tagged image to Quay.
  * `$ podman push --tls-verify=false localhost:8080/{org}/{repo}:{tag}`

## Hot-Reload

The local development environment supports hot-reload of Quay, the Quay workers, and the front end UI code.

The Quay registry runs in production as a gunicorn worker, so no change other than enabling hot-reload is necessary.

Quay workers run as generic Python modules in production.
To support hot-reloading, each worker was modified to run as a gunicorn worker sub-process (only for local-dev).
When the source code is updated and saved to disk, the gunicorn worker is restarted.

The front end code supports hot-reload by running `npm watch` in the background during container startup.
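The worker hot-reload comes from gunicorn's built-in `reload` option; the settings live in `local-dev/gunicorn_worker.py`, added by this commit:

``` shell
grep -n 'reload' local-dev/gunicorn_worker.py
# 5:reload = True
# 6:reload_engine = "auto"
```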
## Troubleshooting

If you are having trouble with the local development environment, run through the following common issues first.

### Quay and Clair initialization

Both Quay and Clair take some time to fully initialize.
This time is also bound by your workstation's resources.
Give the environment a few minutes to fully initialize.

If a considerable amount of time has passed, you can view the Quay and Clair logs for a hint.

``` shell
docker logs -f quay-quay
```

for Quay logs;

``` shell
docker logs -f quay-clair
```

for Clair logs.

### Docker Resources

If using Docker on macOS, it's possible the virtual machine in use is too resource-constrained to host Quay.
You can modify these resources by following this tutorial:
<https://docs.docker.com/docker-for-mac/#resources>

Provide the VM more memory and CPU to ensure smooth performance.

Linux machines should not have this issue, as Docker runs directly on the host and resources are not constrained by a VM.

### Firewalld

In certain cases firewalld may block network traffic between the host and containers.
If you are experiencing connectivity issues, try disabling firewalld or any other firewall on your host machine.

### Port conflicts

If you are running any containers or services on the same ports the local development environment tries to bind to, you will experience connectivity issues or errors.

Make sure no other software is bound to the ports listed above while using the local development environment.
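A quick way to check for conflicts before starting the environment (assuming `ss` is available; `lsof -i :8080` works similarly):

``` shell
# list listeners on the ports the environment binds (8080, 5432, 5433, 6379)
ss -tlnp | grep -E ':(8080|5432|5433|6379)\b'
```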
@ -1,5 +1,9 @@
# Getting Started With Quay

The quickest way to get Quay and (optionally) Clair running for development and testing purposes is to use our [docker based local development environment](./docker-local-dev.md).

If you'd like to understand a Quay deployment in more detail, continue reading...

This guide is meant to show how to get Quay up and running with a minimum of external dependencies for testing or evaluation. It aims to illustrate the components of a Quay installation to understand how the pieces fit together. Please don't use this configuration for production or a highly available Quay installation.

NOTE: This guide uses [podman](https://podman.io/) which assumes a Linux environment- on MacOS or Windows you should be able to substitute with `docker` without trouble.
64
local-dev/Dockerfile
Normal file
@ -0,0 +1,64 @@
FROM centos:8

ENV OS=linux \
    ARCH=amd64 \
    PYTHON_VERSION=3.6 \
    PATH=$HOME/.local/bin/:$PATH \
    PYTHONUNBUFFERED=1 \
    PYTHONIOENCODING=UTF-8 \
    LC_ALL=C.UTF-8 \
    LANG=C.UTF-8

# source code will be mounted here.
ENV QUAYDIR /quay-registry
WORKDIR $QUAYDIR

# all files necessary to invoke quay-entrypoint in the
# local-dev environment will exist here.
ENV QUAYCONF /quay-registry/local-dev
ENV QUAYPATH "."

RUN INSTALL_PKGS="\
        python3 \
        nginx \
        openldap \
        gcc-c++ git \
        openldap-devel \
        python3-devel \
        python3-gpg \
        dnsmasq \
        memcached \
        nodejs \
        openssl \
        skopeo \
        " && \
    yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
    yum -y update && \
    yum -y clean all

ENV JWTPROXY_VERSION=0.0.3
RUN curl -fsSL -o /usr/local/bin/jwtproxy "https://github.com/coreos/jwtproxy/releases/download/v${JWTPROXY_VERSION}/jwtproxy-${OS}-${ARCH}" && \
    chmod +x /usr/local/bin/jwtproxy

ENV PUSHGATEWAY_VERSION=1.0.0
RUN curl -fsSL "https://github.com/prometheus/pushgateway/releases/download/v${PUSHGATEWAY_VERSION}/pushgateway-${PUSHGATEWAY_VERSION}.${OS}-${ARCH}.tar.gz" | \
    tar xz "pushgateway-${PUSHGATEWAY_VERSION}.${OS}-${ARCH}/pushgateway" && \
    mv "pushgateway-${PUSHGATEWAY_VERSION}.${OS}-${ARCH}/pushgateway" /usr/local/bin/pushgateway && \
    rm -rf "pushgateway-${PUSHGATEWAY_VERSION}.${OS}-${ARCH}" && \
    chmod +x /usr/local/bin/pushgateway

RUN curl -L -o /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_amd64
RUN chmod +x /usr/local/bin/dumb-init

# keep this last, will allow for fastest container rebuilds.
COPY requirements.txt .
RUN alternatives --set python /usr/bin/python3 && \
    python -m pip install --no-cache-dir --upgrade setuptools pip && \
    python -m pip install --no-cache-dir -r requirements.txt --no-cache && \
    python -m pip freeze

EXPOSE 8080 8443 7443 9091 55443

ENTRYPOINT ["dumb-init", "--", "/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]
24
local-dev/clair/config.yaml
Normal file
@ -0,0 +1,24 @@
---
log_level: debug-color
introspection_addr: ""
http_listen_addr: ":6000"
updaters: {}
indexer:
  connstring: host=clair-db port=5432 user=clair dbname=clair sslmode=disable
  scanlock_retry: 10
  layer_scan_concurrency: 5
  migrations: true
matcher:
  connstring: host=clair-db port=5432 user=clair dbname=clair sslmode=disable
  max_conn_pool: 100
  migrations: true
notifier:
  connstring: host=clair-db port=5432 user=clair dbname=clair sslmode=disable
  migrations: true
  delivery_interval: 5s
  poll_interval: 15s
  webhook:
    target: "http://localhost:8080/secscan/notification"
    callback: "http://localhost:6000/notifier/api/v1/notification"
metrics:
  name: "prometheus"
67
local-dev/config.yaml
Normal file
@ -0,0 +1,67 @@
SUPER_USERS:
  - admin
AUTHENTICATION_TYPE: Database
BITTORRENT_FILENAME_PEPPER: 0ee18f90-5b6d-42d2-ab5e-ec9fcd846272
BUILDLOGS_REDIS:
  host: quay-redis
  port: 6379
DATABASE_SECRET_KEY: '30060361640793187613697366923211113205676925445650250274752125083971638376224'
DB_URI: postgresql://quay:quay@quay-db/quay
DEFAULT_TAG_EXPIRATION: 2w
DISTRIBUTED_STORAGE_CONFIG:
  default:
    - LocalStorage
    - storage_path: /datastorage/registry
DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: []
DISTRIBUTED_STORAGE_PREFERENCE:
  - default
ENTERPRISE_LOGO_URL: /static/img/quay-horizontal-color.svg
EXTERNAL_TLS_TERMINATION: true
FEATURE_ACI_CONVERSION: false
FEATURE_ANONYMOUS_ACCESS: true
FEATURE_APP_REGISTRY: false
FEATURE_APP_SPECIFIC_TOKENS: true
FEATURE_BUILD_SUPPORT: false
FEATURE_CHANGE_TAG_EXPIRATION: true
FEATURE_DIRECT_LOGIN: true
FEATURE_MAILING: false
FEATURE_PARTIAL_USER_AUTOCOMPLETE: true
FEATURE_REPO_MIRROR: false
FEATURE_REQUIRE_TEAM_INVITE: true
FEATURE_RESTRICTED_V1_PUSH: false
FEATURE_SECURITY_NOTIFICATIONS: true
FEATURE_SECURITY_SCANNER: true
FEATURE_USERNAME_CONFIRMATION: true
FEATURE_USER_CREATION: true
FEATURE_USER_LOG_ACCESS: true
GITHUB_LOGIN_CONFIG: {}
GITHUB_TRIGGER_CONFIG: {}
GITLAB_TRIGGER_KIND: {}
GPG2_PRIVATE_KEY_FILENAME: signing-private.gpg
GPG2_PUBLIC_KEY_FILENAME: signing-public.gpg
LOG_ARCHIVE_LOCATION: default
MAIL_DEFAULT_SENDER: support@quay.io
MAIL_PORT: 587
MAIL_USE_TLS: true
PREFERRED_URL_SCHEME: http
REGISTRY_TITLE: Red Hat Quay
REGISTRY_TITLE_SHORT: Red Hat Quay
REPO_MIRROR_SERVER_HOSTNAME: null
REPO_MIRROR_TLS_VERIFY: true
SERVER_HOSTNAME: localhost:8080
SETUP_COMPLETE: true
SIGNING_ENGINE: gpg2
TAG_EXPIRATION_OPTIONS:
  - 0s
  - 1d
  - 1w
  - 2w
  - 4w
TEAM_RESYNC_STALE_TIME: 60m
TESTING: false
USERFILES_LOCATION: default
USERFILES_PATH: userfiles/
USER_EVENTS_REDIS:
  host: quay-redis
  port: 6379
USE_CDN: false
34
local-dev/gunicorn_registry.py
Normal file
@ -0,0 +1,34 @@
# NOTE: Must be before we import or call anything that may be synchronous.
from gevent import monkey

monkey.patch_all()

import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), "../"))

import logging

from Crypto import Random

bind = "unix:/tmp/gunicorn_registry.sock"
workers = 1
worker_class = "gevent"
worker_connections = 30
pythonpath = "."
reload = True
reload_engine = "auto"


def post_fork(server, worker):
    # Reset the Random library to ensure it won't raise the "PID check failed." error after
    # gunicorn forks.
    Random.atfork()


def when_ready(server):
    logger = logging.getLogger(__name__)
    logger.debug(
        "Starting registry gunicorn with %s workers and %s worker class", workers, worker_class
    )
34
local-dev/gunicorn_secscan.py
Normal file
@ -0,0 +1,34 @@
# NOTE: Must be before we import or call anything that may be synchronous.
from gevent import monkey

monkey.patch_all()

import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), "../"))

import logging

from Crypto import Random

bind = "unix:/tmp/gunicorn_secscan.sock"
workers = 1
worker_class = "gevent"
worker_connections = 30
pythonpath = "."
reload = True
reload_engine = "auto"


def post_fork(server, worker):
    # Reset the Random library to ensure it won't raise the "PID check failed." error after
    # gunicorn forks.
    Random.atfork()


def when_ready(server):
    logger = logging.getLogger(__name__)
    logger.debug(
        "Starting secscan gunicorn with %s workers and %s worker class", workers, worker_class
    )
32
local-dev/gunicorn_web.py
Normal file
@ -0,0 +1,32 @@
# NOTE: Must be before we import or call anything that may be synchronous.
from gevent import monkey

monkey.patch_all()

import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), "../"))

import logging

from Crypto import Random

bind = "unix:/tmp/gunicorn_web.sock"
workers = 1
worker_class = "gevent"
worker_connections = 30
pythonpath = "."
reload = True
reload_engine = "auto"


def post_fork(server, worker):
    # Reset the Random library to ensure it won't raise the "PID check failed." error after
    # gunicorn forks.
    Random.atfork()


def when_ready(server):
    logger = logging.getLogger(__name__)
    logger.debug("Starting web gunicorn with %s workers and %s worker class", workers, worker_class)
6
local-dev/gunicorn_worker.py
Normal file
@ -0,0 +1,6 @@
workers = 1
worker_class = "sync"
worker_connections = 30
pythonpath = "."
reload = True
reload_engine = "auto"
20
local-dev/init/01_local-dev-dependencies.sh
Executable file
@ -0,0 +1,20 @@
#!/bin/bash

set -e

QUAYDIR=${QUAYDIR:-"/"}

cd $QUAYDIR

echo "[Local Dev] - Downloading AWS IP Ranges..."
curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json

echo "[Local Dev] - Building Front End..."
mkdir -p $QUAYDIR/static/webfonts && \
mkdir -p $QUAYDIR/static/fonts && \
mkdir -p $QUAYDIR/static/ldn && \
PYTHONPATH=$QUAYPATH python -m external_libraries && \
npm install --ignore-engines && \
npm run watch &

cd -
0
local-dev/init/__init__.py
Normal file
17
local-dev/init/certs_create.sh
Executable file
@ -0,0 +1,17 @@
#! /bin/bash
set -e
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"}
# Create certs for jwtproxy to mitm outgoing TLS connections
# echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm
mkdir -p /tmp/certificates; cd /tmp/certificates
openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 \
    -subj "/C=US/ST=NY/L=NYC/O=Dis/CN=self-signed" \
    -keyout mitm-key.pem -out mitm.pem
cp /tmp/certificates/mitm-key.pem $QUAYCONF/mitm.key
cp /tmp/certificates/mitm.pem $QUAYCONF/mitm.cert
cp /tmp/certificates/mitm.pem $SYSTEM_CERTDIR/mitm.crt
rm -Rf /tmp/certificates
local-dev/init/certs_install.sh
Executable file
44
local-dev/init/certs_install.sh
Executable file
@ -0,0 +1,44 @@
|
#! /bin/bash
set -e
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
QUAYCONFIG=${QUAYCONFIG:-"$QUAYCONF/stack"}
CERTDIR=${CERTDIR:-"$QUAYCONFIG/extra_ca_certs"}
SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"}

PYTHON_ROOT=${PYTHON_ROOT:-"/usr/local/lib/python3.6"}

cd ${QUAYDIR:-"/quay-registry"}

# Add the custom LDAP certificate
if [ -e $QUAYCONFIG/ldap.crt ]
then
    cp $QUAYCONFIG/ldap.crt ${SYSTEM_CERTDIR}/ldap.crt
fi

# Add extra trusted certificates (as a directory)
if [ -d $CERTDIR ]; then
    if test "$(ls -A "$CERTDIR")"; then
        echo "Installing extra certificates found in $CERTDIR directory"
        cp $CERTDIR/* ${SYSTEM_CERTDIR}
        cat $CERTDIR/* >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
    fi
fi

# Add extra trusted certificates (as a file)
if [ -f $CERTDIR ]; then
    echo "Installing extra certificates found in $CERTDIR file"
    csplit -z -f ${SYSTEM_CERTDIR}/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}'
    cat $CERTDIR >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
fi

# Add extra trusted certificates (prefixed)
for f in $(find -L $QUAYCONFIG/ -maxdepth 1 -type f -name "extra_ca*")
do
    echo "Installing extra cert $f"
    cp "$f" ${SYSTEM_CERTDIR}
    cat "$f" >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
done

# Update all CA certificates.
update-ca-trust extract
16
local-dev/init/copy_config_files.sh
Executable file
@ -0,0 +1,16 @@
#! /bin/sh
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}

cd ${QUAYDIR:-"/"}

if [ -e $QUAYCONF/stack/robots.txt ]
then
    cp $QUAYCONF/stack/robots.txt $QUAYPATH/templates/robots.txt
fi

if [ -e $QUAYCONF/stack/favicon.ico ]
then
    cp $QUAYCONF/stack/favicon.ico $QUAYPATH/static/favicon.ico
fi
17
local-dev/init/d_validate_config_bundle.sh
Executable file
@ -0,0 +1,17 @@
#!/bin/bash
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
# IGNORE_VALIDATION= -> Set this variable to continue Quay boot after a failed config validation.

echo "Validating Configuration"
config-tool validate -c $QUAYCONF/stack/ --mode online

status=$?

if [ -z "${IGNORE_VALIDATION}" ]
then
    exit $status
else
    exit 0
fi
25
local-dev/init/data_migration.py
Normal file
@ -0,0 +1,25 @@
import os

from app import app
from active_migration import ActiveDataMigration


def current_migration():
    if os.getenv("ENSURE_NO_MIGRATION", "").lower() == "true":
        raise Exception("Cannot call migration when ENSURE_NO_MIGRATION is true")

    if not app.config.get("SETUP_COMPLETE", False):
        return "head"
    else:
        if ActiveDataMigration is not None:
            return ActiveDataMigration.alembic_migration_revision
        else:
            return "head"


def main():
    print(current_migration())


if __name__ == "__main__":
    main()
42
local-dev/init/logrotate.conf
Normal file
@ -0,0 +1,42 @@
#
# This file exists because of a bug in phusion/baseimage:0.9.19 where the su
# directive below is configured to use the nonexistent syslog user.
#

# see "man logrotate" for details
# rotate log files weekly
weekly

# use the syslog group by default, since this is the owning group
# of /var/log/syslog.
su root root

# keep 4 weeks worth of backlogs
rotate 4

# create new (empty) log files after rotating old ones
create

# uncomment this if you want your log files compressed
#compress

# packages drop log rotation information into this directory
include /etc/logrotate.d

# no packages own wtmp, or btmp -- we'll rotate them here
/var/log/wtmp {
    missingok
    monthly
    create 0664 root utmp
    rotate 1
}

/var/log/btmp {
    missingok
    monthly
    create 0660 root utmp
    rotate 1
}

# system-specific logs may be configured here
150
local-dev/init/nginx_conf_create.py
Normal file
@ -0,0 +1,150 @@
import os
import os.path

import yaml
import jinja2

QUAYPATH = os.getenv("QUAYPATH", ".")
QUAYDIR = os.getenv("QUAYDIR", "/")
QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf"))
STATIC_DIR = os.path.join(QUAYDIR, "static")

SSL_PROTOCOL_DEFAULTS = ["TLSv1", "TLSv1.1", "TLSv1.2"]
SSL_CIPHER_DEFAULTS = [
    "ECDHE-RSA-AES128-GCM-SHA256",
    "ECDHE-ECDSA-AES128-GCM-SHA256",
    "ECDHE-RSA-AES256-GCM-SHA384",
    "ECDHE-ECDSA-AES256-GCM-SHA384",
    "DHE-RSA-AES128-GCM-SHA256",
    "DHE-DSS-AES128-GCM-SHA256",
    "kEDH+AESGCM",
    "ECDHE-RSA-AES128-SHA256",
    "ECDHE-ECDSA-AES128-SHA256",
    "ECDHE-RSA-AES128-SHA",
    "ECDHE-ECDSA-AES128-SHA",
    "ECDHE-RSA-AES256-SHA384",
    "ECDHE-ECDSA-AES256-SHA384",
    "ECDHE-RSA-AES256-SHA",
    "ECDHE-ECDSA-AES256-SHA",
    "DHE-RSA-AES128-SHA256",
    "DHE-RSA-AES128-SHA",
    "DHE-DSS-AES128-SHA256",
    "DHE-RSA-AES256-SHA256",
    "DHE-DSS-AES256-SHA",
    "DHE-RSA-AES256-SHA",
    "AES128-GCM-SHA256",
    "AES256-GCM-SHA384",
    "AES128-SHA256",
    "AES256-SHA256",
    "AES128-SHA",
    "AES256-SHA",
    "AES",
    "CAMELLIA",
    "!3DES",
    "!aNULL",
    "!eNULL",
    "!EXPORT",
    "!DES",
    "!RC4",
    "!MD5",
    "!PSK",
    "!aECDH",
    "!EDH-DSS-DES-CBC3-SHA",
    "!EDH-RSA-DES-CBC3-SHA",
    "!KRB5-DES-CBC3-SHA",
]


def write_config(filename, **kwargs):
    with open(filename + ".jnj") as f:
        template = jinja2.Template(f.read())
        rendered = template.render(kwargs)

    with open(filename, "w") as f:
        f.write(rendered)


def generate_nginx_config(config):
    """
    Generates nginx config from the app config.
    """
    config = config or {}
    use_https = os.path.exists(os.path.join(QUAYCONF_DIR, "stack/ssl.key"))
    v1_only_domain = config.get("V1_ONLY_DOMAIN", None)
    enable_rate_limits = config.get("FEATURE_RATE_LIMITS", False)
    ssl_protocols = config.get("SSL_PROTOCOLS", SSL_PROTOCOL_DEFAULTS)
    ssl_ciphers = config.get("SSL_CIPHERS", SSL_CIPHER_DEFAULTS)

    write_config(
        os.path.join(QUAYCONF_DIR, "nginx/nginx.conf"),
        use_https=use_https,
        enable_rate_limits=enable_rate_limits,
        v1_only_domain=v1_only_domain,
        ssl_protocols=ssl_protocols,
        ssl_ciphers=":".join(ssl_ciphers),
    )


def generate_server_config(config):
    """
    Generates server config from the app config.
    """
    config = config or {}
    tuf_server = config.get("TUF_SERVER", None)
    tuf_host = config.get("TUF_HOST", None)
    signing_enabled = config.get("FEATURE_SIGNING", False)
    maximum_layer_size = config.get("MAXIMUM_LAYER_SIZE", "20G")
    maximum_cnr_layer_size = config.get("MAXIMUM_CNR_LAYER_SIZE", "1M")
    enable_rate_limits = config.get("FEATURE_RATE_LIMITS", False)

    write_config(
        os.path.join(QUAYCONF_DIR, "nginx/server-base.conf"),
        tuf_server=tuf_server,
        tuf_host=tuf_host,
        signing_enabled=signing_enabled,
        maximum_layer_size=maximum_layer_size,
        maximum_cnr_layer_size=maximum_cnr_layer_size,
        enable_rate_limits=enable_rate_limits,
        static_dir=STATIC_DIR,
    )


def generate_rate_limiting_config(config):
    """
    Generates rate limiting config from the app config.
    """
    config = config or {}
    non_rate_limited_namespaces = config.get("NON_RATE_LIMITED_NAMESPACES") or set()
    enable_rate_limits = config.get("FEATURE_RATE_LIMITS", False)
    write_config(
        os.path.join(QUAYCONF_DIR, "nginx/rate-limiting.conf"),
        non_rate_limited_namespaces=non_rate_limited_namespaces,
        enable_rate_limits=enable_rate_limits,
        static_dir=STATIC_DIR,
    )


def generate_hosted_http_base_config(config):
    """
    Generates hosted http base config from the app config.
    """
    config = config or {}
    feature_proxy_protocol = config.get("FEATURE_PROXY_PROTOCOL", False)

    write_config(
        os.path.join(QUAYCONF_DIR, "nginx/hosted-http-base.conf"),
        feature_proxy_protocol=feature_proxy_protocol,
    )


if __name__ == "__main__":
    if os.path.exists(os.path.join(QUAYCONF_DIR, "stack/config.yaml")):
        with open(os.path.join(QUAYCONF_DIR, "stack/config.yaml"), "r") as f:
            config = yaml.safe_load(f)
    else:
        config = None

    generate_hosted_http_base_config(config)
    generate_rate_limiting_config(config)
    generate_server_config(config)
    generate_nginx_config(config)
8
local-dev/init/nginx_conf_create.sh
Executable file
@ -0,0 +1,8 @@
#!/bin/bash

QUAYDIR=${QUAYDIR:-"/"}
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}

cd $QUAYDIR
python $QUAYCONF/init/nginx_conf_create.py
1
local-dev/init/pg_bootstrap.sql
Normal file
@ -0,0 +1 @@
CREATE EXTENSION pg_trgm;
10
local-dev/init/runmigration.sh
Executable file
@ -0,0 +1,10 @@
#!/bin/bash
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}

set -e
cd ${QUAYDIR:-"/"}

# Run the database migration
PYTHONPATH=${QUAYPATH:-"."} python $QUAYCONF/init/data_migration.py > revision_head
PYTHONPATH=${QUAYPATH:-"."} alembic upgrade `cat revision_head`
130
local-dev/init/supervisord_conf_create.py
Normal file
@ -0,0 +1,130 @@
import os
import os.path
import sys
import jinja2

QUAYPATH = os.getenv("QUAYPATH", ".")
QUAYDIR = os.getenv("QUAYDIR", "/")
QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf"))

QUAY_LOGGING = os.getenv("QUAY_LOGGING", "stdout")  # or "syslog"

QUAY_SERVICES = os.getenv("QUAY_SERVICES", [])
QUAY_OVERRIDE_SERVICES = os.getenv("QUAY_OVERRIDE_SERVICES", [])


def registry_services():
    return {
        "blobuploadcleanupworker": {"autostart": "true"},
        "buildlogsarchiver": {"autostart": "true"},
        "builder": {"autostart": "true"},
        "chunkcleanupworker": {"autostart": "true"},
        "expiredappspecifictokenworker": {"autostart": "true"},
        "exportactionlogsworker": {"autostart": "true"},
        "gcworker": {"autostart": "true"},
        "globalpromstats": {"autostart": "true"},
        "logrotateworker": {"autostart": "true"},
        "namespacegcworker": {"autostart": "true"},
        "repositorygcworker": {"autostart": "true"},
        "notificationworker": {"autostart": "true"},
        "queuecleanupworker": {"autostart": "true"},
        "repositoryactioncounter": {"autostart": "true"},
        "securityworker": {"autostart": "true"},
        "storagereplication": {"autostart": "true"},
        "teamsyncworker": {"autostart": "true"},
        "dnsmasq": {"autostart": "true"},
        "gunicorn-registry": {"autostart": "true"},
        "gunicorn-secscan": {"autostart": "true"},
        "gunicorn-web": {"autostart": "true"},
        "ip-resolver-update-worker": {"autostart": "true"},
        "jwtproxy": {"autostart": "true"},
        "memcache": {"autostart": "true"},
        "nginx": {"autostart": "true"},
        "pushgateway": {"autostart": "true"},
        "servicekey": {"autostart": "true"},
        "repomirrorworker": {"autostart": "false"},
        "manifestbackfillworker": {"autostart": "true"},
        "securityscanningnotificationworker": {"autostart": "true"},
        "config-editor": {"autostart": "false"},
    }


def config_services():
    return {
        "blobuploadcleanupworker": {"autostart": "false"},
        "buildlogsarchiver": {"autostart": "false"},
        "builder": {"autostart": "false"},
        "chunkcleanupworker": {"autostart": "false"},
        "expiredappspecifictokenworker": {"autostart": "false"},
        "exportactionlogsworker": {"autostart": "false"},
        "gcworker": {"autostart": "false"},
        "globalpromstats": {"autostart": "false"},
        "logrotateworker": {"autostart": "false"},
        "namespacegcworker": {"autostart": "false"},
        "repositorygcworker": {"autostart": "false"},
        "notificationworker": {"autostart": "false"},
        "queuecleanupworker": {"autostart": "false"},
        "repositoryactioncounter": {"autostart": "false"},
        "securityworker": {"autostart": "false"},
        "storagereplication": {"autostart": "false"},
        "teamsyncworker": {"autostart": "false"},
        "dnsmasq": {"autostart": "false"},
        "gunicorn-registry": {"autostart": "false"},
        "gunicorn-secscan": {"autostart": "false"},
        "gunicorn-web": {"autostart": "false"},
        "ip-resolver-update-worker": {"autostart": "false"},
        "jwtproxy": {"autostart": "false"},
        "memcache": {"autostart": "false"},
        "nginx": {"autostart": "false"},
        "pushgateway": {"autostart": "false"},
        "servicekey": {"autostart": "false"},
        "repomirrorworker": {"autostart": "false"},
        "manifestbackfillworker": {"autostart": "false"},
        "securityscanningnotificationworker": {"autostart": "false"},
        "config-editor": {"autostart": "true"},
    }


def generate_supervisord_config(filename, config, logdriver):
    with open(filename + ".jnj") as f:
        template = jinja2.Template(f.read())
        rendered = template.render(config=config, logdriver=logdriver)

    with open(filename, "w") as f:
        f.write(rendered)


def limit_services(config, enabled_services):
    if enabled_services == []:
        return

    for service in list(config.keys()):
        if service in enabled_services:
            config[service]["autostart"] = "true"
        else:
            config[service]["autostart"] = "false"


def override_services(config, override_services):
    if override_services == []:
        return

    for service in list(config.keys()):
        if service + "=true" in override_services:
            config[service]["autostart"] = "true"
        elif service + "=false" in override_services:
            config[service]["autostart"] = "false"


if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "config":
        config = config_services()
    else:
        config = registry_services()
    limit_services(config, QUAY_SERVICES)
    override_services(config, QUAY_OVERRIDE_SERVICES)
    generate_supervisord_config(
        os.path.join(QUAYCONF_DIR, "supervisord.conf"),
        config,
        QUAY_LOGGING,
    )
11
local-dev/init/supervisord_conf_create.sh
Executable file
@ -0,0 +1,11 @@
#!/bin/bash

QUAYDIR=${QUAYDIR:-"/"}
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}

QUAYENTRY=${QUAYENTRY:=$1}
QUAYENTRY=${QUAYENTRY:=registry}

cd $QUAYDIR
python $QUAYCONF/init/supervisord_conf_create.py $QUAYENTRY
4
local-dev/init/zz_boot.sh
Executable file
@ -0,0 +1,4 @@
#!/bin/bash
cd ${QUAYDIR:-"/"}

python ${QUAYPATH:-"."}/boot.py
28
local-dev/jwtproxy_conf.yaml.jnj
Normal file
@ -0,0 +1,28 @@
jwtproxy:
  signer_proxy:
    enabled: true
    listen_addr: :8081
    ca_key_file: {{ conf_dir }}/mitm.key
    ca_crt_file: {{ conf_dir }}/mitm.cert

    signer:
      issuer: quay
      expiration_time: 5m
      max_skew: 1m
      private_key:
        type: preshared
        options:
          key_id: {{ key_id }}
          private_key_path: {{ service_key_location }}
  verifier_proxies:
  - enabled: true
    listen_addr: unix:/tmp/jwtproxy_secscan.sock
    socket_permission: 0777
    verifier:
      upstream: unix:/tmp/gunicorn_secscan.sock
      audience: {{ audience }}
      key_server:
        type: keyregistry
        options:
          issuer: {{ security_issuer }}
          registry: {{ registry }}
36
local-dev/logging.conf
Normal file
@ -0,0 +1,36 @@
[loggers]
keys=root,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=generic,json

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler.JsonFormatter

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
41
local-dev/logging_debug.conf
Normal file
@ -0,0 +1,41 @@
[loggers]
keys=root,boto,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=generic,json

[logger_root]
level=DEBUG
handlers=console

[logger_boto]
level=INFO
handlers=console
qualname=boto

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG

[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler.JsonFormatter
41
local-dev/logging_debug_json.conf
Normal file
@ -0,0 +1,41 @@
[loggers]
keys=root,boto,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=generic,json

[logger_root]
level=DEBUG
handlers=console

[logger_boto]
level=INFO
handlers=console
qualname=boto

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG

[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler.JsonFormatter
36
local-dev/logging_json.conf
Normal file
@ -0,0 +1,36 @@
[loggers]
keys=root,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=json,generic

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler.JsonFormatter

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
8
local-dev/nginx/dhparams.pem
Normal file
@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEAk7fEh4MFr446aU61ZGxCl8VHvcJhDGcdd+3zaNxdWF7Wvr5QE8zX
QswoM5K2szlK7klcJOXer2IToHHQQn00nuWO3m6quZGV6EPbRmRKfRGa8pzSwH+R
Ph0OUpEQPh7zvegeVwEbrblD7i53ookbHlYGtxsPb28Y06OP5/xpks9C815Zy4gy
tx2yHi4FkFo52yErBF9jD/glsZYVHCo42LFrVGa5/7V0g++fG8yXCrBnqmz2d8FF
uU6/KJcmDCUn1m3mDfcf5HgeXSIsukW/XMZ3l9w1fdluJRwdEE9W2ePgqMiG3eC0
2T1sPfXCdXPQ7/5Gzf1eMtRZ/McipxVbgwIBAg==
-----END DH PARAMETERS-----
11
local-dev/nginx/hosted-http-base.conf
Normal file
@ -0,0 +1,11 @@
# vim: ft=nginx

server {

    listen 8080 default_server;

    server_name _;
    rewrite ^ https://$host$request_uri? permanent;
}
14
local-dev/nginx/hosted-http-base.conf.jnj
Normal file
@ -0,0 +1,14 @@
# vim: ft=nginx

server {

    {% if feature_proxy_protocol %}
    listen 8080 default_server proxy_protocol;
    {% else %}
    listen 8080 default_server;
    {% endif %}

    server_name _;
    rewrite ^ https://$host$request_uri? permanent;
}
64
local-dev/nginx/http-base.conf
Normal file
@ -0,0 +1,64 @@
# vim: ft=nginx

set_real_ip_from 0.0.0.0/0;
real_ip_recursive on;
log_format lb_logs '$remote_addr ($proxy_protocol_addr) '
                   '- $remote_user [$time_local] '
                   '"$request" $status $body_bytes_sent '
                   '"$http_referer" "$http_user_agent" '
                   '($request_time $request_length $upstream_response_time)';

types_hash_max_size 2048;
include /etc/nginx/mime.types;

default_type application/octet-stream;

access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
client_body_temp_path /tmp/nginx 1 2;
proxy_temp_path /tmp/nginx-proxy;
fastcgi_temp_path /tmp/nginx-fastcgi;
uwsgi_temp_path /tmp/nginx-uwsgi;
scgi_temp_path /tmp/nginx-scgi;

sendfile on;

gzip on;
gzip_http_version 1.0;
gzip_proxied any;
gzip_min_length 500;
gzip_disable "MSIE [1-6]\.";
gzip_types text/plain text/xml text/css
           text/javascript application/x-javascript
           application/javascript image/svg+xml
           application/octet-stream;

map $proxy_protocol_addr $proper_forwarded_for {
    "" $proxy_add_x_forwarded_for;
    default $proxy_protocol_addr;
}

map $http_x_forwarded_proto $proper_scheme {
    default $scheme;
    https https;
}

upstream web_app_server {
    server unix:/tmp/gunicorn_web.sock fail_timeout=0;
}
upstream jwtproxy_secscan {
    server unix:/tmp/jwtproxy_secscan.sock fail_timeout=0;
}
upstream registry_app_server {
    server unix:/tmp/gunicorn_registry.sock fail_timeout=0;
}

# NOTE: Exposed for the _internal_ping *only*. All other secscan routes *MUST* go through
# the jwtproxy.
upstream secscan_app_server {
    server unix:/tmp/gunicorn_secscan.sock fail_timeout=0;
}

upstream build_manager_server {
    server localhost:50051;
}
21
local-dev/nginx/nginx.conf
Normal file
@ -0,0 +1,21 @@
# vim: ft=nginx

include root-base.conf;


http {
    include http-base.conf;
    include rate-limiting.conf;

    resolver 127.0.0.1:8053 valid=10s;

    server {
        include server-base.conf;

        listen 8080 default;

        access_log /var/log/nginx/access.log lb_logs;
    }
}
140
local-dev/nginx/nginx.conf.jnj
Normal file
@ -0,0 +1,140 @@
# vim: ft=nginx

include root-base.conf;

{% if use_https %}

http {
    include http-base.conf;
    include hosted-http-base.conf;
    include rate-limiting.conf;

    server_names_hash_bucket_size 64;

    resolver 127.0.0.1:8053 valid=10s;

    ssl_ciphers '{{ ssl_ciphers }}';
    ssl_protocols {% for ssl_protocol in ssl_protocols %}{{ ssl_protocol }} {% endfor %};
    ssl_session_cache shared:SSL:60m;
    ssl_session_timeout 2h;
    ssl_session_tickets on;
    ssl_prefer_server_ciphers on;
    ssl_dhparam dhparams.pem;

    server {
        server_name _;

        ssl_certificate ../stack/ssl.cert;
        ssl_certificate_key ../stack/ssl.key;

        include server-base.conf;

        listen 8443 ssl http2 default;

        ssl on;

        # This header must be set only for HTTPS
        add_header Strict-Transport-Security "max-age=63072000; preload";

        access_log /var/log/nginx/access.log lb_logs;
    }

    server {
        server_name _;

        ssl_certificate ../stack/ssl.cert;
        ssl_certificate_key ../stack/ssl.key;

        include server-base.conf;

        listen 7443 ssl http2 default proxy_protocol;
        ssl on;

        # This header must be set only for HTTPS
        add_header Strict-Transport-Security "max-age=63072000; preload";

        real_ip_header proxy_protocol;

        access_log /var/log/nginx/access.log lb_logs;
    }

    server {
        server_name _;

        ssl_certificate ../stack/ssl.cert;
        ssl_certificate_key ../stack/ssl.key;

        listen 55443 ssl http2 default;
        ssl on;

        location / {
            grpc_pass grpc://build_manager_server;
        }

        # This header must be set only for HTTPS
        add_header Strict-Transport-Security "max-age=63072000; preload";

        real_ip_header proxy_protocol;

        access_log /var/log/nginx/access.log lb_logs;
    }

    {% if v1_only_domain %}
    server {
        include server-base.conf;

        server_name {{ v1_only_domain }};

        ssl_certificate ../stack/ssl.cert;
        ssl_certificate_key ../stack/ssl.key;

        listen 8443 ssl;

        ssl on;

        # This header must be set only for HTTPS
        add_header Strict-Transport-Security "max-age=63072000; preload";

        access_log /var/log/nginx/access.log lb_logs;
    }

    server {
        server_name {{ v1_only_domain }};

        ssl_certificate ../stack/ssl.cert;
        ssl_certificate_key ../stack/ssl.key;

        include server-base.conf;

        listen 7443 ssl proxy_protocol;
        ssl on;

        # This header must be set only for HTTPS
        add_header Strict-Transport-Security "max-age=63072000; preload";

        real_ip_header proxy_protocol;

        access_log /var/log/nginx/access.log lb_logs;
    }
    {% endif %}

}

{% else %}

http {
    include http-base.conf;
    include rate-limiting.conf;

    resolver 127.0.0.1:8053 valid=10s;

    server {
        include server-base.conf;

        listen 8080 default;

        access_log /var/log/nginx/access.log lb_logs;
    }
}

{% endif %}
56 local-dev/nginx/rate-limiting.conf Normal file
@@ -0,0 +1,56 @@
# vim: ft=nginx

# Define two buckets: one for http1 connections (which we force to shard across our fleet) and
# one for http2 connections (which will all hit the same node).
map $http2 $http1_bucket {
    "" $proxy_protocol_addr;    # HTTP1 case: use the IP address, since shared across nodes.
    default $request_id;        # HTTP2 case: use request ID to "disable" check.
}

map $http2 $http2_bucket {
    "" $request_id;             # HTTP1 case: use the request ID to "disable" check.
    default $connection;        # HTTP2 case: use the connection serial number to limit.
}

# Define two additional buckets that fall to $request_id (thus no effective rate limiting) if
# a specific set of namespaces is matched. This allows us to turn off rate limiting selectively
# for special internal namespaces.
map $namespace $namespaced_http1_bucket {
    default $request_id;
}

map $namespace $namespaced_http2_bucket {
    default $request_id;
}

limit_req_zone $request_id zone=staticauth:10m rate=300r/s;

limit_req_zone $http1_bucket zone=dynamicauth_very_light_http1:10m rate=30r/s;
limit_req_zone $http2_bucket zone=dynamicauth_very_light_http2:10m rate=600r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_very_light_http1:10m rate=30r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_very_light_http2:10m rate=600r/s;

limit_req_zone $http1_bucket zone=dynamicauth_light_http1:10m rate=20r/s;
limit_req_zone $http2_bucket zone=dynamicauth_light_http2:10m rate=400r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_light_http1:10m rate=20r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_light_http2:10m rate=400r/s;

# This zone should always be used with burst=<number> (nodelay|delay) as the
# limit is very low on purpose but should allow for the burst of traffic
# required for a registry operation. The burst number should also vary per
# endpoint.
limit_req_zone $http1_bucket zone=dynamicauth_heavy_http1:10m rate=1r/s;
limit_req_zone $http2_bucket zone=dynamicauth_heavy_http2:10m rate=20r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_heavy_http1:10m rate=1r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_heavy_http2:10m rate=20r/s;

limit_req_status 429;
limit_req_log_level warn;
66 local-dev/nginx/rate-limiting.conf.jnj Normal file
@@ -0,0 +1,66 @@
# vim: ft=nginx

# Define two buckets: one for http1 connections (which we force to shard across our fleet) and
# one for http2 connections (which will all hit the same node).
map $http2 $http1_bucket {
    "" $proxy_protocol_addr;    # HTTP1 case: use the IP address, since shared across nodes.
    default $request_id;        # HTTP2 case: use request ID to "disable" check.
}

map $http2 $http2_bucket {
    "" $request_id;             # HTTP1 case: use the request ID to "disable" check.
    default $connection;        # HTTP2 case: use the connection serial number to limit.
}

# Define two additional buckets that fall to $request_id (thus no effective rate limiting) if
# a specific set of namespaces is matched. This allows us to turn off rate limiting selectively
# for special internal namespaces.
map $namespace $namespaced_http1_bucket {
    {% for namespace in non_rate_limited_namespaces %}
    "{{ namespace }}" $request_id;
    {% endfor %}
    {% if enable_rate_limits %}
    default $http1_bucket;
    {% else %}
    default $request_id;
    {% endif %}
}

map $namespace $namespaced_http2_bucket {
    {% for namespace in non_rate_limited_namespaces %}
    "{{ namespace }}" $request_id;
    {% endfor %}
    {% if enable_rate_limits %}
    default $http2_bucket;
    {% else %}
    default $request_id;
    {% endif %}
}

{% if enable_rate_limits %}
limit_req_zone $http_authorization zone=staticauth:10m rate=30r/s;
{% else %}
limit_req_zone $request_id zone=staticauth:10m rate=300r/s;
{% endif %}

limit_req_zone $http1_bucket zone=dynamicauth_very_light_http1:10m rate=30r/s;
limit_req_zone $http2_bucket zone=dynamicauth_very_light_http2:10m rate=600r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_very_light_http1:10m rate=30r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_very_light_http2:10m rate=600r/s;

limit_req_zone $http1_bucket zone=dynamicauth_light_http1:10m rate=20r/s;
limit_req_zone $http2_bucket zone=dynamicauth_light_http2:10m rate=400r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_light_http1:10m rate=20r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_light_http2:10m rate=400r/s;

# This zone should always be used with burst=<number> (nodelay|delay) as the
# limit is very low on purpose but should allow for the burst of traffic
# required for a registry operation. The burst number should also vary per
# endpoint.
limit_req_zone $http1_bucket zone=dynamicauth_heavy_http1:10m rate=1r/s;
limit_req_zone $http2_bucket zone=dynamicauth_heavy_http2:10m rate=20r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_heavy_http1:10m rate=1r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_heavy_http2:10m rate=20r/s;

limit_req_status 429;
limit_req_log_level warn;
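The static rate-limiting.conf above is effectively this template rendered with rate limits off. A minimal sketch of that relationship, assuming jinja2 is available; the actual render step in Quay's config tooling is not part of this commit, and the inputs below are inferred from the template itself:

    # render the template the way the static local-dev copy appears:
    # rate limits disabled, no exempt namespaces (assumed inputs)
    from jinja2 import Template

    with open("local-dev/nginx/rate-limiting.conf.jnj") as f:
        template = Template(f.read())

    rendered = template.render(
        non_rate_limited_namespaces=[],
        enable_rate_limits=False,  # every bucket falls back to $request_id
    )
    print(rendered)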
1 local-dev/nginx/resolver.conf Normal file
@@ -0,0 +1 @@
resolver 127.0.0.1:8053 valid=10s;
15 local-dev/nginx/root-base.conf Normal file
@@ -0,0 +1,15 @@
# vim: ft=nginx

pid /tmp/nginx.pid;
error_log /var/log/nginx/error.log;

worker_processes auto;
worker_priority -10;
worker_rlimit_nofile 10240;

daemon off;

events {
    worker_connections 10240;
    accept_mutex off;
}
280 local-dev/nginx/server-base.conf Normal file
@@ -0,0 +1,280 @@
# vim: ft=nginx

keepalive_timeout 5;

if ($host = "www.quay.io") {
    return 301 $proper_scheme://quay.io$request_uri;
}

# Disable the ability to be embedded into iframes
add_header X-Frame-Options DENY;

# Proxy Headers
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_redirect off;

proxy_set_header Transfer-Encoding $http_transfer_encoding;

location / {
    proxy_pass http://web_app_server;
}

location /push {
    proxy_pass http://web_app_server;
    client_max_body_size 5M;
}

location /realtime {
    proxy_pass http://web_app_server;
    proxy_buffering off;
    proxy_request_buffering off;
}

location ~ ^/_storage_proxy/([^/]+)/([^/]+)/([^/]+)/(.+) {
    include resolver.conf;

    auth_request /_storage_proxy_auth;

    proxy_pass $2://$3/$4$is_args$args;

    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $3;
    proxy_set_header Authorization "";

    add_header Host $3;

    proxy_buffering off;
    proxy_request_buffering off;

    proxy_read_timeout 60s;
}

location = /_storage_proxy_auth {
    proxy_pass http://web_app_server;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";

    proxy_set_header X-Original-URI $request_uri;

    proxy_read_timeout 10;
}

location ~ ^/v2/_catalog(.*)$ {
    proxy_pass http://registry_app_server;
    proxy_read_timeout 10;
    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

location /secscan/ {
    proxy_pass http://secscan_app_server;
}

location /cnr {
    proxy_buffering off;

    proxy_request_buffering off;

    proxy_pass http://registry_app_server;
    proxy_read_timeout 120;
    proxy_temp_path /tmp 1 2;

    client_max_body_size 1M;
}

location /api/ {
    proxy_pass http://web_app_server;

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

location /api/suconfig {
    proxy_pass http://web_app_server;

    # For suconfig, set our read timeout as super large for both DB migrations
    # and awaiting for secrets to be updated.
    proxy_read_timeout 2000;
}

# This block handles blob requests, and will receive a high volume of traffic, so we set the burst
# much higher.
location ~ /v2/([^/]+)\/[^/]+/blobs/ {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # NOTE: We disable gzip for HEAD requests because Docker issues them to determine the Content
    # Length of a blob. Unfortunately, nginx, seeing an empty body, overwrites the header with
    # a length of 0, which breaks this functionality.
    if ($request_method = HEAD) {
        gzip off;
    }

    proxy_buffering off;
    proxy_request_buffering off;
    proxy_read_timeout 2000;
    proxy_temp_path /tmp 1 2;

    client_max_body_size 20G;

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    set $namespace $1;

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block handles tags endpoint requests, for which we want to restrict traffic due to how
# heavy an operation it can be
location ~ /v2/([^/]+)\/[^/]+/tags/ {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    set $namespace $1;

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block handles manifests endpoint requests, for which we want to restrict traffic heavier than
# the generic V2 operations, as it handles pushes and pulls.
location ~ /v2/([^/]+)\/[^/]+/manifests/ {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    set $namespace $1;

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block applies to the beginning of a push or pull
location = /v2/auth {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block handles all other V2 requests, for which we can use a higher rate limit.
location ~ ^/v2 {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # NOTE: We disable gzip for HEAD requests because Docker issues them to determine the Content
    # Length of a blob. Unfortunately, nginx, seeing an empty body, overwrites the header with
    # a length of 0, which breaks this functionality. Included here for completeness.
    if ($request_method = HEAD) {
        gzip off;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

location /v1/ {
    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_buffering off;

    proxy_request_buffering off;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;
    proxy_temp_path /tmp 1 2;

    client_max_body_size 20G;

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

location = /v1/_ping {
    add_header Content-Type text/plain;
    add_header X-Docker-Registry-Version 0.6.0;
    add_header X-Docker-Registry-Standalone 0;
    return 200 'true';
}

location /static/ {
    # checks for static file, if not found proxy to app
    alias /quay-registry/static/;
    error_page 404 /404;
}

error_page 502 /quay-registry/static/502.html;
311 local-dev/nginx/server-base.conf.jnj Normal file
@@ -0,0 +1,311 @@
# vim: ft=nginx

keepalive_timeout 5;

if ($host = "www.quay.io") {
    return 301 $proper_scheme://quay.io$request_uri;
}

# Disable the ability to be embedded into iframes
add_header X-Frame-Options DENY;

# Proxy Headers
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_redirect off;

proxy_set_header Transfer-Encoding $http_transfer_encoding;

location / {
    proxy_pass http://web_app_server;
}

location /push {
    proxy_pass http://web_app_server;
    client_max_body_size 5M;
}

location /realtime {
    proxy_pass http://web_app_server;
    proxy_buffering off;
    proxy_request_buffering off;
}

location ~ ^/_storage_proxy/([^/]+)/([^/]+)/([^/]+)/(.+) {
    include resolver.conf;

    auth_request /_storage_proxy_auth;

    proxy_pass $2://$3/$4$is_args$args;

    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $3;
    proxy_set_header Authorization "";

    add_header Host $3;

    proxy_buffering off;
    proxy_request_buffering off;

    proxy_read_timeout 60s;
}

location = /_storage_proxy_auth {
    proxy_pass http://web_app_server;
    proxy_pass_request_body off;
    proxy_set_header Content-Length "";

    proxy_set_header X-Original-URI $request_uri;

    proxy_read_timeout 10;
}

location ~ ^/v2/_catalog(.*)$ {
    proxy_pass http://registry_app_server;
    proxy_read_timeout 10;
    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.

    {% if enable_rate_limits %}
    limit_req zone=dynamicauth_heavy_http1 burst=1 nodelay;
    limit_req zone=dynamicauth_heavy_http2 burst=5 nodelay;
    {% endif %}
}

location /secscan/ {
    proxy_pass http://secscan_app_server;
}

{% if signing_enabled %}
location ~ ^/v2/(.+)/_trust/tuf/(.*)$ {
    set $upstream_tuf {{ tuf_server }};
    proxy_pass $upstream_tuf$uri;
    proxy_set_header Host "{{ tuf_host }}";
}
{% endif %}

location /cnr {
    proxy_buffering off;

    proxy_request_buffering off;

    proxy_pass http://registry_app_server;
    proxy_read_timeout 120;
    proxy_temp_path /tmp 1 2;

    client_max_body_size {{ maximum_cnr_layer_size }};

    {% if enable_rate_limits %}
    limit_req zone=staticauth burst=5 nodelay;
    {% endif %}
}

location /api/ {
    proxy_pass http://web_app_server;

    {% if enable_rate_limits %}
    limit_req zone=dynamicauth_heavy_http1 burst=25 nodelay;
    limit_req zone=dynamicauth_heavy_http2 burst=100 nodelay;
    {% endif %}

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

location /api/suconfig {
    proxy_pass http://web_app_server;

    # For suconfig, set our read timeout as super large for both DB migrations
    # and awaiting for secrets to be updated.
    proxy_read_timeout 2000;
}

# This block handles blob requests, and will receive a high volume of traffic, so we set the burst
# much higher.
location ~ /v2/([^/]+)\/[^/]+/blobs/ {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # NOTE: We disable gzip for HEAD requests because Docker issues them to determine the Content
    # Length of a blob. Unfortunately, nginx, seeing an empty body, overwrites the header with
    # a length of 0, which breaks this functionality.
    if ($request_method = HEAD) {
        gzip off;
    }

    proxy_buffering off;
    proxy_request_buffering off;
    proxy_read_timeout 2000;
    proxy_temp_path /tmp 1 2;

    client_max_body_size {{ maximum_layer_size }};

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    set $namespace $1;

    {% if enable_rate_limits %}
    limit_req zone=namespaced_dynamicauth_light_http1 burst=50 nodelay;
    limit_req zone=namespaced_dynamicauth_light_http2 burst=100 nodelay;
    {% endif %}

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block handles tags endpoint requests, for which we want to restrict traffic due to how
# heavy an operation it can be
location ~ /v2/([^/]+)\/[^/]+/tags/ {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    set $namespace $1;

    {% if enable_rate_limits %}
    limit_req zone=namespaced_dynamicauth_heavy_http1 burst=2 nodelay;
    limit_req zone=namespaced_dynamicauth_heavy_http2 burst=2 nodelay;
    {% endif %}

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block handles manifests endpoint requests, for which we want to restrict traffic heavier than
# the generic V2 operations, as it handles pushes and pulls.
location ~ /v2/([^/]+)\/[^/]+/manifests/ {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    set $namespace $1;

    {% if enable_rate_limits %}
    limit_req zone=namespaced_dynamicauth_light_http1 burst=10 nodelay;
    limit_req zone=namespaced_dynamicauth_light_http2 burst=50 nodelay;
    {% endif %}

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block applies to the beginning of a push or pull
location = /v2/auth {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    {% if enable_rate_limits %}
    limit_req zone=staticauth burst=2 nodelay;
    {% endif %}

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

# This block handles all other V2 requests, for which we can use a higher rate limit.
location ~ ^/v2 {
    # If we're being accessed via v1.quay.io, pretend we don't support v2.
    if ($host = "v1.quay.io") {
        return 404;
    }

    # NOTE: We disable gzip for HEAD requests because Docker issues them to determine the Content
    # Length of a blob. Unfortunately, nginx, seeing an empty body, overwrites the header with
    # a length of 0, which breaks this functionality. Included here for completeness.
    if ($request_method = HEAD) {
        gzip off;
    }

    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;

    {% if enable_rate_limits %}
    limit_req zone=dynamicauth_very_light_http1 burst=20 nodelay;
    limit_req zone=dynamicauth_very_light_http2 burst=80 nodelay;
    {% endif %}

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

location /v1/ {
    # Setting ANY header clears all inherited proxy_set_header directives
    proxy_set_header X-Forwarded-For $proper_forwarded_for;
    proxy_set_header X-Forwarded-Proto $proper_scheme;
    proxy_set_header Host $host;

    proxy_buffering off;

    proxy_request_buffering off;

    proxy_http_version 1.1;

    proxy_pass http://registry_app_server;
    proxy_temp_path /tmp 1 2;

    client_max_body_size {{ maximum_layer_size }};

    {% if enable_rate_limits %}
    limit_req zone=dynamicauth_heavy_http1 burst=5 nodelay;
    limit_req zone=dynamicauth_heavy_http2 burst=25 nodelay;
    {% endif %}

    keepalive_timeout 0;  # Disables HTTP 1.1 keep-alive and forces round-robin.
}

location = /v1/_ping {
    add_header Content-Type text/plain;
    add_header X-Docker-Registry-Version 0.6.0;
    add_header X-Docker-Registry-Standalone 0;
    return 200 'true';
}

location /static/ {
    # checks for static file, if not found proxy to app
    alias {{static_dir}}/;
    error_page 404 /404;
}

error_page 502 {{static_dir}}/502.html;
27 local-dev/scripts/clean.sh Executable file
@@ -0,0 +1,27 @@
#!/bin/bash

# this script expects to be run from the root of
# the quay repository.

set -e

Files=(
    'util/ipresolver/aws-ip-ranges.json'
    'revision_head'
    'local-dev/jwtproxy_conf.yaml'
    'local-dev/mitm.cert'
    'local-dev/mitm.key'
    'local-dev/quay.kid'
    'local-dev/quay.pem'
    'local-dev/supervisord.conf'
    'local-dev/__pycache__'
    'local-dev/*.sock'
    'node_modules'
    'static/webfonts/'
    'supervisord.log'
    'supervisord.pid'
)

# $file is deliberately left unquoted so the *.sock glob expands
for file in "${Files[@]}"; do
    rm -rf $file
done
71 local-dev/stack/config.yaml Normal file
@@ -0,0 +1,71 @@
SUPER_USERS:
  - admin
AUTHENTICATION_TYPE: Database
BITTORRENT_FILENAME_PEPPER: 0ee18f90-5b6d-42d2-ab5e-ec9fcd846272
BUILDLOGS_REDIS:
  host: quay-redis
  port: 6379
DATABASE_SECRET_KEY: '30060361640793187613697366923211113205676925445650250274752125083971638376224'
DB_URI: postgresql://quay:quay@quay-db/quay
DEFAULT_TAG_EXPIRATION: 2w
DISTRIBUTED_STORAGE_CONFIG:
  default:
    - LocalStorage
    - storage_path: /datastorage/registry
DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: []
DISTRIBUTED_STORAGE_PREFERENCE:
  - default
ENTERPRISE_LOGO_URL: /static/img/quay-horizontal-color.svg
EXTERNAL_TLS_TERMINATION: true
FEATURE_ACI_CONVERSION: false
FEATURE_ANONYMOUS_ACCESS: true
FEATURE_APP_REGISTRY: false
FEATURE_APP_SPECIFIC_TOKENS: true
FEATURE_BUILD_SUPPORT: false
FEATURE_CHANGE_TAG_EXPIRATION: true
FEATURE_DIRECT_LOGIN: true
FEATURE_MAILING: false
FEATURE_PARTIAL_USER_AUTOCOMPLETE: true
FEATURE_REPO_MIRROR: false
FEATURE_REQUIRE_TEAM_INVITE: true
FEATURE_RESTRICTED_V1_PUSH: false
FEATURE_SECURITY_NOTIFICATIONS: true
FEATURE_SECURITY_SCANNER: true
FEATURE_USERNAME_CONFIRMATION: true
FEATURE_USER_CREATION: true
FEATURE_USER_LOG_ACCESS: true
GITHUB_LOGIN_CONFIG: {}
GITHUB_TRIGGER_CONFIG: {}
GITLAB_TRIGGER_KIND: {}
GPG2_PRIVATE_KEY_FILENAME: signing-private.gpg
GPG2_PUBLIC_KEY_FILENAME: signing-public.gpg
LOG_ARCHIVE_LOCATION: default
MAIL_DEFAULT_SENDER: support@quay.io
MAIL_PORT: 587
MAIL_USE_TLS: true
PREFERRED_URL_SCHEME: http
REGISTRY_TITLE: Red Hat Quay
REGISTRY_TITLE_SHORT: Red Hat Quay
REPO_MIRROR_SERVER_HOSTNAME: null
REPO_MIRROR_TLS_VERIFY: true
SECURITY_SCANNER_V4_ENDPOINT: http://localhost:6000
SECURITY_SCANNER_V4_NAMESPACE_WHITELIST:
  - "clairv4-org"
SECURITY_SCANNER_ISSUER_NAME: security_scanner
SERVER_HOSTNAME: localhost:8080
SETUP_COMPLETE: true
SIGNING_ENGINE: gpg2
TAG_EXPIRATION_OPTIONS:
  - 0s
  - 1d
  - 1w
  - 2w
  - 4w
TEAM_RESYNC_STALE_TIME: 60m
TESTING: false
USERFILES_LOCATION: default
USERFILES_PATH: userfiles/
USER_EVENTS_REDIS:
  host: quay-redis
  port: 6379
USE_CDN: false
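A quick way to sanity-check this file before bringing the stack up is to load it with PyYAML; the assertions below simply mirror values from the config above:

    # sanity-check local-dev/stack/config.yaml; assumes PyYAML is installed
    import yaml

    with open("local-dev/stack/config.yaml") as f:
        config = yaml.safe_load(f)

    # the local dev stack reaches the docker-compose services by hostname
    assert config["DB_URI"] == "postgresql://quay:quay@quay-db/quay"
    assert config["BUILDLOGS_REDIS"]["host"] == "quay-redis"
    print("config parses; server hostname:", config["SERVER_HOSTNAME"])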
263 local-dev/supervisord.conf.jnj Normal file
@@ -0,0 +1,263 @@
[supervisord]
nodaemon=true

[unix_http_server]
file=%(ENV_QUAYCONF)s/supervisord.sock
user=root

[supervisorctl]
serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock

[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

[eventlistener:stdout]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
{%- if logdriver == "syslog" %}
command = supervisor_logging
{% else %}
command = supervisor_stdout
result_handler = supervisor_stdout:event_handler
{% endif -%}
buffer_size = 1024
events = PROCESS_LOG

;;; Run batch scripts
[program:blobuploadcleanupworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/blobuploadcleanupworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.blobuploadcleanupworker.blobuploadcleanupworker:create_gunicorn_worker()'
autostart = {{ config['blobuploadcleanupworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:buildlogsarchiver]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/buildlogsarchiver.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.buildlogsarchiver.buildlogsarchiver:create_gunicorn_worker()'
autostart = {{ config['buildlogsarchiver']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:builder]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m buildman.builder
autostart = {{ config['builder']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:chunkcleanupworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/chunkcleanupworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.chunkcleanupworker:create_gunicorn_worker()'
autostart = {{ config['chunkcleanupworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:expiredappspecifictokenworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/expiredappspecifictokenworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.expiredappspecifictokenworker:create_gunicorn_worker()'
autostart = {{ config['expiredappspecifictokenworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:exportactionlogsworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/exportactionlogsworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.exportactionlogsworker:create_gunicorn_worker()'
autostart = {{ config['exportactionlogsworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:gcworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/gcworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.gc.gcworker:create_gunicorn_worker()'
autostart = {{ config['gcworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:globalpromstats]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/globalpromstats.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.globalpromstats.globalpromstats:create_gunicorn_worker()'
autostart = {{ config['globalpromstats']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:logrotateworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/logrotateworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.logrotateworker:create_gunicorn_worker()'
autostart = {{ config['logrotateworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:repositorygcworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/repositorygcworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.repositorygcworker:create_gunicorn_worker()'
autostart = {{ config['repositorygcworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:namespacegcworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/namespacegcworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.namespacegcworker:create_gunicorn_worker()'
autostart = {{ config['namespacegcworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:notificationworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/notificationworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.notificationworker.notificationworker:create_gunicorn_worker()'
autostart = {{ config['notificationworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:queuecleanupworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/queuecleanupworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.queuecleanupworker:create_gunicorn_worker()'
autostart = {{ config['queuecleanupworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:repositoryactioncounter]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/repositoryactioncounter.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.repositoryactioncounter:create_gunicorn_worker()'
autostart = {{ config['repositoryactioncounter']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:securityworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/securityworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.securityworker.securityworker:create_gunicorn_worker()'
autostart = {{ config['securityworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:storagereplication]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/storagereplication.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.storagereplication:create_gunicorn_worker()'
autostart = {{ config['storagereplication']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:teamsyncworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/teamsyncworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.teamsyncworker.teamsyncworker:create_gunicorn_worker()'
autostart = {{ config['teamsyncworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

;;; Run interactive scripts
[program:dnsmasq]
command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053
autostart = {{ config['dnsmasq']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:gunicorn-registry]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s,
  DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application
autostart = {{ config['gunicorn-registry']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:gunicorn-secscan]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application
autostart = {{ config['gunicorn-secscan']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:gunicorn-web]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application
autostart = {{ config['gunicorn-web']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:jwtproxy]
command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml
autostart = {{ config['jwtproxy']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:memcache]
command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080
autostart = {{ config['memcache']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:nginx]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf
autostart = {{ config['nginx']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:pushgateway]
command=/usr/local/bin/pushgateway
autostart = {{ config['pushgateway']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:servicekey]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/servicekey.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.servicekeyworker.servicekeyworker:create_gunicorn_worker()'
autostart = {{ config['servicekey']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:manifestbackfillworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/manifestbackfillworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.manifestbackfillworker:create_gunicorn_worker()'
autostart = {{ config['manifestbackfillworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:securityscanningnotificationworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/securityscanningnotificationworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.securityscanningnotificationworker:create_gunicorn_worker()'
autostart = {{ config['securityscanningnotificationworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:repomirrorworker]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -b 'unix:/tmp/repomirrorworker.sock' -c %(ENV_QUAYCONF)s/gunicorn_worker.py 'workers.repomirrorworker.repomirrorworker:create_gunicorn_worker()'
autostart = {{ config['repomirrorworker']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true

[program:config-editor]
environment=
  PYTHONPATH=%(ENV_QUAYDIR)s,
  CONFIG_APP_PASSWORD=%(ENV_CONFIG_APP_PASSWORD)s,
  ENV_OPERATOR_ENDPOINT=%(ENV_OPERATOR_ENDPOINT)s,
  CONFIG_EDITOR_STATIC_CONTENT_PATH=%(ENV_QUAYDIR)s/config_app/static
command=config-tool editor --config-dir /conf/stack --password %(ENV_CONFIG_APP_PASSWORD)s --operator-endpoint %(ENV_OPERATOR_ENDPOINT)s --readonly-fieldgroups %(ENV_QUAY_CONFIG_READ_ONLY_FIELD_GROUPS)s
autostart = {{ config['config-editor']['autostart'] }}
stdout_events_enabled = true
stderr_events_enabled = true
\ No newline at end of file
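Every worker above is launched with -c %(ENV_QUAYCONF)s/gunicorn_worker.py. That gunicorn config file is not part of this section; a hypothetical sketch of what it could contain to produce the hot-reload behavior described in the commit message (all values illustrative, not the actual file):

    # hypothetical gunicorn_worker.py config module; the real file is not shown here
    workers = 1    # one gunicorn worker per quay worker (the 1:1 ratio noted below)
    reload = True  # restart the worker when its code changes: local-dev hot reload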
@@ -1,6 +1,7 @@
 import logging
 import json
 import time
+import os

 from collections import namedtuple

@@ -27,7 +28,10 @@ logger = logging.getLogger(__name__)

 def _get_aws_ip_ranges():
     try:
-        with open("util/ipresolver/aws-ip-ranges.json", "r") as f:
+        # resolve absolute path to file
+        path = os.path.dirname(os.path.abspath(__file__))
+        file_path = os.path.join(path, "aws-ip-ranges.json")
+        with open(file_path, "r") as f:
             return json.loads(f.read())
     except IOError:
         logger.exception("Could not load AWS IP Ranges")
@@ -77,7 +81,12 @@ class NoopIPResolver(IPResolverInterface):
 class IPResolver(IPResolverInterface):
     def __init__(self, app):
         self.app = app
-        self.geoip_db = geoip2.database.Reader("util/ipresolver/GeoLite2-Country.mmdb")
+
+        # resolve absolute path to file
+        path = os.path.dirname(os.path.abspath(__file__))
+        file_path = os.path.join(path, "GeoLite2-Country.mmdb")
+        self.geoip_db = geoip2.database.Reader(file_path)
+
         self.amazon_ranges = None
         self.sync_token = None
@@ -9,6 +9,7 @@ from workers.blobuploadcleanupworker.models_pre_oci import pre_oci_model as mode
 from workers.worker import Worker
 from util.log import logfile_path
 from util.locking import GlobalLock, LockNotAcquiredException
+from workers.gunicorn_worker import GunicornWorker


 logger = logging.getLogger(__name__)
@@ -70,6 +71,19 @@ class BlobUploadCleanupWorker(Worker):
             logger.debug("Removed stale blob upload %s", stale_upload.uuid)


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(__name__, app, BlobUploadCleanupWorker(), True)
+    return worker
+
+
 if __name__ == "__main__":
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
     worker = BlobUploadCleanupWorker()
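The GunicornWorker class imported in these hunks lives in workers/gunicorn_worker.py, which is not included in this section. A minimal sketch of the shape such a delegate could take, assuming gunicorn invokes the returned object as a WSGI application while the quay worker loop runs on a background thread:

    # a minimal sketch, NOT the actual workers/gunicorn_worker.py
    import threading

    class GunicornWorker:
        """Wraps a quay worker so gunicorn can run (and hot-reload) it.

        name:         module name, useful for logging
        app:          the Flask application, served over WSGI
        worker:       the quay worker instance to run
        feature_flag: when False, the worker loop is never started
        """

        def __init__(self, name, app, worker, feature_flag):
            self.app = app
            if feature_flag:
                # run the worker loop off the main thread so gunicorn stays
                # responsive and can restart the process on code changes
                thread = threading.Thread(target=worker.start, daemon=True)
                thread.start()

        def __call__(self, environ, start_response):
            # delegate WSGI requests to the wrapped Flask app
            return self.app(environ, start_response)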
@@ -9,6 +9,7 @@ from data.database import CloseForLongOperation
 from util.streamingjsonencoder import StreamingJSONEncoder
 from workers.buildlogsarchiver.models_pre_oci import pre_oci_model as model
 from workers.worker import Worker
+from workers.gunicorn_worker import GunicornWorker


 logger = logging.getLogger(__name__)
@@ -64,6 +65,19 @@ class ArchiveBuildLogsWorker(Worker):
             logger.debug("Another worker pre-empted us when archiving: %s", to_archive.uuid)


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(__name__, app, ArchiveBuildLogsWorker(), True)
+    return worker
+
+
 if __name__ == "__main__":
     worker = ArchiveBuildLogsWorker()
     worker.start()
@@ -1,6 +1,7 @@
 import logging
 import time

+from workers.gunicorn_worker import GunicornWorker
 from app import app, storage, chunk_cleanup_queue
 from workers.queueworker import QueueWorker, JobException
 from util.log import logfile_path
@@ -33,6 +34,28 @@ class ChunkCleanupWorker(QueueWorker):
             raise JobException()


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    engines = set(
+        [config[0] for config in list(app.config.get("DISTRIBUTED_STORAGE_CONFIG", {}).values())]
+    )
+    feature_flag = "SwiftStorage" in engines
+    worker = GunicornWorker(
+        __name__,
+        app,
+        ChunkCleanupWorker(chunk_cleanup_queue, poll_period_seconds=POLL_PERIOD_SECONDS),
+        feature_flag,
+    )
+    return worker
+
+
 if __name__ == "__main__":
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)

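For the chunk cleanup worker the flag is derived from the configured storage engines rather than a feature toggle. Evaluated against the DISTRIBUTED_STORAGE_CONFIG in local-dev/stack/config.yaml shown earlier, it comes out False, so the worker registers with gunicorn but never starts its loop:

    # evaluate the feature flag above against the local-dev storage config
    storage_config = {"default": ["LocalStorage", {"storage_path": "/datastorage/registry"}]}
    engines = set(config[0] for config in storage_config.values())
    print(engines)                    # {'LocalStorage'}
    print("SwiftStorage" in engines)  # False -> chunk cleanup stays disabled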
@@ -8,7 +8,7 @@ from data import model
 from workers.worker import Worker
 from util.log import logfile_path
 from util.timedeltastring import convert_to_timedelta
-
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -37,6 +37,22 @@ class ExpiredAppSpecificTokenWorker(Worker):
         return True


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    feature_flag = (features.APP_SPECIFIC_TOKENS) or (
+        app.config.get("EXPIRED_APP_SPECIFIC_TOKEN_GC") is not None
+    )
+    worker = GunicornWorker(__name__, app, ExpiredAppSpecificTokenWorker(), feature_flag)
+    return worker
+
+
 if __name__ == "__main__":
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)

@@ -18,6 +18,7 @@ from data.logs_model.interface import LogsIterationTimeout
 from workers.queueworker import QueueWorker
 from util.log import logfile_path
 from util.useremails import send_logs_exported_email
+from workers.gunicorn_worker import GunicornWorker


 logger = logging.getLogger(__name__)
@@ -289,6 +290,22 @@ def _parse_time(specified_time):
     return None


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    log_worker = ExportActionLogsWorker(
+        export_action_logs_queue, poll_period_seconds=POLL_PERIOD_SECONDS
+    )
+    worker = GunicornWorker(__name__, app, log_worker, features.LOG_EXPORT)
+    return worker
+
+
 if __name__ == "__main__":
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)

@@ -12,7 +12,7 @@ from data.model.repository import get_random_gc_policy
 from data.model.gc import garbage_collect_repo
 from workers.worker import Worker
 from util.locking import GlobalLock, LockNotAcquiredException
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -70,6 +70,19 @@ class GarbageCollectionWorker(Worker):
             logger.debug("Could not acquire repo lock for garbage collection")


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(__name__, app, GarbageCollectionWorker(), features.GARBAGE_COLLECTION)
+    return worker
+
+
 if __name__ == "__main__":
     if not features.GARBAGE_COLLECTION:
         logger.debug("Garbage collection is disabled; skipping")
@@ -9,7 +9,7 @@ from data.database import UseThenDisconnect
 from util.locking import GlobalLock, LockNotAcquiredException
 from util.log import logfile_path
 from workers.worker import Worker
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -65,6 +65,20 @@ class GlobalPrometheusStatsWorker(Worker):
         robot_rows.set(get_robot_count())


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    feature_flag = app.config.get("PROMETHEUS_PUSHGATEWAY_URL") is not None
+    worker = GunicornWorker(__name__, app, GlobalPrometheusStatsWorker(), feature_flag)
+    return worker
+
+
 def main():
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
34
workers/gunicorn_worker.py
Normal file
@@ -0,0 +1,34 @@
+import logging.config
+import threading
+from multiprocessing import Process
+from util.log import logfile_path
+
+
+class GunicornWorker:
+    """
+    GunicornWorker allows a quay worker to run as a Gunicorn worker.
+    The Quay worker is launched as a sub-process and this class serves as a delegate
+    for the WSGI app.
+
+    name: the quay worker this class delegates for.
+    app: a WSGI framework application object.
+    worker: a quay worker type which implements a .start method.
+    feature_flag: a boolean value determining if the worker process should be launched.
+    """
+
+    def __init__(self, name, app, worker, feature_flag):
+        logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
+
+        self.app = app
+        self.name = name
+        self.worker = worker
+        self.feature_flag = feature_flag
+        self.logger = logging.getLogger(name)
+
+        if self.feature_flag:
+            self.logger.debug("starting {} process".format(self.name))
+            # run the quay worker in a child process; the parent process
+            # remains the WSGI delegate served by gunicorn
+            p = Process(target=self.worker.start)
+            p.start()
+
+    def __call__(self, environ, start_response):
+        return self.app(environ, start_response)
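Because GunicornWorker is itself a WSGI callable, gunicorn can load each worker through its
application-factory syntax and re-run the factory whenever the reloader detects a code change,
which re-forks the underlying quay worker. A minimal sketch of such an invocation; the bind
address and flag choices are illustrative assumptions, not taken from this commit:

    # hypothetical local-dev invocation of one worker's factory
    gunicorn --reload --workers 1 \
        --bind unix:/tmp/queuecleanupworker.sock \
        'workers.queuecleanupworker:create_gunicorn_worker()'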
@@ -15,7 +15,7 @@ from util.log import logfile_path
 from util.streamingjsonencoder import StreamingJSONEncoder
 from util.timedeltastring import convert_to_timedelta
 from workers.worker import Worker
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -131,6 +131,20 @@ def _write_logs(filename, logs, log_archive):
     log_archive.store_file(tempfile, JSON_MIMETYPE, content_encoding="gzip", file_id=filename)


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    feature_flag = (features.ACTION_LOG_ROTATION) or (None not in [SAVE_PATH, SAVE_LOCATION])
+    worker = GunicornWorker(__name__, app, LogRotateWorker(), feature_flag)
+    return worker
+
+
 def main():
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
@@ -11,7 +11,7 @@ from workers.worker import Worker
 from util.migrate.allocator import yield_random_entries
 from util.bytes import Bytes
 from util.log import logfile_path
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -85,6 +85,21 @@ class ManifestBackfillWorker(Worker):
         return True


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(
+        __name__, app, ManifestBackfillWorker(), features.MANIFEST_SIZE_BACKFILL
+    )
+    return worker
+
+
 def main():
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
|
@ -3,11 +3,12 @@ import time
|
|||||||
|
|
||||||
import features
|
import features
|
||||||
|
|
||||||
from app import namespace_gc_queue, all_queues
|
from app import app, namespace_gc_queue, all_queues
|
||||||
from data import model
|
from data import model
|
||||||
from workers.queueworker import QueueWorker, WorkerSleepException
|
from workers.queueworker import QueueWorker, WorkerSleepException
|
||||||
from util.log import logfile_path
|
from util.log import logfile_path
|
||||||
from util.locking import GlobalLock, LockNotAcquiredException
|
from util.locking import GlobalLock, LockNotAcquiredException
|
||||||
|
from workers.gunicorn_worker import GunicornWorker
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -39,6 +40,24 @@ class NamespaceGCWorker(QueueWorker):
|
|||||||
raise Exception("GC interrupted; will retry")
|
raise Exception("GC interrupted; will retry")
|
||||||
|
|
||||||
|
|
||||||
|
def create_gunicorn_worker():
|
||||||
|
"""
|
||||||
|
follows the gunicorn application factory pattern, enabling
|
||||||
|
a quay worker to run as a gunicorn worker thread.
|
||||||
|
|
||||||
|
this is useful when utilizing gunicorn's hot reload in local dev.
|
||||||
|
|
||||||
|
utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
|
||||||
|
"""
|
||||||
|
gc_worker = NamespaceGCWorker(
|
||||||
|
namespace_gc_queue,
|
||||||
|
poll_period_seconds=POLL_PERIOD_SECONDS,
|
||||||
|
reservation_seconds=NAMESPACE_GC_TIMEOUT,
|
||||||
|
)
|
||||||
|
worker = GunicornWorker(__name__, app, gc_worker, features.NAMESPACE_GARBAGE_COLLECTION)
|
||||||
|
return worker
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
|
logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
|
||||||
|
|
||||||
|
@@ -1,10 +1,11 @@
 import logging

-from app import notification_queue
+from app import app, notification_queue
 from notifications.notificationmethod import NotificationMethod, InvalidNotificationMethodException
 from notifications.notificationevent import NotificationEvent, InvalidNotificationEventException
 from workers.notificationworker.models_pre_oci import pre_oci_model as model
 from workers.queueworker import QueueWorker, JobException
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -37,6 +38,22 @@ class NotificationWorker(QueueWorker):
             raise exc


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    note_worker = NotificationWorker(
+        notification_queue, poll_period_seconds=10, reservation_seconds=30, retry_after_seconds=30
+    )
+    worker = GunicornWorker(__name__, app, note_worker, True)
+    return worker
+
+
 if __name__ == "__main__":
     worker = NotificationWorker(
         notification_queue, poll_period_seconds=10, reservation_seconds=30, retry_after_seconds=30
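These factories are what let each worker run under a process supervisor via gunicorn in local
dev instead of its __main__ entrypoint. A hypothetical supervisord stanza, purely
illustrative; the program name, command, and flags are assumptions, not taken from this
commit's local-dev/supervisord.conf:

    ; hypothetical local-dev entry; all values are illustrative assumptions
    [program:notificationworker]
    command=gunicorn --reload 'workers.notificationworker.notificationworker:create_gunicorn_worker()'
    autostart=true
    autorestart=true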
@@ -6,7 +6,7 @@ from app import app
 from data.database import UseThenDisconnect
 from data.queue import delete_expired
 from workers.worker import Worker
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -37,6 +37,19 @@ class QueueCleanupWorker(Worker):
             return


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(__name__, app, QueueCleanupWorker(), True)
+    return worker
+
+
 if __name__ == "__main__":
     worker = QueueCleanupWorker()
     worker.start()
@@ -11,7 +11,7 @@ from workers.repomirrorworker import process_mirrors
 from util.repomirror.validator import RepoMirrorConfigValidator
 from util.repomirror.skopeomirror import SkopeoMirror
 from util.log import logfile_path
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -38,6 +38,19 @@ class RepoMirrorWorker(Worker):
                 break


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(__name__, app, RepoMirrorWorker(), features.REPO_MIRROR)
+    return worker
+
+
 if __name__ == "__main__":
     if os.getenv("PYDEV_DEBUG", None):
         import pydevd_pycharm
@@ -11,7 +11,7 @@ from data import model, database
 from data.logs_model import logs_model
 from util.migrate.allocator import yield_random_entries
 from workers.worker import Worker, with_exponential_backoff
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -105,6 +105,21 @@ class RepositoryActionCountWorker(Worker):
         return True


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(
+        __name__, app, RepositoryActionCountWorker(), features.REPOSITORY_ACTION_COUNTER
+    )
+    return worker
+
+
 if __name__ == "__main__":
     if not features.REPOSITORY_ACTION_COUNTER:
         logger.info("Repository action count is disabled; skipping")
@@ -3,11 +3,12 @@ import time

 import features

-from app import repository_gc_queue, all_queues
+from app import repository_gc_queue, all_queues, app
 from data import model, database
 from workers.queueworker import QueueWorker, WorkerSleepException
 from util.log import logfile_path
 from util.locking import GlobalLock, LockNotAcquiredException
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -47,6 +48,25 @@ class RepositoryGCWorker(QueueWorker):
             raise Exception("GC interrupted; will retry")


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    gc_worker = RepositoryGCWorker(
+        repository_gc_queue,
+        poll_period_seconds=POLL_PERIOD_SECONDS,
+        reservation_seconds=REPOSITORY_GC_TIMEOUT,
+    )
+
+    worker = GunicornWorker(__name__, app, gc_worker, features.REPOSITORY_GARBAGE_COLLECTION)
+    return worker
+
+
 if __name__ == "__main__":
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
@@ -9,6 +9,7 @@ from notifications import notification_batch
 from workers.queueworker import QueueWorker, JobException
 from util.log import logfile_path
 from util.secscan import PRIORITY_LEVELS
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -131,6 +132,23 @@ class SecurityScanningNotificationWorker(QueueWorker):
             self.extend_processing(_PROCESSING_SECONDS_EXPIRATION, job_details)


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    feature_flag = features.SECURITY_SCANNER and features.SECURITY_NOTIFICATIONS
+    note_worker = SecurityScanningNotificationWorker(
+        secscan_notification_queue, poll_period_seconds=_POLL_PERIOD_SECONDS
+    )
+    worker = GunicornWorker(__name__, app, note_worker, feature_flag)
+    return worker
+
+
 if __name__ == "__main__":
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
@@ -3,9 +3,11 @@ import os
 import time

 import features
+import threading

 from app import app
 from data.secscan_model import secscan_model
+from workers.gunicorn_worker import GunicornWorker
 from workers.worker import Worker
 from util.log import logfile_path
 from endpoints.v2 import v2_bp

@@ -30,6 +32,20 @@ class SecurityWorker(Worker):
         self._next_token = self._model.perform_indexing(self._next_token)


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    app.register_blueprint(v2_bp, url_prefix="/v2")
+    worker = GunicornWorker(__name__, app, SecurityWorker(), features.SECURITY_SCANNER)
+    return worker
+
+
 if __name__ == "__main__":
     if os.getenv("PYDEV_DEBUG", None):
         import pydevd_pycharm
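The blueprint registration in this factory stands out from the others: registering v2_bp on
the delegate app presumably lets url_for resolve registry-facing v2 routes inside the worker
process during indexing. A minimal sketch of the idea; the endpoint name and arguments are
hypothetical, not verified against this codebase:

    # hypothetical: with v2_bp registered, url_for can resolve v2 routes
    from flask import url_for

    with app.test_request_context():
        blob_url = url_for("v2.download_blob", repository="ns/repo", digest="sha256:...")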
@@ -6,7 +6,7 @@ from prometheus_client import Counter
 from app import app, instance_keys
 from workers.servicekeyworker.models_pre_oci import pre_oci_model as model
 from workers.worker import Worker
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -56,6 +56,19 @@ class ServiceKeyWorker(Worker):
         instance_key_renewal_self.labels(True).inc()


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    worker = GunicornWorker(__name__, app, ServiceKeyWorker(), True)
+    return worker
+
+
 if __name__ == "__main__":
     worker = ServiceKeyWorker()
     worker.start()
@@ -8,7 +8,7 @@ from data.database import CloseForLongOperation
 from data import model
 from workers.queueworker import QueueWorker, WorkerUnhealthyException, JobException
 from util.log import logfile_path
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -170,6 +170,33 @@ class StorageReplicationWorker(QueueWorker):
             )


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    has_local_storage = False
+
+    if features.STORAGE_REPLICATION:
+        for storage_type, _ in list(app.config.get("DISTRIBUTED_STORAGE_CONFIG", {}).values()):
+            if storage_type == "LocalStorage":
+                has_local_storage = True
+                break
+
+    feature_flag = (features.STORAGE_REPLICATION) and (not has_local_storage)
+    repl_worker = StorageReplicationWorker(
+        image_replication_queue,
+        poll_period_seconds=POLL_PERIOD_SECONDS,
+        reservation_seconds=RESERVATION_SECONDS,
+    )
+    worker = GunicornWorker(__name__, app, repl_worker, feature_flag)
+    return worker
+
+
 if __name__ == "__main__":
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
@@ -8,7 +8,7 @@ from data.users.teamsync import sync_teams_to_groups
 from workers.worker import Worker
 from util.timedeltastring import convert_to_timedelta
 from util.log import logfile_path
+from workers.gunicorn_worker import GunicornWorker

 logger = logging.getLogger(__name__)

@@ -30,6 +30,20 @@ class TeamSynchronizationWorker(Worker):
         sync_teams_to_groups(authentication, STALE_CUTOFF)


+def create_gunicorn_worker():
+    """
+    follows the gunicorn application factory pattern, enabling
+    a quay worker to run as a gunicorn worker thread.
+
+    this is useful when utilizing gunicorn's hot reload in local dev.
+
+    utilizing this method will enforce a 1:1 quay worker to gunicorn worker ratio.
+    """
+    feature_flag = (features.TEAM_SYNCING) and (authentication.federated_service)
+    worker = GunicornWorker(__name__, app, TeamSynchronizationWorker(), feature_flag)
+    return worker
+
+
 def main():
     logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)