
Refactoring: Copy Minikube, Erlang and Prometheus example

This commit copies Lukas Larsson's (@garazdawi) and Siri Hansen's
(@sirihansen) example from:

https://github.com/erlang/docker-erlang-example/tree/minikube-prom-graf
Kjell Winblad
2019-05-20 14:18:17 +02:00
parent 81c88928de
commit 6b805ff343
23 changed files with 1169 additions and 0 deletions

View File

@ -0,0 +1,2 @@
_build/
rebar.lock

View File

@ -0,0 +1,56 @@
sudo: required
dist: xenial
addons:
apt:
packages:
- curl
env:
- CHANGE_MINIKUBE_NONE_USER=true
before_script:
# Download minikube.
- MINIKUBE_VERSION=latest
- curl -Lo minikube https://storage.googleapis.com/minikube/releases/$MINIKUBE_VERSION/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
# Download kubectl, which is a requirement for using minikube.
- KUBERNETES_VERSION=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
- curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
# Test that it works
- kubectl -h
- sudo minikube start -v 7 --logtostderr --vm-driver=none --kubernetes-version "$KUBERNETES_VERSION"
# Fix the kubectl context, as it's often stale.
- minikube update-context
# Wait for Kubernetes to be up and ready.
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done
script:
- kubectl cluster-info
# kube-addon-manager is responsible for managing other kubernetes components, such as kube-dns, dashboard, storage-provisioner..
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl -n kube-system get pods -lcomponent=kube-addon-manager -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 5;echo "waiting for kube-addon-manager to be available"; kubectl get pods --all-namespaces; done
# Wait for kube-dns to be ready.
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl -n kube-system get pods -lk8s-app=kube-dns -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 5;echo "waiting for kube-dns to be available"; kubectl get pods --all-namespaces; done
- kubectl create service nodeport dockerwatch --tcp=8080:8080 --tcp=8443:8443
- kubectl get service
- ./create-certs $(minikube ip)
- kubectl create secret generic dockerwatch --from-file=ssl/
- kubectl get secret
- docker build -t dockerwatch .
- kubectl apply -f dockerwatch-deploy.yaml
# Wait for dockerwatch to be ready.
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl -n default get pods -lapp=dockerwatch -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 5;echo "waiting for dockerwatch to be available"; kubectl get pods --all-namespaces; done
- HTTP=$(minikube service dockerwatch --url | head -1)
- HTTPS=$(minikube service dockerwatch --url --https | tail -1)
- "curl -v -H 'Content-Type: application/json' -X POST -d '' $HTTP/cnt"
- "curl -v -H 'Content-Type: application/json' -X POST -d '{}' $HTTP/cnt"
- "curl -v --cacert ssl/dockerwatch-ca.pem -H 'Accept: application/json' $HTTPS/"
- "curl -v --cacert ssl/dockerwatch-ca.pem -H 'Accept: application/json' $HTTPS/cnt"
- kubectl apply -f monitoring-namespace.yaml
- kubectl apply -f prometheus-config.yaml
- kubectl apply -f prometheus-deployment.yaml
- kubectl apply -f prometheus-service.yaml
- kubectl apply -f grafana-deployment.yaml
- kubectl apply -f grafana-service.yaml
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl -n monitoring get pods -lname=grafana -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 5;echo "waiting for grafana to be available"; kubectl get pods --all-namespaces; done
- JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl -n monitoring get pods -lname=prometheus -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 5;echo "waiting for prometheus to be available"; kubectl get pods --all-namespaces; done

View File

@ -0,0 +1,26 @@
## Docker Cheatsheet
* Remove all containers that are not running:
$ docker rm $(docker ps -aq -f status=exited)
* Remove dangling images:
$ docker rmi $(docker images -f dangling=true -q)
* Attach to a running container:
$ docker exec -i -t NameOrId /bin/sh
## Core generation
* `/proc/sys/core_pattern` is persisted on the host, so take note of
its contents before starting any experiments.
* Docker's `--privileged` flag is necessary for a gdb session to catch the stack;
without it, gdb just complains about "No stack." and there is little documentation
of this behaviour to be found online.
* Setting ulimits on `docker run` works fine, although the syntax is hard to find
(see the `docker run` sketch below); a docker-compose example:
ulimits:
  core: -1
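
For reference, the equivalent flags on plain `docker run` (a sketch; the image name is a placeholder):
$ docker run --privileged --ulimit core=-1 -it some-image /bin/sh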

View File

@ -0,0 +1,42 @@
# Build stage 0
FROM erlang:alpine
# Install some libs
RUN apk add --no-cache g++ && \
apk add --no-cache make
# Install Rebar3
RUN mkdir -p /buildroot/rebar3/bin
ADD https://s3.amazonaws.com/rebar3/rebar3 /buildroot/rebar3/bin/rebar3
RUN chmod a+x /buildroot/rebar3/bin/rebar3
# Setup Environment
ENV PATH=/buildroot/rebar3/bin:$PATH
# Reset working directory
WORKDIR /buildroot
# Copy our Erlang test application
COPY dockerwatch dockerwatch
# And build the release
WORKDIR dockerwatch
RUN rebar3 as prod release
# Build stage 1
FROM alpine
# Install some libs
RUN apk add --no-cache openssl && \
apk add --no-cache ncurses-libs && \
apk add --no-cache libstdc++
# Install the released application
COPY --from=0 /buildroot/dockerwatch/_build/prod/rel/dockerwatch /dockerwatch
# Expose relevant ports
EXPOSE 8080
EXPOSE 8443
CMD ["/dockerwatch/bin/dockerwatch", "foreground"]
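# A local smoke test, as a sketch (assumes certificates were generated into ./ssl
# with the create-certs script; the release reads them from /etc/ssl/certs, and
# port 9000 serves the Prometheus metrics endpoint):
#   docker build -t dockerwatch .
#   docker run -p 8080:8080 -p 8443:8443 -p 9000:9000 -v "$PWD/ssl":/etc/ssl/certs dockerwatch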

View File

@ -0,0 +1,23 @@
## Generating Certificate
Generate certificates in subdirectory `ssl`.
### Root CA
$ openssl genrsa -out dockerwatch-ca.key 4096
$ openssl req -x509 -new -nodes -key dockerwatch-ca.key -sha256 -days 1024 -out dockerwatch-ca.pem
### Server Certificate
$ openssl genrsa -out dockerwatch-server.key 4096
Create a certificate signing request:
$ openssl req -new -key dockerwatch-server.key -out dockerwatch-server.csr
The most important field is `Common Name (eg, YOUR name) []:`; answer `localhost`, which is the host name used in this example.
### Sign it
$ openssl x509 -req -in dockerwatch-server.csr -CA dockerwatch-ca.pem -CAkey dockerwatch-ca.key -CAcreateserial -out dockerwatch-server.pem -days 500 -sha256
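To sanity-check the result, the signed server certificate can be verified against the CA:
$ openssl verify -CAfile dockerwatch-ca.pem dockerwatch-server.pem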

View File

@ -0,0 +1,221 @@
# Using Minikube, Erlang and Prometheus
This is a quick demo of using minikube to run an Erlang node with prometheus and grafana.
The example we will use is the
[Docker Watch](http://github.com/erlang/docker-erlang-example/tree/master) node.
This demo assumes that you have done the
[Using Minikube](http://github.com/erlang/docker-erlang-example/tree/minikube-simple) demo.
This is only meant to be an example of how to get started. It is not the only,
nor necessarily the best, way to set up minikube with Erlang.
# Other Demos
* [Using Docker](http://github.com/erlang/docker-erlang-example/)
* [Using Docker: Logstash](http://github.com/erlang/docker-erlang-example/tree/logstash)
* [Using Docker Compose: Logstash/ElasticSearch/Kibana](http://github.com/erlang/docker-erlang-example/tree/elk)
* [Using Minikube: Simple](http://github.com/erlang/docker-erlang-example/tree/minikube-simple)
* [Using Minikube: Prometheus/Grafana](http://github.com/erlang/docker-erlang-example/tree/minikube-prom-graf)
* [Using Minikube: Distributed Erlang](http://github.com/erlang/docker-erlang-example/tree/minikube-dist)
* [Using Minikube: Encrypted Distributed Erlang](http://github.com/erlang/docker-erlang-example/tree/minikube-tls-dist)
# Prerequisites
To start with you should familiarize yourself with minikube through this guide:
https://kubernetes.io/docs/setup/minikube/
In a nutshell:
## Install
* [VirtualBox](https://www.virtualbox.org/wiki/Downloads)
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
* [minikube](https://github.com/kubernetes/minikube/releases)
## Start and test
> minikube start
> kubectl run hello-minikube --image=k8s.gcr.io/echoserver:1.10 --port=8080
> kubectl expose deployment hello-minikube --type=NodePort
> curl $(minikube service hello-minikube --url)
## Should print a lot of text
> kubectl delete services hello-minikube
> kubectl delete deployment hello-minikube
> minikube stop
# Deploying Dockerwatch and Prometheus
In this demo we will be doing the following:
* Extend dockerwatch with prometheus metrics support
* Create Deployments for Prometheus and Grafana
* Create a Service that will be used to access the dockerwatch API
* Create a Secret for our ssl keys
* Create a Deployment of dockerwatch that implements the Service
First however, make sure that the minikube cluster is started:
> minikube start
and that you have cloned this repo and checked out this branch:
> git clone https://github.com/erlang/docker-erlang-example
> cd docker-erlang-example
> git checkout minikube-prom-graf
## Extend dockerwatch
In this demo we will be using the [prometheus](https://hex.pm/packages/prometheus),
[prometheus\_process\_collector](https://hex.pm/packages/prometheus_process_collector)
and [prometheus\_cowboy](https://hex.pm/packages/prometheus_cowboy) hex packages to get
the instrumentation we need. So we need to add those packages to the rebar.config file.
```
{deps, [{jsone, "1.4.7"}, %% JSON Encode/Decode
{cowboy, "2.5.0"}, %% HTTP Server
{prometheus,"4.2.0"},
{prometheus_process_collector,"1.4.0"},
{prometheus_cowboy,"0.1.4"}]}.
```
We also make the corresponding modification to the app.src file:
```
{applications, [
kernel,
stdlib,
jsone,
cowboy,
prometheus,
prometheus_process_collector,
prometheus_cowboy
]},
```
We then need to add a new HTTP endpoint that serves scrape requests from the prometheus
server. This endpoint is commonly exposed on a dedicated port, 9000 in this example,
so we add another child to the dockerwatch supervisor.
```
PromConfig =
#{ env => #{ dispatch =>
cowboy_router:compile(
[{'_', [{"/metrics/[:registry]", prometheus_cowboy2_handler, []}]}]) }
},
Prometheus = ranch:child_spec(
cowboy_prometheus, 100, ranch_tcp,
[{port, 9000}],
cowboy_clear,
PromConfig),
```
We also need to add the correct instrumentation to the cowboy servers so that we
can measure things like requests per minute and the 95th percentile latency of
requests. This is done by modifying the cowboy config to include a `metrics_callback`
and two stream handlers.
```
CowConfig = #{ env => #{ dispatch => Dispatch },
metrics_callback => fun prometheus_cowboy2_instrumenter:observe/1,
stream_handlers => [cowboy_metrics_h, cowboy_stream_h] },
```
You can view the entire new supervisor module [here](dockerwatch/src/dockerwatch_sup.erl).
## Deploy Dockerwatch
Now we should deploy the dockerwatch service almost the same way as was done in
[Using Minikube: Simple](http://github.com/erlang/docker-erlang-example/tree/minikube-simple).
So:
```
> kubectl create service nodeport dockerwatch --tcp=8080:8080 --tcp=8443:8443
service/dockerwatch created
> ./create-certs $(minikube ip)
......
> kubectl create secret generic dockerwatch --from-file=ssl/
secret/dockerwatch created
> eval $(minikube docker-env)
> docker build -t dockerwatch .
```
We will have to modify the deployment somewhat from the original example:
```
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
## Name and labels of the Deployment
labels:
app: dockerwatch
name: dockerwatch
spec:
replicas: 1
selector:
matchLabels:
app: dockerwatch
template:
metadata:
labels:
app: dockerwatch
annotations: ## These annotations will tell prometheus to scrape us
prometheus.io/scrape: "true"
prometheus.io/port: "9000"
spec:
containers:
## The container to launch
- image: dockerwatch
name: dockerwatch
imagePullPolicy: Never
ports:
- containerPort: 8080
protocol: TCP
- containerPort: 8443
protocol: TCP
- containerPort: 9000 ## Expose the prometheus port
protocol: TCP
volumeMounts:
- name: kube-keypair
readOnly: true
mountPath: /etc/ssl/certs
volumes:
- name: kube-keypair
secret:
secretName: dockerwatch
EOF
```
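Once the dockerwatch pod is running, one way to sanity-check the metrics endpoint is to
port-forward the Prometheus port in one terminal and fetch it from another (a sketch; the
deployment name matches the manifest above):
```
> kubectl port-forward deployment/dockerwatch 9000:9000
> curl http://localhost:9000/metrics
```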
We can then set up prometheus and grafana following [the excellent example by bakins](https://github.com/bakins/minikube-prometheus-demo).
```
> kubectl apply -f monitoring-namespace.yaml
> kubectl apply -f prometheus-config.yaml
> kubectl apply -f prometheus-deployment.yaml
> kubectl apply -f prometheus-service.yaml
```
You should now be able to view the prometheus dashboard through the url given by:
> minikube service --namespace=monitoring prometheus
Then we can start Grafana:
```
> kubectl apply -f grafana-deployment.yaml
> kubectl apply -f grafana-service.yaml
```
Grafana can then be found under:
> minikube service --namespace=monitoring grafana
The username and password are both `admin`. You then need to add a new prometheus datasource
to grafana, either through the web UI or scripted as shown after the screenshot below.
The url to prometheus within the cluster is http://prometheus.monitoring.svc.cluster.local:9090.
There are some ready-made dashboards at https://github.com/deadtrickster/beam-dashboards that
can be imported to get something up and running quickly. If done correctly it could look like this:
![BEAM Dashboard](grafana-screenshot.png)
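
If you prefer to script the data source setup instead of clicking through the Grafana UI,
something like the following should work (a sketch using Grafana's HTTP API with the default
`admin`/`admin` credentials; adjust if you have changed the password):
```
> GRAFANA=$(minikube service --namespace=monitoring grafana --url)
> curl -u admin:admin -H 'Content-Type: application/json' -X POST \
    -d '{"name":"prometheus","type":"prometheus","access":"proxy","url":"http://prometheus.monitoring.svc.cluster.local:9090"}' \
    $GRAFANA/api/datasources
```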

View File

@ -0,0 +1,22 @@
#!/bin/sh
set -e
if [ ! -d ssl ]; then
mkdir ssl
fi
# Create the root CA (Certificate Authority)
openssl genrsa -out ssl/dockerwatch-ca.key 4096
## Certificate signing request for root CA
openssl req -x509 -new -nodes -key ssl/dockerwatch-ca.key -sha256 -days 1024 -subj "/C=SE/" -out ssl/dockerwatch-ca.pem
# Create the server certificate
openssl genrsa -out ssl/dockerwatch-server.key 4096
## Certificate signing request for server certificate
openssl req -new -key ssl/dockerwatch-server.key -subj "/C=SE/CN=$1/" -out ssl/dockerwatch-server.csr
## Sign the server certificate using the root CA
openssl x509 -req -in ssl/dockerwatch-server.csr -CA ssl/dockerwatch-ca.pem -CAkey ssl/dockerwatch-ca.key -CAcreateserial -out ssl/dockerwatch-server.pem -days 500 -sha256

View File

@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
## Name and labels of the Deployment
labels:
app: dockerwatch
name: dockerwatch
spec:
replicas: 1
selector:
matchLabels:
app: dockerwatch
template:
metadata:
labels:
app: dockerwatch
annotations: ## These annotations will tell prometheus to scrape us
prometheus.io/scrape: "true"
prometheus.io/port: "9000"
spec:
containers:
## The container to launch
- image: dockerwatch
name: dockerwatch
imagePullPolicy: Never ## Set to Never as we built the image in the cluster
ports:
- containerPort: 8080
protocol: TCP
- containerPort: 8443
protocol: TCP
- containerPort: 9000 ## Expose the prometheus port
protocol: TCP
volumeMounts:
- name: kube-keypair
readOnly: true
mountPath: /etc/ssl/certs
volumes:
- name: kube-keypair
secret:
secretName: dockerwatch

View File

@ -0,0 +1,5 @@
[%% Kernel/logger
{kernel, [{logger,[{handler,default,logger_std_h,#{}}]}
%%,{logger_level,info}
]}
].

View File

@ -0,0 +1,2 @@
-sname dockerwatch

View File

@ -0,0 +1,20 @@
{deps, [{jsone, "1.4.7"}, %% JSON Encode/Decode
{cowboy, "2.5.0"}, %% HTTP Server
{prometheus,"4.2.0"},
{prometheus_process_collector,"1.4.0"},
{prometheus_cowboy,"0.1.4"}]}.
{relx, [{release, {"dockerwatch", "1.0.0"}, [dockerwatch]},
{vm_args, "config/vm.args"},
{sys_config, "config/sys.config"},
{dev_mode, true},
{include_erts, false},
{extended_start_script, true}
]}.
{profiles, [{prod, [{relx, [{dev_mode, false},
{include_erts, true},
{include_src, false}]}]}
]}.
%% vim: ft=erlang

View File

@ -0,0 +1,19 @@
%% Feel free to use, reuse and abuse the code in this file.
{application, dockerwatch, [
{description, "Cowboy REST Hello World example."},
{vsn, "1.0.0"},
{modules, []},
{registered, [dockerwatch_sup]},
{applications, [
kernel,
stdlib,
jsone,
cowboy,
prometheus,
prometheus_process_collector,
prometheus_cowboy
]},
{mod, {dockerwatch_app, []}},
{env, []}
]}.

View File

@ -0,0 +1,45 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%
-module(dockerwatch).
-export([start_link/0, all/0, create/1, get/1, increment/2, decrement/2]).
-type counter() :: binary().
-spec start_link() -> {ok, pid()}.
start_link() ->
{ok, spawn_link(fun() -> ets:new(?MODULE, [named_table, public]),
receive after infinity -> ok end end)}.
-spec all() -> [counter()].
all() ->
ets:select(?MODULE, [{{'$1','_'},[],['$1']}]).
-spec create(counter()) -> ok | already_exists.
create(CounterName) ->
case ets:insert_new(?MODULE, {CounterName, 0}) of
true ->
ok;
false ->
already_exists
end.
-spec get(counter()) -> integer().
get(CounterName) ->
ets:lookup_element(?MODULE, CounterName, 2).
-spec increment(counter(), integer()) -> ok.
increment(CounterName, Howmuch) ->
_ = ets:update_counter(?MODULE, CounterName, [{2, Howmuch}]),
ok.
-spec decrement(counter(), integer()) -> ok.
decrement(CounterName, Howmuch) ->
_ = ets:update_counter(?MODULE, CounterName, [{2, -1 * Howmuch}]),
ok.
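%% Example usage from an Erlang shell, assuming the counter process has been
%% started by the supervisor:
%%   ok = dockerwatch:create(<<"cnt">>),
%%   ok = dockerwatch:increment(<<"cnt">>, 3),
%%   3 = dockerwatch:get(<<"cnt">>),
%%   [<<"cnt">>] = dockerwatch:all()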

View File

@ -0,0 +1,19 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch_app.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%
-module(dockerwatch_app).
-behaviour(application).
-export([start/2,stop/1]).
%% API.
start(_Type, _Args) ->
dockerwatch_sup:start_link().
stop(_State) ->
ok.

View File

@ -0,0 +1,103 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch_handler.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%
-module(dockerwatch_handler).
-export([init/2]).
-export([allowed_methods/2]).
-export([content_types_accepted/2]).
-export([content_types_provided/2]).
-export([handle_post/2]).
-export([to_html/2]).
-export([to_json/2]).
-export([to_text/2]).
init(Req, []) ->
{cowboy_rest, Req, []}.
%% Which HTTP methods are allowed
allowed_methods(Req, State) ->
{[<<"GET">>, <<"POST">>], Req, State}.
%% Which content types are accepted by POST/PUT requests
content_types_accepted(Req, State) ->
{[{{<<"application">>, <<"json">>, []}, handle_post}],
Req, State}.
%% Handle the POST/PUT request
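%% An empty body creates the counter. A JSON body such as
%%   {"action": "increment", "value": 3}
%% updates it; "action" defaults to "increment" and "value" to 1.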
handle_post(Req, State) ->
case cowboy_req:binding(counter_name, Req) of
undefined ->
{false, Req, State};
Name ->
case cowboy_req:has_body(Req) of
true ->
{ok, Body, Req3} = cowboy_req:read_body(Req),
Json = jsone:decode(Body),
ActionBin = maps:get(<<"action">>, Json, <<"increment">>),
Value = maps:get(<<"value">>, Json, 1),
Action = list_to_atom(binary_to_list(ActionBin)),
ok = dockerwatch:Action(Name, Value),
{true, Req3, State};
false ->
ok = dockerwatch:create(Name),
{true, Req, State}
end
end.
%% Which content types we handle for GET/HEAD requests
content_types_provided(Req, State) ->
{[{<<"text/html">>, to_html},
{<<"application/json">>, to_json},
{<<"text/plain">>, to_text}
], Req, State}.
%% Return counters/counter as json
to_json(Req, State) ->
Resp = case cowboy_req:binding(counter_name, Req) of
undefined ->
dockerwatch:all();
Counter ->
#{ Counter => dockerwatch:get(Counter) }
end,
{jsone:encode(Resp), Req, State}.
%% Return counters/counter as plain text
to_text(Req, State) ->
Resp = case cowboy_req:binding(counter_name, Req) of
undefined ->
[io_lib:format("~s~n",[Counter]) || Counter <- dockerwatch:all()];
Counter ->
io_lib:format("~p",[dockerwatch:get(Counter)])
end,
{Resp, Req, State}.
%% Return counters/counter as html
to_html(Req, State) ->
Body = case cowboy_req:binding(counter_name, Req) of
undefined ->
Counters = dockerwatch:all(),
["<ul>\n",
[io_lib:format("<li>~s</li>\n", [Counter]) || Counter <- Counters],
"</ul>\n"];
Counter ->
Value = dockerwatch:get(Counter),
io_lib:format("~s = ~p",[Counter, Value])
end,
{[html_head(),Body,html_tail()], Req, State}.
html_head() ->
<<"<html>
<head>
<meta charset=\"utf-8\">
<title>dockerwatch</title>
</head>">>.
html_tail() ->
<<"</body>
</html>">>.

View File

@ -0,0 +1,66 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch_sup.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%
-module(dockerwatch_sup).
-behaviour(supervisor).
-export([start_link/0,init/1]).
%% API.
-spec start_link() -> {ok, pid()}.
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%% supervisor.
init([]) ->
CertsDir = "/etc/ssl/certs/",
Dispatch = cowboy_router:compile(
[
{'_', [{"/[:counter_name]", dockerwatch_handler, []}]}
]),
CowConfig = #{ env => #{ dispatch => Dispatch },
metrics_callback => fun prometheus_cowboy2_instrumenter:observe/1,
stream_handlers => [cowboy_metrics_h, cowboy_stream_h] },
HTTPS = ranch:child_spec(
cowboy_https, 100, ranch_ssl,
[{port, 8443},
{cacertfile, filename:join(CertsDir, "dockerwatch-ca.pem")},
{certfile, filename:join(CertsDir, "dockerwatch-server.pem")},
{keyfile, filename:join(CertsDir, "dockerwatch-server.key")}],
cowboy_tls,
CowConfig),
HTTP = ranch:child_spec(
cowboy_http, 100, ranch_tcp,
[{port, 8080}],
cowboy_clear,
CowConfig),
PromConfig =
#{ env => #{ dispatch =>
cowboy_router:compile(
[{'_', [{"/metrics/[:registry]", prometheus_cowboy2_handler, []}]}]) }
},
Prometheus = ranch:child_spec(
cowboy_prometheus, 100, ranch_tcp,
[{port, 9000}],
cowboy_clear,
PromConfig),
Counter = {dockerwatch, {dockerwatch, start_link, []},
permanent, 5000, worker, [dockerwatch]},
Procs = [Counter, HTTP, HTTPS, Prometheus],
{ok, {{one_for_one, 10, 10}, Procs}}.

View File

@ -0,0 +1,46 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
name: grafana
name: grafana
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
name: grafana
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
name: grafana
spec:
containers:
- image: grafana/grafana
imagePullPolicy: Always
name: grafana
ports:
- containerPort: 3000
protocol: TCP
resources:
limits:
cpu: 500m
memory: 2500Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- mountPath: /var/lib/grafana
name: data
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- emptyDir: {}
name: data

Binary file not shown.


View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: grafana
namespace: monitoring
spec:
ports:
- port: 3000
protocol: TCP
targetPort: 3000
selector:
name: grafana
type: NodePort

View File

@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: monitoring

View File

@ -0,0 +1,280 @@
apiVersion: v1
data:
prometheus.yml: |
# A scrape configuration for running Prometheus on a Kubernetes cluster.
# This uses separate scrape configs for cluster components (i.e. API server, node)
# and services to allow each to use different authentication configs.
#
# Kubernetes labels will be added as Prometheus labels on metrics via the
# `labelmap` relabeling action.
#
# If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
# for the kubernetes-cadvisor job; you will need to edit or remove this job.
# Scrape config for API servers.
#
# Kubernetes exposes API servers as endpoints to the default/kubernetes
# service so this uses `endpoints` role and uses relabelling to only keep
# the endpoints associated with the default/kubernetes service using the
# default named port `https`. This works for single API server deployments as
# well as HA API server deployments.
scrape_configs:
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# This TLS & bearer token file config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
# Keep only the default/kubernetes service endpoints for the https port. This
# will add targets for each API server which Kubernetes adds an endpoint to
# the default/kubernetes service.
relabel_configs:
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: default;kubernetes;https
# Scrape config for nodes (kubelet).
#
# Rather than connecting directly to the node, the scrape is proxied though the
# Kubernetes apiserver. This means it will work if Prometheus is running out of
# cluster, or can't connect to nodes for some other reason (e.g. because of
# firewalling).
- job_name: 'kubernetes-nodes'
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# This TLS & bearer token file config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
# Scrape config for Kubelet cAdvisor.
#
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
# (those whose names begin with 'container_') have been removed from the
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
# retrieve those metrics.
#
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
# the --cadvisor-port=0 Kubelet flag).
#
# This job is not necessary and should be removed in Kubernetes 1.6 and
# earlier versions, or it will cause the metrics to be scraped twice.
- job_name: 'kubernetes-cadvisor'
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# This TLS & bearer token file config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
# Scrape config for service endpoints.
#
# The relabeling allows the actual service scrape endpoint to be configured
# via the following annotations:
#
# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
# to set this to `https` & most likely set the `tls_config` of the scrape config.
# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
# * `prometheus.io/port`: If the metrics are exposed on a different port to the
# service then set this appropriately.
- job_name: 'kubernetes-service-endpoints'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
action: replace
target_label: __scheme__
regex: (https?)
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
action: replace
target_label: __address__
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: kubernetes_name
# Example scrape config for probing services via the Blackbox Exporter.
#
# The relabeling allows the actual service scrape endpoint to be configured
# via the following annotations:
#
# * `prometheus.io/probe`: Only probe services that have a value of `true`
- job_name: 'kubernetes-services'
metrics_path: /probe
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# via the following annotations:
#
# * `prometheus.io/probe`: Only probe services that have a value of `true`
- job_name: 'kubernetes-ingresses'
metrics_path: /probe
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: ingress
relabel_configs:
- source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_ingress_name]
target_label: kubernetes_name
# Example scrape config for pods
#
# The relabeling allows the actual pod scrape endpoint to be configured via the
# following annotations:
#
# * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
# * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
# pod's declared ports (default is a port-free target if none are declared).
- job_name: 'kubernetes-pods'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
kind: ConfigMap
metadata:
name: prometheus-config
namespace: monitoring

View File

@ -0,0 +1,101 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups:
- extensions
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: default
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: default
namespace: monitoring
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
name: prometheus
name: prometheus
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
name: prometheus
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
name: prometheus
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9090"
spec:
containers:
- args:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.path=/prometheus
- --storage.tsdb.retention=24h
command:
- /bin/prometheus
image: prom/prometheus
imagePullPolicy: Always
name: prometheus
ports:
- containerPort: 9090
protocol: TCP
resources:
limits:
cpu: 500m
memory: 2500Mi
requests:
cpu: 10m
memory: 100Mi
volumeMounts:
- mountPath: /prometheus
name: data
- mountPath: /etc/prometheus
name: config-volume
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- emptyDir: {}
name: data
- configMap:
name: prometheus-config
name: config-volume

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: monitoring
spec:
ports:
- port: 9090
protocol: TCP
targetPort: 9090
selector:
name: prometheus
type: NodePort