Refactoring: Copy ELK stack example
This commit copies Siri Hansen's (@sirihansen) ELK stack example from: https://github.com/erlang/docker-erlang-example/tree/elk
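
A quick way to verify the copy against its origin is sketched below; this is a hedged example, assuming a local clone of this repository with no `upstream` remote configured yet (the branch name comes from the URL above):

```
# Fetch the branch the files were copied from.
git remote add upstream https://github.com/erlang/docker-erlang-example.git
git fetch upstream elk

# Diff the upstream branch root against the copied subdirectory;
# little or no output means the copy is essentially verbatim.
git diff upstream/elk HEAD:advanced_examples/elk
```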

2 advanced_examples/elk/.gitignore (vendored, Normal file)
@@ -0,0 +1,2 @@
_build/
rebar.lock

41 advanced_examples/elk/.travis.yml (Normal file)
@@ -0,0 +1,41 @@
sudo: required

addons:
  apt:
    packages:
    - curl
    - jq

env:

before_script:

script:
- ./create-certs
- docker-compose up -d
# Wait for elasticsearch and logstash to finish startup
- until curl -s 'localhost:9200/_cluster/health?wait_for_status=yellow'; do sleep 5; echo "waiting for elasticsearch to finish startup"; done
- until curl -s 'localhost:9600/_node'; do sleep 5; echo "waiting for logstash to finish startup"; done
# Create counter
- "curl --cacert ssl/dockerwatch-ca.pem -H 'Content-Type: application/json' -X POST -d '' https://localhost:8443/cnt"
# Increment counter
- "curl --cacert ssl/dockerwatch-ca.pem -H 'Content-Type: application/json' -X POST -d '{}' https://localhost:8443/cnt"
# Read all counters
- "curl --cacert ssl/dockerwatch-ca.pem -H 'Accept: application/json' https://localhost:8443/"
# Read the counter `cnt` as json
- "curl --cacert ssl/dockerwatch-ca.pem -H 'Accept: application/json' https://localhost:8443/cnt"
# Increment the counter `cnt` by 20
- "curl --cacert ssl/dockerwatch-ca.pem -H 'Content-Type: application/json' -X POST -d '{\"value\":20}' https://localhost:8443/cnt"
# Read the counter `cnt` as text
- "curl --cacert ssl/dockerwatch-ca.pem -H 'Accept: text/plain' https://localhost:8443/cnt"
# Check that there are 6 lines in the logstash log (one for each curl command above)
- sleep 10
- test "$(docker exec dockererlangexample_logstash_1 cat /usr/share/logstash/logs/output.log | wc -l)" = "6"
# Get the index name, and check that there are also 6 log events to be read from elasticsearch
- INDEX=$(curl -s 'localhost:9200/_cat/indices/logstash*?h=i')
- echo $INDEX
- S=$(curl -s "localhost:9200/$INDEX/_search?_source=false")
- echo $S
- T=$(curl -s "localhost:9200/$INDEX/_search?_source=false" | jq -r ".hits.total")
- echo $T
- test "$T" = "6"

26 advanced_examples/elk/Docker-Cheat-Sheet.md (Normal file)
@@ -0,0 +1,26 @@
## Docker Cheatsheet

* Remove all containers that are not running:

      $ docker rm $(docker ps -aq -f status=exited)

* Remove dangling images:

      $ docker rmi $(docker images -f dangling=true -q)

* Attach to running docker:

      $ docker exec -i -t NameOrId /bin/sh

## Core generation

* `/proc/sys/core_pattern` is clearly persisted on the host. Taking note of
  its content before starting any endeavour is therefore highly encouraged.
* dockers `--privileged` is necessary for a gdb session to catch the stack,
  without privileges, gdb just complains about No stack. Google still is
  hardly knowledgeable about this phenomenon...
* setting ulimit on docker run works perfectly, for future googlers (syntax hard to find),
  a docker-compose example:

      ulimits:
        core: -1
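
The note above mentions that the `docker run` syntax for ulimits is hard to find but only shows the docker-compose form; as a hedged complement (the image name `dockerwatch` is just an example), the equivalent flag looks like this:

```
# Allow unlimited core dumps inside the container (one value sets both soft and hard limit).
docker run --ulimit core=-1 --privileged -it dockerwatch /bin/sh
```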

37 advanced_examples/elk/Dockerfile (Normal file)
@@ -0,0 +1,37 @@
# Build stage 0
FROM erlang:alpine

# Install Rebar3
RUN mkdir -p /buildroot/rebar3/bin
ADD https://s3.amazonaws.com/rebar3/rebar3 /buildroot/rebar3/bin/rebar3
RUN chmod a+x /buildroot/rebar3/bin/rebar3

# Setup Environment
ENV PATH=/buildroot/rebar3/bin:$PATH

# Reset working directory
WORKDIR /buildroot

# Copy our Erlang test application
COPY dockerwatch dockerwatch

# And build the release
WORKDIR dockerwatch
RUN rebar3 as prod release


# Build stage 1
FROM alpine

# Install some libs
RUN apk add --no-cache openssl && \
    apk add --no-cache ncurses-libs

# Install the released application
COPY --from=0 /buildroot/dockerwatch/_build/prod/rel/dockerwatch /dockerwatch

# Expose relevant ports
EXPOSE 8080
EXPOSE 8443

CMD ["/dockerwatch/bin/dockerwatch", "foreground"]
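
For reference, a minimal sketch of building and running this image on its own, outside docker-compose; the tag, published port and certificate mount point mirror the docker-compose.yml added later in this commit:

```
# Build the two-stage image from the advanced_examples/elk directory.
docker build -t dockerwatch .

# Run it with the generated certificates mounted where the release expects them.
docker run --rm -p 8443:8443 --volume="$PWD/ssl:/etc/ssl/certs" dockerwatch
```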

23 advanced_examples/elk/README-CERTS.md (Normal file)
@@ -0,0 +1,23 @@
## Generating Certificate

Generate certificates in subdirectory `ssl`.

### Root CA

    $ openssl genrsa -out dockerwatch-ca.key 4096

    $ openssl req -x509 -new -nodes -key dockerwatch-ca.key -sha256 -days 1024 -out dockerwatch-ca.pem

### Server Certificate

    $ openssl genrsa -out dockerwatch-server.key 4096

Certificate signing request

    $ openssl req -new -key dockerwatch-server.key -out dockerwatch-server.csr

The most important field: `Common Name (eg, YOUR name) []: localhost`. We use localhost in this example.

### Sign it

    $ openssl x509 -req -in dockerwatch-server.csr -CA dockerwatch-ca.pem -CAkey dockerwatch-ca.key -CAcreateserial -out dockerwatch-server.pem -days 500 -sha256
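
A quick sanity check of the resulting files, assuming the commands above were run as written (both invocations are plain openssl):

```
# The server certificate should verify against the self-signed root CA ...
openssl verify -CAfile dockerwatch-ca.pem dockerwatch-server.pem

# ... and its subject should carry Common Name "localhost", as noted above.
openssl x509 -in dockerwatch-server.pem -noout -subject -dates
```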

184 advanced_examples/elk/README.md (Normal file)
@@ -0,0 +1,184 @@
## Using Logstash, Elasticsearch and Kibana, a.k.a. the ELK stack

This example runs four docker containers:

- dockerwatch (our example application)
- logstash (log pipeline)
- elasticsearch (search and analytics engine)
- kibana (analytics and visualization platform, web interface)

As in the
[Logstash example](http://github.com/erlang/docker-erlang-example/tree/logstash),
the dockerwatch container is started with a logging driver that sends
everything printed on standard out to a UDP port in the logstash
container. Logstash forwards each log event over http to the
elasticsearch container, and kibana collects log events from
elasticsearch.

This setup, using Logstash, Elasticsearch and Kibana, is a quite
common open source solution for collecting and visualizing log events
from any number of sources. It is known as the Elastic Stack or the
ELK stack, https://www.elastic.co/elk-stack.

In the example, we use
[`docker-compose`](https://docs.docker.com/compose/) to define and run
the containers. This also simplifies addressing between the
containers.


### Prerequisites

Install [`docker-compose`](https://docs.docker.com/compose/install/).


### Run the example

First, generate certificates in subdirectory `ssl`:

    ./create-certs

Then start it all by running

    docker-compose up

The startup takes a few minutes, but finally you can point your
browser at `http://localhost:5601` to connect to the Kibana web interface.

To generate some log events, use curl towards the dockerwatch
application. Each request generates a `notice` level event through the
Erlang Logger. For example:

Create a counter called `cnt`:

    # curl --cacert ssl/dockerwatch-ca.pem -i -H "Content-Type: application/json" -X POST -d "" https://localhost:8443/cnt
    HTTP/1.1 204 No Content
    content-type: text/html
    date: Fri, 23 Nov 2018 13:48:03 GMT
    server: Cowboy
    vary: accept

Increment the counter `cnt`:

    # curl --cacert ssl/dockerwatch-ca.pem -H "Content-Type: application/json" -X POST -d '{}' https://localhost:8443/cnt

Read the counter `cnt` as json:

    # curl --cacert ssl/dockerwatch-ca.pem -H "Accept: application/json" https://localhost:8443/cnt
    {"cnt":1}

For further example requests, see the
[simple docker example](http://github.com/erlang/docker-erlang-example/).

In the Kibana web interface, you need to create an index pattern for
the logstash events. This you can do under the Management tab. Then
you can see all events in the Discover tab.


### docker-compose configuration

docker-compose.yml:
```
version: "2"
services:
  dockerwatch:
    build: .
    image: dockerwatch
    ports:
      - "8443:8443"
    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://localhost:44622"
    volumes:
      - "./ssl:/etc/ssl/certs"
  logstash:
    image: docker.elastic.co/logstash/logstash:6.4.3
    ports:
      - "44622:44622/udp"
      - "9600:9600"
    volumes:
      - "./logstash/logstash.yml:/usr/share/logstash/config/logstash.yml"
      - "./logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf"
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.4.3
    ports:
      - "9200:9200"
    volumes:
      - "./elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml"
  kibana:
    image: docker.elastic.co/kibana/kibana:6.4.3
    ports:
      - "5601:5601"
    volumes:
      - "./kibana/kibana.yml:/usr/share/kibana/config/kibana.yml"
```


### Logstash configuration

The Logstash pipeline configuration is specified in
`logstash/pipeline/logstash.conf`. This is the same as in the
[Logstash example](http://github.com/erlang/docker-erlang-example/tree/logstash),
except it has the additional `elasticsearch` output plugin.

```
input {
  gelf {
    use_udp => true
    port => 44622
  }
}
filter {
  # If a log message can be parsed as json, do so, and populate the
  # log event with fields found.
  json {
    skip_on_invalid_json => "true"
    source => "message"
  }
  # Convert the level field to an integer
  mutate {
    convert => {
      "level" => "integer"
    }
  }
}
output {
  file {
    path => "/usr/share/logstash/logs/output.log"
  }
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    http_compression => true
  }
}
```


### Equivalent without using `docker-compose`

The equivalent, not using docker-compose, would be something like the
following (after modifying logstash.conf to use $ELASTICSEARCHIP
instead of the name 'elasticsearch' for the elasticsearch host):

Start elasticsearch:

    docker run --rm -p 9200:9200 --volume="$PWD/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml" docker.elastic.co/elasticsearch/elasticsearch:6.4.3

Get IP address of elasticsearch container:

    EIP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' <elasticsearch container id>)

Start logstash with ($ELASTICSEARCHIP is used in logstash.conf)

    docker run --rm --env ELASTICSEARCHIP=$EIP -p 44622:44622/udp -p 9600:9600 --volume="$PWD/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml" --volume="$PWD/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf" docker.elastic.co/logstash/logstash:6.4.3

Start Kibana (configuration found in kibana/config/kibana.yml, $ELASTICSEARCH_URL is an env var which Kibana reads):

    docker run --rm --env ELASTICSEARCH_URL=http://$EIP:9200 -p 5601:5601 --volume="$PWD/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml" docker.elastic.co/kibana/kibana:6.4.3

Start dockerwatch application

    docker run -p 8443:8443 --volume="$PWD/ssl:/etc/ssl/certs" --log-driver=gelf --log-opt gelf-address=udp://0.0.0.0:44622 dockerwatch
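
Besides browsing Kibana, a rough command-line check that the log events actually reached Elasticsearch looks like the sketch below; it mirrors the assertions in the `.travis.yml` added earlier in this commit and assumes `curl` and `jq` are installed:

```
# Find the logstash index that was created and count the documents stored in it.
INDEX=$(curl -s 'localhost:9200/_cat/indices/logstash*?h=i')
curl -s "localhost:9200/$INDEX/_search?_source=false" | jq -r '.hits.total'
```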

22 advanced_examples/elk/create-certs (Executable file)
@@ -0,0 +1,22 @@
#!/bin/sh

set -e

if [ ! -d ssl ]; then
    mkdir ssl
fi

# Create the root CA (Certificate Authority)
openssl genrsa -out ssl/dockerwatch-ca.key 4096

## Certificate signing request for root CA
openssl req -x509 -new -nodes -key ssl/dockerwatch-ca.key -sha256 -days 1024 -subj "/C=SE/" -out ssl/dockerwatch-ca.pem

# Create the server certificate
openssl genrsa -out ssl/dockerwatch-server.key 4096

## Certificate signing request for server certificate
openssl req -new -key ssl/dockerwatch-server.key -subj "/C=SE/CN=localhost/" -out ssl/dockerwatch-server.csr

## Sign the server certificate using the root CA
openssl x509 -req -in ssl/dockerwatch-server.csr -CA ssl/dockerwatch-ca.pem -CAkey ssl/dockerwatch-ca.key -CAcreateserial -out ssl/dockerwatch-server.pem -days 500 -sha256

33 advanced_examples/elk/docker-compose.yml (Normal file)
@@ -0,0 +1,33 @@
version: "2"
services:
  dockerwatch:
    build: .
    image: dockerwatch
    ports:
      - "8443:8443"
    logging:
      driver: "gelf"
      options:
        gelf-address: "udp://localhost:44622"
    volumes:
      - "./ssl:/etc/ssl/certs"
  logstash:
    image: docker.elastic.co/logstash/logstash:6.4.3
    ports:
      - "44622:44622/udp"
      - "9600:9600"
    volumes:
      - "./logstash/logstash.yml:/usr/share/logstash/config/logstash.yml"
      - "./logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf"
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.4.3
    ports:
      - "9200:9200"
    volumes:
      - "./elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml"
  kibana:
    image: docker.elastic.co/kibana/kibana:6.4.3
    ports:
      - "5601:5601"
    volumes:
      - "./kibana/kibana.yml:/usr/share/kibana/config/kibana.yml"
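
A minimal sketch for validating and starting the stack defined above, then listing the services (it assumes `docker-compose` is installed, as the README's prerequisites state):

```
docker-compose config   # parse and validate docker-compose.yml
docker-compose up -d    # start dockerwatch, logstash, elasticsearch and kibana
docker-compose ps       # all four services should be listed as running
```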

22 advanced_examples/elk/dockerwatch/config/sys.config (Normal file)
@@ -0,0 +1,22 @@
[{kernel, [{logger,
            [%% Set formatter template to print json
             {handler,default,logger_std_h,
              #{formatter=>
                    {logger_formatter,
                     #{template=>
                           ["{ \"@timestamp\": \"",time,
                            "\", \"level\": \"",{level_int,[level_int],[level]},
                            "\", \"message\": \"",msg,"\" }\n"]}}}},

             %% Add a primary filter to insert 'level_int' field in metadata
             {filters,log,
              [{level_int,{fun dockerwatch_filter:add_level_int/2,[]}}]},

             %% Set log level 'debug' for module dockerwatch - to get
             %% some log printouts when sending requests
             {module_level,debug,[dockerwatch]}
            ]}
           %% For debugging
           %%,{logger_level,debug}
          ]}
].

2 advanced_examples/elk/dockerwatch/config/vm.args (Normal file)
@@ -0,0 +1,2 @@
-sname dockerwatch

17 advanced_examples/elk/dockerwatch/rebar.config (Normal file)
@@ -0,0 +1,17 @@

{deps, [{jsone, "1.4.7"},    %% JSON Encode/Decode
        {cowboy, "2.5.0"}]}. %% HTTP Server

{relx, [{release, {"dockerwatch", "1.0.0"}, [dockerwatch]},
        {vm_args, "config/vm.args"},
        {sys_config, "config/sys.config"},
        {dev_mode, true},
        {include_erts, false},
        {extended_start_script, true}
       ]}.

{profiles, [{prod, [{relx, [{dev_mode, false},
                            {include_erts, true},
                            {include_src, false}]}]}
           ]}.
%% vim: ft=erlang

16 advanced_examples/elk/dockerwatch/src/dockerwatch.app.src (Normal file)
@@ -0,0 +1,16 @@
%% Feel free to use, reuse and abuse the code in this file.

{application, dockerwatch, [
    {description, "Cowboy REST Hello World example."},
    {vsn, "1.0.0"},
    {modules, []},
    {registered, [dockerwatch_sup]},
    {applications, [
        kernel,
        stdlib,
        jsone,
        cowboy
    ]},
    {mod, {dockerwatch_app, []}},
    {env, []}
]}.

54 advanced_examples/elk/dockerwatch/src/dockerwatch.erl (Normal file)
@@ -0,0 +1,54 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%

-module(dockerwatch).

-export([start_link/0, all/0, create/1, get/1, increment/2, decrement/2]).

-include_lib("kernel/include/logger.hrl").

-type counter() :: binary().

-spec start_link() -> {ok, pid()}.
start_link() ->
    ?LOG_DEBUG("~p starting",[?MODULE]),
    {ok, spawn_link(fun() -> ets:new(?MODULE, [named_table, public]),
                             receive after infinity -> ok end end)}.

-spec all() -> [counter()].
all() ->
    ?LOG_DEBUG("~p all",[?MODULE]),
    ets:select(?MODULE, [{{'$1','_'},[],['$1']}]).

-spec create(counter()) -> ok | already_exists.
create(CounterName) ->
    case ets:insert_new(?MODULE, {CounterName, 0}) of
        true ->
            ?LOG_DEBUG("Counter ~s created",[CounterName]),
            ok;
        false ->
            ?LOG_DEBUG("Counter ~s already exists",[CounterName]),
            already_exists
    end.

-spec get(counter()) -> integer().
get(CounterName) ->
    ?LOG_DEBUG("Counter ~s, get",[CounterName]),
    ets:lookup_element(?MODULE, CounterName, 2).

-spec increment(counter(), integer()) -> ok.
increment(CounterName, Howmuch) ->
    ?LOG_DEBUG("Counter ~s, increment ~p",[CounterName,Howmuch]),
    _ = ets:update_counter(?MODULE, CounterName, [{2, Howmuch}]),
    ok.

-spec decrement(counter(), integer()) -> ok.
decrement(CounterName, Howmuch) ->
    ?LOG_DEBUG("Counter ~s, decrement ~p",[CounterName,Howmuch]),
    _ = ets:update_counter(?MODULE, CounterName, [{2, -1 * Howmuch}]),
    ok.

19 advanced_examples/elk/dockerwatch/src/dockerwatch_app.erl (Normal file)
@@ -0,0 +1,19 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch_app.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%

-module(dockerwatch_app).
-behaviour(application).

-export([start/2,stop/1]).
%% API.

start(_Type, _Args) ->
    dockerwatch_sup:start_link().

stop(_State) ->
    ok.

9 advanced_examples/elk/dockerwatch/src/dockerwatch_filter.erl (Normal file)
@@ -0,0 +1,9 @@
-module(dockerwatch_filter).

-export([add_level_int/2]).

%% Add a field named level_int to the metadata of each log event. For
%% the formatter to insert in the message string when required by the
%% log target
add_level_int(#{level:=L,meta:=M}=E,_) ->
    E#{meta=>M#{level_int=>logger_config:level_to_int(L)}}.

103 advanced_examples/elk/dockerwatch/src/dockerwatch_handler.erl (Normal file)
@@ -0,0 +1,103 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch_handler.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%

-module(dockerwatch_handler).

-export([init/2]).
-export([allowed_methods/2]).
-export([content_types_accepted/2]).
-export([content_types_provided/2]).
-export([handle_post/2]).
-export([to_html/2]).
-export([to_json/2]).
-export([to_text/2]).

init(Req, []) ->
    {cowboy_rest, Req, []}.

%% Which HTTP methods are allowed
allowed_methods(Req, State) ->
    {[<<"GET">>, <<"POST">>], Req, State}.

%% Which content types are accepted by POST/PUT requests
content_types_accepted(Req, State) ->
    {[{{<<"application">>, <<"json">>, []}, handle_post}],
     Req, State}.

%% Handle the POST/PUT request
handle_post(Req, State) ->
    case cowboy_req:binding(counter_name, Req) of
        undefined ->
            {false, Req, State};
        Name ->
            case cowboy_req:has_body(Req) of
                true ->
                    {ok, Body, Req3} = cowboy_req:read_body(Req),
                    Json = jsone:decode(Body),
                    ActionBin = maps:get(<<"action">>, Json, <<"increment">>),
                    Value = maps:get(<<"value">>, Json, 1),
                    Action = list_to_atom(binary_to_list(ActionBin)),
                    ok = dockerwatch:Action(Name, Value),
                    {true, Req3, State};
                false ->
                    ok = dockerwatch:create(Name),
                    {true, Req, State}
            end
    end.

%% Which content types we handle for GET/HEAD requests
content_types_provided(Req, State) ->
    {[{<<"text/html">>, to_html},
      {<<"application/json">>, to_json},
      {<<"text/plain">>, to_text}
     ], Req, State}.


%% Return counters/counter as json
to_json(Req, State) ->
    Resp = case cowboy_req:binding(counter_name, Req) of
               undefined ->
                   dockerwatch:all();
               Counter ->
                   #{ Counter => dockerwatch:get(Counter) }
           end,
    {jsone:encode(Resp), Req, State}.

%% Return counters/counter as plain text
to_text(Req, State) ->
    Resp = case cowboy_req:binding(counter_name, Req) of
               undefined ->
                   [io_lib:format("~s~n",[Counter]) || Counter <- dockerwatch:all()];
               Counter ->
                   io_lib:format("~p",[dockerwatch:get(Counter)])
           end,
    {Resp, Req, State}.

%% Return counters/counter as html
to_html(Req, State) ->
    Body = case cowboy_req:binding(counter_name, Req) of
               undefined ->
                   Counters = dockerwatch:all(),
                   ["<ul>\n",
                    [io_lib:format("<li>~s</li>\n", [Counter]) || Counter <- Counters],
                    "</ul>\n"];
               Counter ->
                   Value = dockerwatch:get(Counter),
                   io_lib:format("~s = ~p",[Counter, Value])
           end,
    {[html_head(),Body,html_tail()], Req, State}.

html_head() ->
    <<"<html>
<head>
<meta charset=\"utf-8\">
<title>dockerwatch</title>
</head>">>.
html_tail() ->
    <<"</body>
</html>">>.
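
handle_post/2 above also honours an optional "action" field (defaulting to "increment") and a "value" field (defaulting to 1); the "action" field is not exercised anywhere else in the commit, so a hypothetical decrement request would look roughly like this:

```
# Decrement the counter `cnt` by 5 (certificate path as in the README).
curl --cacert ssl/dockerwatch-ca.pem -H 'Content-Type: application/json' \
     -X POST -d '{"action":"decrement","value":5}' https://localhost:8443/cnt
```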

73 advanced_examples/elk/dockerwatch/src/dockerwatch_sup.erl (Normal file)
@@ -0,0 +1,73 @@
%%
%% Copyright (C) 2014 Björn-Egil Dahlberg
%%
%% File: dockerwatch_sup.erl
%% Author: Björn-Egil Dahlberg
%% Created: 2014-09-10
%%

-module(dockerwatch_sup).
-behaviour(supervisor).

-export([start_link/0,init/1]).

-include_lib("kernel/include/logger.hrl").

%% API.

-spec start_link() -> {ok, pid()}.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% supervisor.

init([]) ->
    CertsDir = "/etc/ssl/certs/",

    Dispatch = cowboy_router:compile([
        {'_', [{"/[:counter_name]", dockerwatch_handler, []}]}
    ]),

    HTTPS = ranch:child_spec(
              cowboy_https, 100, ranch_ssl,
              [{port, 8443},
               {cacertfile, filename:join(CertsDir, "dockerwatch-ca.pem")},
               {certfile, filename:join(CertsDir, "dockerwatch-server.pem")},
               {keyfile, filename:join(CertsDir, "dockerwatch-server.key")}],
              cowboy_tls,
              #{env=>#{dispatch=>Dispatch},
                metrics_callback=>log_fun(),
                stream_handlers => [cowboy_metrics_h,cowboy_stream_h]}),

    HTTP = ranch:child_spec(
             cowboy_http, 100, ranch_tcp,
             [{port, 8080}],
             cowboy_clear,
             #{env=>#{dispatch=>Dispatch},
               metrics_callback=>log_fun(),
               stream_handlers => [cowboy_metrics_h,cowboy_stream_h]}),

    Counter = {dockerwatch, {dockerwatch, start_link, []},
               permanent, 5000, worker, [dockerwatch]},

    Procs = [Counter, HTTP, HTTPS],

    {ok, {{one_for_one, 10, 10}, Procs}}.

log_fun() ->
    fun(#{resp_status:=RS}=M) when RS>=100, RS<200 ->
            do_log(M,"Info");
       (#{resp_status:=RS}=M) when RS>=200, RS<300 ->
            do_log(M,"OK");
       (#{resp_status:=RS}=M) when RS>=300, RS<400 ->
            do_log(M,"Redirect");
       (#{resp_status:=RS}=M) when RS>=400, RS<500 ->
            do_log(M,"Client error");
       (#{resp_status:=RS}=M) when RS>=500 ->
            do_log(M,"Server error")
    end.

do_log(#{req:=#{scheme:=S,method:=M,path:=P},resp_status:=RS},_What) ->
    ?LOG_DEBUG("scheme=~s, method=~s, path=~s, resp_status=~p",[S,M,P,RS]);
do_log(#{reason:=Reason,resp_status:=RS},_What) ->
    ?LOG_DEBUG("reason=~p, resp_status=~p",[Reason,RS]).
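
Note that the supervisor above also starts a plain-HTTP listener on port 8080, which the Dockerfile exposes but docker-compose.yml does not publish; if that port were mapped as well (for example with `-p 8080:8080`), the same handler could be reached without TLS, roughly:

```
# Read all counters over plain HTTP (only works if port 8080 is actually published).
curl -H 'Accept: application/json' http://localhost:8080/
```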

9 advanced_examples/elk/elasticsearch/elasticsearch.yml (Normal file)
@@ -0,0 +1,9 @@
cluster.name: "docker-cluster"
network.host: 0.0.0.0

# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
discovery.zen.minimum_master_nodes: 1

discovery.type: single-node

4 advanced_examples/elk/kibana/kibana.yml (Normal file)
@@ -0,0 +1,4 @@
server.name: kibana
server.host: "0"
elasticsearch.url: http://elasticsearch:9200
xpack.monitoring.ui.container.elasticsearch.enabled: true

29 advanced_examples/elk/logstash/logstash.conf (Normal file)
@@ -0,0 +1,29 @@
input {
  gelf {
    use_udp => true
    port => 44622
  }
}
filter {
  # If a log message can be parsed as json, do so, and populate the
  # log event with fields found.
  json {
    skip_on_invalid_json => "true"
    source => "message"
  }
  # Convert the level field to an integer
  mutate {
    convert => {
      "level" => "integer"
    }
  }
}
output {
  file {
    path => "/usr/share/logstash/logs/output.log"
  }
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    http_compression => true
  }
}

2 advanced_examples/elk/logstash/logstash.yml (Normal file)
@@ -0,0 +1,2 @@
log.level: info
http.host: "0.0.0.0"