From 10e6378307d3d0bb217e12c318f66177ddd381b5 Mon Sep 17 00:00:00 2001 From: Garrett Date: Mon, 29 Aug 2016 15:18:48 -0700 Subject: [PATCH] Add docker example (#26) Add docker sandbox as described in the sandbox section of the docs --- ci/do_ci.sh | 1 - docs/_static/docker_compose_v0.1.svg | 4 + docs/install/install.rst | 1 + docs/install/sandboxes.rst | 230 +++++++++++++++++++++++++++ example/Dockerfile-frontenvoy | 3 + example/Dockerfile-service | 8 + example/README.md | 2 + example/docker-compose.yml | 50 ++++++ example/front-envoy.json | 78 +++++++++ example/service-envoy.json | 59 +++++++ example/service.py | 16 ++ example/start_service.sh | 2 + 12 files changed, 453 insertions(+), 1 deletion(-) create mode 100644 docs/_static/docker_compose_v0.1.svg create mode 100644 docs/install/sandboxes.rst create mode 100644 example/Dockerfile-frontenvoy create mode 100644 example/Dockerfile-service create mode 100644 example/README.md create mode 100644 example/docker-compose.yml create mode 100644 example/front-envoy.json create mode 100644 example/service-envoy.json create mode 100644 example/service.py create mode 100644 example/start_service.sh diff --git a/ci/do_ci.sh b/ci/do_ci.sh index e4d7965a2175..33e870706955 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -51,4 +51,3 @@ $EXTRA_CMAKE_FLAGS -DENVOY_DEBUG:BOOL=OFF \ make check_format make -j$NUM_CPUS $TEST_TARGET - diff --git a/docs/_static/docker_compose_v0.1.svg b/docs/_static/docker_compose_v0.1.svg new file mode 100644 index 000000000000..55236771d500 --- /dev/null +++ b/docs/_static/docker_compose_v0.1.svg @@ -0,0 +1,4 @@ + + + + diff --git a/docs/install/install.rst b/docs/install/install.rst index 331905fdc7e0..1a7f395d33cf 100644 --- a/docs/install/install.rst +++ b/docs/install/install.rst @@ -10,3 +10,4 @@ Building and installation building installation ref_configs + sandboxes diff --git a/docs/install/sandboxes.rst b/docs/install/sandboxes.rst new file mode 100644 index 000000000000..69dca374ba32 --- 
/dev/null +++ b/docs/install/sandboxes.rst @@ -0,0 +1,230 @@ +.. _install_sandboxes: + +Sandboxes +========= + +To get a flavor of what Envoy has to offer, we are releasing a +`docker compose `_ sandbox that deploys a front +envoy and a couple of services (simple flask apps) colocated with a running +service envoy. The three containers will be deployed inside a virtual network +called ``envoymesh``. + +Below you can see a graphic showing the docker compose deployment: + +.. image:: /_static/docker_compose_v0.1.svg + :width: 100% + +All incoming requests are routed via the front envoy, which is acting as a reverse proxy sitting on +the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` by docker compose +(see `docker-compose.yml +`_). Moreover, notice +that all traffic routed by the front envoy to the service containers is actually routed to the +service envoys (routes setup in `front-envoy.json +`_). In turn the service +envoys route the request to the flask app via the loopback address (routes setup in +`service-envoy.json +`_). This setup +illustrates the advantage of running service envoys collocated with your services: all requests are +handled by the service envoy, and efficiently routed to your services. + +Running the Sandbox +------------------- + +The following documentation runs through the setup of an envoy cluster organized +as is described in the image above. + +**Step 1: Install Docker** + +Ensure that you have a recent versions of ``docker`, ``docker-compose`` and +``docker-machine`` installed. + +A simple way to achieve this is via the `Docker Toolbox `_. 
+ +**Step 2: Docker Machine setup** + +First let's create a new machine which will hold the containers:: + + $ docker-machine create --driver virtualbox default + $ eval $(docker-machine env default) + +**Step 4: Start all of our containers** + +:: + + $ pwd + /src/envoy/example + $ docker-compose up --build -d + $ docker-compose ps + Name Command State Ports + ------------------------------------------------------------------------------------------------------------- + example_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp + example_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp + example_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + +**Step 5: Test Envoy's routing capabilities** + +You can now send a request to both services via the front-envoy. + +For service1:: + + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:39:19 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + * Connection #0 to host 192.168.99.100 left intact + +For service2:: + + $ curl -v $(docker-machine ip default):8000/service/2 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/2 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 2 + < server: envoy + < date: Fri, 26 Aug 2016 19:39:23 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 2)! 
hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 + * Connection #0 to host 192.168.99.100 left intact + +Notice that each request, while sent to the front envoy, was correctly routed +to the respective application. + +**Step 6: Test Envoy's load balancing capabilities** + +Now let's scale up our service1 nodes to demonstrate the clustering abilities +of envoy.:: + + $ docker-compose scale service1=3 + Creating and starting example_service1_2 ... done + Creating and starting example_service1_3 ... done + +Now if we send a request to service1, the fron envoy will load balance the +request by doing a round robin of the three service1 machines:: + + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:40:21 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 + * Connection #0 to host 192.168.99.100 left intact + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:40:22 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 + * Connection #0 to host 192.168.99.100 left intact + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... 
+ * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:40:24 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + * Connection #0 to host 192.168.99.100 left intact + +**Step 7: enter containers and curl services** + +In addition of using ``curl`` from your host machine, you can also enter the +containers themselves and ``curl`` from inside them. To enter a container you +can use ``docker-compose exec /bin/bash``. For example we can +enter the ``front-envoy`` container, and ``curl`` for services locally:: + + $ docker-compose exec front-envoy /bin/bash + root@81288499f9d7:/# curl localhost:80/service/1 + Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 + root@81288499f9d7:/# curl localhost:80/service/1 + Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 + root@81288499f9d7:/# curl localhost:80/service/1 + Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + root@81288499f9d7:/# curl localhost:80/service/2 + Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 + +**Step 8: enter containers and curl admin** + +When envoy runs it also attaches an ``admin`` to your desired port. In the example +configs the admin is bound to port ``8001``. We can ``curl`` it to gain useful information. +For example you can ``curl`` ``/server_info`` to get information about the +envoy version you are running. Addionally you can ``curl`` ``/stats`` to get +statistics. 
For example inside ``front-envoy`` we can get::
b/example/docker-compose.yml @@ -0,0 +1,50 @@ +version: '2' +services: + + front-envoy: + build: + context: ../ + dockerfile: example/Dockerfile-frontenvoy + volumes: + - ./front-envoy.json:/etc/front-envoy.json + networks: + - envoymesh + expose: + - "80" + - "8001" + ports: + - "8000:80" + - "8001:8001" + + service1: + build: + context: . + dockerfile: Dockerfile-service + volumes: + - ./service-envoy.json:/etc/service-envoy.json + networks: + envoymesh: + aliases: + - service1 + environment: + - SERVICE_NAME=1 + expose: + - "80" + + service2: + build: + context: . + dockerfile: Dockerfile-service + volumes: + - ./service-envoy.json:/etc/service-envoy.json + networks: + envoymesh: + aliases: + - service2 + environment: + - SERVICE_NAME=2 + expose: + - "80" + +networks: + envoymesh: {} diff --git a/example/front-envoy.json b/example/front-envoy.json new file mode 100644 index 000000000000..7cbba54c3529 --- /dev/null +++ b/example/front-envoy.json @@ -0,0 +1,78 @@ +{ + "listeners": [ + { + "port": 80, + "filters": [ + { + "type": "read", + "name": "http_connection_manager", + "config": { + "codec_type": "auto", + "stat_prefix": "ingress_http", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": ["*"], + "routes": [ + { + "timeout_ms": 0, + "prefix": "/service/1", + "cluster": "service1" + }, + { + "timeout_ms": 0, + "prefix": "/service/2", + "cluster": "service2" + } + + ] + } + ] + }, + "filters": [ + { + "type": "decoder", + "name": "router", + "config": {} + } + ] + } + } + ] + } + ], + "admin": { + "access_log_path": "/dev/null", + "port": 8001 + }, + "cluster_manager": { + "clusters": [ + { + "name": "service1", + "connect_timeout_ms": 250, + "type": "strict_dns", + "lb_type": "round_robin", + "features": "http2", + "hosts": [ + { + "url": "tcp://service1:80" + } + ] + }, + { + "name": "service2", + "connect_timeout_ms": 250, + "type": "strict_dns", + "lb_type": "round_robin", + "features": "http2", + "hosts": [ + { + "url": 
"tcp://service2:80" + } + ] + } + ] + } +} + diff --git a/example/service-envoy.json b/example/service-envoy.json new file mode 100644 index 000000000000..b41de07ce17f --- /dev/null +++ b/example/service-envoy.json @@ -0,0 +1,59 @@ +{ + "listeners": [ + { + "port": 80, + "filters": [ + { + "type": "read", + "name": "http_connection_manager", + "config": { + "codec_type": "auto", + "stat_prefix": "ingress_http", + "route_config": { + "virtual_hosts": [ + { + "name": "service", + "domains": ["*"], + "routes": [ + { + "timeout_ms": 0, + "prefix": "/service", + "cluster": "local_service" + } + ] + } + ] + }, + "filters": [ + { + "type": "decoder", + "name": "router", + "config": {} + } + ] + } + } + ] + } + ], + "admin": { + "access_log_path": "/dev/null", + "port": 8001 + }, + "cluster_manager": { + "clusters": [ + { + "name": "local_service", + "connect_timeout_ms": 250, + "type": "strict_dns", + "lb_type": "round_robin", + "hosts": [ + { + "url": "tcp://127.0.0.1:8080" + } + ] + } + ] + } +} + diff --git a/example/service.py b/example/service.py new file mode 100644 index 000000000000..b44d084a6821 --- /dev/null +++ b/example/service.py @@ -0,0 +1,16 @@ +from flask import Flask +import socket +import os + +app = Flask(__name__) + + +@app.route('/service/') +def hello(service_number): + return ('Hello from behind Envoy (service {})! hostname: {} resolved' + 'hostname: {}\n'.format(os.environ['SERVICE_NAME'], + socket.gethostname(), + socket.gethostbyname(socket.gethostname()))) + +if __name__ == "__main__": + app.run(host='127.0.0.1', port=8080, debug=True) diff --git a/example/start_service.sh b/example/start_service.sh new file mode 100644 index 000000000000..cf98f2c5b926 --- /dev/null +++ b/example/start_service.sh @@ -0,0 +1,2 @@ +python /code/service.py & +envoy -c /etc/service-envoy.json