diff --git a/certbot/Dockerfile b/certbot/Dockerfile
index c62405e9..3ce7546f 100644
--- a/certbot/Dockerfile
+++ b/certbot/Dockerfile
@@ -1,5 +1,7 @@
 FROM phusion/baseimage:latest
 
+MAINTAINER Mahmoud Zalt
+
 COPY run-certbot.sh /root/certbot/run-certbot.sh
 
 RUN apt-get update
diff --git a/docker-compose.yml b/docker-compose.yml
index 5954433b..05b11f04 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -37,10 +37,13 @@ services:
     volumes_from:
       - applications
     extra_hosts:
-      - "dockerhost:${DOCKER_HOST_IP}"
+      - "dockerhost:${DOCKER_HOST_IP}"
     ports:
-      - "${WORKSPACE_SSH_PORT}:22"
+      - "${WORKSPACE_SSH_PORT}:22"
     tty: true
+    networks:
+      - frontend
+      - backend
 
 ### PHP-FPM Container #######################################
 
@@ -72,6 +75,8 @@ services:
       - "dockerhost:${DOCKER_HOST_IP}"
     environment:
       - PHP_IDE_CONFIG=${PHP_IDE_CONFIG}
+    networks:
+      - backend
 
 ### PHP Worker Container #####################################
 
   php-worker:
     build: ./php-worker
     volumes_from:
       - applications
     depends_on:
       - workspace
+    networks:
+      - backend
 
 ### Nginx Server Container ##################################
 
@@ -90,15 +97,18 @@
     build:
       args:
         - PHP_UPSTREAM=php-fpm
     volumes_from:
-      - applications
+      - applications
     volumes:
-      - ${NGINX_HOST_LOG_PATH}:/var/log/nginx
-      - ${NGINX_SITES_PATH}:/etc/nginx/sites-available
+      - ${NGINX_HOST_LOG_PATH}:/var/log/nginx
+      - ${NGINX_SITES_PATH}:/etc/nginx/sites-available
     ports:
-      - "${NGINX_HOST_HTTP_PORT}:80"
-      - "${NGINX_HOST_HTTPS_PORT}:443"
+      - "${NGINX_HOST_HTTP_PORT}:80"
+      - "${NGINX_HOST_HTTPS_PORT}:443"
     depends_on:
-      - php-fpm
+      - php-fpm
+    networks:
+      - frontend
+      - backend
 
 ### Apache Server Container #################################
 
@@ -117,6 +127,9 @@
       - "${APACHE_HOST_HTTPS_PORT}:443"
     depends_on:
       - php-fpm
+    networks:
+      - frontend
+      - backend
 
 ### HHVM Container ##########################################
 
@@ -128,6 +141,9 @@
       - "9000"
     depends_on:
       - workspace
+    networks:
+      - frontend
+      - backend
 
 ### Minio Container #########################################
 
@@ -140,6 +156,8 @@
     environment:
       - MINIO_ACCESS_KEY=access
       - MINIO_SECRET_KEY=secretkey
+    networks:
+      - frontend
 
 ### MySQL Container #########################################
 
@@ -155,8 +173,10 @@
       - mysql:/var/lib/mysql
     ports:
       - "${MYSQL_PORT}:3306"
+    networks:
+      - backend
 
-### MsSQL Container #########################################
+### MSSQL Container #########################################
 
   mssql:
     build:
@@ -169,6 +189,8 @@
     volumes:
       - mssql:/var/opt/mssql
     ports:
       - "${MSSQL_PORT}:1433"
+    networks:
+      - backend
 
 ### MariaDB Container #######################################
 
@@ -183,6 +205,8 @@
       - MYSQL_USER=${MARIADB_USER}
       - MYSQL_PASSWORD=${MARIADB_PASSWORD}
       - MYSQL_ROOT_PASSWORD=${MARIADB_PORT}
+    networks:
+      - backend
 
 ### PostgreSQL Container ####################################
 
@@ -196,6 +220,8 @@
       - POSTGRES_DB=${POSTGRES_DB}
       - POSTGRES_USER=${POSTGRES_USER}
       - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
+    networks:
+      - backend
 
 ### PostgreSQL PostGis Container ############################
 
@@ -209,6 +235,8 @@
       - POSTGRES_DB=${POSTGRES_DB}
       - POSTGRES_USER=${POSTGRES_USER}
       - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
+    networks:
+      - backend
 
 ### Neo4j Container #########################################
 
@@ -221,6 +249,8 @@
       - NEO4J_AUTH=default:secret
     volumes:
       - neo4j:/var/lib/neo4j/data
+    networks:
+      - backend
 
 ### MongoDB Container #######################################
 
@@ -230,6 +260,8 @@
       - "27017:27017"
     volumes:
       - mongo:/data/db
+    networks:
+      - backend
 
 ### RethinkDB Container #######################################
 
@@ -239,6 +271,8 @@
       - "8090:8080"
     volumes:
       - rethinkdb:/data/rethinkdb_data
+    networks:
+      - backend
 
 ### Redis Container #########################################
 
@@ -248,6 +282,8 @@
       - redis:/data
     ports:
       - "6379:6379"
+    networks:
+      - backend
 
 ### Aerospike c Container ###################################
 
@@ -262,6 +298,8 @@
       - "3001:3001"
       - "3002:3002"
       - "3003:3003"
+    networks:
+      - backend
 
 ### Memcached Container #####################################
 
@@ -273,6 +311,8 @@
       - "${MEMCACHED_HOST_PORT}:11211"
     depends_on:
       - php-fpm
+    networks:
+      - backend
 
 ### Beanstalkd Container ####################################
 
@@ -283,6 +323,8 @@
     privileged: true
     depends_on:
       - php-fpm
+    networks:
+      - backend
 
 ### RabbitMQ Container ######################################
 
@@ -298,6 +340,8 @@
       - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS}
     depends_on:
       - php-fpm
+    networks:
+      - backend
 
 ### Beanstalkd Console Container ############################
 
@@ -307,6 +351,8 @@
       - "2080:2080"
     depends_on:
       - beanstalkd
+    networks:
+      - backend
 
 ### Caddy Server Container ##################################
 
@@ -324,6 +370,9 @@
       - caddy:/root/.caddy
     depends_on:
       - php-fpm
+    networks:
+      - frontend
+      - backend
 
 ### phpMyAdmin Container ####################################
 
@@ -338,6 +387,8 @@
       - "${PMA_PORT}:80"
     depends_on:
       - "${PMA_DB_ENGINE}"
+    networks:
+      - frontend
 
 ### Adminer Container ####################################
 
@@ -347,6 +398,8 @@
       - "${ADM_PORT}:8080"
     depends_on:
       - php-fpm
+    networks:
+      - frontend
 
 ### pgAdmin Container #######################################
 
@@ -356,6 +409,8 @@
       - "5050:5050"
     depends_on:
       - postgres
+    networks:
+      - frontend
 
 ### ElasticSearch Container #################################
 
@@ -369,6 +424,8 @@
       - "${ELASTICSEARCH_HOST_TRANSPORT_PORT}:9300"
     depends_on:
       - php-fpm
+    networks:
+      - frontend
 
 ### Certbot Container ##################################
 
@@ -381,6 +438,8 @@
     environment:
       - CN="fake.domain.com"
       - EMAIL="fake.email@gmail.com"
+    networks:
+      - frontend
 
 ### Mailhog Container #########################################
 
@@ -389,8 +448,10 @@
     ports:
       - "1025:1025"
       - "8025:8025"
+    networks:
+      - frontend
 
-### Selenium Container #########################################
+### Selenium Container ########################################
 
   selenium:
     build: ./selenium
@@ -398,8 +459,68 @@
       - "${SELENIUM_PORT}:4444"
     volumes:
       - /dev/shm:/dev/shm
+    networks:
+      - frontend
 
-### Volumes Setup ###########################################
+### Varnish Proxy 1 ##########################################
+
+  proxy:
+    build: ./varnish
+    expose:
+      - ${VARNISH_PORT}
+    environment:
+      - VARNISH_CONFIG=${VARNISH_CONFIG}
+      - CACHE_SIZE=${VARNISH_PROXY1_CACHE_SIZE}
+      - VARNISHD_PARAMS=${VARNISHD_PARAMS}
+      - VARNISH_PORT=${VARNISH_PORT}
+      - BACKEND_HOST=${VARNISH_PROXY1_BACKEND_HOST}
+      - BACKEND_PORT=${VARNISH_BACKEND_PORT}
+      - VARNISH_SERVER=${VARNISH_PROXY1_SERVER}
+    links:
+      - workspace
+    networks:
+      - frontend
+
+### Varnish Proxy 2 ##########################################
+
+  proxy2:
+    build: ./varnish
+    expose:
+      - ${VARNISH_PORT}
+    environment:
+      - VARNISH_CONFIG=${VARNISH_CONFIG}
+      - CACHE_SIZE=${VARNISH_PROXY2_CACHE_SIZE}
+      - VARNISHD_PARAMS=${VARNISHD_PARAMS}
+      - VARNISH_PORT=${VARNISH_PORT}
+      - BACKEND_HOST=${VARNISH_PROXY2_BACKEND_HOST}
+      - BACKEND_PORT=${VARNISH_BACKEND_PORT}
+      - VARNISH_SERVER=${VARNISH_PROXY2_SERVER}
+    links:
+      - workspace
+    networks:
+      - frontend
+
+### Balancer Haproxy ##########################################
+
+  balancer:
+    build: ./haproxy
+    ports:
+      - "${HAPROXY_HOST_HTTP_PORT}:8085"
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    links:
+      - proxy
+      - proxy2
+
+### Networks Setup ############################################
+
+networks:
+  frontend:
+    driver: "bridge"
+  backend:
+    driver: "bridge"
+
+### Volumes Setup #############################################
 
 volumes:
   mysql:
diff --git a/env-example b/env-example
index d773a6b3..c5183569 100644
--- a/env-example
+++ b/env-example
@@ -122,9 +122,28 @@ PMA_PASSWORD=secret
 PMA_ROOT_PASSWORD=secret
 PMA_PORT=88
 
-### Adminer Container
+### ADMINER Container
 ADM_PORT=88
 
+### VARNISH Container
+VARNISH_CONFIG=/etc/varnish/default.vcl
+VARNISH_PORT=8080
+VARNISH_BACKEND_PORT=8888
+VARNISHD_PARAMS=-p default_ttl=3600 -p default_grace=3600
+
+### Varnish Proxy 1 Container
+VARNISH_PROXY1_CACHE_SIZE=128m
+VARNISH_PROXY1_BACKEND_HOST=workspace
+VARNISH_PROXY1_SERVER=SERVER1
+
+### Varnish Proxy 2 Container
+VARNISH_PROXY2_CACHE_SIZE=128m
+VARNISH_PROXY2_BACKEND_HOST=workspace
+VARNISH_PROXY2_SERVER=SERVER2
+
+### HAPROXY Container
+HAPROXY_HOST_HTTP_PORT=8085
+
 ### MISC
 # Replace with your Docker Host IP (will be appended to /etc/hosts)
 DOCKER_HOST_IP=10.0.75.1
diff --git a/haproxy/Dockerfile b/haproxy/Dockerfile
new file mode 100644
index 00000000..ceca7191
--- /dev/null
+++ b/haproxy/Dockerfile
@@ -0,0 +1,5 @@
+FROM dockercloud/haproxy:latest
+
+MAINTAINER ZeroC0D3 Team
+
+EXPOSE 80
diff --git a/hhvm/Dockerfile b/hhvm/Dockerfile
index cca2c55b..b4404a66 100644
--- a/hhvm/Dockerfile
+++ b/hhvm/Dockerfile
@@ -1,5 +1,7 @@
 FROM ubuntu:14.04
 
+MAINTAINER Mahmoud Zalt
+
 RUN apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0x5a16e7281be7a449
 
 RUN apt-get update -y \
diff --git a/mailhog/Dockerfile b/mailhog/Dockerfile
index 9fd31e4e..4b404d16 100644
--- a/mailhog/Dockerfile
+++ b/mailhog/Dockerfile
@@ -1,6 +1,6 @@
 FROM mailhog/mailhog
 
-Maintainer Mahmoud Zalt
+MAINTAINER Mahmoud Zalt
 
 CMD ["Mailhog"]
 
diff --git a/mssql/Dockerfile b/mssql/Dockerfile
index fd9f0761..de67f6d7 100644
--- a/mssql/Dockerfile
+++ b/mssql/Dockerfile
@@ -1,5 +1,7 @@
 FROM microsoft/mssql-server-linux
 
+MAINTAINER Mahmoud Zalt
+
 # Create config directory
 # an set it as WORKDIR
 RUN mkdir -p /usr/src/app
@@ -18,4 +20,4 @@
 VOLUME /var/opt/mssql
 
 EXPOSE 1433
-CMD /bin/bash ./entrypoint.sh
\ No newline at end of file
+CMD /bin/bash ./entrypoint.sh
diff --git a/php-worker/Dockerfile b/php-worker/Dockerfile
index 13c6d95f..58de7d5b 100644
--- a/php-worker/Dockerfile
+++ b/php-worker/Dockerfile
@@ -9,6 +9,8 @@
 
 FROM nielsvdoorn/laravel-supervisor
 
+MAINTAINER Mahmoud Zalt
+
 #
 #--------------------------------------------------------------------------
 # Optional Supervisord Configuration
diff --git a/postgres-postgis/Dockerfile b/postgres-postgis/Dockerfile
index c7d9ea6d..23e65862 100644
--- a/postgres-postgis/Dockerfile
+++ b/postgres-postgis/Dockerfile
@@ -1,5 +1,7 @@
 FROM mdillon/postgis:latest
 
+MAINTAINER Mahmoud Zalt
+
 CMD ["postgres"]
 
-EXPOSE 5432
\ No newline at end of file
+EXPOSE 5432
diff --git a/rabbitmq/Dockerfile b/rabbitmq/Dockerfile
index 48727d2d..ad73a2a1 100644
--- a/rabbitmq/Dockerfile
+++ b/rabbitmq/Dockerfile
@@ -1,5 +1,7 @@
 FROM rabbitmq
 
+MAINTAINER Mahmoud Zalt
+
 RUN rabbitmq-plugins enable --offline rabbitmq_management
 
 EXPOSE 15671 15672
diff --git a/varnish/Dockerfile b/varnish/Dockerfile
new file mode 100644
index 00000000..a4f61a99
--- /dev/null
+++ b/varnish/Dockerfile
@@ -0,0 +1,29 @@
+FROM debian:latest
+
+MAINTAINER ZeroC0D3 Team
+
+# Set Environment Variables
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install Dependencies
+RUN apt-get update && apt-get install -y apt-utils && apt-get upgrade -y
+RUN mkdir /home/site && mkdir /home/site/cache
+RUN apt-get install -y varnish
+RUN rm -rf /var/lib/apt/lists/*
+
+# Setting Configurations
+ENV VARNISH_CONFIG /etc/varnish/default.vcl
+ENV CACHE_SIZE 128m
+ENV VARNISHD_PARAMS -p default_ttl=3600 -p default_grace=3600
+ENV VARNISH_PORT 6081
+ENV BACKEND_HOST localhost
+ENV BACKEND_PORT 80
+
+ADD default.vcl /etc/varnish/default.vcl
+ADD start.sh /etc/varnish/start.sh
+
+RUN chmod +x /etc/varnish/start.sh
+
+CMD ["/etc/varnish/start.sh"]
+
+EXPOSE 8080
diff --git a/varnish/default.vcl b/varnish/default.vcl
new file mode 100644
index 00000000..155a863a
--- /dev/null
+++ b/varnish/default.vcl
@@ -0,0 +1,415 @@
+vcl 4.0;
+# Based on: https://github.com/mattiasgeniar/varnish-4.0-configuration-templates/blob/master/default.vcl
+
+import std;
+import directors;
+
+backend server1 { # Define one backend
+  .host = "${BACKEND_HOST}";    # IP or Hostname of backend
+  .port = "${BACKEND_PORT}";    # Port Apache or whatever is listening
+  .max_connections = 300;       # That's it
+
+  .probe = {
+    #.url = "/"; # short easy way (GET /)
+    # We prefer to only do a HEAD /
+    .request =
+      "HEAD / HTTP/1.1"
+      "Host: ${BACKEND_HOST}"
+      "Connection: close"
+      "User-Agent: Varnish Health Probe";
+
+    .interval  = 5s; # check the health of each backend every 5 seconds
+    .timeout   = 1s; # timing out after 1 second.
+    .window    = 5;  # If 3 out of the last 5 polls succeeded the backend is considered healthy, otherwise it will be marked as sick
+    .threshold = 3;
+  }
+
+  .first_byte_timeout     = 300s; # How long to wait before we receive a first byte from our backend?
+  .connect_timeout        = 5s;   # How long to wait for a backend connection?
+  .between_bytes_timeout  = 2s;   # How long to wait between bytes received from our backend?
+}
+
+acl purge {
+  # ACL we'll use later to allow purges
+  "localhost";
+  "127.0.0.1";
+  "::1";
+}
+
+#acl editors {
+#  # ACL to honor the "Cache-Control: no-cache" header to force a refresh but only from selected IPs
+#  "localhost";
+#  "127.0.0.1";
+#  "::1";
+#}
+
+sub vcl_init {
+  # Called when VCL is loaded, before any requests pass through it.
+  # Typically used to initialize VMODs.
+
+  new vdir = directors.round_robin();
+  vdir.add_backend(server1);
+  # vdir.add_backend(servern);
+}
+
+sub vcl_recv {
+  # Called at the beginning of a request, after the complete request has been received and parsed.
+  # Its purpose is to decide whether or not to serve the request, how to do it, and, if applicable,
+  # which backend to use.
+  # also used to modify the request
+
+  set req.backend_hint = vdir.backend(); # send all traffic to the vdir director
+
+  # Normalize the header, remove the port (in case you're testing this on various TCP ports)
+  set req.http.Host = regsub(req.http.Host, ":[0-9]+", "");
+
+  # Remove the proxy header (see https://httpoxy.org/#mitigate-varnish)
+  unset req.http.proxy;
+
+  # Normalize the query arguments
+  set req.url = std.querysort(req.url);
+
+  # Allow purging
+  if (req.method == "PURGE") {
+    if (!client.ip ~ purge) { # purge is the ACL defined at the beginning
+      # Not from an allowed IP? Then die with an error.
+      return (synth(405, "This IP is not allowed to send PURGE requests."));
+    }
+    # If you got this stage (and didn't error out above), purge the cached result
+    return (purge);
+  }
+
+  # Only deal with "normal" types
+  if (req.method != "GET" &&
+      req.method != "HEAD" &&
+      req.method != "PUT" &&
+      req.method != "POST" &&
+      req.method != "TRACE" &&
+      req.method != "OPTIONS" &&
+      req.method != "PATCH" &&
+      req.method != "DELETE") {
+    # Non-RFC2616 or CONNECT which is weird.
+    return (pipe);
+  }
+
+  # Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
+  if (req.http.Upgrade ~ "(?i)websocket") {
+    return (pipe);
+  }
+
+  # Only cache GET or HEAD requests. This makes sure the POST requests are always passed.
+  if (req.method != "GET" && req.method != "HEAD") {
+    return (pass);
+  }
+
+  # Some generic URL manipulation, useful for all templates that follow
+  # First remove the Google Analytics added parameters, useless for our backend
+  if (req.url ~ "(\?|&)(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=") {
+    set req.url = regsuball(req.url, "&(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "");
+    set req.url = regsuball(req.url, "\?(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "?");
+    set req.url = regsub(req.url, "\?&", "?");
+    set req.url = regsub(req.url, "\?$", "");
+  }
+
+  # Strip hash, server doesn't need it.
+  if (req.url ~ "\#") {
+    set req.url = regsub(req.url, "\#.*$", "");
+  }
+
+  # Strip a trailing ? if it exists
+  if (req.url ~ "\?$") {
+    set req.url = regsub(req.url, "\?$", "");
+  }
+
+  # Some generic cookie manipulation, useful for all templates that follow
+  # Remove the "has_js" cookie
+  set req.http.Cookie = regsuball(req.http.Cookie, "has_js=[^;]+(; )?", "");
+
+  # Remove any Google Analytics based cookies
+  set req.http.Cookie = regsuball(req.http.Cookie, "__utm.=[^;]+(; )?", "");
+  set req.http.Cookie = regsuball(req.http.Cookie, "_ga=[^;]+(; )?", "");
+  set req.http.Cookie = regsuball(req.http.Cookie, "_gat=[^;]+(; )?", "");
+  set req.http.Cookie = regsuball(req.http.Cookie, "utmctr=[^;]+(; )?", "");
+  set req.http.Cookie = regsuball(req.http.Cookie, "utmcmd.=[^;]+(; )?", "");
+  set req.http.Cookie = regsuball(req.http.Cookie, "utmccn.=[^;]+(; )?", "");
+
+  # Remove DoubleClick offensive cookies
+  set req.http.Cookie = regsuball(req.http.Cookie, "__gads=[^;]+(; )?", "");
+
+  # Remove the Quant Capital cookies (added by some plugin, all __qca)
+  set req.http.Cookie = regsuball(req.http.Cookie, "__qc.=[^;]+(; )?", "");
+
+  # Remove the AddThis cookies
+  set req.http.Cookie = regsuball(req.http.Cookie, "__atuv.=[^;]+(; )?", "");
+
+  # Remove a ";" prefix in the cookie if present
+  set req.http.Cookie = regsuball(req.http.Cookie, "^;\s*", "");
+
+  # Are there cookies left with only spaces or that are empty?
+  if (req.http.cookie ~ "^\s*$") {
+    unset req.http.cookie;
+  }
+
+  if (req.http.Cache-Control ~ "(?i)no-cache") {
+  #if (req.http.Cache-Control ~ "(?i)no-cache" && client.ip ~ editors) { # create the acl editors if you want to restrict the Ctrl-F5
+    # http://varnish.projects.linpro.no/wiki/VCLExampleEnableForceRefresh
+    # Ignore requests via proxy caches and badly behaved crawlers
+    # like msnbot that send no-cache with every request.
+    if (!(req.http.Via || req.http.User-Agent ~ "(?i)bot" || req.http.X-Purge)) {
+      #set req.hash_always_miss = true; # Doesn't seem to refresh the object in the cache
+      return(purge); # Couple this with restart in vcl_purge and X-Purge header to avoid loops
+    }
+  }
+
+  # Large static files are delivered directly to the end-user without
+  # waiting for Varnish to fully read the file first.
+  # Varnish 4 fully supports Streaming, so set do_stream in vcl_backend_response()
+  if (req.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip)(\?.*)?$") {
+    unset req.http.Cookie;
+    return (hash);
+  }
+
+  # Remove all cookies for static files
+  # A valid discussion could be held on this line: do you really need to cache static files that don't cause load? Only if you have memory left.
+  # Sure, there's disk I/O, but chances are your OS will already have these files in their buffers (thus memory).
+  # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
+  if (req.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
+    unset req.http.Cookie;
+    return (hash);
+  }
+
+  # Send Surrogate-Capability headers to announce ESI support to backend
+  set req.http.Surrogate-Capability = "key=ESI/1.0";
+
+  if (req.http.Authorization) {
+    # Not cacheable by default
+    return (pass);
+  }
+
+  return (hash);
+}
+
+sub vcl_pipe {
+  # Called upon entering pipe mode.
+  # In this mode, the request is passed on to the backend, and any further data from both the client
+  # and backend is passed on unaltered until either end closes the connection. Basically, Varnish will
+  # degrade into a simple TCP proxy, shuffling bytes back and forth. For a connection in pipe mode,
+  # no other VCL subroutine will ever get called after vcl_pipe.
+
+  # Note that only the first request to the backend will have
+  # X-Forwarded-For set. If you use X-Forwarded-For and want to
+  # have it set for all requests, make sure to have:
+  # set bereq.http.connection = "close";
+  # here. It is not set by default as it might break some broken web
+  # applications, like IIS with NTLM authentication.
+
+  # set bereq.http.Connection = "Close";
+
+  # Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
+  if (req.http.upgrade) {
+    set bereq.http.upgrade = req.http.upgrade;
+  }
+
+  return (pipe);
+}
+
+sub vcl_pass {
+  # Called upon entering pass mode. In this mode, the request is passed on to the backend, and the
+  # backend's response is passed on to the client, but is not entered into the cache. Subsequent
+  # requests submitted over the same client connection are handled normally.
+
+  # return (pass);
+}
+
+# The data on which the hashing will take place
+sub vcl_hash {
+  # Called after vcl_recv to create a hash value for the request. This is used as a key
+  # to look up the object in Varnish.
+
+  hash_data(req.url);
+
+  if (req.http.host) {
+    hash_data(req.http.host);
+  } else {
+    hash_data(server.ip);
+  }
+
+  # hash cookies for requests that have them
+  if (req.http.Cookie) {
+    hash_data(req.http.Cookie);
+  }
+}
+
+sub vcl_hit {
+  # Called when a cache lookup is successful.
+
+  if (obj.ttl >= 0s) {
+    # A pure unadulterated hit, deliver it
+    return (deliver);
+  }
+
+  # https://www.varnish-cache.org/docs/trunk/users-guide/vcl-grace.html
+  # When several clients are requesting the same page Varnish will send one request to the backend and place the others on hold while fetching one copy from the backend. In some products this is called request coalescing and Varnish does this automatically.
+  # If you are serving thousands of hits per second the queue of waiting requests can get huge. There are two potential problems - one is a thundering herd problem - suddenly releasing a thousand threads to serve content might send the load sky high. Secondly - nobody likes to wait. To deal with this we can instruct Varnish to keep the objects in cache beyond their TTL and to serve the waiting requests somewhat stale content.
+
+  # if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) {
+  #   return (deliver);
+  # } else {
+  #   return (fetch);
+  # }
+
+  # We have no fresh fish. Lets look at the stale ones.
+  if (std.healthy(req.backend_hint)) {
+    # Backend is healthy. Limit age to 10s.
+    if (obj.ttl + 10s > 0s) {
+      #set req.http.grace = "normal(limited)";
+      return (deliver);
+    } else {
+      # No candidate for grace. Fetch a fresh object.
+      return(fetch);
+    }
+  } else {
+    # backend is sick - use full grace
+    if (obj.ttl + obj.grace > 0s) {
+      #set req.http.grace = "full";
+      return (deliver);
+    } else {
+      # no graced object.
+      return (fetch);
+    }
+  }
+
+  # fetch & deliver once we get the result
+  return (fetch); # Dead code, keep as a safeguard
+}
+
+sub vcl_miss {
+  # Called after a cache lookup if the requested document was not found in the cache. Its purpose
+  # is to decide whether or not to attempt to retrieve the document from the backend, and which
+  # backend to use.
+
+  return (fetch);
+}
+
+# Handle the HTTP request coming from our backend
+sub vcl_backend_response {
+  # Called after the response headers have been successfully retrieved from the backend.
+
+  # Pause ESI request and remove Surrogate-Control header
+  if (beresp.http.Surrogate-Control ~ "ESI/1.0") {
+    unset beresp.http.Surrogate-Control;
+    set beresp.do_esi = true;
+  }
+
+  # Enable cache for all static files
+  # The same argument as the static caches from above: monitor your cache size, if you get data nuked out of it, consider giving up the static file cache.
+  # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
+  if (bereq.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
+    unset beresp.http.set-cookie;
+  }
+
+  # Large static files are delivered directly to the end-user without
+  # waiting for Varnish to fully read the file first.
+  # Varnish 4 fully supports Streaming, so use streaming here to avoid locking.
+  if (bereq.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip)(\?.*)?$") {
+    unset beresp.http.set-cookie;
+    set beresp.do_stream = true; # Check memory usage it'll grow in fetch_chunksize blocks (128k by default) if the backend doesn't send a Content-Length header, so only enable it for big objects
+  }
+
+  # Sometimes, a 301 or 302 redirect formed via Apache's mod_rewrite can mess with the HTTP port that is being passed along.
+  # This often happens with simple rewrite rules in a scenario where Varnish runs on :80 and Apache on :8080 on the same box.
+  # A redirect can then often redirect the end-user to a URL on :8080, where it should be :80.
+  # This may need finetuning on your setup.
+  #
+  # To prevent accidental replace, we only filter the 301/302 redirects for now.
+  if (beresp.status == 301 || beresp.status == 302) {
+    set beresp.http.Location = regsub(beresp.http.Location, ":[0-9]+", "");
+  }
+
+  # Set 2min cache if unset for static files
+  if (beresp.ttl <= 0s || beresp.http.Set-Cookie || beresp.http.Vary == "*") {
+    set beresp.ttl = 120s; # Important, you shouldn't rely on this, SET YOUR HEADERS in the backend
+    set beresp.uncacheable = true;
+    return (deliver);
+  }
+
+  # Don't cache 50x responses
+  if (beresp.status == 500 || beresp.status == 502 || beresp.status == 503 || beresp.status == 504) {
+    return (abandon);
+  }
+
+  # Allow stale content, in case the backend goes down.
+  # make Varnish keep all objects for 6 hours beyond their TTL
+  set beresp.grace = 6h;
+
+  return (deliver);
+}
+
+# The routine when we deliver the HTTP request to the user
+# Last chance to modify headers that are sent to the client
+sub vcl_deliver {
+  # Called before a cached object is delivered to the client.
+
+  if (obj.hits > 0) { # Add debug header to see if it's a HIT/MISS and the number of hits, disable when not needed
+    set resp.http.X-Cache = "HIT";
+  } else {
+    set resp.http.X-Cache = "MISS";
+  }
+
+  # Please note that obj.hits behaviour changed in 4.0, now it counts per objecthead, not per object
+  # and obj.hits may not be reset in some cases where bans are in use. See bug 1492 for details.
+  # So take hits with a grain of salt
+  set resp.http.X-Cache-Hits = obj.hits;
+
+  # Remove some headers: PHP version
+  unset resp.http.X-Powered-By;
+
+  # Remove some headers: Apache version & OS
+  unset resp.http.Server;
+  unset resp.http.X-Drupal-Cache;
+  unset resp.http.X-Varnish;
+  unset resp.http.Via;
+  unset resp.http.Link;
+  unset resp.http.X-Generator;
+  unset resp.http.X-Debug-Token;
+  unset resp.http.X-Debug-Token-Link;
+  set resp.http.Server = "${VARNISH_SERVER}";
+  set resp.http.X-Powered-By = "MSI";
+
+  return (deliver);
+}
+
+sub vcl_purge {
+  # Only handle actual PURGE HTTP methods, everything else is discarded
+  if (req.method != "PURGE") {
+    # restart request
+    set req.http.X-Purge = "Yes";
+    return(restart);
+  }
+}
+
+sub vcl_synth {
+  if (resp.status == 720) {
+    # We use this special error status 720 to force redirects with 301 (permanent) redirects
+    # To use this, call the following from anywhere in vcl_recv: return (synth(720, "http://host/new.html"));
+    set resp.http.Location = resp.reason;
+    set resp.status = 301;
+    return (deliver);
+  } elseif (resp.status == 721) {
+    # And we use error status 721 to force redirects with a 302 (temporary) redirect
+    # To use this, call the following from anywhere in vcl_recv: return (synth(721, "http://host/new.html"));
+    set resp.http.Location = resp.reason;
+    set resp.status = 302;
+    return (deliver);
+  }
+
+  return (deliver);
+}
+
+
+sub vcl_fini {
+  # Called when VCL is discarded only after all requests have exited the VCL.
+  # Typically used to clean up VMODs.
+
+  return (ok);
+}
diff --git a/varnish/start.sh b/varnish/start.sh
new file mode 100644
index 00000000..e14511a9
--- /dev/null
+++ b/varnish/start.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -e
+
+for name in BACKEND_PORT BACKEND_HOST VARNISH_SERVER
+do
+  eval value=\$$name
+  sed -i "s|\${${name}}|${value}|g" /etc/varnish/default.vcl
+done
+
+exec bash -c \
+  "exec varnishd \
+    -a :$VARNISH_PORT \
+    -T localhost:6082 \
+    -F -u varnish \
+    -f $VARNISH_CONFIG \
+    -s malloc,$CACHE_SIZE \
+    $VARNISHD_PARAMS"
\ No newline at end of file