# Ignore requests via proxy caches and badly behaved crawlers
# like msnbot that send no-cache with every request.
if (! (req.http.Via || req.http.User-Agent ~ "(?i)bot" || req.http.X-Purge)) {
#set req.hash_always_miss = true; # Doesn't seem to refresh the object in the cache
return (purge); # Couple this with a restart in vcl_purge and the X-Purge header to avoid loops (see the commented vcl_purge sketch after this subroutine)
}
}
# Large static files are delivered directly to the end-user without
# waiting for Varnish to fully read the file first.
# Varnish 4 fully supports Streaming, so set do_stream in vcl_backend_response()
if (req.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip)(\?.*)?$") {
unset req.http.Cookie;
return (hash);
}
# Remove all cookies for static files
# A valid discussion could be held on this line: do you really need to cache static files that don't cause load? Only if you have memory left.
# Sure, there's disk I/O, but chances are your OS will already have these files in their buffers (thus memory).
# Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
if (req.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
unset req.http.Cookie;
return (hash);
}
# Send Surrogate-Capability headers to announce ESI support to backend
set req.http.Surrogate-Capability = "key=ESI/1.0";
if (req.http.Authorization) {
# Not cacheable by default
return (pass);
}
return (hash);
}
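# A minimal, commented-out sketch (not active; adapt before enabling) of the vcl_purge coupling
# referenced in vcl_recv above: after a purge triggered by the no-cache logic, restart the request
# and mark it with X-Purge so the restarted request is not purged again in vcl_recv.
#sub vcl_purge {
#  # Requests that arrived as a real PURGE stop here; a GET that reached return (purge) via the
#  # no-cache logic is restarted so the client immediately gets a freshly fetched copy.
#  if (req.method != "PURGE") {
#    set req.http.X-Purge = "Yes";
#    return (restart);
#  }
#}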
sub vcl_pipe {
# Called upon entering pipe mode.
# In this mode, the request is passed on to the backend, and any further data from both the client
# and backend is passed on unaltered until either end closes the connection. Basically, Varnish will
# degrade into a simple TCP proxy, shuffling bytes back and forth. For a connection in pipe mode,
# no other VCL subroutine will ever get called after vcl_pipe.
# Note that only the first request to the backend will have
# X-Forwarded-For set. If you use X-Forwarded-For and want to
# have it set for all requests, make sure to have:
# set bereq.http.connection = "close";
# here. It is not set by default as it might break some broken web
# applications, like IIS with NTLM authentication.
# set bereq.http.Connection = "Close";
# Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
if (req.http.upgrade) {
set bereq.http.upgrade = req.http.upgrade;
}
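# Note: the websocket example linked above also relies on vcl_recv piping Upgrade requests,
# e.g. something along these lines (verify it exists earlier in this file):
#   if (req.http.Upgrade ~ "(?i)websocket") {
#     return (pipe);
#   }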
return (pipe);
}
sub vcl_pass {
# Called upon entering pass mode. In this mode, the request is passed on to the backend, and the
# backend's response is passed on to the client, but is not entered into the cache. Subsequent
# requests submitted over the same client connection are handled normally.
# return (pass);
}
# The data on which the hashing will take place
sub vcl_hash {
# Called after vcl_recv to create a hash value for the request. This is used as a key
# to look up the object in Varnish.
hash_data(req.url);
if (req.http.host) {
hash_data(req.http.host);
} else {
hash_data(server.ip);
}
return (lookup);
}
sub vcl_hit {
# Called when a cache lookup is successful.
# When several clients are requesting the same page, Varnish will send one request to the backend and place the others on hold while fetching one copy from the backend. In some products this is called request coalescing, and Varnish does it automatically.
# If you are serving thousands of hits per second, the queue of waiting requests can get huge. There are two potential problems: one is a thundering herd problem - suddenly releasing a thousand threads to serve content might send the load sky high. Secondly, nobody likes to wait. To deal with this, we can instruct Varnish to keep objects in cache beyond their TTL and serve the waiting requests somewhat stale content.
# We have no fresh fish. Let's look at the stale ones. (std.healthy requires "import std;" at the top of this VCL.)
# if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) {
# return (deliver);
# } else {
# return (fetch);
# }
# Nothing is returned here, so the builtin vcl_hit decides between deliver and fetch.
}
sub vcl_miss {
# Called after a cache lookup if the requested document was not found in the cache. Its purpose
# is to decide whether or not to attempt to retrieve the document from the backend, and which
# backend to use.
return (fetch);
}
# Handle the HTTP request coming from our backend
sub vcl_backend_response {
# Called after the response headers have been successfully retrieved from the backend.
# Enable ESI processing and remove the Surrogate-Control header when the backend announces ESI support
if (beresp.http.Surrogate-Control ~ "ESI/1.0") {
unset beresp.http.Surrogate-Control;
set beresp.do_esi = true;
}
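# Illustration (example values, not taken from this file): a backend that saw our
# "Surrogate-Capability: key=ESI/1.0" request header can reply with
#   Surrogate-Control: content="ESI/1.0"
# and a body containing tags such as <esi:include src="/header.html"/>, which Varnish will
# now process because do_esi is enabled above.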
# Enable cache for all static files
# The same argument as for the static-file rules above: monitor your cache size, and if data gets nuked out of it, consider giving up the static file cache.
# Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
if (bereq.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
unset beresp.http.set-cookie;
}
# Large static files are delivered directly to the end-user without
# waiting for Varnish to fully read the file first.
# Varnish 4 fully supports Streaming, so use streaming here to avoid locking.
if (bereq.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip)(\?.*)?$") {
unset beresp.http.set-cookie;
set beresp.do_stream = true; # Check memory usage: it'll grow in fetch_chunksize blocks (128k by default) if the backend doesn't send a Content-Length header, so only enable it for big objects
}
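# A possible refinement (sketch, not part of this template): only stream objects whose size is
# known and large, to limit the fetch_chunksize growth mentioned above. Requires "import std;"
# at the top of this VCL; the 5 MB threshold is an arbitrary example value.
#   if (std.integer(beresp.http.Content-Length, 0) > 5242880) {
#     set beresp.do_stream = true;
#   }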
# Sometimes, a 301 or 302 redirect formed via Apache's mod_rewrite can mess with the HTTP port that is being passed along.
# This often happens with simple rewrite rules in a scenario where Varnish runs on :80 and Apache on :8080 on the same box.
# A redirect can then often redirect the end-user to a URL on :8080, where it should be :80.
# This may need fine-tuning for your setup.
#
# To prevent accidental replacements, we only filter 301/302 redirects for now.
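# For example (hypothetical values): a backend redirect to
#   Location: http://www.example.com:8080/new-page
# is rewritten below to
#   Location: http://www.example.com/new-page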
if (beresp.status == 301 || beresp.status == 302) {
set beresp.http.Location = regsub(beresp.http.Location, ":[0-9]+", "");