Varnish 4.1 configuration file: default.vcl

# This is a Varnish 4.x VCL file
vcl 4.0;

backend default {
    .host = "127.0.0.1";
    .port = "81";
    .probe = {
        .url = "/ping";
        .timeout   = 1s;
        .interval  = 10s;
        .window    = 5;
        .threshold = 2;
    }
    .first_byte_timeout     = 300s;   # How long to wait before we receive a first byte from our backend?
    .connect_timeout        = 5s;     # How long to wait for a backend connection?
    .between_bytes_timeout  = 2s;     # How long to wait between bytes received from our backend?
}

backend web1 {
    .host = "192.168.20.117";
    .port = "80";
}

backend web2 {
    .host = "192.168.20.118";
    .port = "80";
}
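
# Note (a sketch, not part of the original file): the health probe above is attached only
# to the "default" backend. The same checks could be declared once as a standalone probe
# and referenced from the web1/web2 definitions above via ".probe = healthcheck;":
#
# probe healthcheck {
#     .url       = "/ping";
#     .timeout   = 1s;
#     .interval  = 10s;
#     .window    = 5;
#     .threshold = 2;
# }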

# Below is an example director that balances requests round-robin
import directors;
sub vcl_init {
    new cluster1 = directors.round_robin();
    cluster1.add_backend(web1);    # Backend web1 defined above
    cluster1.add_backend(web2);    # Backend web2 defined above
}

# Below is an example director that hashes on the client IP (sticky sessions)
#sub vcl_init {
#    new cluster2 = directors.hash();
#    cluster2.add_backend(web1, 1.0);    # Backend web1 defined above (the hash director requires a weight)
#    cluster2.add_backend(web2, 1.0);    # Backend web2 defined above (the hash director requires a weight)
#}

acl purge {
    # For now, I'll only allow purges coming from localhost
    "127.0.0.1";
    "localhost";
}

# Handle the HTTP request received by the client
sub vcl_recv {
    # Choose the round-robin backend
    set req.backend_hint = cluster1.backend();

    # Or choose the client-IP backend (sticky sessions)
    #set req.backend_hint = cluster2.backend(client.identity);

    # shortcut for DFind requests
    if (req.url ~ "^/w00tw00t") {
        return (synth(404, "Not Found"));
    }

    if (req.restarts == 0) {
        if (req.http.X-Forwarded-For) {
            set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
        } else {
            set req.http.X-Forwarded-For = client.ip;
        }
    }

    # Normalize the header, remove the port (in case you're testing this on various TCP ports)
    set req.http.Host = regsub(req.http.Host, ":[0-9]+", "");

    # Allow purging
    if (req.method == "PURGE") {
        if (!client.ip ~ purge) {
            # Not from an allowed IP? Then die with an error.
            return (synth(405, "This IP is not allowed to send PURGE requests."));
        }

        # If you got to this stage (and didn't error out above), purge the cached result
        return (purge);
    }
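
    # Usage sketch (not part of the original file; the path is hypothetical): with the
    # ACL above in place, a purge can be issued from the Varnish host itself, e.g.
    #     curl -X PURGE http://127.0.0.1/some/path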

    # Only deal with "normal" types
    if (req.method != "GET" &&
            req.method != "HEAD" &&
            req.method != "PUT" &&
            req.method != "POST" &&
            req.method != "TRACE" &&
            req.method != "OPTIONS" &&
            req.method != "PATCH" &&
            req.method != "DELETE") {
        /* Non-RFC2616 or CONNECT which is weird. */
        return (pipe);
    }

    # Only cache GET or HEAD requests. This makes sure the POST requests are always passed.
    if (req.method != "GET" && req.method != "HEAD") {
        return (pass);
    }

    # Grace mode: in Varnish 4 the grace period is configured on the backend response
    # (see "set beresp.grace = 6h;" in vcl_backend_response below) rather than on the
    # request. Grace allows otherwise "outdated" cache entries to still be served while
    # the backend is unavailable to refresh them. This may not be desirable for you,
    # but showing a Varnish Guru Meditation error probably isn't either. A commented
    # sketch of steering stale delivery per backend health follows the vcl_hit routine
    # further down.

    # Some generic URL manipulation, useful for all templates that follow
    # First remove the Google Analytics added parameters, useless for our backend
    if (req.url ~ "(\?|&)(utm_source|utm_medium|utm_campaign|gclid|cx|ie|cof|siteurl)=") {
        set req.url = regsuball(req.url, "&(utm_source|utm_medium|utm_campaign|gclid|cx|ie|cof|siteurl)=([A-Za-z0-9_\-\.%25]+)", "");
        set req.url = regsuball(req.url, "\?(utm_source|utm_medium|utm_campaign|gclid|cx|ie|cof|siteurl)=([A-Za-z0-9_\-\.%25]+)", "?");
        set req.url = regsub(req.url, "\?&", "?");
        set req.url = regsub(req.url, "\?$", "");
    }

    # Strip hash, server doesn't need it.
    if (req.url ~ "\#") {
        set req.url = regsub(req.url, "\#.*$", "");
    }

    # Strip a trailing ? if it exists
    if (req.url ~ "\?$") {
        set req.url = regsub(req.url, "\?$", "");
    }

    # Some generic cookie manipulation, useful for all templates that follow
    # Remove the "has_js" cookie
    set req.http.Cookie = regsuball(req.http.Cookie, "has_js=[^;]+(; )?", "");

    # Remove any Google Analytics based cookies
    set req.http.Cookie = regsuball(req.http.Cookie, "__utm.=[^;]+(; )?", "");
    set req.http.Cookie = regsuball(req.http.Cookie, "_ga=[^;]+(; )?", "");
    set req.http.Cookie = regsuball(req.http.Cookie, "utmctr=[^;]+(; )?", "");
    set req.http.Cookie = regsuball(req.http.Cookie, "utmcmd.=[^;]+(; )?", "");
    set req.http.Cookie = regsuball(req.http.Cookie, "utmccn.=[^;]+(; )?", "");

    # Remove the Quant Capital cookies (added by some plugin, all __qca)
    set req.http.Cookie = regsuball(req.http.Cookie, "__qc.=[^;]+(; )?", "");

    # Remove the AddThis cookies
    set req.http.Cookie = regsuball(req.http.Cookie, "__atuvc=[^;]+(; )?", "");

    # Remove a ";" prefix in the cookie if present
    set req.http.Cookie = regsuball(req.http.Cookie, "^;\s*", "");

    # Are there cookies left with only spaces or that are empty?
    if (req.http.cookie ~ "^\s*$") {
        unset req.http.cookie;
    }

    # Normalize Accept-Encoding header
    # straight from the manual: https://www.varnish-cache.org/docs/3.0/tutorial/vary.html
    if (req.http.Accept-Encoding) {
        if (req.url ~ "\.(jpg|png|gif|gz|tgz|bz2|tbz|ogg)$") {
            # No point in compressing these
            unset req.http.Accept-Encoding;
        } elsif (req.http.Accept-Encoding ~ "gzip") {
            set req.http.Accept-Encoding = "gzip";
        } elsif (req.http.Accept-Encoding ~ "deflate") {
            set req.http.Accept-Encoding = "deflate";
        } else {
            # unknown algorithm
            unset req.http.Accept-Encoding;
        }
    }

    # Large static files are piped, so they are delivered directly to the end-user
    # without being cached. Note that Varnish 4 already streams backend fetches to the
    # client by default (beresp.do_stream), so piping here mainly keeps these large
    # objects out of the cache.
    if (req.url ~ "^[^?]*\.(rar|tar|tgz|gz|wav|zip)(\?.*)?$") {
        unset req.http.Cookie;
        return (pipe);
    }

    # Remove all cookies for static files
    # A valid discussion could be held on this line: do you really need to cache static files that don't cause load? Only if you have memory left.
    # Sure, there's disk I/O, but chances are your OS will already have these files in its buffers (thus memory).
    # Before you blindly enable this, have a read here: http://mattiasgeniar.be/2012/11/28/stop-caching-static-files/
    if (req.url ~ "^[^?]*\.(bmp|bz2|mp3|css|doc|eot|flv|gif|gz|ico|jpeg|jpg|apk|js|less|pdf|png|rtf|swf|txt|woff|xml)(\?.*)?$") {
        unset req.http.Cookie;
        return (hash);
    }

    # Send Surrogate-Capability headers to announce ESI support to backend
    set req.http.Surrogate-Capability = "key=ESI/1.0";

    if (req.http.Authorization) {
        # Not cacheable by default
        return (pass);
    }

    return (hash);
}

sub vcl_pipe {
    # Note that only the first request to the backend will have
    # X-Forwarded-For set.  If you use X-Forwarded-For and want to
    # have it set for all requests, make sure to have:
    # set bereq.http.connection = "close";
    # here.  It is not set by default as it might break some broken web
    # applications, like IIS with NTLM authentication.

    #set bereq.http.Connection = "Close";
    return (pipe);
}

sub vcl_pass {
#    return (pass);
}

# The data on which the hashing will take place
sub vcl_hash {
    hash_data(req.url);

    if (req.http.host) {
        hash_data(req.http.host);
    } else {
        hash_data(server.ip);
    }

    # hash cookies for requests that have them
    if (req.http.Cookie) {
        hash_data(req.http.Cookie);
    }
}

sub vcl_hit {

    return (deliver);
}
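
# A minimal sketch (not part of the original file) of how stale/grace delivery could be
# steered per backend health in Varnish 4.1, as referenced from vcl_recv above. It
# assumes "import std;" at the top of the file and would replace the minimal vcl_hit
# routine above rather than be added alongside it:
#
# sub vcl_hit {
#     if (obj.ttl >= 0s) {
#         # Fresh object: deliver it
#         return (deliver);
#     }
#     if (std.healthy(req.backend_hint)) {
#         # Backend is healthy: only accept slightly stale objects
#         if (obj.ttl + 10s > 0s) {
#             return (deliver);
#         }
#         return (miss);
#     }
#     # Backend is sick: serve anything still within its grace period
#     if (obj.ttl + obj.grace > 0s) {
#         return (deliver);
#     }
#     return (miss);
# }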

sub vcl_miss {

    return (fetch);
}

# Handle the HTTP response coming from our backend
sub vcl_backend_response {
    # Enable ESI processing and remove the Surrogate-Control header
    if (beresp.http.Surrogate-Control ~ "ESI/1.0") {
        unset beresp.http.Surrogate-Control;
        set beresp.do_esi = true;
    }

    # Enable cache for all static files
    # The same argument as the static caches from above: monitor your cache size, if you get data nuked out of it, consider giving up the static file cache.
    # Before you blindly enable this, have a read here: http://mattiasgeniar.be/2012/11/28/stop-caching-static-files/
    if (bereq.url ~ "^[^?]*\.(bmp|bz2|css|doc|eot|flv|gif|gz|ico|jpeg|jpg|js|less|mp[34]|pdf|png|rar|rtf|swf|tar|tgz|txt|wav|woff|xml|zip)(\?.*)?$") {
        unset beresp.http.set-cookie;
    }
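
    # A possible extension (sketch, not in the original file): inside the if-block above
    # you could also force an explicit TTL when the backend sends no caching headers, e.g.
    #     if (beresp.ttl <= 0s) { set beresp.ttl = 1d; }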

    # Sometimes, a 301 or 302 redirect formed via Apache's mod_rewrite can mess with the HTTP port that is being passed along.
    # This often happens with simple rewrite rules in a scenario where Varnish runs on :80 and Apache on :8080 on the same box.
    # A redirect can then often redirect the end-user to a URL on :8080, where it should be :80.
    # This may need finetuning on your setup.
    #
    # To prevent accidental replace, we only filter the 301/302 redirects for now.
    if (beresp.status == 301 || beresp.status == 302) {
        set beresp.http.Location = regsub(beresp.http.Location, ":[0-9]+", "");
    }

    # Don't cache responses with no TTL left, a Set-Cookie header or "Vary: *": mark them as uncacheable (hit-for-pass) for 2 minutes instead
    if (beresp.ttl <= 0s || beresp.http.Set-Cookie || beresp.http.Vary == "*") {
        set beresp.ttl = 120s;
        set beresp.uncacheable = true;
        return (deliver);
    }

    # Allow stale content, in case the backend goes down.
    set beresp.grace = 6h;

    return (deliver);
}

# The routine when we deliver the HTTP response to the user
# Last chance to modify headers that are sent to the client
sub vcl_deliver {
    if (obj.hits > 0) {
        set resp.http.X-Cache = "Hit";
    } else {
        set resp.http.X-Cache = "Miss";
    }

    # Remove some headers: PHP version
    unset resp.http.X-Powered-By;

    # Remove some headers: Apache version & OS, plus Varnish/Drupal fingerprint headers
    unset resp.http.Server;
    unset resp.http.X-Drupal-Cache;
    unset resp.http.X-Varnish;
    unset resp.http.Via;
    unset resp.http.Link;

    return (deliver);
}

sub vcl_synth {
    if (resp.status == 720) {
        # We use this special status code 720 to force a permanent (301) redirect.
        # To use it, call the following from anywhere in vcl_recv: return (synth(720, "http://host/new.html"));
        set resp.status = 301;
        set resp.http.Location = resp.reason;
        return (deliver);
    } elseif (resp.status == 721) {
        # And we use status code 721 to force a temporary (302) redirect.
        # To use it, call the following from anywhere in vcl_recv: return (synth(721, "http://host/new.html"));
        set resp.status = 302;
        set resp.http.Location = resp.reason;
        return (deliver);
    }

    return (deliver);
}
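
# Usage sketch for the 720/721 redirects above (hypothetical URLs, not part of the
# original file): somewhere in vcl_recv you could add
#
#     if (req.url ~ "^/old-page$") {
#         return (synth(720, "http://" + req.http.Host + "/new-page"));
#     }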

sub vcl_init {
    return (ok);
}

sub vcl_fini {
    return (ok);
}