nrk-wcache-001/paramjson/

{
    "workspace_thread": {
        "value": "2k",
        "default": "2k",
        "unit": "bytes",
        "description": "\nMinimum is: 0.25k\nMaximum is: 8k\n\nBytes of auxiliary workspace per thread.\nThis workspace is used for certain temporary data structures\nduring the operation of a worker thread.\nOne use is for the io-vectors for writing requests and\nresponses to sockets, having too little space will result in\nmore writev(2) system calls, having too much just wastes the\nspace.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "workspace_session": {
        "value": "0.50k",
        "default": "0.50k",
        "unit": "bytes",
        "description": "\nMinimum is: 0.25k\n\nBytes of workspace for session and TCP connection addresses. \nIf larger than 4k, use a multiple of 4k for VM efficiency.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "workspace_client": {
        "value": "64k",
        "default": "64k",
        "unit": "bytes",
        "description": "\nMinimum is: 9k\n\nBytes of HTTP protocol workspace for client HTTP req/resp.  If\nlarger than 4k, use a multiple of 4k for VM efficiency.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "workspace_backend": {
        "value": "64k",
        "default": "64k",
        "unit": "bytes",
        "description": "\nMinimum is: 1k\n\nBytes of HTTP protocol workspace for backend HTTP req/resp.  If\nlarger than 4k, use a multiple of 4k for VM efficiency.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "waiter": {
        "value": "epoll (possible values: epoll, poll)",
        "default": "epoll (possible values: epoll, poll)",
        "unit": "",
        "description": "\n\nSelect the waiter kernel interface.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\nNB: Do not change this parameter, unless a developer tells you\nto do so.\n\n"
    },
    "vsm_space": {
        "value": "1M",
        "default": "1M",
        "unit": "bytes",
        "description": "\nMinimum is: 1M\n\nThe amount of space to allocate for stats counters in the VSM\nmemory segment.  If you make this too small, some counters will\nbe invisible.  Making it too large just costs memory resources.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "vsl_space": {
        "value": "80M",
        "default": "80M",
        "unit": "bytes",
        "description": "\nMinimum is: 1M\n\nThe amount of space to allocate for the VSL fifo buffer in the\nVSM memory segment.  If you make this too small,\nvarnish{ncsa|log} etc will not be able to keep up.  Making it\ntoo large just costs memory resources.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "vsl_reclen": {
        "value": "255b",
        "default": "255b",
        "unit": "bytes",
        "description": "\nMinimum is: 16b\nMaximum is: 4084\n\nMaximum number of bytes in SHM log record.\n\nThe maximum tracks the vsl_buffer parameter - 12 bytes.\n\n"
    },
    "vsl_mask": {
        "value": "-VCL_trace,-WorkThread,-Hash",
        "default": "-VCL_trace,-WorkThread,-Hash",
        "unit": "",
        "description": "\n\nMask individual VSL messages from being logged.\ndefault             Set default value\n\nUse +/- prefix in front of a VSL tag name to mask/unmask\nindividual VSL messages.\n\n"
    },
    "vsl_buffer": {
        "value": "4k",
        "default": "4k",
        "unit": "bytes",
        "description": "\nMinimum is: 267\n\nBytes of (req-/backend-)workspace dedicated to buffering VSL\nrecords.\nSetting this too high costs memory, setting it too low will\ncause more VSL flushes and likely increase lock-contention on\nthe VSL mutex.\n\nThe minimum tracks the vsl_reclen parameter + 12 bytes.\n\n"
    },
    "vmod_dir": {
        "value": "/usr/lib64/varnish-plus/vmods",
        "default": "/usr/lib64/varnish-plus/vmods",
        "unit": "",
        "description": "\n\nDirectory where VCL modules are to be found.\n\n"
    },
    "vcl_dir": {
        "value": "/etc/varnish",
        "default": "/etc/varnish",
        "unit": "",
        "description": "\n\nDirectory from which relative VCL filenames (vcl.load and\ninclude) are opened.\n\n"
    },
    "vcc_unsafe_path": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nAllow '/' in vmod & include paths.\nAllow 'import ... from ...'.\n\n"
    },
    "vcc_err_unref": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nUnreferenced VCL objects result in error.\n\n"
    },
    "vcc_allow_inline_c": {
        "value": "off",
        "default": "off",
        "unit": "bool",
        "description": "\n\nAllow inline C code in VCL.\n\n"
    },
    "user": {
        "value": "varnish (498)",
        "default": "nobody (99)",
        "unit": "",
        "description": "\n\nThe unprivileged user to run as.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\nNB: This parameter only works if varnishd is run as root.\n\n"
    },
    "timeout_reqbody": {
        "value": "0.000",
        "default": "0.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nMax time to receive client request body. Indefinitely if zero.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "timeout_req": {
        "value": "2.000",
        "default": "2.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nMax time to receive client request headers, measured from\nfirst non-white-space character to double CRNL.\n\n"
    },
    "timeout_linger": {
        "value": "0.050",
        "default": "0.050",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nHow long the worker thread lingers on an idle session before\nhanding it over to the waiter.\nWhen sessions are reused, as much as half of all reuses happen\nwithin the first 100 msec of the previous request completing.\nSetting this too high results in worker threads not doing\nanything for their keep, setting it too low just means that\nmore sessions take a detour around the waiter.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "timeout_idle": {
        "value": "5.000",
        "default": "5.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nIdle timeout for client connections.\nA connection is considered idle until we receive a\nnon-white-space character on it.\n\n"
    },
    "thread_stats_rate": {
        "value": "10",
        "default": "10",
        "unit": "requests",
        "description": "\nMinimum is: 0\n\nWorker threads accumulate statistics, and dump these into the\nglobal stats counters if the lock is free when they finish a\njob (request/fetch etc.)\nThis parameter defines the maximum number of jobs a worker\nthread may handle, before it is forced to dump its accumulated\nstats into the global counters.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "thread_queue_limit": {
        "value": "20",
        "default": "20",
        "unit": "",
        "description": "\nMinimum is: 0\n\nPermitted queue length per thread-pool.\n\nThis sets the number of requests we will queue, waiting for an\navailable thread.  Above this limit sessions will be dropped\ninstead of queued.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "thread_pools": {
        "value": "2",
        "default": "2",
        "unit": "pools",
        "description": "\nMinimum is: 1\n\nNumber of worker thread pools.\n\nIncreasing the number of worker pools decreases lock contention.\n\nToo many pools waste CPU and RAM resources, and more than one\npool for each CPU is probably detrimental to performance.\n\nCan be increased on the fly, but decreases require a restart to\ntake effect.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "thread_pool_timeout": {
        "value": "120.000",
        "default": "300.000",
        "unit": "seconds",
        "description": "\nMinimum is: 10.000\n\nThread idle threshold.\n\nThreads in excess of thread_pool_min, which have been idle for\nat least this long, will be destroyed.\n\nMinimum is 10 seconds.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "thread_pool_stack": {
        "value": "48k",
        "default": "48k",
        "unit": "bytes",
        "description": "\nMinimum is: 16k\n\nWorker thread stack size.\nThis will likely be rounded up to a multiple of 4k (or whatever\nthe page_size might be) by the kernel.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "thread_pool_min": {
        "value": "1000",
        "default": "100",
        "unit": "threads",
        "description": "\nMaximum is: 5000\n\nThe minimum number of worker threads in each pool.\n\nIncreasing this may help ramp up faster from low load\nsituations or when threads have expired.\n\nMinimum is 10 threads.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "thread_pool_max": {
        "value": "5000",
        "default": "5000",
        "unit": "threads",
        "description": "\nMinimum is: 1000\n\nThe maximum number of worker threads in each pool.\n\nDo not set this higher than you have to, since excess worker\nthreads soak up RAM and CPU and generally just get in the way\nof getting work done.\n\nMinimum is 10 threads.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "thread_pool_fail_delay": {
        "value": "0.200",
        "default": "0.200",
        "unit": "seconds",
        "description": "\nMinimum is: 0.010\n\nWait at least this long after a failed thread creation before\ntrying to create another thread.\n\nFailure to create a worker thread is often a sign that the end\nis near, because the process is running out of some resource. \nThis delay tries to not rush the end on needlessly.\n\nIf thread creation failures are a problem, check that\nthread_pool_max is not too high.\n\nIt may also help to increase thread_pool_timeout and\nthread_pool_min, to reduce the rate at which threads are\ndestroyed and later recreated.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "thread_pool_destroy_delay": {
        "value": "1.000",
        "default": "1.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.010\n\nWait this long after destroying a thread.\n\nThis controls the decay of thread pools when idle(-ish).\n\nMinimum is 0.01 seconds.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "thread_pool_add_delay": {
        "value": "0.000",
        "default": "0.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nWait at least this long after creating a thread.\n\nSome (buggy) systems may need a short (sub-second) delay\nbetween creating threads.\nSet this to a few milliseconds if you see the 'threads_failed'\ncounter grow too much.\n\nSetting this too high results in insufficient worker threads.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "tcp_keepalive_time": {
        "value": "7200.000",
        "default": "7200.000",
        "unit": "seconds",
        "description": "\nMinimum is: 1.000\nMaximum is: 7200.000\n\nThe number of seconds a connection needs to be idle before TCP\nbegins sending out keep-alive probes.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "tcp_keepalive_probes": {
        "value": "9",
        "default": "9",
        "unit": "probes",
        "description": "\nMinimum is: 1\nMaximum is: 100\n\nThe maximum number of TCP keep-alive probes to send before\ngiving up and killing the connection if no response is obtained\nfrom the other end.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "tcp_keepalive_intvl": {
        "value": "75.000",
        "default": "75.000",
        "unit": "seconds",
        "description": "\nMinimum is: 1.000\nMaximum is: 100.000\n\nThe number of seconds between TCP keep-alive probes.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "syslog_cli_traffic": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nLog all CLI traffic to syslog(LOG_INFO).\n\n"
    },
    "ssl_buffer": {
        "value": "16k",
        "default": "16k",
        "unit": "bytes",
        "description": "\nMinimum is: 0.25k\n\nSize of SSL buffer.\n\n"
    },
    "sigsegv_handler": {
        "value": "off",
        "default": "off",
        "unit": "bool",
        "description": "\n\nInstall a signal handler which tries to dump debug information\non segmentation faults.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "shortlived": {
        "value": "10.000",
        "default": "10.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nObjects created with (ttl+grace+keep) shorter than this are\nalways put in transient storage.\n\n"
    },
    "shm_reclen": {
        "value": "255b",
        "default": "255b",
        "unit": "bytes",
        "description": "\nMinimum is: 16b\nMaximum is: 4084\n\nOld name for vsl_reclen, use that instead.\n\n"
    },
    "session_max": {
        "value": "100000",
        "default": "100000",
        "unit": "sessions",
        "description": "\nMinimum is: 1000\n\nMaximum number of sessions we will allocate from one pool\nbefore just dropping connections.\nThis is mostly an anti-DoS measure, and setting it plenty high\nshould not hurt, as long as you have the memory for it.\n\n"
    },
    "send_timeout": {
        "value": "600.000",
        "default": "600.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nSend timeout for client connections. If the HTTP response\nhasn't been transmitted in this many\nseconds the session is closed.\nSee setsockopt(2) under SO_SNDTIMEO for more information.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "rush_exponent": {
        "value": "3",
        "default": "3",
        "unit": "requests per request",
        "description": "\nMinimum is: 2\n\nHow many parked requests we start for each completed request on\nthe object.\nNB: Even with the implicit delay of delivery, this parameter\ncontrols an exponential increase in number of worker threads.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "prefer_ipv6": {
        "value": "off",
        "default": "off",
        "unit": "bool",
        "description": "\n\nPrefer IPv6 address when connecting to backends which have both\nIPv4 and IPv6 addresses.\n\n"
    },
    "pool_vbo": {
        "value": "10,100,10",
        "default": "10,100,10",
        "unit": "",
        "description": "\n\nParameters for backend object fetch memory pool.\nThe three numbers are:\nmin_pool            minimum size of free pool.\nmax_pool            maximum size of free pool.\nmax_age             max age of free element.\n\n"
    },
    "pool_vbc": {
        "value": "10,100,10",
        "default": "10,100,10",
        "unit": "",
        "description": "\n\nParameters for backend connection memory pool.\nThe three numbers are:\nmin_pool            minimum size of free pool.\nmax_pool            maximum size of free pool.\nmax_age             max age of free element.\n\n"
    },
    "pool_sslbuffer": {
        "value": "10,100,10",
        "default": "10,100,10",
        "unit": "",
        "description": "\n\nParameters for ssl buffer pool.\nThe three numbers are:\nmin_pool            minimum size of free pool.\nmax_pool            maximum size of free pool.\nmax_age             max age of free element.\n\n"
    },
    "pool_sess": {
        "value": "10,100,10",
        "default": "10,100,10",
        "unit": "",
        "description": "\n\nParameters for per worker pool session memory pool.\nThe three numbers are:\nmin_pool            minimum size of free pool.\nmax_pool            maximum size of free pool.\nmax_age             max age of free element.\n\n"
    },
    "pool_req": {
        "value": "10,100,10",
        "default": "10,100,10",
        "unit": "",
        "description": "\n\nParameters for per worker pool request memory pool.\nThe three numbers are:\nmin_pool            minimum size of free pool.\nmax_pool            maximum size of free pool.\nmax_age             max age of free element.\n\n"
    },
    "pipe_timeout": {
        "value": "60.000",
        "default": "60.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nIdle timeout for PIPE sessions. If nothing has been received\nin either direction for this many seconds, the session is\nclosed.\n\n"
    },
    "ping_interval": {
        "value": "3",
        "default": "3",
        "unit": "seconds",
        "description": "\nMinimum is: 0\n\nInterval between pings from parent to child.\nZero will disable pinging entirely, which makes it possible to\nattach a debugger to the child.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "pcre_match_limit_recursion": {
        "value": "10000",
        "default": "10000",
        "unit": "",
        "description": "\nMinimum is: 1\n\nThe limit for the number of internal matching function\nrecursions in a pcre_exec() execution.\n\n"
    },
    "pcre_match_limit": {
        "value": "10000",
        "default": "10000",
        "unit": "",
        "description": "\nMinimum is: 1\n\nThe limit for the number of internal matching function calls\nin a pcre_exec() execution.\n\n"
    },
    "nuke_limit": {
        "value": "50",
        "default": "50",
        "unit": "allocations",
        "description": "\nMinimum is: 0\n\nMaximum number of objects we attempt to nuke in order to make\nspace for an object body.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "mse_sendfile_min": {
        "value": "0b",
        "default": "0b",
        "unit": "bytes",
        "description": "\n\nMinimum size of a transmit chunk to consider using sendfile. If\nzero sendfile is disabled.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "mse_prune_loop": {
        "value": "10",
        "default": "10",
        "unit": "",
        "description": "\n\nNumber of times to loop during pruning before bailing.\n\n"
    },
    "mse_prune_factor": {
        "value": "2",
        "default": "2",
        "unit": "",
        "description": "\nMinimum is: 1\n\nSegment pruning goal factor. Segment pruning finishes when the\nsegment free big extents reach mse_bigalloc times this number.\n\n"
    },
    "mse_pad_writes": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nPad writes up to block size\n\n"
    },
    "mse_nuke_limit": {
        "value": "10",
        "default": "10",
        "unit": "",
        "description": "\n\nNumber of objects to nuke to seed segment pruning algorithm.\n\n"
    },
    "mse_minextfree": {
        "value": "4k",
        "default": "4k",
        "unit": "bytes",
        "description": "\nMinimum is: 4k\n\nMinimum size of a free extent for it to be tracked.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "mse_membuf_size": {
        "value": "4",
        "default": "4",
        "unit": "pages",
        "description": "\nMinimum is: 1\n\nSize of write memory buffers.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "mse_delay_writes": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nDelay writes until the memory buffer is full\n\n"
    },
    "mse_bigalloc": {
        "value": "1M",
        "default": "1M",
        "unit": "bytes",
        "description": "\nMinimum is: 4k\n\nMinimum size of an allocation for it to be considered big.\nAllocations exceeding this size are considered non-fragmented.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "max_retries": {
        "value": "4",
        "default": "4",
        "unit": "retries",
        "description": "\nMinimum is: 0\n\nUpper limit on how many times a backend fetch can retry.\n\n"
    },
    "max_restarts": {
        "value": "4",
        "default": "4",
        "unit": "restarts",
        "description": "\nMinimum is: 0\n\nUpper limit on how many times a request can restart.\nBe aware that restarts are likely to cause a hit against the\nbackend, so don't increase thoughtlessly.\n\n"
    },
    "max_esi_depth": {
        "value": "5",
        "default": "5",
        "unit": "levels",
        "description": "\nMinimum is: 0\n\nMaximum depth of esi:include processing.\n\n"
    },
    "lru_interval": {
        "value": "2.000",
        "default": "2.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nGrace period before object moves on LRU list.\nObjects are only moved to the front of the LRU list if they\nhave not been moved there already inside this timeout period. \nThis reduces the amount of lock operations necessary for LRU\nlist access.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "listen_depth": {
        "value": "1024",
        "default": "1024",
        "unit": "connections",
        "description": "\nMinimum is: 0\n\nListen queue depth.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "listen_address": {
        "value": "0.0.0.0:80",
        "default": ":80",
        "unit": "",
        "description": "\n\nWhitespace separated list of network endpoints where Varnish\nwill accept requests.\nPossible formats: host, host:port, :port\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\n"
    },
    "idle_send_timeout": {
        "value": "60.000",
        "default": "60.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nTime to wait with no data sent. If no data has been transmitted\nin this many\nseconds the session is closed.\nSee setsockopt(2) under SO_SNDTIMEO for more information.\n\nNB: This parameter may take quite some time to take (full)\neffect.\n\n"
    },
    "http_resp_size": {
        "value": "32k",
        "default": "32k",
        "unit": "bytes",
        "description": "\nMinimum is: 0.25k\n\nMaximum number of bytes of HTTP backend response we will deal\nwith.  This is a limit on all bytes up to the double blank line\nwhich ends the HTTP request.\nThe memory for the request is allocated from the worker\nworkspace (param: thread_pool_workspace) and this parameter\nlimits how much of that the request is allowed to take up.\n\n"
    },
    "http_resp_hdr_len": {
        "value": "8k",
        "default": "8k",
        "unit": "bytes",
        "description": "\nMinimum is: 40b\n\nMaximum length of any HTTP backend response header we will\nallow.  The limit is inclusive of its continuation lines.\n\n"
    },
    "http_req_size": {
        "value": "32k",
        "default": "32k",
        "unit": "bytes",
        "description": "\nMinimum is: 0.25k\n\nMaximum number of bytes of HTTP client request we will deal\nwith.  This is a limit on all bytes up to the double blank line\nwhich ends the HTTP request.\nThe memory for the request is allocated from the client\nworkspace (param: workspace_client) and this parameter limits\nhow much of that the request is allowed to take up.\n\n"
    },
    "http_req_hdr_len": {
        "value": "8k",
        "default": "8k",
        "unit": "bytes",
        "description": "\nMinimum is: 40b\n\nMaximum length of any HTTP client request header we will allow.\nThe limit is inclusive of its continuation lines.\n\n"
    },
    "http_range_support": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nEnable support for HTTP Range headers.\n\n"
    },
    "http_max_hdr": {
        "value": "64",
        "default": "64",
        "unit": "header lines",
        "description": "\nMinimum is: 32\nMaximum is: 65535\n\nMaximum number of HTTP header lines we allow in\n{req|resp|bereq|beresp}.http (obj.http is autosized to the\nexact number of headers).\nCheap, ~20 bytes, in terms of workspace memory.\nNote that the first line occupies five header lines.\n\n"
    },
    "http_gzip_support": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nEnable gzip support. When enabled, Varnish requests compressed\nobjects from the backend and stores them compressed. If a client\ndoes not support gzip encoding Varnish will uncompress\ncompressed objects on demand. Varnish will also rewrite the\nAccept-Encoding header of clients indicating support for gzip\nto:\nAccept-Encoding: gzip\n\nClients that do not support gzip will have their\nAccept-Encoding header removed. For more information on how\ngzip is implemented please see the chapter on gzip in the\nVarnish reference.\n\n"
    },
    "gzip_memlevel": {
        "value": "8",
        "default": "8",
        "unit": "",
        "description": "\nMinimum is: 1\nMaximum is: 9\n\nGzip memory level 1=slow/least, 9=fast/most compression.\nMemory impact is 1=1k, 2=2k, ... 9=256k.\n\n"
    },
    "gzip_level": {
        "value": "6",
        "default": "6",
        "unit": "",
        "description": "\nMinimum is: 0\nMaximum is: 9\n\nGzip compression level: 0=debug, 1=fast, 9=best\n\n"
    },
    "gzip_buffer": {
        "value": "32k",
        "default": "32k",
        "unit": "bytes",
        "description": "\nMinimum is: 2k\n\nSize of malloc buffer used for gzip processing.\nThese buffers are used for in-transit data, for instance\ngunzip'ed data being sent to a client. Making this space too\nsmall results in more overhead, writes to sockets etc.; making\nit too big is probably just a waste of memory.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "group_cc": {
        "value": "<not set>",
        "default": "<not set>",
        "unit": "",
        "description": "\n\nOn some systems the C-compiler is restricted so not everybody\ncan run it.  This parameter makes it possible to add an extra\ngroup to the sandbox process which runs the cc_command, in\norder to gain access to such a restricted C-compiler.\n\nNB: This parameter only works if varnishd is run as root.\n\n"
    },
    "group": {
        "value": "varnish (497)",
        "default": "GID 0",
        "unit": "",
        "description": "\n\nThe unprivileged group to run as.\n\nNB: This parameter will not take any effect until the child\nprocess has been restarted.\n\nNB: This parameter only works if varnishd is run as root.\n\n"
    },
    "first_byte_timeout": {
        "value": "60.000",
        "default": "60.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nDefault timeout for receiving first byte from backend. We only\nwait for this many seconds for the first byte before giving up.\nA value of 0 means it will never time out. VCL can override\nthis default value for each backend and backend request. This\nparameter does not apply to pipe.\n\n"
    },
    "fetch_maxchunksize": {
        "value": "0.25G",
        "default": "0.25G",
        "unit": "bytes",
        "description": "\nMinimum is: 64k\n\nThe maximum chunksize we attempt to allocate from storage.\nMaking this too large may cause delays and storage\nfragmentation.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "fetch_chunksize": {
        "value": "16k",
        "default": "16k",
        "unit": "bytes",
        "description": "\nMinimum is: 4k\n\nThe default chunksize used by fetcher. This should be bigger\nthan the majority of objects with short TTLs.\nInternal limits in the storage_file module make increases\nabove 128kb a dubious idea.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "feature": {
        "value": "none",
        "default": "none",
        "unit": "",
        "description": "\n\nEnable/Disable various minor features.\nnone                       Disable all features.\n\nUse +/- prefix to enable/disable individual feature:\nshort_panic                Short panic message.\nwait_silo                  Wait for persistent silo.\nno_coredump                No coredumps.\nesi_ignore_https           Treat HTTPS as HTTP in\nESI:includes\nesi_disable_xml_check      Don't check if body looks like\nXML\nesi_ignore_other_elements  Ignore non-esi XML-elements\nesi_remove_bom             Remove UTF-8 BOM\n\n"
    },
    "default_ttl": {
        "value": "120.000",
        "default": "120.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nThe TTL assigned to objects if neither the backend nor the VCL\ncode assigns one.\n\nNB: This parameter is evaluated only when objects are\ncreated. To change it for all objects, restart or ban\neverything.\n\n"
    },
    "default_keep": {
        "value": "0.000",
        "default": "0.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nDefault keep period.  We will keep a useless object around this\nlong, making it available for conditional backend fetches. \nThat means that the object will be removed from the cache at\nthe end of ttl+grace+keep.\n\nNB: This parameter is evaluated only when objects are\ncreated. To change it for all objects, restart or ban\neverything.\n\n"
    },
    "default_grace": {
        "value": "10.000",
        "default": "10.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nDefault grace period.  We will deliver an object this long\nafter it has expired, provided another thread is attempting to\nget a new copy.\n\nNB: This parameter is evaluated only when objects are\ncreated. To change it for all objects, restart or ban\neverything.\n\n"
    },
    "debug": {
        "value": "none",
        "default": "none",
        "unit": "",
        "description": "\n\nEnable/Disable various kinds of debugging.\nnone                Disable all debugging\n\nUse +/- prefix to set/reset individual bits:\nreq_state           VSL Request state engine\nworkspace           VSL Workspace operations\nwaiter              VSL Waiter internals\nwaitinglist         VSL Waitinglist events\nsyncvsl             Make VSL synchronous\nhashedge            Edge cases in Hash\nvclrel              Rapid VCL release\nlurker              VSL Ban lurker\nesi_chop            Chop ESI fetch to bits\nflush_head          Flush after http1 head\n\n"
    },
    "critbit_cooloff": {
        "value": "180.000",
        "default": "180.000",
        "unit": "seconds",
        "description": "\nMinimum is: 60.000\nMaximum is: 254.000\n\nHow long the critbit hasher keeps deleted objheads on the\ncooloff list.\n\nNB: Do not change this parameter, unless a developer tells you\nto do so.\n\n"
    },
    "connect_timeout": {
        "value": "3.500",
        "default": "3.500",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nDefault connection timeout for backend connections. We only try\nto connect to the backend for this many seconds before giving\nup. VCL can override this default value for each backend and\nbackend request.\n\n"
    },
    "clock_skew": {
        "value": "10",
        "default": "10",
        "unit": "seconds",
        "description": "\nMinimum is: 0\n\nHow much clockskew we are willing to accept between the backend\nand our own clock.\n\n"
    },
    "cli_timeout": {
        "value": "60.000",
        "default": "60.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nTimeout for the child's replies to CLI requests from the\nmgt_param.\n\n"
    },
    "cli_limit": {
        "value": "48k",
        "default": "48k",
        "unit": "bytes",
        "description": "\nMinimum is: 128b\nMaximum is: 99999999b\n\nMaximum size of CLI response.  If the response exceeds this\nlimit, the response code will be 201 instead of 200 and the\nlast line will indicate the truncation.\n\n"
    },
    "cli_buffer": {
        "value": "8k",
        "default": "8k",
        "unit": "bytes",
        "description": "\nMinimum is: 4k\n\nSize of buffer for CLI command input.\nYou may need to increase this if you have big VCL files and use\nthe vcl.inline CLI command.\nNB: Must be specified with -p to have effect.\n\n"
    },
    "cc_command": {
        "value": "exec gcc -std=gnu99  -O2 -g -pipe -Wp,-D_FORTIFY_SOURCE=0 -fexceptions -fstack-protector -m64 -mtune=generic -Wall -Werror -pthread -fpic -shared -Wl,-x -o %o %s",
        "default": "exec gcc -std=gnu99  -O2 -g -pipe -Wp,-D_FORTIFY_SOURCE=0 -fexceptions -fstack-protector -m64 -mtune=generic -Wall -Werror -pthread -fpic -shared -Wl,-x -o %o %s",
        "unit": "",
        "description": "\n\nCommand used for compiling the C source code to a dlopen(3)\nloadable object.  Any occurrence of %s in the string will be\nreplaced with the source file name, and %o will be replaced\nwith the output file name.\n\nNB: This parameter will not take any effect until the VCL\nprograms have been reloaded.\n\n"
    },
    "busyobj_worker_cache": {
        "value": "off",
        "default": "off",
        "unit": "bool",
        "description": "\n\nCache free busyobj per worker thread. Disable this if you have\nvery high hitrates and want to save the memory of one busyobj\nper worker thread.\n\n"
    },
    "between_bytes_timeout": {
        "value": "60.000",
        "default": "60.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nDefault timeout between bytes when receiving data from backend.\nWe only wait for this many seconds between bytes before giving\nup. A value of 0 means it will never time out. VCL can override\nthis default value for each backend and backend\nrequest. This parameter does not apply to pipe.\n\n"
    },
    "ban_lurker_sleep": {
        "value": "0.010",
        "default": "0.010",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nThe ban lurker thread sleeps between work batches, in order to\nnot monopolize CPU power.  When nothing is done, it sleeps a\nfraction of a second before looking for new work to do.\nA value of zero disables the ban lurker.\n\n"
    },
    "ban_lurker_batch": {
        "value": "1000",
        "default": "1000",
        "unit": "",
        "description": "\nMinimum is: 1\n\nHow many objects the ban lurker examines before taking a\nban_lurker_sleep.  Use this to pace the ban lurker so it does\nnot eat too much CPU.\n\n"
    },
    "ban_lurker_age": {
        "value": "60.000",
        "default": "60.000",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\n\nThe ban lurker does not process bans until they are this old. \nRight when a ban is added, the most frequently hit objects will\nget tested against it as part of object lookup.  This parameter\nprevents the ban-lurker from kicking in, until the rush is\nover.\n\n"
    },
    "ban_dups": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nEliminate older identical bans when new bans are created.  This\ntest is CPU intensive and scales with the number and complexity\nof active (non-Gone) bans.  If identical bans are frequent, the\namount of CPU needed to actually test the bans will be\nsimilarly reduced.\n\n"
    },
    "auto_restart": {
        "value": "on",
        "default": "on",
        "unit": "bool",
        "description": "\n\nRestart child process automatically if it dies.\n\n"
    },
    "acceptor_sleep_max": {
        "value": "0.050",
        "default": "0.050",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\nMaximum is: 10.000\n\nIf we run out of resources, such as file descriptors or worker\nthreads, the acceptor will sleep between accepts.\nThis parameter limits how long it can sleep between attempts to\naccept new connections.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "acceptor_sleep_incr": {
        "value": "0.001",
        "default": "0.001",
        "unit": "seconds",
        "description": "\nMinimum is: 0.000\nMaximum is: 1.000\n\nIf we run out of resources, such as file descriptors or worker\nthreads, the acceptor will sleep between accepts.\nThis parameter controls how much longer we sleep, each time we\nfail to accept a new connection.\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    },
    "acceptor_sleep_decay": {
        "value": "0.9",
        "default": "0.9",
        "unit": "",
        "description": "\nMinimum is: 0\nMaximum is: 1\n\nIf we run out of resources, such as file descriptors or worker\nthreads, the acceptor will sleep between accepts.\nThis parameter (multiplicatively) reduces the sleep duration for\neach successful accept. (ie: 0.9 = reduce by 10%)\n\nNB: We do not know yet if it is a good idea to change this\nparameter, or if the default value is even sensible.  Caution\nis advised, and feedback is most welcome.\n\n"
    }
}
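
The dump above is a plain JSON object keyed by parameter name, with "value", "default", "unit" and "description" fields per entry, so it is easy to diff the running configuration against the compiled-in defaults. A minimal Python sketch, assuming the JSON has been saved locally as "paramjson" (the file name is an assumption):

#!/usr/bin/env python3
# List parameters in the dump whose running value differs from the default.
import json

with open("paramjson") as fh:   # hypothetical local copy of the JSON above
    params = json.load(fh)

for name, p in sorted(params.items()):
    if p["value"] != p["default"]:
        print(f'{name:24} value={p["value"]:<16} default={p["default"]:<16} unit={p["unit"] or "-"}')

On this particular dump that reports only group, listen_address, thread_pool_min, thread_pool_timeout and user as overrides. Overrides of this kind are normally applied at startup with varnishd -p name=value, or at runtime with varnishadm param.set; note that parameters whose descriptions say they only take effect after a child restart or a VCL reload still require that extra step.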