# netdata configuration
#
# You can download the latest version of this file using:
#
#  wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
# or
#  curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
#
# You can uncomment and change any of the options below.
# The value shown in the commented settings is the default value.
#
# Notes about plugins
#
# tc       - QoS stats (if wanted)
# cgroups  - No support in OpenWRT/LEDE by default
# charts.d - REQUIRES bash, enable here and edit charts.d.conf
# node.d   - REQUIRES node.js
# python.d - REQUIRES python and PyYAML, edit python.d.conf to enable
# apps     - none at the moment
# health   - Disabled by default
# KSM      - No support in OpenWRT/LEDE by default

# global netdata configuration

[global]
	# glibc malloc arena max for plugins = 1
	# glibc malloc arena max for netdata = 1
	# config directory = /etc/netdata
	# plugins directory = /usr/libexec/netdata/plugins.d
	# web files directory = /usr/share/netdata/web
	# cache directory = /var/cache/netdata
	# lib directory = /var/lib/netdata
	# log directory = /var/log/netdata
	# host access prefix =
	# home directory = /var/cache/netdata
	# debug flags = 0x00000000
	memory deduplication (ksm) = no
	debug log = syslog
	error log = syslog
	access log = none
	# errors flood protection period = 1200
	# errors to trigger flood protection = 200
	memory mode = ram
	# hostname = LEDE
	# history = 3600
	# update every = 1
	update every = 2
	# pthread stack size = 8388608
	run as user = nobody

[web]
	web files owner = root
	web files group = root
	# default port = 19999
	# bind to = *

[plugins]
	# PATH environment variable = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin
	tc = no
	# idlejitter = yes
	# proc = yes
	# diskspace = yes
	cgroups = no
	# checks = no
	# plugins directory = /usr/libexec/netdata/plugins.d
	enable running new plugins = no
	# check for new plugins every = 60
	charts.d = no
	# plugins directory = /usr/libexec/netdata/plugins.d
	node.d = no
	python.d = no
	apps = no
	fping = no

[registry]
	# enabled = no
	# registry db directory = /var/lib/netdata/registry
	# netdata unique id file = /var/lib/netdata/registry/netdata.public.unique.id
	# registry db file = /var/lib/netdata/registry/registry.db
	# registry log file = /var/lib/netdata/registry/registry-log.db
	# registry save db every new entries = 1000000
	# registry expire idle persons days = 365
	# registry domain =
	# registry to announce = https://registry.my-netdata.io
	# registry hostname = LEDE
	# verify browser cookies support = yes
	# max URL length = 1024
	# max URL name length = 50

[health]
	enabled = no
	# health db directory = /var/lib/netdata/health
	# health db file = /var/lib/netdata/health/health-log.db
	# health configuration directory = /etc/netdata/health.d
	# script to execute on alarm = /usr/libexec/netdata/plugins.d/alarm-notify.sh
	# in memory max health log entries = 1000
	# run at least every seconds = 10
	# rotate log every lines = 2000

[backend]
	# enabled = no
	# data source = average
	# type = graphite
	# destination = localhost
	# prefix = netdata
	# hostname = LEDE
	# update every = 10
	# buffer on failures = 10
	# timeout ms = 20000

# per plugin configuration

[plugin:fping]
	# update every = 1
	# command options =

[plugin:proc]
	# netdata server resources = yes
	# /proc/stat = yes
	# /proc/uptime = yes
	# /proc/loadavg = yes
	# /proc/sys/kernel/random/entropy_avail = yes
	# /proc/interrupts = yes
	# /proc/softirqs = yes
	# /proc/vmstat = yes
	# /proc/meminfo = yes
	/sys/kernel/mm/ksm = no
	/sys/devices/system/edac/mc = no
	/sys/devices/system/node = no
	# /proc/net/dev = yes
	/proc/net/netstat = no
	/proc/net/snmp = no
	/proc/net/snmp6 = no
	/proc/net/softnet_stat = no
	/proc/net/ip_vs/stats = no
	# /proc/net/stat/conntrack = yes
	/proc/net/stat/synproxy = no
	# /proc/diskstats = yes
	/proc/net/rpc/nfsd = no
	/proc/net/rpc/nfs = no
	# ipc = yes
	/proc/spl/kstat/zfs/arcstats = no

[plugin:proc:/proc/stat]
	# cpu utilization = yes
	# per cpu core utilization = yes
	# cpu interrupts = yes
	# context switches = yes
	# processes started = yes
	# processes running = yes
	# filename to monitor = /proc/stat

[plugin:proc:/proc/interrupts]
	# interrupts per core = yes
	# filename to monitor = /proc/interrupts

[plugin:proc:/proc/softirqs]
	# interrupts per core = yes
	# filename to monitor = /proc/softirqs

[plugin:proc:/proc/net/dev]
	# enable new interfaces detected at runtime = auto
	# bandwidth for all interfaces = auto
	# packets for all interfaces = auto
	# errors for all interfaces = auto
	# drops for all interfaces = auto
	# fifo for all interfaces = auto
	# compressed packets for all interfaces = auto
	# frames, collisions, carrier counters for all interfaces = auto
	# disable by default interfaces matching = lo fireqos* *-ifb
	# filename to monitor = /proc/net/dev

[plugin:proc:/proc/net/dev:eth0]
	# enabled = yes
	# bandwidth = auto
	# packets = auto
	# errors = auto
	# drops = auto
	# fifo = auto
	# compressed = auto
	# events = auto

[plugin:proc:/proc/diskstats]
	# enable new disks detected at runtime = yes
	# performance metrics for physical disks = auto
	# performance metrics for virtual disks = auto
	# performance metrics for partitions = no
	# bandwidth for all disks = auto
	# operations for all disks = auto
	# merged operations for all disks = auto
	# i/o time for all disks = auto
	# queued operations for all disks = auto
	# utilization percentage for all disks = auto
	# backlog for all disks = auto
	# filename to monitor = /proc/diskstats
	# path to get block device infos = /sys/dev/block/%lu:%lu/%s
	# path to get h/w sector size = /sys/block/%s/queue/hw_sector_size
	# path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size
	# performance metrics for disks with major 8 = yes

[plugin:proc:/proc/diskstats:sda]
	# enable = yes
	# enable performance metrics = yes
	# bandwidth = auto
	# operations = auto
	# merged operations = auto
	# i/o time = auto
	# queued operations = auto
	# utilization percentage = auto
	# backlog = auto

[plugin:proc:/proc/net/rpc/nfsd]
	# filename to monitor = /proc/net/rpc/nfsd
	# read cache = yes
	# file handles = yes
	# I/O = yes
	# threads = yes
	# read ahead = yes
	# network = yes
	# rpc = yes
	# NFS v2 procedures = yes
	# NFS v3 procedures = yes
	# NFS v4 procedures = yes
	# NFS v4 operations = yes

[plugin:proc:/proc/net/rpc/nfs]
	# filename to monitor = /proc/net/rpc/nfs
	# network = yes
	# rpc = yes
	# NFS v2 procedures = yes
	# NFS v3 procedures = yes
	# NFS v4 procedures = yes
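
# Applying changes: netdata only reads this file at startup, so restart the
# service after editing. A minimal sketch, assuming the OpenWrt/LEDE package
# installs its init script at /etc/init.d/netdata:
#
#  /etc/init.d/netdata restart
#
# The effective (merged) configuration of the running daemon can then be
# fetched back for review, using the same endpoint as in the header above:
#
#  curl http://localhost:19999/netdata.conf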