Merge remote-tracking branch 'upstream/master'
Dave Josephsen committed Feb 5, 2015
2 parents b06b990 + 4dfd232 commit d01c5ce
Showing 9 changed files with 106 additions and 55 deletions.
5 changes: 4 additions & 1 deletion .gitignore
@@ -3,7 +3,7 @@ Thumbs.db
.DS_Store
*.bak

# JetBrains PyCharm configuration and working files
.idea

*.py[co]
@@ -35,3 +35,6 @@ var/
# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# rope
.ropeproject/
10 changes: 10 additions & 0 deletions .travis.yml
@@ -0,0 +1,10 @@
language: python

python:
- "2.6"
- "2.7"

install:
- pip install flake8

script: flake8 *.py
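
The new Travis configuration is lint-only: it installs flake8 and runs it across every top-level Python file on 2.6 and 2.7. Assuming pip is available, the same check can be reproduced locally with the two commands the CI runs:

<pre>
pip install flake8
flake8 *.py
</pre>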
18 changes: 14 additions & 4 deletions README.md
@@ -1,3 +1,9 @@

Graphios
========

[![Build Status](https://travis-ci.org/shawn-sterling/graphios.svg?branch=master)](https://travis-ci.org/shawn-sterling/graphios)

*Oct 15, 2014*

New graphios 2.0!
@@ -24,8 +30,8 @@ of supported upstream metrics systems simultaneously.

* A working nagios / icinga / naemon server
* A functional carbon or statsd daemon, and/or Librato credentials
* Python 2.7 or later (Is anyone still using 2.4? Likely very little work to
make this work under 2.4 again if so. Let me know)
* Python 2.6 or later (but not python 3.x) (Is anyone still using 2.4? Likely
very little work to make this work under 2.4 again if so. Let me know)

# License

@@ -377,6 +383,10 @@ Options:
--sleep_max=SLEEP_MAX
Max time to sleep between runs
--server=SERVER Server address (for backend)
--no_replace_hostname
Replace '.' in nagios hostnames, default on.
--reverse_hostname Reverse nagios hostname, default off.

)
</pre>
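
For reference, a hypothetical invocation that exercises the two new flags alongside existing ones (the flag names come from the option list above; the server address is only a placeholder):

<pre>
./graphios.py --backend=carbon --server=graphite.example.com --no_replace_hostname --reverse_hostname
</pre>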

@@ -655,12 +665,12 @@ host_perfdata_file_processing_command=omd-process-host-perfdata-file
<pre>
define command{
command_name omd-process-service-perfdata-file
command_line /bin/mv /omd/sites/SITENAME/var/pnp4nagios/service-perfdata /omd/sites/prod/var/pnp4nagios/spool/service-perfdata.$TIMET$ && cp /omd/sites/prod/var/pnp4nagios/spool/service-perfdata.$TIMET$ /omd/sites/prod/var/graphios/spool/
command_line /bin/mv /omd/sites/SITENAME/var/pnp4nagios/service-perfdata /omd/sites/SITENAME/var/pnp4nagios/spool/service-perfdata.$TIMET$ && cp /omd/sites/SITENAME/var/pnp4nagios/spool/service-perfdata.$TIMET$ /omd/sites/SITENAME/var/graphios/spool/
}

define command{
command_name omd-process-host-perfdata-file
command_line /bin/mv /omd/sites/SITENAME/var/pnp4nagios/host-perfdata /omd/sites/prod/var/pnp4nagios/spool/host-perfdata.$TIMET$ && cp /omd/sites/prod/var/pnp4nagios/spool/host-perfdata.$TIMET$ /omd/sites/prod/var/graphios/spool/
command_line /bin/mv /omd/sites/SITENAME/var/pnp4nagios/host-perfdata /omd/sites/SITENAME/var/pnp4nagios/spool/host-perfdata.$TIMET$ && cp /omd/sites/SITENAME/var/pnp4nagios/spool/host-perfdata.$TIMET$ /omd/sites/prod/var/graphios/spool/
}
</pre>
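
As a worked example of what the corrected commands do (with SITENAME filled in as mysite and an illustrative $TIMET$ value of 1423180800), the service-perfdata file ends up renamed into the pnp4nagios spool and copied into the graphios spool:

<pre>
/omd/sites/mysite/var/pnp4nagios/spool/service-perfdata.1423180800
/omd/sites/mysite/var/graphios/spool/service-perfdata.1423180800
</pre>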

11 changes: 10 additions & 1 deletion graphios.cfg
@@ -22,6 +22,7 @@ log_max_size = 25165824
#log_level = logging.DEBUG
log_level = logging.INFO

# Disable this once you get it working.
debug = True

# How long to sleep between processing the spool directory
@@ -38,8 +39,16 @@ test_mode = False
use_service_desc = False

# replace "." in nagios hostnames? (so "my.host.name" becomes "my_host_name")
# (uses the replacement_character)
replace_hostname = True

# reverse hostname
# if you have:
# host.datacenter.company.tld
# as your nagios hostname you may prefer to have your metric stored as:
# tld.company.datacenter.host
reverse_hostname = False
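
Taken together, the two hostname options behave roughly like the sketch below (a Python illustration of the new check_adjust_hostname() logic in graphios.py, using the example hostname from the comment above and assuming replacement_character is "_"):

<pre>
# illustration only -- mirrors GraphiosMetric.check_adjust_hostname()
reverse_hostname = True
replace_hostname = True
replacement_character = "_"

hostname = "host.datacenter.company.tld"
if reverse_hostname:
    hostname = '.'.join(reversed(hostname.split('.')))
    # -> "tld.company.datacenter.host"
if replace_hostname:
    hostname = hostname.replace('.', replacement_character)
print(hostname)  # -> "tld_company_datacenter_host"
</pre>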

#------------------------------------------------------------------------------
# Carbon Details (comment out if not using carbon)
#------------------------------------------------------------------------------
@@ -96,7 +105,7 @@ librato_whitelist = [".*"]
# librato_sourcevals = HOSTNAME

#flag the librato backend as 'non essential' for the purposes of error checking
#nerf_librato=False
#nerf_librato = False

#------------------------------------------------------------------------------
# STDOUT Details (comment in if you are using STDOUT)
71 changes: 46 additions & 25 deletions graphios.py
@@ -46,8 +46,8 @@
import time


############################################################
##### Do not edit this file, edit the graphios.cfg #####
# ##########################################################
# ### Do not edit this file, edit the graphios.cfg #####

# nagios spool directory
spool_directory = '/var/spool/nagios/graphios'
@@ -84,7 +84,7 @@
help="file to log to")
parser.add_option("--backend", dest="backend", default="stdout",
help="sets which storage backend to use")
parser.add_option("--config", dest="config", default="",
parser.add_option("--config_file", dest="config_file", default="",
help="set custom config file location")
parser.add_option("--test", action="store_true", dest="test", default="",
help="Turns on test mode, which won't send to backends")
@@ -96,6 +96,13 @@
help="Max time to sleep between runs")
parser.add_option("--server", dest="server", default="",
help="Server address (for backend)")
parser.add_option("--no_replace_hostname", action="store_false",
dest="replace_hostname", default=True,
help="Replace '.' in nagios hostnames, default on.")
parser.add_option("--reverse_hostname", action="store_true",
dest="reverse_hostname",
help="Reverse nagios hostname, default off.")


log = logging.getLogger('log')

@@ -128,13 +135,14 @@ def validate(self):
re.sub('"', "", self.LABEL)
re.sub("'", "", self.VALUE)
re.sub('"', "", self.VALUE)
self.check_adjust_hostname()
if (
self.TIMET is not '' and
self.PERFDATA is not '' and
self.HOSTNAME is not ''
):
if "use_service_desc" in cfg and cfg["use_service_desc"] is True:
if self.SERVICEDESC != '':
if self.SERVICEDESC != '' or self.DATATYPE == 'HOSTPERFDATA':
self.VALID = True
else:
# not using service descriptions
@@ -146,6 +154,13 @@
else:
self.VALID = True

def check_adjust_hostname(self):
if cfg["reverse_hostname"]:
self.HOSTNAME = '.'.join(reversed(self.HOSTNAME.split('.')))
if cfg["replace_hostname"]:
self.HOSTNAME = self.HOSTNAME.replace(".",
cfg["replacement_character"])


def chk_bool(value):
"""
@@ -229,12 +244,10 @@ def verify_options(opts):
global spool_directory
# because these have defaults in the parser section we know they will be
# set. So we don't have to do a bunch of ifs.
if "log_file" in cfg:
if cfg["log_file"] == "''":
cfg["log_file"] = "%s/graphios.log" % sys.path[0]
else:
cfg["log_file"] = opts.log_file
cfg["log_file"] = opts.log_file
if "log_file" not in cfg:
cfg["log_file"] = opts.log_file
if cfg["log_file"] == "''" or cfg["log_file"] == "":
cfg["log_file"] = "%s/graphios.log" % sys.path[0]
cfg["log_max_size"] = 25165824 # 24 MB
if opts.verbose:
cfg["debug"] = True
@@ -250,8 +263,10 @@
cfg["spool_directory"] = opts.spool_directory
cfg["sleep_time"] = opts.sleep_time
cfg["sleep_max"] = opts.sleep_max
cfg["replace_hostname"] = opts.replace_hostname
cfg["reverse_hostname"] = opts.reverse_hostname
spool_directory = opts.spool_directory
#cfg["backend"] = opts.backend
# cfg["backend"] = opts.backend
handle_backends(opts)
# cfg["enable_carbon"] = True
return cfg
@@ -281,7 +296,7 @@ def configure():
global debug
log_handler = logging.handlers.RotatingFileHandler(
cfg["log_file"], maxBytes=cfg["log_max_size"], backupCount=4,
#encoding='bz2')
# encoding='bz2')
)
formatter = logging.Formatter(
"%(asctime)s %(filename)s %(levelname)s %(message)s",
@@ -318,7 +333,7 @@ def process_log(file_name):
for line in file_array:
if not re.search("^DATATYPE::", line):
continue
#log.debug('parsing: %s' % line)
# log.debug('parsing: %s' % line)
graphite_lines += 1
variables = line.split('\t')
mobj = get_mobj(variables)
@@ -335,7 +350,8 @@
nobj.UOM = re.sub("[^a-zA-Z]+", "", u)
processed_objects.append(nobj)
except:
log.critical("failed to parse metric: %s" % nobj.PERFDATA)
log.critical("failed to parse label: '%s' part of perf"
"string '%s'" % (metric, nobj.PERFDATA))
continue
return processed_objects

@@ -347,7 +363,13 @@ def get_mobj(nag_array):
"""
mobj = GraphiosMetric()
for var in nag_array:
(var_name, value) = var.split('::', 1)
# drop the metric if we can't split it for any reason
try:
(var_name, value) = var.split('::', 1)
except:
log.warn("could not split value %s, dropping metric" % var)
return False

value = re.sub("/", cfg["replacement_character"], value)
if re.search("PERFDATA", var_name):
mobj.PERFDATA = value
@@ -403,7 +425,7 @@ def process_spool_dir(directory):
mobjs = process_log(file_dir)
mobjs_len = len(mobjs)
processed_dict = send_backends(mobjs)
#process the output from the backends and decide the fate of the file
# process the output from the backends and decide the fate of the file
for backend in be["essential_backends"]:
if processed_dict[backend] < mobjs_len:
log.critical("keeping %s, insufficent metrics sent from %s. \
@@ -452,15 +474,15 @@ def init_backends():
be = {} # a top-level global for important backend-related stuff
be["enabled_backends"] = {} # a dict of instantiated backend objects
be["essential_backends"] = [] # a list of backends we actually care about
#PLUGIN WRITERS! register your new backends by adding their obj name here
# PLUGIN WRITERS! register your new backends by adding their obj name here
avail_backends = ("carbon",
"statsd",
"librato",
"stdout",
)
#populate the controller dict from avail + config. this assumes you named
#your backend the same as the config option that enables your backend (eg.
#carbon and enable_carbon)
# populate the controller dict from avail + config. this assumes you named
# your backend the same as the config option that enables your backend (eg.
# carbon and enable_carbon)
for backend in avail_backends:
cfg_option = "enable_%s" % (backend)
if cfg_option in cfg and cfg[cfg_option] is True:
@@ -488,7 +510,7 @@ def send_backends(metrics):
processed_lines = 0
for backend in be["enabled_backends"]:
processed_lines = be["enabled_backends"][backend].send(metrics)
#log.debug('%s processed %s metrics' % backend, processed_lines)
# log.debug('%s processed %s metrics' % backend, processed_lines)
ret[backend] = processed_lines
return ret

@@ -508,10 +530,9 @@ def main():
if len(sys.argv) > 1:
(options, args) = parser.parse_args()
# print options
try:
if options.config_file:
cfg = read_config(options.config_file)
except AttributeError:
if options.config_file:
cfg = read_config(options.config_file)
else:
cfg = verify_options(options)
else:
cfg = read_config(config_file)
34 changes: 16 additions & 18 deletions graphios_backends.py
@@ -90,7 +90,7 @@ def build_path(self, vals, m):

def k_not_in_whitelist(self, k):
# return True if k isn't whitelisted
#wl_match = True
# wl_match = True
for pattern in self.whitelist:
if pattern.search(k) is not None:
return False
@@ -106,11 +106,11 @@ def add_measure(self, m):

k = "%s\t%s" % (name, source)

#bail if this metric isn't whitelisted
# bail if this metric isn't whitelisted
if self.k_not_in_whitelist(k):
return None

#add the metric to our gauges dict
# add the metric to our gauges dict
if k not in self.gauges:
self.gauges[k] = {
'name': name,
@@ -131,7 +131,7 @@ def flush_payload(self, headers, g):

try:
f = urllib2.urlopen(req, timeout=self.flush_timeout_secs)
#f.read()
# f.read()
f.close()
except urllib2.HTTPError as error:
self.metrics_sent = 0
@@ -256,11 +256,11 @@ def __init__(self, cfg):
except:
self.test_mode = False

try:
cfg['replace_hostname']
self.replace_hostname = cfg['replace_hostname']
except:
self.replace_hostname = True
# try:
# cfg['replace_hostname']
# self.replace_hostname = cfg['replace_hostname']
# except:
# self.replace_hostname = True

try:
cfg['carbon_plaintext']
@@ -314,10 +314,10 @@ def build_path(self, m):
post = ".%s" % m.GRAPHITEPOSTFIX
else:
post = ""
if self.replace_hostname:
hostname = m.HOSTNAME.replace('.', self.replacement_character)
else:
hostname = m.HOSTNAME
# if self.replace_hostname:
# hostname = m.HOSTNAME.replace('.', self.replacement_character)
# else:
hostname = m.HOSTNAME
if self.use_service_desc:
# we want: (prefix.)hostname.service_desc(.postfix).perfdata
service_desc = self.fix_string(m.SERVICEDESC)
@@ -359,8 +359,7 @@ def send(self, metrics):
port = 2003
else:
port = 2004
self.log.debug("Connecting to carbon at %s:%s" %
(server, port))
self.log.debug("Connecting to carbon at %s:%s" % (server, port))
try:
sock.connect((socket.gethostbyname(server), port))
self.log.debug("connected")
Expand All @@ -377,7 +376,7 @@ def send(self, metrics):
sock.close()
return 0
# this only gets returned if nothing failed.
return len(metrics)
ret += len(metrics)
sock.close()
return ret

@@ -440,8 +439,7 @@ def send(self, metrics):
else:
server = serv
port = 8125
self.log.debug("sending to statsd at %s:%s" %
(server, port))
self.log.debug("sending to statsd at %s:%s" % (server, port))
for m in mlist:
try:
sock.sendto(m, (socket.gethostbyname(server), port))
