Adding multiple nginx instances to collectd's nginx monitoring plugin

Collectd queries nginx's HttpStubStatusModule in order to find the active connections.
The config looks like:
<Plugin "nginx">
URL "https://localhost:8433/nginx_status"
</Plugin>
The plugin is here.
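For reference, the nginx side has to expose the status endpoint that the plugin polls. A minimal sketch of a server block, where the port, location path and allow rule are assumptions chosen to match the URL above (add ssl settings if the status URL really is served over https):
server {
    listen 8433;
    location /nginx_status {
        stub_status on;     # output of the HttpStubStatusModule
        allow 127.0.0.1;    # only local collectd should read this
        deny all;
    }
}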
I have a setup wherein I have 4 nginx instances running on the same physical host, each listening on a different port. How do I make collectd monitor multiple nginx instances? The following does not work:
<Plugin "nginx">
URL "https://localhost:8433/nginx_status"
</Plugin>
<Plugin "nginx">
URL "https://localhost:8434/nginx_status"
</Plugin>

I have written a small script for the collectd Python plugin:
https://github.com/magazov/collectd-multinginx-python
It is very simple to use.
Here is the source code:
#! /usr/bin/env python
import re
import urllib2

import collectd


class Nginx(object):
    def __init__(self):
        self.pattern = re.compile("([A-Z][\w]*).+?(\d+)")
        self.urls = {}

    def do_nginx_status(self):
        for instance, url in self.urls.items():
            try:
                response = urllib2.urlopen(url)
            except urllib2.HTTPError, e:
                collectd.error(str(e))
            except urllib2.URLError, e:
                collectd.error(str(e))
            else:
                data = response.read()
                m = self.pattern.findall(data)
                for key, value in m:
                    metric = collectd.Values()
                    metric.plugin = 'nginx-%s' % instance
                    metric.type_instance = key.lower()
                    metric.type = 'nginx_connections'
                    metric.values = [value]
                    metric.dispatch()
                requests = data.split('\n')[2].split()[-1]
                collectd.debug('Requests %s' % requests)
                metric = collectd.Values()
                metric.plugin = 'nginx-%s' % instance
                metric.type = 'nginx_requests'
                metric.values = [requests]
                metric.dispatch()

    def config(self, obj):
        self.urls = dict((node.key, node.values[0]) for node in obj.children)


nginx = Nginx()
collectd.register_config(nginx.config)
collectd.register_read(nginx.do_nginx_status)
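To hook the script into collectd, each nginx instance gets its own key/URL line inside the Module block; the plugin name suffix ('nginx-web1', 'nginx-web2', ...) comes from that key. A hedged sketch, where the module path, module name, instance names and ports are assumptions:
<Plugin python>
    ModulePath "/usr/lib/collectd/python"   # wherever the script is saved
    Import "nginx"                          # assumes the script file is nginx.py
    <Module nginx>
        web1 "http://localhost:8433/nginx_status"
        web2 "http://localhost:8434/nginx_status"
    </Module>
</Plugin>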

Related

How to set OpenStack API configuration in CentOS 7?

Please tell me which SDK is best for calling OpenStack APIs.
I haven't found much information or tutorials on this subject.
If you are using the CLI, the official OpenStack client is the obvious choice. If you are trying to make programmatic API calls, you can use anything from simple HTTP requests to a client library such as shade for Python or gophercloud for Go.
pip install openstacksdk
Connect to the cloud:
"""
Connect to an OpenStack cloud.
For a full guide see TODO(etoews):link to docs on developer.openstack.org
"""
import argparse
import os
import openstack
from openstack.config import loader
import sys
openstack.enable_logging(True, stream=sys.stdout)
#: Defines the OpenStack Config loud key in your config file,
#: typically in $HOME/.config/openstack/clouds.yaml. That configuration
#: will determine where the examples will be run and what resource defaults
#: will be used to run the examples.
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'devstack-admin')
config = loader.OpenStackConfig()
cloud = openstack.connect(cloud=TEST_CLOUD)
class Opts(object):
def __init__(self, cloud_name='devstack-admin', debug=False):
self.cloud = cloud_name
self.debug = debug
# Use identity v3 API for examples.
self.identity_api_version = '3'
def _get_resource_value(resource_key, default):
return config.get_extra_config('example').get(resource_key, default)
SERVER_NAME = 'openstacksdk-example'
IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.3.5-x86_64-disk')
FLAVOR_NAME = _get_resource_value('flavor_name', 'm1.small')
NETWORK_NAME = _get_resource_value('network_name', 'private')
KEYPAIR_NAME = _get_resource_value('keypair_name', 'openstacksdk-example')
SSH_DIR = _get_resource_value(
'ssh_dir', '{home}/.ssh'.format(home=os.path.expanduser("~")))
PRIVATE_KEYPAIR_FILE = _get_resource_value(
'private_keypair_file', '{ssh_dir}/id_rsa.{key}'.format(
ssh_dir=SSH_DIR, key=KEYPAIR_NAME))
EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image'
def create_connection_from_config():
return openstack.connect(cloud=TEST_CLOUD)
def create_connection_from_args():
parser = argparse.ArgumentParser()
config = loader.OpenStackConfig()
config.register_argparse_arguments(parser, sys.argv[1:])
args = parser.parse_args()
return openstack.connect(config=config.get_one(argparse=args))
def create_connection(auth_url, region, project_name, username, password):
return openstack.connect(
auth_url=auth_url,
project_name=project_name,
username=username,
password=password,
region_name=region,
app_name='examples',
app_version='1.0',
)
Complete guide
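With a connection from any of the helpers above, API calls go through the service proxies; a minimal usage sketch (the cloud entry in clouds.yaml is an assumption):
# Assumes a matching cloud entry (e.g. 'devstack-admin') exists in clouds.yaml.
conn = create_connection_from_config()

# List the servers visible to the authenticated project via the compute proxy.
for server in conn.compute.servers():
    print(server.name, server.status)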

When calling a function, how to feed variable as an arg

I have the code below that works, but instead of calling the function with "www.google.com", I need to be able to pass it as an argument:
python certexp.py www.google.com
import ssl
import OpenSSL
import time


def get_SSL_Expiry_Date(host, port):
    cert = ssl.get_server_certificate((host, 443))
    x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
    raw_date = x509.get_notAfter()
    decoded_date = raw_date.decode("utf-8")
    print (decoded_date)
    dexpires = time.strptime(decoded_date, "%Y%m%d%H%M%Sz")
    print (dexpires.tm_mon,"/",dexpires.tm_mday,"/",dexpires.tm_year)


get_SSL_Expiry_Date("google.com", 443)
Thank you
In Python, the sys module gives you access to command-line arguments.
sys.argv is a list of command-line parameters, with sys.argv[0] being the name of the script and any subsequent elements being user parameters.
This makes your code:
import ssl
import OpenSSL
import time
import sys


def get_SSL_Expiry_Date(host, port):
    # Use the port passed in rather than hardcoding 443.
    cert = ssl.get_server_certificate((host, port))
    x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
    raw_date = x509.get_notAfter()
    decoded_date = raw_date.decode("utf-8")
    print (decoded_date)
    dexpires = time.strptime(decoded_date, "%Y%m%d%H%M%Sz")
    print (dexpires.tm_mon,"/",dexpires.tm_mday,"/",dexpires.tm_year)


if (len(sys.argv) == 1):
    sys.stderr.write("%s: Give hostname as an argument, optionally a port too\n" % (sys.argv[0]))
    sys.exit(1)

hostname = sys.argv[1]
port = 443
if (len(sys.argv) == 3):
    port = int(sys.argv[2])

get_SSL_Expiry_Date(hostname, port)
The port is handled the same way, as an optional second argument. There are other command-line parsing modules too, such as argparse, so you can accept options like --port=; see the sketch below.
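For example, a minimal argparse sketch (the flag name and default are assumptions, not part of the original answer):
import argparse

parser = argparse.ArgumentParser(description="Print the expiry date of a host's TLS certificate")
parser.add_argument("host", help="hostname to check, e.g. www.google.com")
parser.add_argument("--port", type=int, default=443, help="TLS port (default: 443)")
args = parser.parse_args()

# Reuses get_SSL_Expiry_Date() defined above.
get_SSL_Expiry_Date(args.host, args.port)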

Nginx server with uwsgi, flask and sleekxmpp

I'm trying to handle some messages using an nginx server with uWSGI, Flask and SleekXMPP.
Here is the code.
import ssl, json, logging, threading, time
from flask import Flask
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout

smsg = """{
    "version":1,
    "type":"request",
    "messageId":"xxyyzz",
    "payload":
    {
        "deviceType":"ctlr",
        "command":"getDeviceInfo"
    }
}"""


class XMPP(ClientXMPP):
    rosterList = []

    def __init__(self, jid, password):
        ClientXMPP.__init__(self, jid, password)
        self.add_event_handler('session_start', self.session_start, threaded=True)
        self.add_event_handler('message', self.message, threaded=True)
        self.ssl_version = ssl.PROTOCOL_SSLv23

    def session_start(self, event):
        self.send_presence(pshow='online')
        try:
            self.rosterList.append(self.get_roster())
        except IqError as err:
            print 'Error: %s' % err.iq['error']['condition']
        except IqTimeout:
            print 'Error: Request time out'

    def message(self, msg):
        data = msg['body'][12:]
        dictData = json.loads(data)
        print data
        if 'payload' in dictData.keys():
            for lists in dictData['payload']['indexes']:
                print lists
        elif 'message' in dictData.keys():
            print 'Request accepted'


app = Flask(__name__)
#logging.basicConfig(level = logging.DEBUG)
xmpp = XMPP('jid', 'password')


class XmppThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        if xmpp.connect(('server', '5222')):
            xmpp.process(block=True)


xt = XmppThread()
xt.start()


@app.route('/')
def send():
    xmpp.send_message(mto='receiver', mbody=smsg, mtype='chat')
    return '<h1>Send</h1>'
I run the code with uWSGI using these options:
[uwsgi]
uid = uwsgi
gid = uwsgi
pidfile = /run/uwsgi/uwsgi.pid
emperor = /etc/uwsgi.d
stats = /run/uwsgi/stats.sock
chmod-socket = 660
emperor-tyrant = true
cap = setgid,setuid
[uwsgi]
plugin = python
http-socket = :8080
wsgi-file = /var/www/uwsgi/flask_uwsgi.py
callable = app
module = app
enable-threads = True
logto = /var/www/uwsgi/flask_uwsgi.log
When I run uWSGI from the command line, e.g. '/usr/sbin/uwsgi --ini uwsgi.ini', it works well: I can send and receive messages. But when I run it as a CentOS 7 service, receiving works while sending does not.
Do I need some more options, or am I missing something?

How to get the Tor ExitNode IP with Python and Stem

I'm trying to get the external IP that Tor uses, as mentioned here. Using something like myip.dnsomatic.com is very slow. I tried what was suggested in the aforementioned link (Python + Stem to control Tor through the control port), but all you get is the circuits' IPs with no assurance of which one belongs to the exit node, and sometimes the real IP is not even among the results.
Any help would be appreciated.
Also, from here, at the bottom, Amine suggests a way to renew the identity in Tor. There is an instruction, controller.get_newnym_wait(), which he uses to wait until the new connection is ready (controller comes from Controller in stem.control). Isn't there anything like that in Stem (sorry, I checked and double/triple checked and couldn't find anything) that tells you that Tor is changing its identity?
You can get the exit node IP without calling a GeoIP site.
This is, however, covered on a different Stack Exchange site: https://tor.stackexchange.com/questions/3253/how-do-i-trap-circuit-id-none-errors-in-the-stem-script-exit-used-py
As posted by @mirimir, his code below attaches a stream event listener, which is then used to get the circuit ID, the circuit's exit fingerprint, and finally the exit IP address:
#!/usr/bin/python
import functools
import time

from stem import StreamStatus
from stem.control import EventType, Controller


def main():
    print "Tracking requests for tor exits. Press 'enter' to end."
    print

    with Controller.from_port() as controller:
        controller.authenticate()

        stream_listener = functools.partial(stream_event, controller)
        controller.add_event_listener(stream_listener, EventType.STREAM)

        raw_input()  # wait for user to press enter


def stream_event(controller, event):
    if event.status == StreamStatus.SUCCEEDED and event.circ_id:
        circ = controller.get_circuit(event.circ_id)
        exit_fingerprint = circ.path[-1][0]
        exit_relay = controller.get_network_status(exit_fingerprint)
        t = time.localtime()
        print "datetime|%d-%02d-%02d %02d:%02d:%02d" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
        print "website|%s" % (event.target)
        print "exitip|%s" % (exit_relay.address)
        print "exitport|%i" % (exit_relay.or_port)
        print "fingerprint|%s" % exit_relay.fingerprint
        print "nickname|%s" % exit_relay.nickname
        print "locale|%s" % controller.get_info("ip-to-country/%s" % exit_relay.address, 'unknown')
        print


if __name__ == '__main__':
    main()
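Note that Controller.from_port() only works if Tor's control port is enabled; a minimal sketch of the relevant torrc lines (the port and auth method are assumptions, adjust to your setup):
ControlPort 9051
CookieAuthentication 1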
You can use this code to check the current IP (change the SOCKS_PORT value to yours):
import re

import stem.process
import requesocks

SOCKS_PORT = 9053

tor_process = stem.process.launch_tor()

proxy_address = 'socks5://127.0.0.1:{}'.format(SOCKS_PORT)
proxies = {
    'http': proxy_address,
    'https': proxy_address
}

response = requesocks.get("http://httpbin.org/ip", proxies=proxies)
print re.findall(r'[\d.-]+', response.text)[0]

tor_process.kill()
If you want to use SOCKS with requests, you should do:
pip install requests[socks]
Then you can do:
import requests
import json
import stem.process
import stem

SOCKS_PORT = "9999"

tor = stem.process.launch_tor_with_config(
    config={
        'SocksPort': SOCKS_PORT,
    },
    tor_cmd='absolute_path/to/tor.exe',
)

r = requests.Session()
proxies = {
    'http': 'socks5://localhost:' + SOCKS_PORT,
    'https': 'socks5://localhost:' + SOCKS_PORT
}

response = r.get("http://httpbin.org/ip", proxies=proxies)
current_ip = response.json()['origin']

Development Mode For uWSGI/Pylons (Reload new code)

I have a setup in which an nginx server passes control off to uWSGI, which launches a Pylons app using the following in my XML configuration file:
<ini-paste>...</ini-paste>
Everything is working nicely, and I was able to turn on debug mode using the following in the associated ini file:
debug = true
Except debug mode only prints out errors and doesn't reload the code every time a file has been touched. If I were running directly through Paste, I could use the --reload option, but going through uWSGI complicates things.
Does anybody know of a way to tell uWSGI to tell Paste to set the --reload option, or to do this directly in the Paste .ini file?
I used something like the following code to solve this. The monitorFiles(...) function is called on application initialization; it monitors the files and sends the TERM signal when it sees a change.
I'd still much prefer a solution using paster's --reload argument, as I imagine this solution has bugs:
import os
import time
import signal

from deepthought.system import deployment
from multiprocessing.process import Process


def monitorFiles():
    if deployment.getDeployment().dev and not FileMonitor.isRunning:
        monitor = FileMonitor(os.getpid())
        try: monitor.start()
        except: print "Something went wrong..."


class FileMonitor(Process):
    isRunning = False

    def __init__(self, masterPid):
        self.updates = {}
        self.rootDir = deployment.rootDir() + "/src/python"
        self.skip = len(self.rootDir)
        self.masterPid = masterPid
        FileMonitor.isRunning = True
        Process.__init__(self)

    def run(self):
        while True:
            self._loop()
            time.sleep(5)

    def _loop(self):
        for root, _, files in os.walk(self.rootDir):
            for file in files:
                if file.endswith(".py"):
                    self._monitorFile(root, file)

    def _monitorFile(self, root, file):
        mtime = os.path.getmtime("%s/%s" % (root, file))
        moduleName = "%s/%s" % (root[self.skip+1:], file[:-3])
        moduleName = moduleName.replace("/", ".")
        if not moduleName in self.updates:
            self.updates[moduleName] = mtime
        elif self.updates[moduleName] < mtime:
            print "Change detected in %s" % moduleName
            self._restartWorker()
            self.updates[moduleName] = mtime

    def _restartWorker(self):
        os.kill(self.masterPid, signal.SIGTERM)
Use the signal framework in the 0.9.7 tree:
http://projects.unbit.it/uwsgi/wiki/SignalFramework
An example of auto-reloading:
import uwsgi

uwsgi.register_signal(1, "", uwsgi.reload)
uwsgi.add_file_monitor(1, 'myfile.py')

def application(env, start_response):
    ...

Resources