How to set OpenStack API configuration in CentOS 7? - openstack

Please tell me which SDK is best for calling OpenStack APIs.
I haven't found much information or many tutorials on this subject.

If you are using the CLI, the official OpenStack client is the obvious choice. If you are trying to make programmatic API calls, you can use anything from plain HTTP requests to a client library such as shade (or the newer openstacksdk) for Python, or gophercloud for Go.
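For example, a raw call to the Identity (Keystone) v3 API with the requests library might look like the sketch below; the endpoint URL, user, project, domain, and password are placeholders you would replace with your own values:

import requests

# All values below are placeholders; point them at your own Keystone endpoint.
AUTH_URL = "http://controller:5000/v3"

body = {
    "auth": {
        "identity": {
            "methods": ["password"],
            "password": {
                "user": {
                    "name": "demo",
                    "domain": {"name": "Default"},
                    "password": "secret",
                },
            },
        },
        "scope": {"project": {"name": "demo", "domain": {"name": "Default"}}},
    }
}

resp = requests.post(AUTH_URL + "/auth/tokens", json=body)
resp.raise_for_status()
# Keystone returns the token in a response header; send it as X-Auth-Token on later calls.
token = resp.headers["X-Subject-Token"]
print(token)

For anything beyond a quick token request, the openstacksdk shown below saves you from hand-rolling these payloads.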

pip install openstacksdk
Connect to the cloud:
"""
Connect to an OpenStack cloud.
For a full guide see TODO(etoews):link to docs on developer.openstack.org
"""
import argparse
import os
import openstack
from openstack.config import loader
import sys
openstack.enable_logging(True, stream=sys.stdout)
#: Defines the OpenStack Config loud key in your config file,
#: typically in $HOME/.config/openstack/clouds.yaml. That configuration
#: will determine where the examples will be run and what resource defaults
#: will be used to run the examples.
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'devstack-admin')
config = loader.OpenStackConfig()
cloud = openstack.connect(cloud=TEST_CLOUD)
class Opts(object):
def __init__(self, cloud_name='devstack-admin', debug=False):
self.cloud = cloud_name
self.debug = debug
# Use identity v3 API for examples.
self.identity_api_version = '3'
def _get_resource_value(resource_key, default):
return config.get_extra_config('example').get(resource_key, default)
SERVER_NAME = 'openstacksdk-example'
IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.3.5-x86_64-disk')
FLAVOR_NAME = _get_resource_value('flavor_name', 'm1.small')
NETWORK_NAME = _get_resource_value('network_name', 'private')
KEYPAIR_NAME = _get_resource_value('keypair_name', 'openstacksdk-example')
SSH_DIR = _get_resource_value(
'ssh_dir', '{home}/.ssh'.format(home=os.path.expanduser("~")))
PRIVATE_KEYPAIR_FILE = _get_resource_value(
'private_keypair_file', '{ssh_dir}/id_rsa.{key}'.format(
ssh_dir=SSH_DIR, key=KEYPAIR_NAME))
EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image'
def create_connection_from_config():
return openstack.connect(cloud=TEST_CLOUD)
def create_connection_from_args():
parser = argparse.ArgumentParser()
config = loader.OpenStackConfig()
config.register_argparse_arguments(parser, sys.argv[1:])
args = parser.parse_args()
return openstack.connect(config=config.get_one(argparse=args))
def create_connection(auth_url, region, project_name, username, password):
return openstack.connect(
auth_url=auth_url,
project_name=project_name,
username=username,
password=password,
region_name=region,
app_name='examples',
app_version='1.0',
)
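Once a connection is created, resources are accessed through its service proxies. A minimal sketch of using the connection above (assuming the cloud named in your clouds.yaml is reachable):

conn = create_connection_from_config()

# List the compute instances visible to the configured cloud/project.
for server in conn.compute.servers():
    print(server.name)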
Complete guide

Related

When calling a function, how to pass a variable as an argument

I have the code below and it works, but instead of hard-coding "www.google.com" I need to be able to pass it as an argument:
python certexp.py www.google.com
import ssl
import OpenSSL
import time


def get_SSL_Expiry_Date(host, port):
    # Fetch the PEM certificate from the server and parse its notAfter field.
    cert = ssl.get_server_certificate((host, port))
    x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
    raw_date = x509.get_notAfter()
    decoded_date = raw_date.decode("utf-8")
    print(decoded_date)
    dexpires = time.strptime(decoded_date, "%Y%m%d%H%M%Sz")
    print(dexpires.tm_mon, "/", dexpires.tm_mday, "/", dexpires.tm_year)


get_SSL_Expiry_Date("google.com", 443)
Thank you
In Python, the sys module handles command-line arguments.
sys.argv gives you a list of command-line parameters, with sys.argv[0] being the name of the script and any subsequent elements being the user-supplied arguments.
This makes your code:
import ssl
import OpenSSL
import time
import sys


def get_SSL_Expiry_Date(host, port):
    cert = ssl.get_server_certificate((host, port))
    x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
    raw_date = x509.get_notAfter()
    decoded_date = raw_date.decode("utf-8")
    print(decoded_date)
    dexpires = time.strptime(decoded_date, "%Y%m%d%H%M%Sz")
    print(dexpires.tm_mon, "/", dexpires.tm_mday, "/", dexpires.tm_year)


if len(sys.argv) == 1:
    sys.stderr.write("%s: Give hostname as an argument, optionally a port too\n" % (sys.argv[0]))
    sys.exit(1)

hostname = sys.argv[1]
port = 443
if len(sys.argv) == 3:
    port = int(sys.argv[2])

get_SSL_Expiry_Date(hostname, port)
Obviously you could extend that kind of handling for the port too. There are other command-line parsing modules as well, so you can accept options like --port= etc.
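As a minimal sketch of that approach using the standard argparse module (the option name --port here is just illustrative):

import argparse

parser = argparse.ArgumentParser(description="Check SSL certificate expiry")
parser.add_argument("host", help="hostname to check, e.g. www.google.com")
parser.add_argument("--port", type=int, default=443, help="TLS port (default: 443)")
args = parser.parse_args()

get_SSL_Expiry_Date(args.host, args.port)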

Telethon MessageMediaDocument MD5

I'm working on a Telegram client using the Telethon library. I fetch recent messages from a channel and filter for messages containing an APK file. Here is part of the Python code:
import time
import config
import database
from datetime import datetime
from telethon import TelegramClient, sync
from telethon.tl.types import Channel, MessageMediaDocument
from telethon.tl.functions.messages import GetHistoryRequest


def extract_channels(dialogs):
    result = []
    if dialogs:
        for dialog in dialogs:
            entity = dialog.entity
            if isinstance(entity, Channel):
                result.append(entity)
    return result


client = TelegramClient('some_name', config.api_id, config.api_hash)
me = client.start(
    phone=config.phone_number,
    password=config.two_factor_auth_password
)
open_dialogs = me.get_dialogs()
channels = extract_channels(open_dialogs)
database.connect()

for channel in channels:
    last_message_id = database.get_last_message_id(channel.id)
    channel_messages = client(GetHistoryRequest(
        peer=channel,
        offset_id=0,
        offset_date=None,
        add_offset=0,
        limit=0 if last_message_id else config.limit,
        max_id=0,
        min_id=last_message_id,
        hash=0)
    )
    messages = channel_messages.messages
    if messages:
        for message in messages[::-1]:
            media = message.media
            if isinstance(media, MessageMediaDocument):
                if media.document.mime_type.lower() == config.apk_mime_type:
                    # apk_md5 = media.md5 ???
                    check_apk_md5_in_db(apk_md5)
    time.sleep(config.wait_seconds)

database.close()
Then I need to get the MD5 of the APK, but I would have to download the APK file first, which is costly for my current environment because there are a lot of APK files to fetch. So is there any way to obtain the MD5 of the file in a message without downloading it? I've searched the Telethon and Telegram documentation but I couldn't find anything useful.

How to get the Tor ExitNode IP with Python and Stem

I'm trying to get the external IP that Tor uses, as mentioned here. Using something like myip.dnsomatic.com is very slow. I tried what was suggested in the aforementioned link (Python + Stem to control Tor through the control port), but all you get are the circuits' IPs with no assurance of which one belongs to the exit node, and sometimes the real IP is not even among the results.
Any help would be appreciated.
Also, from here, at the bottom, Amine suggests a way to renew the identity in Tor. There is a call, controller.get_newnym_wait(), which he uses to wait until the new connection is ready (controller is a Controller from stem.control). Isn't there anything like that in Stem (sorry, I checked and double/triple checked and couldn't find anything) that tells you that Tor is changing its identity?
You can get the exit node IP without calling a GeoIP site.
This is, however, covered on a different Stack Exchange site: https://tor.stackexchange.com/questions/3253/how-do-i-trap-circuit-id-none-errors-in-the-stem-script-exit-used-py
As posted by mirimir, the code below essentially attaches a stream event listener function, which is then used to get the circuit ID, the exit relay's fingerprint, and finally the exit IP address:
#!/usr/bin/python
import functools
import time

from stem import StreamStatus
from stem.control import EventType, Controller


def main():
    print "Tracking requests for tor exits. Press 'enter' to end."
    print

    with Controller.from_port() as controller:
        controller.authenticate()

        stream_listener = functools.partial(stream_event, controller)
        controller.add_event_listener(stream_listener, EventType.STREAM)

        raw_input()  # wait for user to press enter


def stream_event(controller, event):
    if event.status == StreamStatus.SUCCEEDED and event.circ_id:
        circ = controller.get_circuit(event.circ_id)
        exit_fingerprint = circ.path[-1][0]
        exit_relay = controller.get_network_status(exit_fingerprint)
        t = time.localtime()

        print "datetime|%d-%02d-%02d %02d:%02d:%02d" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
        print "website|%s" % (event.target)
        print "exitip|%s" % (exit_relay.address)
        print "exitport|%i" % (exit_relay.or_port)
        print "fingerprint|%s" % exit_relay.fingerprint
        print "nickname|%s" % exit_relay.nickname
        print "locale|%s" % controller.get_info("ip-to-country/%s" % exit_relay.address, 'unknown')
        print


if __name__ == '__main__':
    main()
You can use this code to check the current IP (change the SOCKS_PORT value to yours):
import re

import requesocks
import stem.process

SOCKS_PORT = 9053

tor_process = stem.process.launch_tor()

proxy_address = 'socks5://127.0.0.1:{}'.format(SOCKS_PORT)
proxies = {
    'http': proxy_address,
    'https': proxy_address
}

response = requesocks.get("http://httpbin.org/ip", proxies=proxies)
print re.findall(r'[\d.-]+', response.text)[0]

tor_process.kill()
If you want to use SOCKS proxies with requests, you should install the SOCKS extra:
pip install requests[socks]
Then you can do:
import requests
import json
import stem.process
import stem

SOCKS_PORT = "9999"

tor = stem.process.launch_tor_with_config(
    config={
        'SocksPort': SOCKS_PORT,
    },
    tor_cmd='absolute_path/to/tor.exe',
)

r = requests.Session()
proxies = {
    'http': 'socks5://localhost:' + SOCKS_PORT,
    'https': 'socks5://localhost:' + SOCKS_PORT
}
response = r.get("http://httpbin.org/ip", proxies=proxies)
current_ip = response.json()['origin']
print(current_ip)
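On the identity-renewal part of the question: Stem's Controller does expose this. A minimal sketch, assuming the control port is 9051 and authentication (cookie or password) is configured:

import time

from stem import Signal
from stem.control import Controller

with Controller.from_port(port=9051) as controller:
    controller.authenticate()
    # get_newnym_wait() reports how long Tor wants us to wait before it will
    # accept another NEWNYM; sleep that long, then request a fresh circuit.
    time.sleep(controller.get_newnym_wait())
    controller.signal(Signal.NEWNYM)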

Adding multiple nginx to collectd's monitoring plugin

Collectd queries nginx's HttpStubStatusModule in order to find the active connections.
The config looks like:
<Plugin "nginx">
URL "https://localhost:8433/nginx_status"
</Plugin>
The plugin is here.
I have a setup wherein I have 4 nginx instances running on the same physical host, each listening on a different port. How do I make collectd monitor multiple nginx instances? The following does not work:
<Plugin "nginx">
URL "https://localhost:8433/nginx_status"
</Plugin>
<Plugin "nginx">
URL "https://localhost:8434/nginx_status"
</Plugin>
I have written a small script for the collectd Python plugin:
https://github.com/magazov/collectd-multinginx-python
It is very simple to use.
Here is the source code:
#! /usr/bin/env python
import re
import urllib2

import collectd


class Nginx(object):
    def __init__(self):
        self.pattern = re.compile(r"([A-Z][\w]*).+?(\d+)")
        self.urls = {}

    def do_nginx_status(self):
        for instance, url in self.urls.items():
            try:
                response = urllib2.urlopen(url)
            except urllib2.HTTPError, e:
                collectd.error(str(e))
            except urllib2.URLError, e:
                collectd.error(str(e))
            else:
                data = response.read()
                m = self.pattern.findall(data)
                for key, value in m:
                    metric = collectd.Values()
                    metric.plugin = 'nginx-%s' % instance
                    metric.type_instance = key.lower()
                    metric.type = 'nginx_connections'
                    metric.values = [value]
                    metric.dispatch()
                requests = data.split('\n')[2].split()[-1]
                collectd.debug('Requests %s' % requests)
                metric = collectd.Values()
                metric.plugin = 'nginx-%s' % instance
                metric.type = 'nginx_requests'
                metric.values = [requests]
                metric.dispatch()

    def config(self, obj):
        self.urls = dict((node.key, node.values[0]) for node in obj.children)


nginx = Nginx()
collectd.register_config(nginx.config)
collectd.register_read(nginx.do_nginx_status)
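With that script loaded through collectd's python plugin, the configuration might look roughly like this (the module name and the instance keys are just illustrative; the config() callback turns each child line inside the Module block into an instance-name/URL pair):
<Plugin python>
    ModulePath "/path/to/the/script"
    Import "nginx"
    <Module nginx>
        web1 "http://localhost:8433/nginx_status"
        web2 "http://localhost:8434/nginx_status"
    </Module>
</Plugin>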

Development Mode For uWSGI/Pylons (Reload new code)

I have a setup such that an nginx server passes control off to uWSGI, which launches a Pylons app using the following in my XML configuration file:
<ini-paste>...</ini-paste>
Everything is working nicely, and I was able to set it to debug mode with the following in the associated ini file:
debug = true
Except debug mode only prints out errors; it doesn't reload the code every time a file has been touched. If I were running directly through paste, I could use the --reload option, but going through uWSGI complicates things.
Does anybody know of a way to tell uWSGI to tell paste to set the --reload option, or to do this directly in the paste .ini file?
I used something like the following code to solve this. The monitorFiles(...) function is called on application initialization; it monitors the files and sends the TERM signal when it sees a change.
I'd still much prefer a solution using paster's --reload argument, as I imagine this solution has bugs:
import os
import time
import signal

from deepthought.system import deployment
from multiprocessing.process import Process


def monitorFiles():
    if deployment.getDeployment().dev and not FileMonitor.isRunning:
        monitor = FileMonitor(os.getpid())
        try:
            monitor.start()
        except:
            print "Something went wrong..."


class FileMonitor(Process):
    isRunning = False

    def __init__(self, masterPid):
        self.updates = {}
        self.rootDir = deployment.rootDir() + "/src/python"
        self.skip = len(self.rootDir)
        self.masterPid = masterPid
        FileMonitor.isRunning = True
        Process.__init__(self)

    def run(self):
        while True:
            self._loop()
            time.sleep(5)

    def _loop(self):
        for root, _, files in os.walk(self.rootDir):
            for file in files:
                if file.endswith(".py"):
                    self._monitorFile(root, file)

    def _monitorFile(self, root, file):
        mtime = os.path.getmtime("%s/%s" % (root, file))
        moduleName = "%s/%s" % (root[self.skip + 1:], file[:-3])
        moduleName = moduleName.replace("/", ".")
        if not moduleName in self.updates:
            self.updates[moduleName] = mtime
        elif self.updates[moduleName] < mtime:
            print "Change detected in %s" % moduleName
            self._restartWorker()
            self.updates[moduleName] = mtime

    def _restartWorker(self):
        os.kill(self.masterPid, signal.SIGTERM)
Use the signal framework in the 0.9.7 tree:
http://projects.unbit.it/uwsgi/wiki/SignalFramework
An example of auto-reloading:
import uwsgi

uwsgi.register_signal(1, "", uwsgi.reload)
uwsgi.add_file_monitor(1, 'myfile.py')


def application(env, start_response):
    ...
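Newer uWSGI releases also offer the Python plugin's py-autoreload option, which polls the loaded Python modules and reloads the instance when one changes. A sketch for the ini file (the value is the polling interval in seconds; intended for development only):
py-autoreload = 2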
