I'm trying to extract the status of openvpn.service using the systemd D-Bus API.
In [1]: import dbus
In [2]: sysbus = dbus.SystemBus()
In [3]: systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
In [4]: manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
In [5]: service = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Service')
In [6]: unit = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Unit')
In [7]: unit.ActiveState('openvpn.service')
---------------------------------------------------------------------------
DBusException Traceback (most recent call last)
<ipython-input-7-22857e7dcbd7> in <module>()
----> 1 unit.ActiveState('openvpn.service')
/usr/local/lib/python3.4/dist-packages/dbus/proxies.py in __call__(self, *args, **keywords)
68 # we're being synchronous, so block
69 self._block()
---> 70 return self._proxy_method(*args, **keywords)
71
72 def call_async(self, *args, **keywords):
/usr/local/lib/python3.4/dist-packages/dbus/proxies.py in __call__(self, *args, **keywords)
143 signature,
144 args,
--> 145 **keywords)
146
147 def call_async(self, *args, **keywords):
/usr/local/lib/python3.4/dist-packages/dbus/connection.py in call_blocking(self, bus_name, object_path, dbus_interface, method, signature, args, timeout, byte_arrays, **kwargs)
649 # make a blocking call
650 reply_message = self.send_message_with_reply_and_block(
--> 651 message, timeout)
652 args_list = reply_message.get_args_list(**get_args_opts)
653 if len(args_list) == 0:
DBusException: org.freedesktop.DBus.Error.UnknownMethod: Unknown method 'ActiveState' or interface 'org.freedesktop.systemd1.Unit'.
In [8]: manager.GetUnit('openvpn.service')
Out[8]: dbus.ObjectPath('/org/freedesktop/systemd1/unit/openvpn_2eservice')
In [9]: u = manager.GetUnit('openvpn.service')
In [10]: u.ActiveState
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-10-6998e589f206> in <module>()
----> 1 u.ActiveState
AttributeError: 'dbus.ObjectPath' object has no attribute 'ActiveState'
In [11]: manager.GetUnitFileState('openvpn.service')
Out[11]: dbus.String('enabled')
There's an ActiveState property, which contains a state value that reflects whether the unit is currently active or not. I've succeeded in reading UnitFileState; however, I can't figure out how to read the ActiveState property.
After having a look at some random examples [more permanent link], I've had some luck with
import dbus

sysbus = dbus.SystemBus()

# Proxy for the systemd manager object
systemd1 = sysbus.get_object('org.freedesktop.systemd1',
                             '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')

# Resolve the unit's object path and get a proxy object for it
service = sysbus.get_object('org.freedesktop.systemd1',
                            object_path=manager.GetUnit('openvpn.service'))

# Unit properties are read through the generic org.freedesktop.DBus.Properties interface
interface = dbus.Interface(service,
                           dbus_interface='org.freedesktop.DBus.Properties')
print(interface.Get('org.freedesktop.systemd1.Unit', 'ActiveState'))
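For what it's worth, the same org.freedesktop.DBus.Properties interface also exposes a GetAll method, so all of the unit's properties can be fetched in one round trip. A small sketch building on the snippet above (ActiveState and SubState are standard unit properties):

# Fetch every property of the Unit interface at once
props = interface.GetAll('org.freedesktop.systemd1.Unit')
print(props['ActiveState'], props['SubState'])  # e.g. 'active' 'running'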
[Edit:]
Documenting the versions, just in case:
dbus.version == (1, 2, 14)
systemd 244 (244.1-1-arch)
Python 3.8.1
You probably need to get a proxy for the unit object first, e.g. sysbus.get_object('org.freedesktop.systemd1', u), and then read the ActiveState property from it via the org.freedesktop.DBus.Properties interface (with dbus-python, properties are read with Properties.Get rather than as Python attributes). Your code currently tries to access ActiveState on a dbus.ObjectPath, which is just the object's path, not the object itself.
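A minimal sketch of that approach, reusing sysbus and manager from the question:

u = manager.GetUnit('openvpn.service')  # object path of the unit
unit_obj = sysbus.get_object('org.freedesktop.systemd1', u)
unit_props = dbus.Interface(unit_obj, 'org.freedesktop.DBus.Properties')
print(unit_props.Get('org.freedesktop.systemd1.Unit', 'ActiveState'))  # e.g. 'active'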
I have a script that allows me to connect to an Azure ML Workspace, but it only works locally (interactive authentication) or on compute clusters while running experiments.
I cannot manage to make it work on Compute Instances.
from azureml.core import Workspace
from azureml.core.run import Run, _OfflineRun

run = Run.get_context()
if isinstance(run, _OfflineRun):
    workspace = Workspace(
        "subscription_id",
        "resource_group",
        "workspace_name",
    )
else:
    workspace = run.experiment.workspace
I tried to use https://learn.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.msiauthentication?view=azure-ml-py but it did not work.
from azureml.core.authentication import MsiAuthentication
from azureml.core import Workspace

msi_auth = MsiAuthentication()

workspace = Workspace(
    "subscription_id",
    "resource_group",
    "workspace_name",
    auth=msi_auth,
)
File ~/localfiles/.venv/lib/python3.8/site-packages/azureml/_vendor/azure_cli_core/auth/adal_authentication.py:65, in MSIAuthenticationWrapper.set_token(self)
63 from azureml._vendor.azure_cli_core.azclierror import AzureConnectionError, AzureResponseError
64 try:
---> 65 super(MSIAuthenticationWrapper, self).set_token()
66 except requests.exceptions.ConnectionError as err:
67 logger.debug('throw requests.exceptions.ConnectionError when doing MSIAuthentication: \n%s',
68 traceback.format_exc())
File ~/localfiles/.venv/lib/python3.8/site-packages/msrestazure/azure_active_directory.py:596, in MSIAuthentication.set_token(self)
594 def set_token(self):
595 if _is_app_service():
--> 596 self.scheme, _, self.token = get_msi_token_webapp(self.resource, self.msi_conf)
597 elif "MSI_ENDPOINT" in os.environ:
598 self.scheme, _, self.token = get_msi_token(self.resource, self.port, self.msi_conf)
File ~/localfiles/.venv/lib/python3.8/site-packages/msrestazure/azure_active_directory.py:548, in get_msi_token_webapp(resource, msi_conf)
546 raise RuntimeError(err_msg)
547 _LOGGER.debug('MSI: token retrieved')
--> 548 token_entry = result.json()
549 return token_entry['token_type'], token_entry['access_token'], token_entry
File ~/localfiles/.venv/lib/python3.8/site-packages/requests/models.py:975, in Response.json(self, **kwargs)
971 return complexjson.loads(self.text, **kwargs)
972 except JSONDecodeError as e:
973 # Catch JSON-related errors and raise as requests.JSONDecodeError
974 # This aliases json.JSONDecodeError and simplejson.JSONDecodeError
--> 975 raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
JSONDecodeError: Expecting value: line 1 column 1 (char 0)
I do not want to use SDK v2; it would break all my existing code.
I don't understand why the identity is not automatically managed when starting Compute Instances, the way it is for compute clusters.
Does anyone have a solution for this?
Use the code blocks below to connect to the existing workspace with the Python SDK from a compute instance.
Create a compute instance
from azureml.core import Workspace
ws = Workspace.from_config()
ws.get_details()
Connecting to workspace using Config file:
import os
from azureml.core import Workspace
ws = Workspace.from_config()
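Workspace.from_config() expects a config.json in the current directory or one of its parents. If it is not already present on the compute instance, one way to create it is to instantiate the workspace explicitly once and persist the config with write_config(). A sketch, using the placeholder identifiers from the question:

from azureml.core import Workspace

# One-off: create the workspace handle explicitly, then save config.json
# so later sessions can simply call Workspace.from_config().
ws = Workspace(
    subscription_id="subscription_id",
    resource_group="resource_group",
    workspace_name="workspace_name",
)
ws.write_config(path=".", file_name="config.json")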
I have a simple application that uses Google Cloud Pub/Sub, and as of a few days ago it has ceased working. Whenever I try to interact with the service (create a topic, subscribe to a topic, etc), I receive a DeadlineExceeded: 504 Deadline Exceeded error. For example:
from google.cloud import pubsub_v1
client = pubsub_v1.PublisherClient()
client.create_topic(request={"name": "test_topic"})
I have tried:
Creating new service account credentials
Lowering security on my Xfinity/Comcast router
The one thing that worked temporarily was a factory reset of the router, but then an hour later it stopped working.
Also, this problem exists across all the Python libraries for Google Cloud.
I am quite unfamiliar with networking, so I don't even know where to begin to solve this problem. Thanks in advance.
EDIT:
Full error
In [14]: from google.cloud import pubsub_v1
...: client = pubsub_v1.PublisherClient()
...: client.create_topic(request={"name": "test_topic"})
---------------------------------------------------------------------------
_InactiveRpcError Traceback (most recent call last)
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/api_core/grpc_helpers.py in error_remapped_callable(*args, **kwargs)
66 try:
---> 67 return callable_(*args, **kwargs)
68 except grpc.RpcError as exc:
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/grpc/_channel.py in __call__(self, request, timeout, metadata, credentials, wait_for_ready, compression)
945 wait_for_ready, compression)
--> 946 return _end_unary_response_blocking(state, call, False, None)
947
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/grpc/_channel.py in _end_unary_response_blocking(state, call, with_call, deadline)
848 else:
--> 849 raise _InactiveRpcError(state)
850
_InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.DEADLINE_EXCEEDED
details = "Deadline Exceeded"
debug_error_string = "{"created":"#1634864865.356495000","description":"Deadline Exceeded","file":"src/core/ext/filters/deadline/deadline_filter.cc","file_line":81,"grpc_status":4}"
>
The above exception was the direct cause of the following exception:
DeadlineExceeded Traceback (most recent call last)
<ipython-input-14-e6f64ca6ba79> in <module>
1 from google.cloud import pubsub_v1
2 client = pubsub_v1.PublisherClient()
----> 3 client.create_topic(request={"name": "test_topic"})
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/cloud/pubsub_v1/_gapic.py in <lambda>(self, *a, **kw)
38 return staticmethod(functools.wraps(wrapped_fx)(fx))
39 else:
---> 40 fx = lambda self, *a, **kw: wrapped_fx(self.api, *a, **kw) # noqa
41 return functools.wraps(wrapped_fx)(fx)
42
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/pubsub_v1/services/publisher/client.py in create_topic(self, request, name, retry, timeout, metadata)
479
480 # Send the request.
--> 481 response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
482
483 # Done; return the response.
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/api_core/gapic_v1/method.py in __call__(self, *args, **kwargs)
143 kwargs["metadata"] = metadata
144
--> 145 return wrapped_func(*args, **kwargs)
146
147
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/api_core/retry.py in retry_wrapped_func(*args, **kwargs)
284 self._initial, self._maximum, multiplier=self._multiplier
285 )
--> 286 return retry_target(
287 target,
288 self._predicate,
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/api_core/retry.py in retry_target(target, predicate, sleep_generator, deadline, on_error)
187 for sleep in sleep_generator:
188 try:
--> 189 return target()
190
191 # pylint: disable=broad-except
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/api_core/timeout.py in func_with_timeout(*args, **kwargs)
100 """Wrapped function that adds timeout."""
101 kwargs["timeout"] = self._timeout
--> 102 return func(*args, **kwargs)
103
104 return func_with_timeout
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/google/api_core/grpc_helpers.py in error_remapped_callable(*args, **kwargs)
67 return callable_(*args, **kwargs)
68 except grpc.RpcError as exc:
---> 69 six.raise_from(exceptions.from_grpc_error(exc), exc)
70
71 return error_remapped_callable
~/anaconda3/envs/testvenv/lib/python3.8/site-packages/six.py in raise_from(value, from_value)
DeadlineExceeded: 504 Deadline Exceeded
EDIT 2:
Operating system: macOS
Software firewall: Checked Network settings on my box and the firewall is turned off
Anti-virus software: None
Location: Chicago, USA
Region: Unsure what region my request would go to as it isn't specified
Also, I used my phone as a hotspot and I was able to connect to GCP.
Hi, I'm a student looking to use Jupyter Notebook to represent a dataset for a school task.
import seaborn as sns
spotify = sns.load_dataset('top10s.csv')
This is a dataset that I found online, and when I try to run this code I get an HTTPError:
---------------------------------------------------------------------------
HTTPError Traceback (most recent call last)
<ipython-input-2-af1fc80c3c1b> in <module>
1 import seaborn as sns
----> 2 spotify = sns.load_dataset('top10s.csv')
~\Anaconda3\lib\site-packages\seaborn\utils.py in load_dataset(name, cache, data_home, **kws)
426 os.path.basename(full_path))
427 if not os.path.exists(cache_path):
--> 428 urlretrieve(full_path, cache_path)
429 full_path = cache_path
430
~\Anaconda3\lib\urllib\request.py in urlretrieve(url, filename, reporthook, data)
245 url_type, path = splittype(url)
246
--> 247 with contextlib.closing(urlopen(url, data)) as fp:
248 headers = fp.info()
249
~\Anaconda3\lib\urllib\request.py in urlopen(url, data, timeout, cafile, capath, cadefault, context)
220 else:
221 opener = _opener
--> 222 return opener.open(url, data, timeout)
223
224 def install_opener(opener):
~\Anaconda3\lib\urllib\request.py in open(self, fullurl, data, timeout)
529 for processor in self.process_response.get(protocol, []):
530 meth = getattr(processor, meth_name)
--> 531 response = meth(req, response)
532
533 return response
~\Anaconda3\lib\urllib\request.py in http_response(self, request, response)
639 if not (200 <= code < 300):
640 response = self.parent.error(
--> 641 'http', request, response, code, msg, hdrs)
642
643 return response
~\Anaconda3\lib\urllib\request.py in error(self, proto, *args)
567 if http_err:
568 args = (dict, 'default', 'http_error_default') + orig_args
--> 569 return self._call_chain(*args)
570
571 # XXX probably also want an abstract factory that knows when it makes
~\Anaconda3\lib\urllib\request.py in _call_chain(self, chain, kind, meth_name, *args)
501 for handler in handlers:
502 func = getattr(handler, meth_name)
--> 503 result = func(*args)
504 if result is not None:
505 return result
~\Anaconda3\lib\urllib\request.py in http_error_default(self, req, fp, code, msg, hdrs)
647 class HTTPDefaultErrorHandler(BaseHandler):
648 def http_error_default(self, req, fp, code, msg, hdrs):
--> 649 raise HTTPError(req.full_url, code, msg, hdrs, fp)
650
651 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 404: Not Found
I do not know how to fix this or why I am even getting this issue. I hope somebody can help me, and thank you for your time.
There are two errors in the code.
sns.load_dataset() can only load the example datasets that seaborn hosts online at https://github.com/mwaskom/seaborn-data; it fetches those CSV files over the network, not local files.
When specifying the dataset name, there is no need to include the file extension. Below is sample code to load the tips dataset.
import seaborn as sns
tips = sns.load_dataset("tips")
tips.head()
sns.load_dataset() fetches a dataset from online; it does not import a dataset from your working directory.
Here is the documentation for seaborn's load_dataset function.
Assuming that your dataset top10s.csv is located in the same folder as your Python file, you should use pandas for this instead.
import pandas as pd
spotify = pd.read_csv('top10s.csv')
Be aware that you have to install this library via pip before importing it:
pip install pandas
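Once the CSV is loaded into a DataFrame, it can be passed straight to seaborn's plotting functions. A minimal sketch (the column names 'year' and 'pop' are placeholders; substitute whatever columns actually exist in top10s.csv):

import pandas as pd
import seaborn as sns

spotify = pd.read_csv('top10s.csv')

# Placeholder column names -- replace with real columns from the file.
sns.scatterplot(data=spotify, x='year', y='pop')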
You'll need to install the latest master branch of zipline. pip install zipline only gives you the latest released version; to get the master branch you can install directly from the GitHub repository, for example:
pip install git+https://github.com/quantopian/zipline
I am totally new to Telegram bots. I want to execute a bot.send_message() call in the following code (as commented in the code), but can't do so. Please help.
!pip install python-telegram-bot --upgrade
import telegram
bot = telegram.Bot(token='**************')
bot.send_message(chat_id='******', text="I'm sorry Dave I'm afraid I can't do that.")
It errors out as:
1 bot.send_message('mardepbot',"I'm sorry Dave I'm afraid I can't do that.")
/usr/local/lib/python3.6/dist-packages/telegram/bot.py in decorator(self, *args, **kwargs)
63 def decorator(self, *args, **kwargs):
64 logger.debug('Entering: %s', func.__name__)
---> 65 result = func(self, *args, **kwargs)
66 logger.debug(result)
67 logger.debug('Exiting: %s', func.__name__)
/usr/local/lib/python3.6/dist-packages/telegram/bot.py in decorator(self, *args, **kwargs)
88 data['reply_markup'] = reply_markup
89
---> 90 result = self._request.post(url, data, timeout=kwargs.get('timeout'))
91
92 if result is True:
/usr/local/lib/python3.6/dist-packages/telegram/utils/request.py in post(self, url, data, timeout)
307 result = self._request_wrapper('POST', url,
308 body=json.dumps(data).encode('utf-8'),
--> 309 headers={'Content-Type': 'application/json'})
310
311 return self._parse(result)
/usr/local/lib/python3.6/dist-packages/telegram/utils/request.py in _request_wrapper(self, *args, **kwargs)
221 raise Unauthorized(message)
222 elif resp.status == 400:
--> 223 raise BadRequest(message)
224 elif resp.status == 404:
225 raise InvalidToken()
BadRequest: Chat not found
I just solved a similar issue. Please check:
Your bot's Group Privacy setting must be OFF (BotFather -> Group Privacy -> Disable).
If your bot has already been added to the chat, remove it and re-add it.
Make sure there is a '-' sign before your chat id: if your chat link looks like https://web.telegram.org/#/im?p=g123456789, then your chat_id = '-123456789'.
You can't send a message to a string ('mardepbot') as the address; chat_id has to be a numeric chat ID. If mardepbot is the bot you are using, then you made two errors: you are sending to the bot itself by name, and you should instead use your own numeric chat ID to send a message to yourself.
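To find the numeric chat ID in the first place, one common approach (a sketch, assuming the old synchronous python-telegram-bot API shown in the traceback) is to message the bot from the target chat and then inspect the pending updates:

import telegram

bot = telegram.Bot(token='**************')  # same placeholder token as above

# Send the bot a message from the target chat first, then list the updates
# to see the numeric chat id (negative for groups).
for update in bot.get_updates():
    if update.message:
        print(update.message.chat_id, update.message.text)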
I have a little issue I cannot seem to get my head around. I am trying to query the serial number of a subdomain. I keep getting a NoAnswer error, though it works fine on root domains. It's easier if I just show you:
import socket, dns.resolver

host = "google.com"
querytype = "SOA"
cachingserverslist = {'server1': '4.1.1.1', 'server2': '4.2.2.2'}

for cachingservername, cachingserver in sorted(cachingserverslist.iteritems()):
    query = dns.resolver.Resolver()
    query.nameservers = [socket.gethostbyname(cachingserver)]
    query.Timeout = 2.0

    for a in query.query(host, querytype):
        print a.serial
This gives me the expected result. What I don't understand is that when I change the host variable to any subdomain (or a www host) it errors out with NoAnswer. Here is an IPython session which shows what I mean:
In [1]: import socket, dns.resolver
In [2]: host = "google.com"
In [3]: querytype = "SOA"
In [4]: cachingserverslist = {'server1': '4.1.1.1', 'server2': '4.2.2.2'}
In [5]: for cachingservername, cachingserver in sorted(cachingserverslist.iteritems()) :
...: query = dns.resolver.Resolver()
...: query.nameservers=[socket.gethostbyname(cachingserver)]
...: query.Timeout = 2.0
...:
In [6]: for a in query.query( host , querytype ) :
...: print a.serial
...:
2011121901
In [7]:
In [8]: host = "www.google.com"
In [9]: for a in query.query( host , querytype ) :
print a.serial
....:
....:
---------------------------------------------------------------------------
NoAnswer Traceback (most recent call last)
/var/www/pydns/<ipython console> in <module>()
/usr/local/lib/python2.6/dist-packages/dns/resolver.pyc in query(self, qname, rdtype, rdclass, tcp, source, raise_on_no_answer)
707 raise NXDOMAIN
708 answer = Answer(qname, rdtype, rdclass, response,
--> 709 raise_on_no_answer)
710 if self.cache:
711 self.cache.put((qname, rdtype, rdclass), answer)
/usr/local/lib/python2.6/dist-packages/dns/resolver.pyc in __init__(self, qname, rdtype, rdclass, response, raise_on_no_answer)
127 except KeyError:
128 if raise_on_no_answer:
--> 129 raise NoAnswer
130 if raise_on_no_answer:
131 raise NoAnswer
NoAnswer:
Any insight would be most appreciated. Thanks.
The serial number is an attribute of the SOA ('start of authority') record, which only exists at the apex of a zone. www.google.com is a CNAME rather than a zone apex, so it has no SOA record and therefore no serial number associated with it.
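If the goal is the serial of the zone that a subdomain belongs to, one option is to walk up to the enclosing zone first and then query that zone's SOA. A sketch using dnspython's zone_for_name, written against the dnspython 1.x API from the traceback (where the resolver method is query(); newer releases call it resolve()):

import dns.resolver

host = "www.google.com"

# Find the enclosing zone for the name, then ask that zone for its SOA.
zone = dns.resolver.zone_for_name(host)
answer = dns.resolver.query(zone, "SOA")
for rr in answer:
    print(rr.serial)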