import os
import time

CONSOLE = "/dev/tty0"
logfd = None

def log_me(logstr):
    global logfd
    global CONSOLE
    print(time.strftime("%c")+": "+logstr)
    if not logfd:
        if not os.path.exists(CONSOLE):
            return
        logfd = open(CONSOLE, "a")
    if not logfd:
        return
    logfd.write(logstr+"\n")
    logfd.flush()
    return
test.py
from consolelog import log_me

print("Hello World!")
log_me("This is a Hello World script")
log_me("Logging 2nd time to see how it works.")
log_me("logging 3rd time")
The same code works when used with Python 2 but throws "[Errno 29] Illegal seek" when run with Python 3.
Output with Python 2:
[root@869ebe33-77e8-41b4-918b-eafda978fd98 ~]# python test.py
Hello World!
Wed Apr 14 07:37:03 2021: This is a Hello World script
Wed Apr 14 07:37:03 2021: Logging 2nd time to see how it works.
Wed Apr 14 07:37:03 2021: logging 3rd time
Output with Python 3:
[root@869ebe33-77e8-41b4-918b-eafda978fd98 ~]# python3 test.py
Hello World!
Wed Apr 14 07:37:10 2021: This is a Hello World script
Traceback (most recent call last):
  File "test.py", line 4, in <module>
    log_me("This is a Hello World script")
  File "/root/consolelog.py", line 14, in log_me
    logfd = open(CONSOLE, "a")
OSError: [Errno 29] Illegal seek
With Python 3.x, append mode doesn't work with open("/dev/tty0"): opening in append mode involves seeking to the end of the file, and a TTY device is not seekable (errno 29 is ESPIPE, "Illegal seek"), so we have to open in write mode ("w") instead of append mode ("a").
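A minimal sketch of the fixed consolelog.py, changing only the open mode as described above:

import os
import time

CONSOLE = "/dev/tty0"
logfd = None

def log_me(logstr):
    global logfd
    print(time.strftime("%c") + ": " + logstr)
    if not logfd:
        if not os.path.exists(CONSOLE):
            return
        # "w" instead of "a": append mode seeks to the end of the file,
        # and a TTY device is not seekable (errno 29, ESPIPE).
        logfd = open(CONSOLE, "w")
    logfd.write(logstr + "\n")
    logfd.flush()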
I am running the simplest of examples on asyncio:
import asyncio

async def main():
    print("A")
    await asyncio.sleep.sleep(1)
    print("B")

asyncio.run(main())
and I get a runtime error:
RuntimeError: asyncio.run() cannot be called from a running event loop
I am using Spyder (Python 3.9) on an M1 Mac (...if that matters).
The expected outcome is:
A
B
Process finished with exit code 0
Except for the ".sleep.sleep" typo, this code is fine - "event loop already running" is certainly not an issue for a standalone script with this code.
Maybe you are running it as a notebook cell, with some asyncio state already set up?
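If so, a possible workaround (assuming an IPython-based frontend with a running event loop, where top-level await is allowed) is to await the coroutine directly instead of calling asyncio.run():

# In a notebook cell, with main() fixed to use asyncio.sleep(1):
await main()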
In a bash terminal, I pasted your code as is, and just replaced the incorrect function name:
[gwidion@fedora tmp01]$ cat >bla42.py
import asyncio

async def main():
    print("A")
    await asyncio.sleep.sleep(1)
    print("B")

asyncio.run(main())
[gwidion@fedora tmp01]$ python bla42.py
A
Traceback (most recent call last):
[...]
  File "/home/gwidion/tmp01/bla42.py", line 5, in main
    await asyncio.sleep.sleep(1)
AttributeError: 'function' object has no attribute 'sleep'
[gwidion@fedora tmp01]$ python -c 'open("bla43.py", "w").write(open("bla42.py").read().replace(".sleep.sleep", ".sleep"))'
[gwidion@fedora tmp01]$ python bla43.py
A
B
[gwidion@fedora tmp01]$
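For reference, the generated bla43.py is just the original script with the call fixed:

import asyncio

async def main():
    print("A")
    await asyncio.sleep(1)  # asyncio.sleep is itself the coroutine function
    print("B")

asyncio.run(main())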
I'm trying to use a Julia function from a Streamlit app. I created a toy example to test the interaction: simply returning a matrix from a Julia function, based on a single parameter that specifies the value of the diagonal elements.
I will also note at the outset that both julia_import_method = "api_compiled_false" and julia_import_method = "main_include" work when importing the function in the Spyder IDE (rather than at the command line when launching the Streamlit app via streamlit run streamlit_julia_test.py).
My project directory looks like:
├── my_project_directory
│   ├── julia_test.jl
│   └── streamlit_julia_test.py
The Julia function is in julia_test.jl and simply returns a matrix whose diagonal entries are specified by the v parameter:
function get_matrix_from_julia(v::Int)
    m = [v 0
         0 v]
    return m
end
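Once imported on the Python side, the function can be called directly; as a small sketch (PyJulia normally converts the returned Julia matrix to a NumPy array):

m = get_matrix_from_julia(2)
# expected: [[2, 0], [0, 2]]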
The streamlit app is streamlit_julia_test.py and is defined as:
import os
from io import BytesIO

import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import streamlit as st

# options:
# main_include
# api_compiled_false
# dont_import_julia
julia_import_method = "api_compiled_false"

if julia_import_method == "main_include":
    # works in Spyder IDE
    import julia
    from julia import Main
    Main.include("julia_test.jl")
elif julia_import_method == "api_compiled_false":
    # works in Spyder IDE
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    this_dir = os.getcwd()
    julia_test_path = """include(\""""+ this_dir + """/julia_test.jl\"""" +")"""
    print(julia_test_path)
    jl.eval(julia_test_path)
    get_matrix_from_julia = jl.eval("get_matrix_from_julia")
elif julia_import_method == "dont_import_julia":
    print("Not importing")
else:
    raise ValueError("Not handling this case: " + julia_import_method)

st.header('Using Julia in Streamlit App Example')
st.text("Using Method: " + julia_import_method)

matrix_element = st.selectbox('Set Matrix Diagonal to:', [1, 2, 3])
matrix_numpy = np.array([[matrix_element, 0], [0, matrix_element]])

col1, col2 = st.columns([4, 4])

with col1:
    fig, ax = plt.subplots(figsize=(5, 5))
    sns.heatmap(matrix_numpy, ax=ax, cmap="Blues", annot=True)
    ax.set_title('Matrix Using Python Numpy')
    buf = BytesIO()
    fig.savefig(buf, format="png")
    st.image(buf)

with col2:
    if julia_import_method == "dont_import_julia":
        matrix_julia = matrix_numpy
    else:
        matrix_julia = get_matrix_from_julia(matrix_element)
    fig, ax = plt.subplots(figsize=(5, 5))
    sns.heatmap(matrix_julia, ax=ax, cmap="Blues", annot=True)
    ax.set_title('Matrix from External Julia Script')
    buf = BytesIO()
    fig.savefig(buf, format="png")
    st.image(buf)
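(As a small aside, purely a readability suggestion unrelated to the crash: the include() string above can also be built with an f-string, avoiding the escaped triple quotes, for example julia_test_path = f'include("{os.path.join(os.getcwd(), "julia_test.jl")}")'.)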
If the app were working correctly, it would look like this (which can be reproduced by setting julia_import_method = "dont_import_julia" on line 13):
Testing
When I try julia_import_method = "main_include", I get the well-known error:
julia.core.UnsupportedPythonError: It seems your Julia and PyJulia setup are not supported.
Julia executable:
julia
Python interpreter and libpython used by PyCall.jl:
/Users/myusername/opt/anaconda3/bin/python3
/Users/myusername/opt/anaconda3/lib/libpython3.9.dylib
Python interpreter used to import PyJulia and its libpython.
/Users/myusername/opt/anaconda3/bin/python
/Users/myusername/opt/anaconda3/lib/libpython3.9.dylib
Your Python interpreter "/Users/myusername/opt/anaconda3/bin/python"
is statically linked to libpython. Currently, PyJulia does not fully
support such Python interpreter.
The easiest workaround is to pass `compiled_modules=False` to `Julia`
constructor. To do so, first *reboot* your Python REPL (if this happened
inside an interactive session) and then evaluate:
>>> from julia.api import Julia
>>> jl = Julia(compiled_modules=False)
Another workaround is to run your Python script with `python-jl`
command bundled in PyJulia. You can simply do:
$ python-jl PATH/TO/YOUR/SCRIPT.py
See `python-jl --help` for more information.
For more information, see:
https://pyjulia.readthedocs.io/en/latest/troubleshooting.html
As suggested, when I set julia_import_method = "api_compiled_false", I get a seg fault:
include("my_project_directory/julia_test.jl")
2022-04-03 10:23:13.406 Traceback (most recent call last):
  File "/Users/myusername/opt/anaconda3/lib/python3.9/site-packages/streamlit/script_runner.py", line 430, in _run_script
    exec(code, module.__dict__)
  File "my_project_directory/streamlit_julia_test.py", line 25, in <module>
    jl = Julia(compiled_modules=False)
  File "/Users/myusername/.local/lib/python3.9/site-packages/julia/core.py", line 502, in __init__
    if not self.api.was_initialized:  # = jl_is_initialized()
  File "/Users/myusername/.local/lib/python3.9/site-packages/julia/libjulia.py", line 114, in __getattr__
    return getattr(self.libjulia, name)
  File "/Users/myusername/opt/anaconda3/lib/python3.9/ctypes/__init__.py", line 395, in __getattr__
    func = self.__getitem__(name)
  File "/Users/myuserame/opt/anaconda3/lib/python3.9/ctypes/__init__.py", line 400, in __getitem__
    func = self._FuncPtr((name_or_ordinal, self))
AttributeError: dlsym(0x21b8e5840, was_initialized): symbol not found

signal (11): Segmentation fault: 11
in expression starting at none:0
Allocations: 35278345 (Pool: 35267101; Big: 11244); GC: 36
zsh: segmentation fault  streamlit run streamlit_julia_test.py
I've also tried the alternative recommendation provided in the PyJulia response message regarding the use of:
python-jl my_project_directory/streamlit_julia_test.py
But I get this error when running the python-jl command:
INTEL MKL ERROR: dlopen(/Users/myusername/opt/anaconda3/lib/libmkl_intel_thread.1.dylib, 0x0009): Library not loaded: @rpath/libiomp5.dylib
  Referenced from: /Users/myusername/opt/anaconda3/lib/libmkl_intel_thread.1.dylib
  Reason: tried: '/Applications/Julia-1.7.app/Contents/Resources/julia/bin/../lib/libiomp5.dylib' (no such file), '/usr/local/lib/libiomp5.dylib' (no such file), '/usr/lib/libiomp5.dylib' (no such file).
Intel MKL FATAL ERROR: Cannot load libmkl_intel_thread.1.dylib.
So I'm stuck. Thanks in advance for a modified reproducible example, or for instructions for the following system specs!
System specs:
macOS Monterey 12.2.1 (Apple M1 Pro chip)
Python 3.9.7
Julia 1.7.2
PyJulia 0.5.8.dev
Streamlit 1.7.0
Long story short: I have been given some Python code for a custom OpenAI Gym environment. I can successfully run the code via ExperimentGrid from the command line, but I would like to be able to run the entire experiment from within a Jupyter notebook rather than calling scripts. This would be more convenient for some experiments that I will be doing farther down the road.
My question: Is it possible to execute an experiment on a custom OpenAI Gym environment entirely from within Jupyter Notebook, and if so, how? I've seen plenty of examples of people executing Gym's standard environments (like SpaceInvaders-v0 or CartPole-v0) from Jupyter, but even then, they are calling the environment with
env = gym.make('SpaceInvaders-v0')
and essentially executing that environment's script behind the scenes.
Below is a basic description of how my code is set up to run from the command line, and the errors that I'm getting in Jupyter.
Any advice would be appreciated. I am admittedly rather new to Gym, Python and Linux.
My basic environment code is structured like this in, say, envs/mygames/Custom_Env.py:
various import statements (numpy, gym, pyglet, copy)
class Entity()
class State()
class The_Custom_Env(core.Env) # This is the main environment class
class Shell_Class # This class calls The_Custom_Env and provides some arguments
In mygames/__init__.py, I import the Shell_Class:
from gym.envs.mygames.Custom_Env import Shell_Class
In envs/__init__.py, I have the environment registered:
register(
    id='TEST-v0',
    entry_point='gym.envs.mygames:Shell_Class',
    max_episode_steps=200,
    reward_threshold=25.0,
)
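After this registration, the environment can be created like any built-in one; as a quick sketch:

import gym

env = gym.make('TEST-v0')
obs = env.reset()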
Finally, if I execute a script containing this code from the command line, the experiment works without issue:
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=int, default=4)
    parser.add_argument('--num_runs', type=int, default=1)
    args = parser.parse_args()

    eg = ExperimentGrid(name='super-cool-test')
    eg.add('env_name', 'TEST-v0', '', True)
    eg.add('seed', [10*i for i in range(args.num_runs)])
    eg.add('epochs', [10])
    eg.add('steps_per_epoch', 4000)
    eg.add('ac_kwargs:hidden_sizes', [(32, 32)], 'hid')
    eg.add('ac_kwargs:activation', [torch.nn.ReLU], '')
    eg.add('pi_lr', [0.001])
    eg.add('clip_ratio', 0.3)
    eg.run(ppo_pytorch, num_cpu=args.cpu)
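(This is invoked from the command line as, for example, python my_experiment.py --cpu 4 --num_runs 1, where my_experiment.py is a placeholder name for the script above.)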
My Jupyter Attempt
I put all of the code from Custom_Env.py in cell #1.
I then registered the environment in cell #2:
gym.register(
    id='TEST-v1',
    entry_point='__main__:Shell_Class',
    max_episode_steps=200,
    reward_threshold=25.0,
)
based on this Q/A: Register gym environment that is defined inside a jupyter notebook cell. I then make the environment in cell #3:
gym.make('TEST-v1')
and get this non-descriptive output:
<TimeLimit<Shell_Class< TEST-v1 >>>
In cell #4, I tried to execute ExperimentGrid code directly within Jupyter like so:
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch
num_runs=1
cpu=4
env_name='TEST-v1'
eg = ExperimentGrid(name='Jupyter-test')
eg.add('env_name', env_name, '', True)
eg.add('seed', [10*i for i in range(num_runs)])
eg.add('epochs', 500)
eg.add('steps_per_epoch', 4000)
eg.add('ac_kwargs:hidden_sizes', [(32, 32)], 'hid')
eg.add('ac_kwargs:activation', [torch.nn.ReLU], '')
eg.add('pi_lr', 0.001)
eg.add('clip_ratio', 0.3)
eg.run(ppo_pytorch, num_cpu=cpu)
The experiment starts up as usual but then runs into some kind of error:
> ================================================================================
ExperimentGrid [Jupyter-test] runs over parameters:

env_name []
    TEST-v1
seed [see]
    0
epochs [epo]
    500
steps_per_epoch [ste]
    4000
ac_kwargs:hidden_sizes [hid]
    (32, 32)
ac_kwargs:activation []
    ReLU
pi_lr [pi]
    0.001
clip_ratio [cli]
    0.3
Variants, counting seeds: 1
Variants, not counting seeds: 1
================================================================================
Preparing to run the following experiments...
Jupyter-test_test-v1
================================================================================
Launch delayed to give you a few seconds to review your experiments.
To customize or disable this behavior, change WAIT_BEFORE_LAUNCH in
spinup/user_config.py.
================================================================================
Running experiment:
Jupyter-test_test-v1
with kwargs:
{
    "ac_kwargs": {
        "activation": "ReLU",
        "hidden_sizes": [
            32,
            32
        ]
    },
    "clip_ratio": 0.3,
    "env_name": "TEST-v1",
    "epochs": 500,
    "pi_lr": 0.001,
    "seed": 0,
    "steps_per_epoch": 4000
}
================================================================================
There appears to have been an error in your experiment.
Check the traceback above to see what actually went wrong. The
traceback below, included for completeness (but probably not useful
for diagnosing the error), shows the stack leading up to the
experiment launch.
================================================================================
---------------------------------------------------------------------------
CalledProcessError Traceback (most recent call last)
<ipython-input-14-de843fd528cf> in <module>
15 eg.add('pi_lr', 0.001)
16 eg.add('clip_ratio', 0.3)
---> 17 eg.run(ppo_pytorch, num_cpu=cpu)
~/Downloads/spinningup/spinup/utils/run_utils.py in run(self, thunk, num_cpu, data_dir, datestamp)
544
545 call_experiment(exp_name, thunk_, num_cpu=num_cpu,
--> 546 data_dir=data_dir, datestamp=datestamp, **var)
547
548
~/Downloads/spinningup/spinup/utils/run_utils.py in call_experiment(exp_name, thunk, seed, num_cpu, data_dir, datestamp, **kwargs)
169 cmd = [sys.executable if sys.executable else 'python', entrypoint, encoded_thunk]
170 try:
--> 171 subprocess.check_call(cmd, env=os.environ)
172 except CalledProcessError:
173 err_msg = '\n'*3 + '='*DIV_LINE_WIDTH + '\n' + dedent("""
~/anaconda3/envs/spinningup/lib/python3.6/subprocess.py in check_call(*popenargs, **kwargs)
309 if cmd is None:
310 cmd = popenargs[0]
--> 311 raise CalledProcessError(retcode, cmd)
312 return 0
313
I have a couple of DAGs running which execute Python scripts that basically copy data from A to B. But one of the DAGs throws an error, even though the data still gets copied, so the error somehow has no influence on the execution of the Python script.
The only "special" what is within this dag is, that the python script builds up a connecton to a postgres database using psycopg2~=2.8.5 but not sure if this is somehow the root cause.
I also checked the permissions for the user, which seem to be fine, at least in the dags folder.
Is there any specific timeout value I have to adjust in the config?
[2021-05-19 12:53:42,036] {taskinstance.py:1145} ERROR - Bash command failed 255
Traceback (most recent call last):
  File "/hereisthepath/venv/lib64/python3.6/site-packages/airflow/models/taskinstance.py", line 983, in _run_raw_task
    result = task_copy.execute(context=context)
  File "/hereisthepath/venv/lib64/python3.6/site-packages/airflow/operators/bash_operator.py", line 134, in execute
    raise AirflowException("Bash command failed")
airflow.exceptions.AirflowException: Bash command failed
Update: This is the passage of the operator which fails. I copied the entire function; the error is raised at line 134 (raise AirflowException("Bash command failed")).
def execute(self, context):
    """
    Execute the bash command in a temporary directory
    which will be cleaned afterwards
    """
    self.log.info("Tmp dir root location: \n %s", gettempdir())

    # Prepare env for child process.
    env = self.env
    if env is None:
        env = os.environ.copy()
    airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
    self.log.debug('Exporting the following env vars:\n%s',
                   '\n'.join(["{}={}".format(k, v)
                              for k, v in airflow_context_vars.items()]))
    env.update(airflow_context_vars)

    self.lineage_data = self.bash_command

    with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
        with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
            f.write(bytes(self.bash_command, 'utf_8'))
            f.flush()
            fname = f.name
            script_location = os.path.abspath(fname)
            self.log.info(
                "Temporary script location: %s",
                script_location
            )

            def pre_exec():
                # Restore default signal disposition and invoke setsid
                for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
                    if hasattr(signal, sig):
                        signal.signal(getattr(signal, sig), signal.SIG_DFL)
                os.setsid()

            self.log.info("Running command: %s", self.bash_command)
            self.sub_process = Popen(
                ['bash', fname],
                stdout=PIPE, stderr=STDOUT,
                cwd=tmp_dir, env=env,
                preexec_fn=pre_exec)

            self.log.info("Output:")
            line = ''
            for line in iter(self.sub_process.stdout.readline, b''):
                line = line.decode(self.output_encoding).rstrip()
                self.log.info(line)
            self.sub_process.wait()
            self.log.info(
                "Command exited with return code %s",
                self.sub_process.returncode
            )

            if self.sub_process.returncode:
                raise AirflowException("Bash command failed")

    if self.xcom_push_flag:
        return line
Update 2: It really seems that this behavior is related to psycopg2: I have now tested all other possible error sources, and the error occurs only when I test with the Postgres datasource using the psycopg2 package. Meanwhile I have also upgraded to the most recent version of psycopg2 (2.8.6), but without success.
Maybe this helps for further investigation.
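One detail that may be worth ruling out (an assumption, not a confirmed diagnosis): a Python process that ends via sys.exit(-1) reports exit status 255, which matches what the operator logs as "Bash command failed 255". A sketch of making the copy script's exit status explicit, with the psycopg2 connection closed deterministically (connection parameters are placeholders):

import sys
import psycopg2

def copy_data():
    # Placeholder connection parameters; substitute the real ones.
    conn = psycopg2.connect(host="dbhost", dbname="mydb", user="me", password="...")
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT 1")  # stands in for the actual copy logic
        conn.commit()
    finally:
        conn.close()

if __name__ == "__main__":
    copy_data()
    sys.exit(0)  # make a successful run report status 0 explicitly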
I'm learning Python 3, especially the daemon library. I want my daemon to be called with two possible arguments: start & stop.
So far I have this code :
def start():
    with context:
        pidfile = open(Config.WDIR+scriptname+".pid", 'w')
        pidfile.write(str(getpid()))
        pidfile.close()
        feed_the_db()

def stop(pid):
    try:
        kill(int(pid), 15)
    except ProcessLookupError:
        print("Nothing to kill… (No process with PID "+pid+")")

if __name__ == "__main__":
    scriptname = sys.argv[0]
    context = daemon.DaemonContext(
        working_directory=Config.WDIR,
        pidfile=lockfile.FileLock(Config.WDIR+scriptname),
        stdout=sys.stdout,
        stderr=sys.stderr)
    try:
        if sys.argv[1] == 'start':
            start()
        elif sys.argv[1] == 'stop':
            try:
                pidfile = open(Config.WDIR+scriptname+".pid", 'r')
                pid = pidfile.read()
                pidfile.close()
                remove(name+".pid")
                print(name+" (PID "+pid+")")
                stop(pid)
            except FileNotFoundError:
                print("Nothing to kill… ("+scriptname+".pid not found)")
        else:
            print("\nUnknown option : "+sys.argv[1]+"\n\nUsage "+sys.argv[0]+" <start|stop>\n")
    except IndexError:
        print("\nUsage "+sys.argv[0]+" <start|stop>\n")
It's working but I wonder if I'm doing it the right way.
In particular, why do I have to manually store the PID? Why is it not already contained in the automatically created file:
myhostname-a6982700.3392-7990643415029806679
or in the lock file?
I think you are mixing up the daemon script and the code responsible for managing it.
Usually, in Ubuntu for example, you would control this via upstart:
description "Some Description"
author "your#email-address.com"
start on runlevel [2345]
stop on runlevel [!2345]
exec /path/to/script
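That job definition would live under /etc/init/ (say /etc/init/myscript.conf, a hypothetical name), and the process is then controlled with the system tools instead of hand-rolled pidfile logic:

$ sudo start myscript
$ sudo stop myscript
$ sudo status myscript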
The actual running Python application would never need to store its PID because it always has access to it.
So what you are writing is a script that essentially manages daemon processes. Is that really what you want?
PS: do yourself a favour and get to know the argparse library.
import argparse
parser = argparse.ArgumentParser(description='Some Description')
parser.add_argument('command', help='Either stop or start', choices=['start', 'stop'])
args = parser.parse_args()
print(args.command)
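Running the script then gives you input validation and usage messages for free; for a script saved as mydaemon.py (a hypothetical name), something like:

$ python mydaemon.py start
start
$ python mydaemon.py restart
usage: mydaemon.py [-h] {start,stop}
mydaemon.py: error: argument command: invalid choice: 'restart' (choose from 'start', 'stop')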
It is well worth it.