How to create an app like Jupyter Notebook with interactive programming? - jupyter-notebook

I want to develop an application for interactive programming, just like Jupyter Notebook, but I know little about this area. Can someone tell me some approaches or background knowledge to start developing this app?

Stages:
Create a virtual environment for your application.
Create a virtual environment for running the Jupyter-style code cells.
Implement a mechanism for interaction between the two virtual environments on a client-server basis (a minimal sketch of this step follows the list).
The server sends cells for execution to the client.
The client executes the cells and returns the result to the server.
Implement the graphical interface you need for the server side.
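Before the full server.py / client.py below, here is a minimal sketch of the client-server stage (the port, the authkey and the use of exec with redirect_stdout are illustrative assumptions, not the final design): the server sends one cell of source code over a multiprocessing.connection pipe, and the client executes it and returns the captured output.
# minimal_server_sketch.py (run first, in venv-1)
from multiprocessing.connection import Listener

with Listener(('localhost', 60001), authkey=b'secret') as listener, listener.accept() as conn:
    conn.send("print(6 * 7)")              # one "cell" of code
    print('client replied:', conn.recv())  # captured output comes back

# minimal_client_sketch.py (run second, in venv-2)
import contextlib
import io
from multiprocessing.connection import Client

with Client(('localhost', 60001), authkey=b'secret') as conn:
    source = conn.recv()                        # receive the cell
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        exec(source, {'__name__': '__main__'})  # run it in a fresh namespace
    conn.send(buffer.getvalue())                # send back whatever it printed
The full code below extends this idea with stream redirection, input handling and interrupts.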
The mechanism of interaction between the two virtual environments can be organized as follows:
server.py:
# venv-1
import sys
from multiprocessing.connection import Listener, Connection

def read_write_function(conn_for_execution: Connection, conn_for_interrupting: Connection):
    try:
        while True:
            try:
                std, received_output = conn_for_execution.recv()
            except (ConnectionResetError, KeyboardInterrupt, EOFError) as e:
                print(e)
                break
            if std in ('<stderr>', '<stdout>'):
                file = sys.stderr if std == '<stderr>' else sys.stdout
                print('stream:', std)
                print('message:', repr(received_output)[1:-1], file=file)
            elif std == '<error>':  # error
                print('error:', repr(received_output)[1:-1], file=sys.stderr)
            elif std in ('<block>', '<read>', '<readlines>'):  # next block query or read input
                print('[Ctrl+C to send code block to client]')
                lines = []
                try:
                    while True:
                        line = input(std[1:] + ' ')
                        lines.append(line)
                except (KeyboardInterrupt, EOFError):
                    conn_for_execution.send('\n'.join(lines))
                    print(('' if lines else 'nothing ') + 'sended')
                # --------------------- <!-- only to emulate "interrupt execution"
                if lines and lines[-1] == '#interrupt':
                    print('[SERVER] Sleep before')
                    import time
                    time.sleep(3)
                    conn_for_interrupting.send('interrupt')
                    print('[SERVER] Interrupt message sended')
                # --------------------- --> only to emulate "interrupt execution"
                # --------------------- <!-- only to emulate "exit"
                if lines and lines[-1] == '#exit':
                    print('[SERVER] Sleep before')
                    import time
                    time.sleep(3)
                    conn_for_interrupting.send('exit')
                    print('[SERVER] Exit message sended')
                # --------------------- --> only to emulate "exit"
            elif std == '<readline>':
                print('[one line to send input data to client]')
                conn_for_execution.send(input(std[1:] + ' '))
                print(std[1:] + ' sended')
    except:
        __import__('traceback').print_exc()

ADDRESS = 'localhost'
PORT = 60000
PASS = 'secret'

print('#' * 42)
print('Address:', ADDRESS)
print('Port:', PORT)
print('Pass:', PASS)
print('#' * 42)
print('Waiting for a client...')

# --------------------- <!-- only to run the client app on the server side and prevent Ctrl+C crashes
"""
import signal
import subprocess
import os

def pre_exec():
    signal.signal(signal.SIGINT, signal.SIG_IGN)  # ignore CTRL+C signal in the new process

executable = [os.path.join(os.path.abspath('ClientSide'), 'venv', 'Scripts', 'python'), '-uBq', 'client.py',
              f'--address={ADDRESS}',
              f'--port={PORT}',
              f'--password={PASS}']
if sys.platform.startswith('win'):
    exec_process = subprocess.Popen(executable, stdin=subprocess.DEVNULL,
                                    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
    exec_process = subprocess.Popen(executable, stdin=subprocess.DEVNULL, preexec_fn=pre_exec)
"""
# --------------------- --> only to run the client app on the server side and prevent Ctrl+C crashes

# backlog = 2 --> Two clients: one for executing code blocks and one for interrupting execution
try:
    with Listener((ADDRESS, PORT), authkey=PASS.encode(encoding='utf-8'), backlog=2) as listener, \
            listener.accept() as conn_for_execution, listener.accept() as conn_for_interrupting:
        print('Connections accepted')
        print('#' * 42)
        read_write_function(conn_for_execution, conn_for_interrupting)
except:
    pass
Run:
ServerSide/venv/Scripts/python -uB server.py
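The ServerSide/venv/Scripts/ path above is the Windows virtual-environment layout; on Linux or macOS the interpreter would normally be under venv/bin instead (the same applies to the client command further down):
ServerSide/venv/bin/python -uB server.py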
client.py:
# venv-2
import argparse
import os
import sys
from _thread import get_native_id
from code import InteractiveInterpreter
from io import TextIOWrapper, BytesIO
from multiprocessing.connection import Client, Connection
from threading import Thread, Event

parser = argparse.ArgumentParser(prog='client.py')
parser.add_argument('--address', nargs='?', help='address ("localhost" by default)')
parser.add_argument('--port', nargs='?', help='port ("60000" by default)')
parser.add_argument('--password', nargs='?', help='password ("secret" by default)')
args = parser.parse_args()

if os.path.exists(__file__) and os.path.basename(__file__).startswith('tmp'):
    os.remove(__file__)

class Redirector(TextIOWrapper):
    def __init__(self, conn: Connection, std: TextIOWrapper):
        super().__init__(buffer=BytesIO(), encoding=std.encoding, errors=std.errors,
                         newline=std.newlines, line_buffering=std.line_buffering,
                         write_through=std.write_through)
        self.std = std
        self._conn = conn

    def read(self, size: int | None = None) -> str:
        try:
            self._conn.send(('<read>', 'read operation'))
            return self._conn.recv()
        except BaseException as e:
            print(e, file=sys.__stderr__)
            return ''

    def readline(self, size: int | None = None) -> str:
        try:
            self._conn.send(('<readline>', 'readline operation'))
            return self._conn.recv()
        except BaseException as e:
            print(e, file=sys.__stderr__)
            return ''

    def readlines(self, hint: int | None = None) -> list[str]:
        try:
            self._conn.send(('<readlines>', 'readlines operation'))
            return self._conn.recv().splitlines()
        except BaseException as e:
            print(e, file=sys.__stderr__)
            return []

    def write(self, data):
        try:
            self._conn.send((self.std.name, data))
        except BaseException as e:
            print(e, file=sys.__stderr__)

    def writelines(self, lines: list[str]):
        try:
            self._conn.send((self.std.name, os.linesep.join(lines)))
        except BaseException as e:
            print(e, file=sys.__stderr__)

class CodeBlocksInterpreter(InteractiveInterpreter):
    def __init__(self, conn_for_execution: Connection, conn_for_interrupting: Connection, locals: dict = None):
        super().__init__()
        self.locals = locals
        self._conn_for_execution = conn_for_execution
        self._conn_for_interrupting = conn_for_interrupting
        self._main_thread_id = get_native_id()
        self._ready_for_next_block = Event()
        self._ready_for_next_block.clear()
        self._can_interrupt = Event()
        self._can_interrupt.clear()
        self._thread = Thread(target=self._stop_and_exit_thread, daemon=False)

    def interact(self):
        self._thread.start()
        try:
            filename = '<input>'
            symbol = 'exec'
            while True:
                self._can_interrupt.clear()
                self._ready_for_next_block.wait()
                try:
                    self._conn_for_execution.send(('<block>', 'give me next block'))
                    code_block = self._conn_for_execution.recv() + '\n'
                    code = self.compile(source=code_block, filename=filename, symbol=symbol)
                    if code is None:
                        self.write('EOFError. Code block is incomplete')
                        continue
                    self._can_interrupt.set()
                    self.runcode(code)
                    self._can_interrupt.clear()
                except KeyboardInterrupt as e:
                    print(e, file=sys.__stderr__)
                except (OverflowError, SyntaxError, ValueError):
                    self.showsyntaxerror(filename)
                except SystemExit:
                    break
        except BaseException as e:
            print(e, file=sys.__stderr__)
        try:
            self._conn_for_execution.close()
        except:
            pass
        try:
            self._conn_for_interrupting.close()
        except:
            pass

    def _stop_and_exit_thread(self):
        try:
            while True:
                try:
                    self._ready_for_next_block.set()
                    received = self._conn_for_interrupting.recv()
                    if received == 'interrupt':
                        self._ready_for_next_block.clear()
                        if self._can_interrupt.is_set():
                            import ctypes
                            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self._main_thread_id),
                                                                       ctypes.py_object(KeyboardInterrupt))
                    elif received == 'exit':
                        import ctypes
                        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self._main_thread_id),
                                                                   ctypes.py_object(SystemExit))
                        break
                except (ConnectionResetError, EOFError):
                    break
        except BaseException as e:
            print(e, file=sys.__stderr__)

    def write(self, data: str):
        self._conn_for_execution.send(('<error>', data))

ADDRESS = args.address.strip('"\'') if isinstance(args.address, str) else 'localhost'
PORT = int(args.port) if isinstance(args.port, str) and args.port.isdigit() else 60000
PASS = args.password.strip('"\'').encode('utf-8') if isinstance(args.password, str) else b'secret'

# Two clients: one for executing code blocks and one for interrupting execution
try:
    with Client((ADDRESS, PORT), authkey=PASS) as conn_for_execution, \
            Client((ADDRESS, PORT), authkey=PASS) as conn_for_interrupting:
        sys.stdin = Redirector(conn_for_execution, sys.stdin)
        sys.stdout = Redirector(conn_for_execution, sys.stdout)
        sys.stderr = Redirector(conn_for_execution, sys.stderr)
        sys.__stdin__ = Redirector(conn_for_execution, sys.__stdin__)
        sys.__stdout__ = Redirector(conn_for_execution, sys.__stdout__)
        sys.__stderr__ = Redirector(conn_for_execution, sys.__stderr__)
        code_blocks_interpreter = CodeBlocksInterpreter(conn_for_execution, conn_for_interrupting,
                                                        locals={'__name__': '__main__'})
        code_blocks_interpreter.interact()
except:
    pass

if isinstance(sys.stdin, Redirector):
    sys.stdin = sys.stdin.std
if isinstance(sys.stdout, Redirector):
    sys.stdout = sys.stdout.std
if isinstance(sys.stderr, Redirector):
    sys.stderr = sys.stderr.std
if isinstance(sys.__stdin__, Redirector):
    sys.__stdin__ = sys.__stdin__.std
if isinstance(sys.__stdout__, Redirector):
    sys.__stdout__ = sys.__stdout__.std
if isinstance(sys.__stderr__, Redirector):
    sys.__stderr__ = sys.__stderr__.std
Run after server.py:
ClientSide/venv/Scripts/python -uB client.py
On the server side, enter a code block and press Ctrl+C to send it.
On the client side, the block is executed and the result is transmitted back to the server side.
Examples:
Print to stdout:
[Ctrl+C to send code block to client]
block> print(42 ** 42)
block> <Ctrl+C>
Print to stdout and stderr, raise Exception:
[Ctrl+C to send code block to client]
block> import sys, time
block> print('1', file=sys.stdout); time.sleep(1)
block> print('2', file=sys.stderr); time.sleep(1)
block> raise Exception('3')
block> <Ctrl+C>
Read:
[Ctrl+C to send code block to client]
block> import sys
block> s1 = sys.stdin.read()
block> <Ctrl+C>
read> <Multi-line>
read> <Ctrl+C>
block> s2 = sys.stdin.readline() (or s2 = input())
block> <Ctrl+C>
readline> <One-line>
block> s3 = sys.stdin.readlines()
block> <Ctrl+C>
readlines> <Multi-line>
readlines> <Ctrl+C>
block> print(s1, s2, s3)
block> <Ctrl+C>
Interrupt (#interrupt must be the last line of code):
[Ctrl+C to send code block to client]
block> import time
block> for i in range(10):
block> print(i)
block> time.sleep(1)
block> #interrupt
block> <Ctrl+C>
[SERVER] Sleep before
[SERVER] Interrupt message sended
Exit (#exit must be the last line of code):
[Ctrl+C to send code block to client]
block> import time
block> for i in range(10):
block> print(i)
block> time.sleep(1)
block> #exit
block> <Ctrl+C>
[SERVER] Sleep before
[SERVER] Exit message sended

Related

Python script runs but does not work and no errors are thrown

This program is an API-based program that has been working for a few months and all of a sudden has gone days without pushing anything to Discord. The script looks fine in CMD, but no errors are being thrown. I was wondering if there was a way to eliminate possible issues such as API instability or something obvious. The program is supposed to go to the site www.bitskins.com, pull skins based on the parameters set, and push them as an embed to a Discord channel every 10 minutes.
There are two files that run this program.
Here is the one that uses Bitskins API (bitskins.py):
import requests, json
from datetime import datetime, timedelta

class Item:
    def __init__(self, item):
        withdrawable_at = item['withdrawable_at']
        price = float(item['price'])
        self.available_in = withdrawable_at - datetime.timestamp(datetime.now())
        if self.available_in < 0:
            self.available = True
        else:
            self.available = False
        self.suggested_price = float(item['suggested_price'])
        self.price = price
        self.margin = round(self.suggested_price - self.price, 2)
        self.reduction = round((1 - (self.price / self.suggested_price)) * 100, 2)
        self.image = item['image']
        self.name = item['market_hash_name']
        self.item_id = item['item_id']

    def __str__(self):
        if self.available:
            return "Name: {}\nPrice: {}\nSuggested Price: {}\nReduction: {}%\nAvailable Now!\nLink: https://bitskins.com/view_item?app_id=730&item_id={}".format(self.name, self.price, self.suggested_price, self.reduction, self.item_id)
        else:
            return "Name: {}\nPrice: {}\nSuggested Price: {}\nReduction: {}%\nAvailable in: {}\nLink: https://bitskins.com/view_item?app_id=730&item_id={}".format(self.name, self.price, self.suggested_price, self.reduction, str(timedelta(seconds=self.available_in)), self.item_id)

    def __lt__(self, other):
        return self.reduction < other.reduction

    def __gt__(self, other):
        return self.reduction > other.reduction

def get_url(API_KEY, code):
    PER_PAGE = 30  # the number of items to retrieve. Either 30 or 480.
    return "https://bitskins.com/api/v1/get_inventory_on_sale/?api_key=" + API_KEY + "&code=" + code + "&per_page=" + str(PER_PAGE)

def get_data(url):
    r = requests.get(url)
    data = r.json()
    return data

def get_items(code, API_KEY):
    url = get_url(API_KEY, code)
    try:
        data = get_data(url)
        if data['status'] == "success":
            items = []
            items_dic = data['data']['items']
            for item in items_dic:
                tmp = Item(item)
                if tmp.reduction >= 25 and tmp.price <= 200:  # Minimum discount and maximum price to look for when grabbing items. Currently set at a minimum discount of 25% and a maximum price of $200.
                    items.append(tmp)
            return items
        else:
            raise Exception(data["data"]["error_message"])
    except:
        raise Exception("Couldn't connect to BitSkins.")

# my_token = pyotp.TOTP(my_secret)
# print(my_token.now())  # in python3
And here is the file with Discord's API (solution.py):
#!/bin/env python3.6
import bitskins
import discord
import pyotp, base64, asyncio
from datetime import timedelta, datetime

TOKEN = "Not input for obvious reasons"
API_KEY = "Not input for obvious reasons"
my_secret = 'Not input for obvious reasons'

client = discord.Client()

def get_embed(item):
    embed = discord.Embed(title=item.name, url="https://bitskins.com/view_item?app_id=730&item_id={}".format(item.item_id), color=0xA3FFE8)
    embed.set_author(name="Skin Bot", url="http://www.reactor.gg/", icon_url="https://pbs.twimg.com/profile_images/1050077525471158272/4_R8PsrC_400x400.jpg")
    embed.set_thumbnail(url=item.image)
    embed.add_field(name="Price:", value="${}".format(item.price))
    embed.add_field(name="Discount:", value="{}%".format(item.reduction), inline=True)
    if item.available:
        tmp = "Instantly Withdrawable"
    else:
        tmp = str(timedelta(seconds=item.available_in))
    embed.add_field(name="Availability:", value=tmp, inline=True)
    embed.add_field(name="Suggested Price:", value="${}".format(item.suggested_price), inline=True)
    embed.add_field(name="Profit:", value="${}".format(item.margin), inline=True)
    embed.set_footer(text="Made by Aqyl#0001 | {}".format(datetime.now()), icon_url="https://www.discordapp.com/assets/6debd47ed13483642cf09e832ed0bc1b.png")
    return embed

async def status_task(wait_time=60 * 5):
    while True:
        print("Updated on: {}".format(datetime.now()))
        code = pyotp.TOTP(my_secret)
        try:
            items = bitskins.get_items(code.now(), API_KEY)
            for item in items:
                await client.send_message(client.get_channel("656913641832185878"), embed=get_embed(item))
        except:
            pass
        await asyncio.sleep(wait_time)

@client.event
async def on_ready():
    wait_time = 60 * 10  # 10 mins in this case
    print('CSGO BitSkins Bot')
    print('Made by Aqyl#0001')
    print('Version 1.0.6')
    print('')
    print('Logged in as:')
    print(client.user.name)
    print('------------------------------------------')
    client.loop.create_task(status_task(wait_time))

try:
    client.run(TOKEN)
except:
    print("Couldn't connect to the Discord Server.")
You have a bare except clause; this will catch exceptions that you really don't want to catch.
try:
    items = bitskins.get_items(code.now(), API_KEY)
    for item in items:
        await client.send_message(client.get_channel("656913641832185878"), embed=get_embed(item))
except:
    pass
This is the same as catching any exception raised there (any exception that inherits from BaseException).
To avoid those problems, you should always catch specific exceptions (e.g. TypeError).
Example:
try:
    raise Exception("Example exc")
except Exception as e:
    print(f"Exception caught! {e}")

Airflow Custom Sensor: AttributeError: 'NoneType' object has no attribute 'get_records'

I am running Airflow v1.9.0 with the Celery Executor. I have configured different workers with different queue names like DEV, QA, UAT, PROD. I have written a custom sensor which polls a source db connection and a target db connection, runs different queries, and does some checks before triggering downstream tasks. This has been running fine on multiple workers. On one of the workers, this sensor is raising an AttributeError:
$ airflow test PDI_Incr_20190407_v1 checkCCWatermarkDt 2019-04-09
[2019-04-09 10:02:57,769] {configuration.py:206} WARNING - section/key [celery/celery_ssl_active] not found in config
[2019-04-09 10:02:57,770] {default_celery.py:41} WARNING - Celery Executor will run without SSL
[2019-04-09 10:02:57,771] {__init__.py:45} INFO - Using executor CeleryExecutor
[2019-04-09 10:02:57,817] {models.py:189} INFO - Filling up the DagBag from /home/airflow/airflow/dags
/usr/local/lib/python2.7/site-packages/airflow/models.py:2160: PendingDeprecationWarning: Invalid arguments were passed to ExternalTaskSensor. Support for passing such arguments will be dropped in Airflow 2.0. Invalid arguments were:
*args: ()
**kwargs: {'check_existence': True}
category=PendingDeprecationWarning
[2019-04-09 10:02:57,989] {base_hook.py:80} INFO - Using connection to: 172.16.20.11:1521/GWPROD
[2019-04-09 10:02:57,991] {base_hook.py:80} INFO - Using connection to: dmuat.cwmcwghvymd3.us-east-1.rds.amazonaws.com:1521/DMUAT
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 27, in <module>
args.func(args)
File "/usr/local/lib/python2.7/site-packages/airflow/bin/cli.py", line 528, in test
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
File "/usr/local/lib/python2.7/site-packages/airflow/utils/db.py", line 50, in wrapper
result = func(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/airflow/models.py", line 1584, in run
session=session)
File "/usr/local/lib/python2.7/site-packages/airflow/utils/db.py", line 50, in wrapper
result = func(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/airflow/models.py", line 1493, in _run_raw_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python2.7/site-packages/airflow/operators/sensors.py", line 78, in execute
while not self.poke(context):
File "/home/airflow/airflow/plugins/PDIPlugin.py", line 29, in poke
wm_dt_src = hook_src.get_records(self.sql)
AttributeError: 'NoneType' object has no attribute 'get_records'
Although when I run the same test command from Scheduler CLI, it is running fine. The above issue looks like a database connection issue.
For debugging, I checked the DB Connections from Airflow UI:
Data Profiling -> Ad Hoc Query
Query: Select 1 from dual; -- This worked fine
I also did telnet from the worker node to the DB Host and port and that also went fine.
Custom Sensor Code:
from airflow.plugins_manager import AirflowPlugin
from airflow.hooks.base_hook import BaseHook
from airflow.operators.sensors import SqlSensor

class SensorWatermarkDt(SqlSensor):
    def __init__(self, conn_id, sql, conn_id_tgt, sql_tgt, *args, **kwargs):
        self.sql = sql
        self.conn_id = conn_id
        self.sql_tgt = sql_tgt
        self.conn_id_tgt = conn_id_tgt
        super(SqlSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        hook_src = BaseHook.get_connection(self.conn_id).get_hook()
        hook_tgt = BaseHook.get_connection(self.conn_id_tgt).get_hook()
        self.log.info('Poking: %s', self.sql)
        self.log.info('Poking: %s', self.sql_tgt)
        wm_dt_src = hook_src.get_records(self.sql)
        wm_dt_tgt = hook_tgt.get_records(self.sql_tgt)
        if wm_dt_src <= wm_dt_tgt:
            return False
        else:
            return True

class PDIPlugin(AirflowPlugin):
    name = "PDIPlugin"
    operators = [SensorWatermarkDt]
Airflow DAG Snippet:
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from datetime import timedelta, datetime
from airflow.operators import SensorWatermarkDt
from airflow.operators.sensors import ExternalTaskSensor
from airflow.operators.dummy_operator import DummyOperator

default_args = {
    'owner': 'SenseTeam',
    #'depends_on_past': True,
    'depends_on_past': False,
    'start_date': datetime(2019, 4, 7, 17, 00),
    'email': [],
    'email_on_failure': False,
    'email_on_retry': False,
    'queue': 'PENTAHO_UAT'
}

dag = DAG(dag_id='PDI_Incr_20190407_v1',
          default_args=default_args,
          max_active_runs=1,
          concurrency=1,
          catchup=False,
          schedule_interval=timedelta(hours=24),
          dagrun_timeout=timedelta(minutes=23*60))

checkCCWatermarkDt = \
    SensorWatermarkDt(task_id='checkCCWatermarkDt',
                      conn_id='CCUSER_SOURCE_GWPROD_RPT',
                      sql="SELECT MAX(CC_WM.CREATETIME) as CURRENT_WATERMARK_DATE FROM CCUSER.CCX_CAPTUREREASON_ETL CC_WM INNER JOIN CCUSER.CCTL_CAPTUREREASON_ETL CC_WMLKP ON CC_WM.CAPTUREREASON_ETL = CC_WMLKP.ID AND UPPER(CC_WMLKP.DESCRIPTION)= 'WATERMARK'",
                      conn_id_tgt='RDS_DMUAT_DMCONFIG',
                      sql_tgt="SELECT MAX(CURRENT_WATERMARK_DATE) FROM DMCONFIG.PRESTG_DM_WMD_WATERMARKDATE WHERE SCHEMA_NAME = 'CCUSER'",
                      poke_interval=60,
                      dag=dag)
...
I have restarted web server, scheduler and airflow worker after adding this plugin in this worker node.
What am I missing here?
I have met this problem as well when I tried to use Airflow's hook to connect to a Teradata database, so I read the Airflow code. You can see the get_hook() function at /<your python path (e.g. /usr/lib64/python2.7/)>/site-packages/airflow/models/connection.py:
def get_hook(self):
    try:
        if self.conn_type == 'mysql':
            from airflow.hooks.mysql_hook import MySqlHook
            return MySqlHook(mysql_conn_id=self.conn_id)
        elif self.conn_type == 'google_cloud_platform':
            from airflow.contrib.hooks.bigquery_hook import BigQueryHook
            return BigQueryHook(bigquery_conn_id=self.conn_id)
        elif self.conn_type == 'postgres':
            from airflow.hooks.postgres_hook import PostgresHook
            return PostgresHook(postgres_conn_id=self.conn_id)
        elif self.conn_type == 'hive_cli':
            from airflow.hooks.hive_hooks import HiveCliHook
            return HiveCliHook(hive_cli_conn_id=self.conn_id)
        elif self.conn_type == 'presto':
            from airflow.hooks.presto_hook import PrestoHook
            return PrestoHook(presto_conn_id=self.conn_id)
        elif self.conn_type == 'hiveserver2':
            from airflow.hooks.hive_hooks import HiveServer2Hook
            return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
        elif self.conn_type == 'sqlite':
            from airflow.hooks.sqlite_hook import SqliteHook
            return SqliteHook(sqlite_conn_id=self.conn_id)
        elif self.conn_type == 'jdbc':
            from airflow.hooks.jdbc_hook import JdbcHook
            return JdbcHook(jdbc_conn_id=self.conn_id)
        elif self.conn_type == 'mssql':
            from airflow.hooks.mssql_hook import MsSqlHook
            return MsSqlHook(mssql_conn_id=self.conn_id)
        elif self.conn_type == 'oracle':
            from airflow.hooks.oracle_hook import OracleHook
            return OracleHook(oracle_conn_id=self.conn_id)
        elif self.conn_type == 'vertica':
            from airflow.contrib.hooks.vertica_hook import VerticaHook
            return VerticaHook(vertica_conn_id=self.conn_id)
        elif self.conn_type == 'cloudant':
            from airflow.contrib.hooks.cloudant_hook import CloudantHook
            return CloudantHook(cloudant_conn_id=self.conn_id)
        elif self.conn_type == 'jira':
            from airflow.contrib.hooks.jira_hook import JiraHook
            return JiraHook(jira_conn_id=self.conn_id)
        elif self.conn_type == 'redis':
            from airflow.contrib.hooks.redis_hook import RedisHook
            return RedisHook(redis_conn_id=self.conn_id)
        elif self.conn_type == 'wasb':
            from airflow.contrib.hooks.wasb_hook import WasbHook
            return WasbHook(wasb_conn_id=self.conn_id)
        elif self.conn_type == 'docker':
            from airflow.hooks.docker_hook import DockerHook
            return DockerHook(docker_conn_id=self.conn_id)
        elif self.conn_type == 'azure_data_lake':
            from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
            return AzureDataLakeHook(azure_data_lake_conn_id=self.conn_id)
        elif self.conn_type == 'azure_cosmos':
            from airflow.contrib.hooks.azure_cosmos_hook import AzureCosmosDBHook
            return AzureCosmosDBHook(azure_cosmos_conn_id=self.conn_id)
        elif self.conn_type == 'cassandra':
            from airflow.contrib.hooks.cassandra_hook import CassandraHook
            return CassandraHook(cassandra_conn_id=self.conn_id)
        elif self.conn_type == 'mongo':
            from airflow.contrib.hooks.mongo_hook import MongoHook
            return MongoHook(conn_id=self.conn_id)
        elif self.conn_type == 'gcpcloudsql':
            from airflow.contrib.hooks.gcp_sql_hook import CloudSqlDatabaseHook
            return CloudSqlDatabaseHook(gcp_cloudsql_conn_id=self.conn_id)
    except Exception:
        pass
This means that if your connection type is not one of those listed, get_hook() silently returns None, and that is why the AttributeError happens.
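One way to surface this earlier (a hedged sketch; get_hook_or_fail is my own helper name, not an Airflow API) is to check for None right after fetching the hook:
from airflow.hooks.base_hook import BaseHook

def get_hook_or_fail(conn_id):
    # Connection.get_hook() returns None for connection types it does not know about
    conn = BaseHook.get_connection(conn_id)
    hook = conn.get_hook()
    if hook is None:
        raise ValueError("No hook for conn_type=%r (conn_id=%r); add a custom hook for it."
                         % (conn.conn_type, conn_id))
    return hook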
How to Resolve:
Adding your own hook is the best way in Airflow. I had a sample for Teradata here:
# cat teradata_hook.py
from builtins import str
import jaydebeapi
from airflow.hooks.dbapi_hook import DbApiHook

class TeradataJdbcHook(DbApiHook):
    conn_name_attr = 'teradata_conn_id'
    default_conn_name = 'teradata_default'
    supports_autocommit = True

    def get_conn(self):
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        host = conn.host
        url = 'jdbc:teradata://' + host + '/TMODE=TERA'
        login = conn.login
        psw = conn.password
        jdbc_driver_loc = '/opt/spark-2.3.1-bin-without-hadoop/jars/terajdbc4-16.20.00.06.jar,/opt/spark-2.3.1-bin-without-hadoop/jars/tdgssconfig-16.20.00.06.jar'
        jdbc_driver_name = "com.teradata.jdbc.TeraDriver"
        conn = jaydebeapi.connect(jclassname=jdbc_driver_name,
                                  url=url,
                                  driver_args=[str(login), str(psw)],
                                  jars=jdbc_driver_loc.split(","))
        return conn

    def set_autocommit(self, conn, autocommit):
        """
        Enable or disable autocommit for the given connection.
        :param conn: The connection
        :return:
        """
        conn.jconn.setAutoCommit(autocommit)
Then you can call this hook to connect to the Teradata database (or any other database that has a JDBC driver):
[root@myhost transfer]# cat h.py
import util
from airflow.hooks.base_hook import BaseHook
from teradata_hook import TeradataJdbcHook

sql = "SELECT COUNT(*) FROM TERADATA_TABLE where month_key='202009'"
conn_id = 'teradata_account#dbname'  # this is my environment's id format
hook = TeradataJdbcHook(conn_id)
records = hook.get_records(sql)
print(records)

if str(records[0][0]) in ('0', '',):
    print("No Records")
else:
    print("Has Records")
It returns the result: [(7734133,)]
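To tie this back to the sensor in the question: both of its connections appear to be Oracle (port 1521), so another option, sketched here under that assumption, is to construct the hook explicitly in poke() instead of relying on Connection.get_hook():
from airflow.hooks.oracle_hook import OracleHook

def poke(self, context):
    # building the hooks directly means an unknown conn_type can never yield None
    hook_src = OracleHook(oracle_conn_id=self.conn_id)
    hook_tgt = OracleHook(oracle_conn_id=self.conn_id_tgt)
    wm_dt_src = hook_src.get_records(self.sql)
    wm_dt_tgt = hook_tgt.get_records(self.sql_tgt)
    return wm_dt_src > wm_dt_tgt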

Tornado receive UDP packets from multicast group

I have a server where I want to receive data from a multicast group. Is there any built-in function that I can use to receive these multicast UDP packets?
Edit: Code implementation
I have implemented the code as follows:
#!/usr/bin/env python
import socket
import struct
import os
import errno
import binascii
import tornado.ioloop
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec

class UDPHandler():
    """
    Connect to multicast group
    """
    def __init__(self, ip, port, io_loop):
        self.io_loop = io_loop
        self._multiIP = ip
        self.port = port
        self._sock = None
        self._socket = {}  # fd -> socket object

    def conn(self):
        """
        Listener to multicast group
        """
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._sock.settimeout(3)
        self._sock.bind(('', self.port))
        self._sock.setblocking(0)
        group = socket.inet_aton(self._multiIP)
        mreq = struct.pack('4sL', group, socket.INADDR_ANY)
        self._sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        self._socket[self._sock.fileno()] = self._sock
        print("self._sock:", self._sock)

    def onRx(self, data, addr):
        print("addr, data:", addr, len(str(data)))
        print(data)

    def r(self):
        self.conn()
        add_socket_handler(self._sock, self.onRx, self.io_loop)

def add_socket_handler(sock, callback, io_loop):
    def accept_handler(fd, events):
        while True:
            try:
                data, address = sock.recvfrom(1024)
            except socket.error as e:
                if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    callback(None, None)
            except Exception as e:
                print("except:", e)
                callback(None, None)
            callback(data, address)
    io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)

def periodic():
    # print("periodic")
    None

def main():
    MULTICAST_IP = "224.1.1.10"
    RECEIVE_PORT = 10003
    udpRx = UDPHandler(MULTICAST_IP, RECEIVE_PORT, tornado.ioloop.IOLoop.current())
    udpRx.r()
    tornado.ioloop.PeriodicCallback(periodic, 1000).start()
    tornado.ioloop.IOLoop.current().start()

if __name__ == "__main__":
    main()
Now the problem is that I am getting the same packet in a loop: even if only one packet is received, I keep getting the same packet over and over again. Is there something wrong with the code, especially with add_socket_handler?
Edit 2:
I have added a break statement in the while loop that I had in add_socket_handler, and now it seems to be working well.
def add_socket_handler(sock, callback, io_loop):
    def accept_handler(fd, events):
        while True:
            try:
                data, address = sock.recvfrom(1024)
                callback(data, address)
            except socket.error as e:
                if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    raise
            except Exception as e:
                raise
            break  ## change in here
    io_loop.add_handler(sock.fileno(), accept_handler, io_loop.READ)
Is this how it is supposed to be done?
The break in your add_socket_handler looks backwards. You want to loop until you get EWOULDBLOCK/EAGAIN. (with the break as written, it will still work, but it will be slightly less efficient and might miss packets).
def add_socket_handler(sock, callback, io_loop):
    def read_handler(fd, events):
        while True:
            try:
                data, address = sock.recvfrom(1024)
                callback(data, address)
            except socket.error as e:
                if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
                    return
                raise
    io_loop.add_handler(sock, read_handler, io_loop.READ)
Other than that, this looks right, although I haven't worked with multicast UDP myself.

Nginx server with uwsgi,flask and sleekxmpp

I'm trying to handle some messages using an nginx server with uwsgi, flask and sleekxmpp.
Here is the code.
import ssl, json, logging, threading, time
from flask import Flask
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout

smsg = """{
"version":1,
"type":"request",
"messageId":"xxyyzz",
"payload":
{
"deviceType":"ctlr",
"command":"getDeviceInfo"
}
}"""

class XMPP(ClientXMPP):
    rosterList = []

    def __init__(self, jid, password):
        ClientXMPP.__init__(self, jid, password)
        self.add_event_handler('session_start', self.session_start, threaded=True)
        self.add_event_handler('message', self.message, threaded=True)
        self.ssl_version = ssl.PROTOCOL_SSLv23

    def session_start(self, event):
        self.send_presence(pshow='online')
        try:
            self.rosterList.append(self.get_roster())
        except IqError as err:
            print 'Error: %s' % err.iq['error']['condition']
        except IqTimeout:
            print 'Error: Request time out'

    def message(self, msg):
        data = msg['body'][12:]
        dictData = json.loads(data)
        print data
        if 'payload' in dictData.keys():
            for lists in dictData['payload']['indexes']:
                print lists
        elif 'message' in dictData.keys():
            print 'Request accepted'

app = Flask(__name__)
#logging.basicConfig(level = logging.DEBUG)

xmpp = XMPP('jid', 'password')

class XmppThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        if xmpp.connect(('server', '5222')):
            xmpp.process(block=True)

xt = XmppThread()
xt.start()

@app.route('/')
def send():
    xmpp.send_message(mto='receiver', mbody=smsg, mtype='chat')
    return '<h1>Send</h1>'
I run the code by uwsgi with these options.
[uwsgi]
uid = uwsgi
gid = uwsgi
pidfile = /run/uwsgi/uwsgi.pid
emperor = /etc/uwsgi.d
stats = /run/uwsgi/stats.sock
chmod-socket = 660
emperor-tyrant = true
cap = setgid,setuid
[uwsgi]
plugin = python
http-socket = :8080
wsgi-file = /var/www/uwsgi/flask_uwsgi.py
callable = app
module = app
enable-threads = True
logto = /var/www/uwsgi/flask_uwsgi.log
When I run uwsgi by typing a command like '/usr/sbin/uwsgi --ini uwsgi.ini', it works well: I can send and receive the messages. But when I run this as a CentOS 7 service, receiving works but sending does not.
Do I need some more options, or am I missing something?

How do I share data between worker processes in Elixir?

I have 2 workers
worker(Mmoserver.MessageReceiver, []),
worker(Mmoserver.Main, [])
The MessageReceiver will wait until messages are received on TCP and process them, the Main loop will take that information and act on it. How do I share the info obtained by worker1 with worker2?
Mmoserver.ex
This is the main file that starts the workers
defmodule Mmoserver do
  use Application

  def start(_type, _args) do
    import Supervisor.Spec, warn: false

    IO.puts "Listening for packets..."

    children = [
      # We will add our children here later
      worker(Mmoserver.MessageReceiver, []),
      worker(Mmoserver.Main, [])
    ]

    # Start the main supervisor, and restart failed children individually
    opts = [strategy: :one_for_one, name: AcmeUdpLogger.Supervisor]
    Supervisor.start_link(children, opts)
  end
end
MessageReceiver.ex
This will just start a TCP listener. It should be able to get a message, figure out what it is (by its id), then parse the data and send it to a specific function in Main.
defmodule Mmoserver.MessageReceiver do
  use GenServer
  require Logger

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, :ok, opts)
  end

  def init(:ok) do
    {:ok, _socket} = :gen_udp.open(21337)
  end

  # Handle UDP data
  def handle_info({:udp, _socket, _ip, _port, data}, state) do
    parse_packet(data)
    # Logger.info "Received a secret message! " <> inspect(message)
    {:noreply, state}
  end

  # Ignore everything else
  def handle_info({_, _socket}, state) do
    {:noreply, state}
  end

  def parse_packet(data) do
    # Convert data to string, then split all data
    # WARNING - SPLIT MAY BE EXPENSIVE
    dataString = Kernel.inspect(data)
    vars = String.split(dataString, ",")

    # Get variables
    packetID = Enum.at(vars, 0)
    x = Enum.at(vars, 1)

    # Do stuff with them
    IO.puts "Packet ID:"
    IO.puts packetID
    IO.puts x

    # send data to main
    Mmoserver.Main.handle_data(vars)
  end
end
Main.ex
This is the main loop. It will process all the most recent data received by the tcp listener and act on it. Eventually it will update the game state too.
defmodule Mmoserver.Main do
  use GenServer

  @tickDelay 33

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, [], name: Main)
  end

  def init(state) do
    IO.puts "Main Server Loop started..."
    # start the main loop, parameter is the initial tick value
    mainLoop(0)
    # return, why 1??
    {:ok, 1}
  end

  def handle_data(data) do
    GenServer.cast(:main, {:handle_data, data})
  end

  def handle_info({:handle_data, data}, state) do
    # my_function(data)
    IO.puts "Got here2"
    IO.puts inspect(data)
    {:noreply, state}
  end

  # calls respective game functions
  def mainLoop(-1) do
    IO.inspect "Server Loop has ended!"  # base case, end of loop
  end

  def mainLoop(times) do
    # do work
    # IO.inspect(times)  # operation, or body of for loop

    # sleep
    :timer.sleep(@tickDelay)

    # continue the loop RECURSIVELY
    mainLoop(times + 1)
  end
end
Because Mmoserver.MessageReceiver is going to send messages to Mmoserver.Main, Main has to be started first, and it needs to have a name associated with it:
worker(Mmoserver.Main, []),
worker(Mmoserver.MessageReceiver, [])
The easiest way could be, in your Mmoserver.Main, assuming it is a GenServer:
defmodule Mmoserver.Main do
  use GenServer

  def start_link do
    GenServer.start_link(__MODULE__, [], name: :main)
  end

  # ...
end
You can add a convenience function, plus the implementation one, like:
defmodule Mmoserver.Main do
  # ...

  def handle_data(data) do
    GenServer.cast(:main, {:handle_data, data})
  end

  # GenServer.cast/2 messages are delivered to handle_cast/2
  def handle_cast({:handle_data, data}, state) do
    my_function(data)
    {:noreply, state}
  end
end
So, your MessageReceiver can send a message like:
defmodule Mmoserver.MessageReceiver do
  def when_data_received(data) do
    Mmoserver.Main.handle_data(data)
  end
end
This assumes Mmoserver.MessageReceiver doesn't expect Mmoserver.Main to respond. I've decided to do it this way as you didn't specify how you want to handle the data, and this seems the easiest way to illustrate it.
