Tornado receive UDP packets from multicast group - asynchronous

I have a server where I want to receive data from multicast group. Is there any inbuilt function that I can use to receive this multicast UDP packets?
Edit: Code implementation
I have implemented the code and that follows like this:
#!/usr/bin/env python
import socket
import struct
import os
import errno
import binascii
import tornado.ioloop
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
class UDPHandler():
"""
Connect to multicast group
"""
def __init__(self, ip, port, io_loop):
self.io_loop = io_loop
self._multiIP = ip
self.port = port
self._sock = None
self._socket = {} # fd -> socket object
def conn(self):
"""
Listner to multicast group
"""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.settimeout(3)
self._sock.bind(('', self.port))
self._sock.setblocking(0)
group = socket.inet_aton(self._multiIP)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
self._sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
self._socket[self._sock.fileno()] = self._sock
print("self._sock:", self._sock)
def onRx(self, data, addr):
print("addr, data:", addr, len(str(data)))
print(data)
def r(self):
self.conn()
add_socket_handler(self._sock, self.onRx, self.io_loop)
def add_socket_handler(sock, callback, io_loop):
def accept_handler(fd, events):
while True:
try:
data, address = sock.recvfrom(1024)
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
callback(None, None)
except Exception as e:
print("except:", e)
callback(None, None)
callback(data, address)
io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)
def periodic():
# print("periodic")
None
def main():
MULTICAST_IP = "224.1.1.10"
RECEIVE_PORT = 10003
udpRx = UDPHandler(MULTICAST_IP, RECEIVE_PORT, tornado.ioloop.IOLoop.current())
udpRx.r()
tornado.ioloop.PeriodicCallback(periodic, 1000).start()
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
Now the problem is is am getting same packet in a loop even if I receive one packet I am receiving the same packet over and over again. Is there something wrong with the code? Especially with add_socket_handler?
Edit 2:
I have added a break statement in the while loop that I had in add_socket_handler and now it seems to be working good.
def add_socket_handler(sock, callback, io_loop):
def accept_handler(fd, events):
while True:
try:
data, address = sock.recvfrom(1024)
callback(data, address)
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
except Exception as e:
raise
break ## change in here
io_loop.add_handler(sock.fileno(), accept_handler, io_loop.READ)
Is this how it is suppose to be done ?

The break in your add_socket_handler looks backwards. You want to loop until you get EWOULDBLOCK/EAGAIN. (with the break as written, it will still work, but it will be slightly less efficient and might miss packets).
def add_socket_handler(sock, callback, io_loop):
def read_handler(fd, events):
while True:
try:
data, address = sock.recvfrom(1024)
callback(data, address):
except socket.error as e:
if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
io_loop.add_handler(sock, read_handler, io_loop.READ)
Other than that, this looks right, although I haven't worked with multicast UDP myself.

Related

How to create a App like jupyter notebook with interactive programming?

Now, i want to develope a application about interactive programming ,just like jupyter notebook ,but i know little about this aspect,can someone tell me some ways or knowledge to start develope this app.
Stages:
Create a virtual environment for your application.
Create a virtual environment for running jupyter-cells of code.
Implement a mechanism for interaction between two virtual environments on a client-server basis.
The server sends cells for execution to the client.
The client executes the cells and returns the result to the server.
Implement the graphical interface you need for the server side.
The mechanism of interaction between two virtual environments can be organized as follows
server.py:
# venv-1
import sys
from multiprocessing.connection import Listener, Connection
def read_write_function(conn_for_execution: Connection, conn_for_interrupting: Connection):
try:
while True:
try:
std, received_output = conn_for_execution.recv()
except (ConnectionResetError, KeyboardInterrupt, EOFError) as e:
print(e)
break
if std in ('<stderr>', '<stdout>'):
file = sys.stderr if std == '<stderr>' else sys.stdout
print('stream:', std)
print('message:', repr(received_output)[1:-1], file=file)
elif std == '<error>': # error
print('error:', repr(received_output)[1:-1], file=sys.stderr)
elif std in ('<block>', '<read>', '<readlines>'): # next block query or read input
print('[Ctrl+C to send code block to client]')
lines = []
try:
while True:
line = input(std[1:] + ' ')
lines.append(line)
except (KeyboardInterrupt, EOFError):
conn_for_execution.send('\n'.join(lines))
print(('' if lines else 'nothing ') + 'sended')
# --------------------- <!-- only to emulate "interrupt execution"
if lines and lines[-1] == '#interrupt':
print('[SERVER] Sleep before')
import time
time.sleep(3)
conn_for_interrupting.send('interrupt')
print('[SERVER] Interrupt message sended')
# --------------------- --> only to emulate "interrupt execution"
# --------------------- <!-- only to emulate "exit"
if lines and lines[-1] == '#exit':
print('[SERVER] Sleep before')
import time
time.sleep(3)
conn_for_interrupting.send('exit')
print('[SERVER] Exit message sended')
# --------------------- --> only to emulate "exit"
elif std == '<readline>':
print('[one line to send input data to client]')
conn_for_execution.send(input(std[1:] + ' '))
print(std[1:] + ' sended')
except:
__import__('traceback').print_exc()
ADDRESS = 'localhost'
PORT = 60000
PASS = 'secret'
print('#' * 42)
print('Address:', ADDRESS)
print('Port:', PORT)
print('Pass:', PASS)
print('#' * 42)
print('Waiting for a client...')
# --------------------- <!-- only to run the client app on the server side and prevent Ctrl+C crashes
"""
import signal
import subprocess
import os
def pre_exec():
signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore CTRL+C signal in the new process
executable = [os.path.join(os.path.abspath('ClientSide'), 'venv', 'Scripts', 'python'), '-uBq', 'client.py',
f'--address={ADDRESS}',
f'--port={PORT}',
f'--password={PASS}',
stdin=subprocess.DEVNULL]
if sys.platform.startswith('win'):
exec_process = subprocess.Popen(executable, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
exec_process = subprocess.Popen(executable, preexec_fn=pre_exec)
"""
# --------------------- --> only to run the client app on the server side and prevent Ctrl+C crashes
# backlog = 2 --> Two clients: one for executing code blocks and one for interrupting execution
try:
with Listener((ADDRESS, PORT), authkey=PASS.encode(encoding='utf-8'), backlog=2) as listener, \
listener.accept() as conn_for_execution, listener.accept() as conn_for_interrupting:
print('Connections accepted')
print('#' * 42)
read_write_function(conn_for_execution, conn_for_interrupting)
except:
pass
Run:
ServerSide/venv/Scripts/python -uB server.py
client.py:
# venv-2
import argparse
import os
import sys
from _thread import get_native_id
from code import InteractiveInterpreter
from io import TextIOWrapper, BytesIO
from multiprocessing.connection import Client, Connection
from threading import Thread, Event
parser = argparse.ArgumentParser(prog='client.py')
parser.add_argument('--address', nargs='?', help='address ("localhost" by default)')
parser.add_argument('--port', nargs='?', help='port ("60000" by default)')
parser.add_argument('--password', nargs='?', help='password ("secret" by default)')
args = parser.parse_args()
if os.path.exists(__file__) and os.path.basename(__file__).startswith('tmp'):
os.remove(__file__)
class Redirector(TextIOWrapper):
def __init__(self, conn: Connection, std: TextIOWrapper):
super().__init__(buffer=BytesIO(), encoding=std.encoding, errors=std.errors,
newline=std.newlines, line_buffering=std.line_buffering,
write_through=std.write_through)
self.std = std
self._conn = conn
def read(self, size: int | None = None) -> str:
try:
self._conn.send(('<read>', 'read operation'))
return self._conn.recv()
except BaseException as e:
print(e, file=sys.__stderr__)
return ''
def readline(self, size: int | None = None) -> str:
try:
self._conn.send(('<readline>', 'readline operation'))
return self._conn.recv()
except BaseException as e:
print(e, file=sys.__stderr__)
return ''
def readlines(self, hint: int | None = None) -> list[str]:
try:
self._conn.send(('<readlines>', 'readlines operation'))
return self._conn.recv().splitlines()
except BaseException as e:
print(e, file=sys.__stderr__)
return []
def write(self, data):
try:
self._conn.send((self.std.name, data))
except BaseException as e:
print(e, file=sys.__stderr__)
def writelines(self, lines: list[str]):
try:
self._conn.send((self.std.name, os.linesep.join(lines)))
except BaseException as e:
print(e, file=sys.__stderr__)
class CodeBlocksInterpreter(InteractiveInterpreter):
def __init__(self, conn_for_execution: Connection, conn_for_interrupting: Connection, locals: dict = None):
super().__init__()
self.locals = locals
self._conn_for_execution = conn_for_execution
self._conn_for_interrupting = conn_for_interrupting
self._main_thread_id = get_native_id()
self._ready_for_next_block = Event()
self._ready_for_next_block.clear()
self._can_interrupt = Event()
self._can_interrupt.clear()
self._thread = Thread(target=self._stop_and_exit_thread, daemon=False)
def interact(self):
self._thread.start()
try:
filename = '<input>'
symbol = 'exec'
while True:
self._can_interrupt.clear()
self._ready_for_next_block.wait()
try:
self._conn_for_execution.send(('<block>', 'give me next block'))
code_block = self._conn_for_execution.recv() + '\n'
code = self.compile(source=code_block, filename=filename, symbol=symbol)
if code is None:
self.write('EOFError. Code block is incomplete')
continue
self._can_interrupt.set()
self.runcode(code)
self._can_interrupt.clear()
except KeyboardInterrupt as e:
print(e, file=sys.__stderr__)
except (OverflowError, SyntaxError, ValueError):
self.showsyntaxerror(filename)
except SystemExit:
break
except BaseException as e:
print(e, file=sys.__stderr__)
try:
self._conn_for_execution.close()
except:
pass
try:
self._conn_for_interrupting.close()
except:
pass
def _stop_and_exit_thread(self):
try:
while True:
try:
self._ready_for_next_block.set()
received = self._conn_for_interrupting.recv()
if received == 'interrupt':
self._ready_for_next_block.clear()
if self._can_interrupt.is_set():
import ctypes
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self._main_thread_id),
ctypes.py_object(KeyboardInterrupt))
elif received == 'exit':
import ctypes
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self._main_thread_id),
ctypes.py_object(SystemExit))
break
except (ConnectionResetError, EOFError):
break
except BaseException as e:
print(e, file=sys.__stderr__)
def write(self, data: str):
self._conn_for_execution.send(('<error>', data))
ADDRESS = args.address.strip('"\'') if isinstance(args.address, str) else 'localhost'
PORT = int(args.port) if isinstance(args.port, str) and args.port.isdigit() else 60000
PASS = args.password.strip('"\'').encode('utf-8') if isinstance(args.password, str) else b'secret'
# Two clients: one for executing code blocks and one for interrupting execution
try:
with Client((ADDRESS, PORT), authkey=PASS) as conn_for_execution, \
Client((ADDRESS, PORT), authkey=PASS) as conn_for_interrupting:
sys.stdin = Redirector(conn_for_execution, sys.stdin)
sys.stdout = Redirector(conn_for_execution, sys.stdout)
sys.stderr = Redirector(conn_for_execution, sys.stderr)
sys.__stdin__ = Redirector(conn_for_execution, sys.__stdin__)
sys.__stdout__ = Redirector(conn_for_execution, sys.__stdout__)
sys.__stderr__ = Redirector(conn_for_execution, sys.__stderr__)
code_blocks_interpreter = CodeBlocksInterpreter(conn_for_execution, conn_for_interrupting,
locals={'__name__': '__main__'})
code_blocks_interpreter.interact()
except:
pass
if isinstance(sys.stdin, Redirector):
sys.stdin = sys.stdin.std
if isinstance(sys.stdout, Redirector):
sys.stdout = sys.stdout.std
if isinstance(sys.stderr, Redirector):
sys.stderr = sys.stderr.std
if isinstance(sys.__stdin__, Redirector):
sys.__stdin__ = sys.__stdin__.std
if isinstance(sys.__stdout__, Redirector):
sys.__stdout__ = sys.__stdout__.std
if isinstance(sys.__stderr__, Redirector):
sys.__stderr__ = sys.__stderr__.std
Run after server.py:
ClientSide/venv/Scripts/python -uB client.py
On the server side, enter code block and send Ctrl+C.
On the client side, it is executed, and the result is transmitted back to the server side.
Examples:
Print to stdout:
[Ctrl+C to send code block to client]
block> print(42 ** 42)
block> <Ctrl+C>
Print to stdout and stderr, raise Exception:
[Ctrl+C to send code block to client]
block> import sys, time
block> print('1', file=sys.stdout); time.sleep(1)
block> print('2', file=sys.stderr); time.sleep(1)
block> raise Exception('3')
block> <Ctrl+C>
Read:
[Ctrl+C to send code block to client]
block> import sys
block> s1 = sys.stdin.read()
block> <Ctrl+C>
read> <Multi-line>
read> <Ctrl+C>
block> s2 = sys.stdin.readline() (or s2 = input())
block> <Ctrl+C>
readline> <One-line>
block> s3 = sys.stdin.readlines()
block> <Ctrl+C>
readlines> <Multi-line>
readlines> <Ctrl+C>
block> print(s1, s2, s3)
block> <Ctrl+C>
Interrupt (#interrupt must be the last line of code):
[Ctrl+C to send code block to client]
block> import time
block> for i in range(10):
block> print(i)
block> time.sleep(1)
block> #interrupt
block> <Ctrl+C>
[SERVER] Sleep before
[SERVER] Interrupt message sended
Exit (#exit must be the last line of code):
[Ctrl+C to send code block to client]
block> import time
block> for i in range(10):
block> print(i)
block> time.sleep(1)
block> #exit
block> <Ctrl+C>
[SERVER] Sleep before
[SERVER] Exit message sended

When calling a function, how to feed variable as an arg

I have the code below that works, but instead of calling the function with "www.google.com", i need to be able to pass as arg:
python certexp.py www.google.com:
import ssl
import OpenSSL
import time
def get_SSL_Expiry_Date(host, port):
cert = ssl.get_server_certificate((host, 443))
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
raw_date = x509.get_notAfter()
decoded_date = raw_date.decode("utf-8")
print (decoded_date)
dexpires = time.strptime(decoded_date, "%Y%m%d%H%M%Sz")
print (dexpires.tm_mon,"/",dexpires.tm_mday,"/",dexpires.tm_year)
get_SSL_Expiry_Date("google.com", 443)
Thank you
In python the sys module handles command line arguments.
This gives you an array of command line parameters, with sys.argv[0] being the name of the executable, then any subsequent elements being user parameters.
This makes your code:
import ssl
import OpenSSL
import time
import sys
def get_SSL_Expiry_Date(host, port):
cert = ssl.get_server_certificate((host, 443))
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
raw_date = x509.get_notAfter()
decoded_date = raw_date.decode("utf-8")
print (decoded_date)
dexpires = time.strptime(decoded_date, "%Y%m%d%H%M%Sz")
print (dexpires.tm_mon,"/",dexpires.tm_mday,"/",dexpires.tm_year)
if (len(sys.argv) == 1):
sys.stderr.write("%s: Give hostname as an argument, optionally a port too" % (sys.argv[0]))
sys.exit(1)
hostname = sys.argv[1]
port = 443
if (len(sys.argv) == 3):
port = int(sys.argv[2])
get_SSL_Expiry_Date(hostname, port)
Obviously you could do that for the port too. There's other command line parsing modules too, so you can say --port= etc.

Writing files asynchronously

I've been trying to create a server-process that receives an input file path and an output path from client processes asynchronously. The server does some database-reliant transformations, but for the sake of simplicity let's assume it merely puts everything to the upper case. Here is a toy example of the server:
import asyncio
import aiofiles as aiof
import logging
import sys
ADDRESS = ("localhost", 10000)
logging.basicConfig(level=logging.DEBUG,
format="%(name)s: %(message)s",
stream=sys.stderr)
log = logging.getLogger("main")
loop = asyncio.get_event_loop()
async def server(reader, writer):
log = logging.getLogger("process at {}:{}".format(*ADDRESS))
paths = await reader.read()
in_fp, out_fp = paths.splitlines()
log.debug("connection accepted")
log.debug("processing file {!r}, writing output to {!r}".format(in_fp, out_fp))
async with aiof.open(in_fp, loop=loop) as inp, aiof.open(out_fp, "w", loop=loop) as out:
async for line in inp:
out.write(line.upper())
out.flush()
writer.write(b"done")
await writer.drain()
log.debug("closing")
writer.close()
return
factory = asyncio.start_server(server, *ADDRESS)
server = loop.run_until_complete(factory)
log.debug("starting up on {} port {}".format(*ADDRESS))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
log.debug("closing server")
server.close()
loop.run_until_complete(server.wait_closed())
log.debug("closing event loop")
loop.close()
The client:
import asyncio
import logging
import sys
import random
ADDRESS = ("localhost", 10000)
MESSAGES = ["/path/to/a/big/file.txt\n",
"/output/file_{}.txt\n".format(random.randint(0, 99999))]
logging.basicConfig(level=logging.DEBUG,
format="%(name)s: %(message)s",
stream=sys.stderr)
log = logging.getLogger("main")
loop = asyncio.get_event_loop()
async def client(address, messages):
log = logging.getLogger("client")
log.debug("connecting to {} port {}".format(*address))
reader, writer = await asyncio.open_connection(*address)
writer.writelines([bytes(line, "utf8") for line in messages])
if writer.can_write_eof():
writer.write_eof()
await writer.drain()
log.debug("waiting for response")
response = await reader.read()
log.debug("received {!r}".format(response))
writer.close()
return
try:
loop.run_until_complete(client(ADDRESS, MESSAGES))
finally:
log.debug("closing event loop")
loop.close()
I activated the server and several clients at once. The server's logs:
asyncio: Using selector: KqueueSelector
main: starting up on localhost port 10000
process at localhost:10000: connection accepted
process at localhost:10000: processing file b'/path/to/a/big/file.txt', writing output to b'/output/file_79609.txt'
process at localhost:10000: connection accepted
process at localhost:10000: processing file b'/path/to/a/big/file.txt', writing output to b'/output/file_68917.txt'
process at localhost:10000: connection accepted
process at localhost:10000: processing file b'/path/to/a/big/file.txt', writing output to b'/output/file_2439.txt'
process at localhost:10000: closing
process at localhost:10000: closing
process at localhost:10000: closing
All clients print this:
asyncio: Using selector: KqueueSelector
client: connecting to localhost port 10000
client: waiting for response
client: received b'done'
main: closing event loop
The output files are created, but they remain empty. I believe they are not being flushed. Any way I can fix it?
You are missing an await before out.write() and out.flush():
import asyncio
from pathlib import Path
import aiofiles as aiof
FILENAME = "foo.txt"
async def bad():
async with aiof.open(FILENAME, "w") as out:
out.write("hello world")
out.flush()
print("done")
async def good():
async with aiof.open(FILENAME, "w") as out:
await out.write("hello world")
await out.flush()
print("done")
loop = asyncio.get_event_loop()
server = loop.run_until_complete(bad())
print(Path(FILENAME).stat().st_size) # prints 0
server = loop.run_until_complete(good())
print(Path(FILENAME).stat().st_size) # prints 11
However, I would strongly recommend trying to skip aiofiles and use regular, synchronized disk I/O, and keep asyncio for network activity:
with open(file, "w") as out: # regular file I/O
async for s in network_request(): # asyncio for slow network work. measure it!
out.write(s) # should be really quick, measure it!

How to get the Tor ExitNode IP with Python and Stem

I'm trying to get the external IP that Tor uses, as mentioned here. When using something like myip.dnsomatic.com, this is very slow. I tried what was suggested in the aforementioned link (python + stem to control tor through the control port), but all you get is circuit's IPs with no assurance of which one is the one on the exitnode, and, sometimes the real IP is not even among the results.
Any help would be appreciated.
Also, from here, at the bottom, Amine suggests a way to renew the identity in Tor. There is an instruction, controller.get_newnym_wait(), which he uses to wait until the new connection is ready (controller is from Control in steam.control), isn't there any thing like that in Steam (sorry, I checked and double/triple checked and couldn't find nothing) that tells you that Tor is changing its identity?
You can get the exit node ip without calling a geoip site.
This is however on a different stackexchange site here - https://tor.stackexchange.com/questions/3253/how-do-i-trap-circuit-id-none-errors-in-the-stem-script-exit-used-py
As posted by #mirimir his code below essentially attaches a stream event listener function, which is then used to get the circuit id, circuit fingerprint, then finally the exit ip address -
#!/usr/bin/python
import functools
import time
from stem import StreamStatus
from stem.control import EventType, Controller
def main():
print "Tracking requests for tor exits. Press 'enter' to end."
print
with Controller.from_port() as controller:
controller.authenticate()
stream_listener = functools.partial(stream_event, controller)
controller.add_event_listener(stream_listener, EventType.STREAM)
raw_input() # wait for user to press enter
def stream_event(controller, event):
if event.status == StreamStatus.SUCCEEDED and event.circ_id:
circ = controller.get_circuit(event.circ_id)
exit_fingerprint = circ.path[-1][0]
exit_relay = controller.get_network_status(exit_fingerprint)
t = time.localtime()
print "datetime|%d-%02d-%02d %02d:%02d:%02d % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
print "website|%s" % (event.target)
print "exitip|%s" % (exit_relay.address)
print "exitport|%i" % (exit_relay.or_port)
print "fingerprint|%s" % exit_relay.fingerprint
print "nickname|%s" % exit_relay.nickname
print "locale|%s" % controller.get_info("ip-to-country/%s" % exit_relay.address, 'unknown')
print
You can use this code for check current IP (change SOCKS_PORT value to yours):
import re
import stem.process
import requesocks
SOCKS_PORT = 9053
tor_process = stem.process.launch_tor()
proxy_address = 'socks5://127.0.0.1:{}'.format(SOCKS_PORT)
proxies = {
'http': proxy_address,
'https': proxy_address
}
response = requesocks.get("http://httpbin.org/ip", proxies=proxies)
print re.findall(r'[\d.-]+', response.text)[0]
tor_process.kill()
If you want to use socks you should do:
pip install requests[socks]
Then you can do:
import requests
import json
import stem.process
import stem
SOCKS_PORT = "9999"
tor = stem.process.launch_tor_with_config(
config={
'SocksPort': SOCKS_PORT,
},
tor_cmd= 'absolute_path/to/tor.exe',
)
r = requests.Session()
proxies = {
'http': 'socks5://localhost:' + SOCKS_PORT,
'https': 'socks5://localhost:' + SOCKS_PORT
}
response = r.get("http://httpbin.org/ip", proxies=proxies)
self.current_ip = response.json()['origin']

Python asynchronous processing in existing loop

I'm creating a module for OpenERP in which I have to launch an ongoing process.
OpenERP runs in a continuous loop. My process has to be launched when I click on a button, and it has to keep running without holding up OpenERP's execution.
To simplify it, I have this code:
#!/usr/bin/python
import multiprocessing
import time
def f(name):
while True:
try:
print 'hello', name
time.sleep(1)
except KeyboardInterrupt:
return
if __name__ == "__main__":
count = 0
while True:
count += 1
print "Pass %d" % count
pool = multiprocessing.Pool(1)
result = pool.apply_async(f, args=['bob'])
try:
result.get()
except KeyboardInterrupt:
#pass
print 'Interrupted'
time.sleep(1)
When executed, Pass 1 is printed once and then an endless series of hello bob is printed until CTRL+C is pressed. Then Pass 2 is obtained and so on, as shown below:
Pass 1
hello bob
hello bob
hello bob
^CInterrupted
Pass 2
hello bob
hello bob
hello bob
hello bob
I would like the passes to keep increasing in parallel with the hello bob's.
How do I do that?
Here what you can do id you can create then Multi Threaded Implementation of Python under the server memory, which will run independently then server execution thread.
Trick behind this will be used is we will fork one thread from server on your required click and we will assign all server variable separate copy to the new Thread so that thread will execute independently and at then end of process you have to commit the transaction as this process will be not main server process. Here the small example of it how you can do it .
import pprint
import pooler
from threading import Thread
import datetime
import logging
pp = pprint.PrettyPrinter(indent=4)
class myThread(Thread):
"""
"""
def __init__(self, obj, cr, uid, context=None):
Thread.__init__(self)
self.external_id_field = 'id'
self.obj = obj
self.cr = cr
self.uid = uid
self.context = context or {}
self.logger = logging.getLogger(module_name)
self.initialize()
"""
Abstract Method to be implemented in the real instance
"""
def initialize(self):
"""
init before import
usually for the login
"""
pass
def init_run(self):
"""
call after intialize run in the thread, not in the main process
TO use for long initialization operation
"""
pass
def run(self):
"""
this is the Entry point to launch the process(Thread)
"""
try:
self.init_run()
#Your Code Goes Here
#TODO Add Business Logic
self.cr.commit()
except Exception, err:
sh = StringIO.StringIO()
traceback.print_exc(file=sh)
error = sh.getvalue()
print error
self.cr.close()
LIke this you can add some code in some module like (import_base module in 6.1 or trunk)
Now what Next you can do is you can make extended implementation of this and then make instace of the service or you can directly start forking the treads like following code:
service = myServcie(self, cr, uid, context)
service.start()
now this we start background services which run faster and give you freedom to use the UI.
Hope this will help you
Thank You

Resources