Best way to use httpx async client and tenacity? - python-requests

I'm getting fairly different results with two different implementations.
Here is implementation 1:
import asyncio
from httpx import AsyncClient
from tenacity import AsyncRetrying, RetryError, stop_after_attempt, wait_fixed

request_semaphore = asyncio.Semaphore(5)

async def _send_async_request(client: AsyncClient, method, auth, url, body):
    async with request_semaphore:
        try:
            async for attempt in AsyncRetrying(stop=stop_after_attempt(3), wait=wait_fixed(1)):
                with attempt:
                    response = await client.request(method=method, url=url, auth=auth, json=body)
                    response.raise_for_status()
                    return response
        except RetryError as e:
            pass
And here is implementation 2:
request_semaphore = asyncio.Semaphore(5)

@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def _send_single_async_request(self, client: AsyncClient, method, auth, url, body):
    async with request_semaphore:
        response = await client.request(method=method, url=url, auth=auth, json=body)
        response.raise_for_status()
        return response

async def _send_async_request(self, client: AsyncClient, method, auth, url, body):
    try:
        return await self._send_single_async_request(client, method, auth, url, body)
    except RetryError as e:
        pass
I'm testing it against a stable REST API. Here are the benchmarks:
100 successful POST requests:
Implementation 1: 0:59 mins
Implementation 2: 0:57 mins
100 failed POST requests:
Implementation 1: 3:26 mins
Implementation 2: 2:09 mins
These results are consistent. Can anyone help me understand why my first implementation is slower than my second?
Edit: FYI, here's how I'm calling the above functions (the functions actually receive a request tuple with the url and body; I edited it above for clarity).
async def _prepare_async_requests(self, method, auth, requests):
    async with AsyncClient() as client:
        task_list = [self._send_async_request(client, method, auth, request) for request in requests]
        return [await task for task in asyncio.as_completed(task_list)]

def send_async_requests(self, auth, method, requests):
    loop = asyncio.get_event_loop()
    responses = loop.run_until_complete(self._prepare_async_requests(method, auth, requests))
    return responses
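For reference, here's a minimal, self-contained sketch of how the decorator-based version (implementation 2) fits together outside of the class; the URL and payload below are just placeholders, not the actual API I'm benchmarking:
import asyncio
from httpx import AsyncClient
from tenacity import RetryError, retry, stop_after_attempt, wait_fixed

request_semaphore = asyncio.Semaphore(5)

@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def send_single_async_request(client: AsyncClient, method, url, body):
    async with request_semaphore:
        response = await client.request(method=method, url=url, json=body)
        response.raise_for_status()  # an error status triggers the next tenacity attempt
        return response

async def send_async_request(client: AsyncClient, method, url, body):
    try:
        return await send_single_async_request(client, method, url, body)
    except RetryError:
        return None  # all three attempts failed

async def main():
    async with AsyncClient() as client:
        tasks = [send_async_request(client, "POST", "https://example.com/api", {"n": n})
                 for n in range(10)]
        return await asyncio.gather(*tasks)

responses = asyncio.run(main())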

Related

FastAPI endpoint blocked until a message has been consumed with aio-pika

I am using a FastAPI endpoint to do some sort of long polling, and aio-pika to wait for messages that a client is publishing to a RabbitMQ connection/channel/exchange/queue.
My question: when I make a request to the endpoint, it is blocked waiting for a message to arrive, but as you can see in the snippet below, if I put a print() instruction before it starts consuming, it is not shown until a message arrives. Why does this happen?
@router.post("/long_polling")
async def long_polling(request):
    device_id = "1234"
    print("THIS IS NOT PRINTED UNTIL A MESSAGE IS PUBLISHED!!!")
    connection = await aio_pika.connect("amqp://user:pwd@rabbitmq:5672/")
    async with connection:
        channel = await connection.channel()
        exchange = await channel.declare_exchange("exchange", aio_pika.ExchangeType.TOPIC)
        queue = await channel.declare_queue("")
        await queue.bind(exchange, routing_key="{}.outgoing".format(device_id))
        print("NOT PRINTED UNTIL A MESSAGE ARRIVE ALTHOUGH BEFORE CONSUMING")
        # START CONSUMING
        async with queue.iterator() as queue_iter:
            async for message in queue_iter:
                async with message.process():
                    print(message.body)
                    return
When I publish a message, the endpoint returns properly, so I just want to know why the previous instructions are not executed until a message arrives.

Customized asyncContextManager not working concurrently on HTTP requests

I am using asyncio to send a bunch of requests to a web service concurrently. I cannot use aiohttp.ClientSession because the target service provides a client package with a lot of proprietary functions. Even if I could hack the authentication process to establish a session, I would still have to implement those client functions. I implemented my own async context manager as in the following code snippet. It seems fine, except that the requests are apparently executed sequentially: it takes 22 seconds where a concurrent run costs less than 3 seconds.
import aiohttp, asyncio, requests, time, json

start_time = time.time()

class MyRequests:
    def __init__(self, requests):
        self.requests = requests

    async def __aenter__(self) -> "MyRequests":
        return self

    async def __aexit__(self, exc_type, exc, tb) -> None:
        pass

    def get(self, url) -> "MyResponse":
        return MyResponse(self.requests.get(url))

class MyResponse:
    def __init__(self, resp) -> None:
        self.response = resp
        self.url = self.response.url

    async def __aenter__(self) -> "MyResponse":
        return self

    async def __aexit__(self, exc_type, exc, tb) -> None:
        pass

async def get_pokemon(session, url):
    async with session.get(url) as resp:
        return resp.url

async def main():
    async with MyRequests(requests) as session:
        tasks = []
        for number in range(1, 50):
            url = f'https://pokeapi.co/api/v2/pokemon/{number}'
            tasks.append(asyncio.create_task(get_pokemon(session, url)))
        original_pokemon = await asyncio.gather(*tasks)
        for pokemon in original_pokemon:
            print(pokemon)

asyncio.run(main())
print("--- %s seconds ---" % (time.time() - start_time))

Stop propagation of request to next middleware if response returned from first middleware in FastAPI

I am working on a problem where I am creating a middleware that responds to one API path; if the path is different, we go to the next middleware, otherwise we exit out of the first middleware.
Maybe I have not understood how middleware works in FastAPI, but my code is not working, so any assistance will be welcomed.
My code is as below:
@app.middleware("http")
async def verify_user_agent(request: Request, call_next):
    if request.url.path == "/something" and request.method == "GET":
        return JSONResponse(content={"Something": True}, status_code=200)
    else:
        await call_next(request)
        return JSONResponse(content={
            "message": "redirecting to api middleware"
        }, status_code=307)

# This middleware should be hit only if route is not /something
@app.middleware("http")
async def api_route(request: Request, call_back):
    if request.url.path == "/api":
        return JSONResponse(content={"api": True}, status_code=200)
    return JSONResponse(content={"api": False})
Here, even if the first route called is /something, the second middleware is still called, even though the response has already been sent by the first middleware.
If you need any more information then please do let me know.
So I think you have understood middleware just fine; however, there is just one thing you are missing, and that is the precedence of your middleware. Precedence is determined by the order of your middleware, and FastAPI will register your middleware in the reverse order that they are defined in. This means that in your example the first middleware in line is the one that checks for the /api route, which means it never gets to the /something check. All you need to do is swap them around.
# This middleware will be called second
@app.middleware("http")
async def api_route(request: Request, call_back):
    if request.url.path == "/api":
        return JSONResponse(content={"api": True}, status_code=200)
    return JSONResponse(content={"api": False})

# This middleware will be called first
@app.middleware("http")
async def verify_user_agent(request: Request, call_next):
    if request.url.path == "/something" and request.method == "GET":
        return JSONResponse(content={"Something": True}, status_code=200)
    else:
        await call_next(request)
        return JSONResponse(content={
            "message": "redirecting to api middleware"
        }, status_code=307)

FastAPI as backend for Telethon

I'm trying to make API auth with Telethon work. I'm sending a request to an endpoint where the Telegram client is initialized, and trying to send a code request to Telegram. But there is an input() prompt, and I didn't find any solution to pass the code as a variable.
@router.get('/code')
async def send_code_request(phone: str):
    client = get_telegram_client(phone)
    await client.start(phone)
    return {'msg': 'code sent'}
I found an easier solution, but there is one con: when authorizing via a session, the sign_in() method requires send_code_request() to be executed first, so there will be two identical code messages.
async def get_telegram_client(session: str = None) -> TelegramClient:
    return TelegramClient(
        StringSession(session),
        api_id=settings.TELEGRAM_API_ID,
        api_hash=settings.TELEGRAM_API_HASH
    )

@router.post('/code')
async def send_authorizarion_code(payload: TelegramSendCode):
    client = await get_telegram_client()
    await client.connect()
    try:
        await client.send_code_request(payload.phone)
    except FloodWaitError as e:
        return {
            'FloodWaitError': {
                'phone_number': e.request.phone_number,
                'seconds': e.seconds
            }}
    else:
        return {
            'msg': 'code sent',
            'session': client.session.save()
        }

@router.post('/auth')
async def authorize(payload: TelegramAuth):
    client = await get_telegram_client(payload.session)
    await client.connect()
    await client.send_code_request(payload.phone)
    await client.sign_in(code=payload.code, phone=payload.phone)
    return {'msg': 'signed in'}
I'm assuming you're using .start() for that.
.start() accepts a code callback that is input() by default; you can pass your own like so: client.start(code_callback=your_callback), and your callback should return the code.
This can all be found in the start docs.
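For illustration, here is a minimal sketch of that approach, assuming the login code has already been collected (for example via a separate endpoint) and stored where the callback can read it; pending_codes is a hypothetical store, and router, TelegramAuth and get_telegram_client come from the code above:
# Hypothetical store mapping a phone number to the code the user already submitted.
pending_codes = {}

def make_code_callback(phone: str):
    def code_callback():
        # Telethon calls this instead of prompting with input(); it must return the code.
        return pending_codes[phone]
    return code_callback

@router.post('/start')
async def start_client(payload: TelegramAuth):
    client = await get_telegram_client(payload.session)
    await client.start(phone=payload.phone, code_callback=make_code_callback(payload.phone))
    return {'msg': 'signed in'}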

python3.5: with aiohttp is it possible to serve several responses concurrently?

I'm using the latest version (1.0.2) of aiohttp with Python 3.5. I have the following server code:
import asyncio
from aiohttp.web import Application, Response, StreamResponse, run_app

async def long(request):
    resp = StreamResponse()
    name = request.match_info.get('name', 'Anonymous')
    resp.content_type = 'text/plain'
    for _ in range(1000000):
        answer = ('Hello world\n').encode('utf8')
        await resp.prepare(request)
        resp.write(answer)
    await resp.write_eof()
    return resp

async def init(loop):
    app = Application(loop=loop)
    app.router.add_get('/long', long)
    return app

loop = asyncio.get_event_loop()
app = loop.run_until_complete(init(loop))
run_app(app)
If I then run two curl requests (curl http://localhost:8080/long) in different terminals, only the first one receives data.
My thought was that with asyncio you could, in single-threaded code, start serving another response while one is waiting for I/O.
Most of the code I found online about concurrency + asyncio only talks about the client side, not the server side.
Am I missing something, or is my understanding of how asyncio works flawed?
Just add await resp.drain() after resp.write() to give aiohttp a chance to switch between tasks:
import asyncio
from aiohttp.web import Application, Response, StreamResponse, run_app

async def long(request):
    resp = StreamResponse()
    name = request.match_info.get('name', 'Anonymous')
    resp.content_type = 'text/plain'
    await resp.prepare(request)  # prepare should be called once
    for _ in range(1000000):
        answer = ('Hello world\n').encode('utf8')
        resp.write(answer)
        await resp.drain()  # switch point
    await resp.write_eof()
    return resp

async def init(loop):
    app = Application(loop=loop)
    app.router.add_get('/long', long)
    return app

loop = asyncio.get_event_loop()
app = loop.run_until_complete(init(loop))
run_app(app)
