Paramiko ssh to Windows with ConnectionResetError: [WinError 10054]

My code below works fine with Linux servers, but the script doesn't work against a Windows 2008 Server with OpenSSH installed. I have also tested SSHing with Tera Term using the same username, password, IP address, and port 22, and that worked fine.
I am getting the error below.
C:\Users\>python auto-ssh_v1.py
Socket exception: An existing connection was forcibly closed by the remote host (10054)
Traceback (most recent call last):
File "auto-ssh_v1.py", line 71, in <module>
results = executer.execute()
File "auto-ssh_v1.py", line 53, in execute
stdin, stdout, stderr = ssh.exec_command(self.command)
File "C:\Anaconda3\lib\site-packages\paramiko\client.py", line 405, in exec_command
chan.exec_command(command)
File "C:\Anaconda3\lib\site-packages\paramiko\channel.py", line 60, in _check
return func(self, *args, **kwds)
File "C:\Anaconda3\lib\site-packages\paramiko\channel.py", line 229, in exec_command
self._wait_for_event()
File "C:\Anaconda3\lib\site-packages\paramiko\channel.py", line 1086, in _wait_for_event
raise e
File "C:\Anaconda3\lib\site-packages\paramiko\transport.py", line 1726, in run
ptype, m = self.packetizer.read_message()
File "C:\U\Anaconda3\lib\site-packages\paramiko\packet.py", line 386, in read_message
header = self.read_all(self.__block_size_in, check_rekey=True)
File "C:\Anaconda3\lib\site-packages\paramiko\packet.py", line 249, in read_all
x = self.__socket.recv(n)
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
The code is:
#Modules
import paramiko

#Variables
USER = 'Administrator'
PSWD = 'Passw0rd'

#Classes and Functions
class InputReader:
    def __init__(self, commands_path, hosts_path):
        self.commands_path = commands_path
        self.hosts_path = hosts_path

    def read(self):
        self.commands = self.__readlines(self.commands_path)
        self.hosts = self.__readlines(self.hosts_path)

    def __readlines(self, path):
        with open(path) as f:
            return [v.strip() for v in f.readlines()]  # List comprehension

class CommandExecuter:
    def __init__(self, host, command):
        self.host = host
        self.command = command

    def execute(self):
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(self.host, username=USER, password=PSWD)
        stdin, stdout, stderr = ssh.exec_command(self.command)
        errors = stderr.readlines()
        if len(errors) != 0:
            raise Exception(errors)
        lines = [v.strip() for v in stdout.readlines()]
        ssh.close()
        return lines

#Main Procedure
if __name__ == '__main__':
    reader = InputReader("commands.txt", "systems.txt")
    reader.read()
    for h in reader.hosts:
        for c in reader.commands:
            executer = CommandExecuter(h, c)
            results = executer.execute()
            print("{0}({1}):".format(h, c))
            for i in results:
                print(i)
            print('\n')

You can try running paramiko line by line to SSH to your Windows server and inspect the trace.
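For example, a minimal sketch of that line-by-line approach (the host and command here are placeholders; paramiko.util.log_to_file writes a DEBUG protocol log that usually shows the last packets before the reset):

import paramiko

# Write a verbose protocol log; the last entries show where the reset happens
paramiko.util.log_to_file('paramiko.log')

t = paramiko.Transport(('192.0.2.10', 22))   # placeholder host
t.start_client()                             # key exchange and transport negotiation
t.auth_password('Administrator', 'Passw0rd')
chan = t.open_session()                      # the posted trace dies around this stage
chan.exec_command('ver')                     # any simple Windows command
print(chan.recv(4096).decode())
t.close()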
Hope that helps,
Trinh

Related

FastAPI internal server error Exception in ASGI application

I developed an API with FastAPI. When I run it in my conda Ubuntu environment and try any function, everything runs perfectly. But when I run it in my conda Windows environment, the API starts fine, yet when I try the function I get an internal server error message as output, and in the terminal I can see the error described below. Note that the code is the same in both environments.
Also, the connection to the node from a Jupyter notebook (in the conda Windows environment) works fine, as does any function I run.
This is the connection via Jupyter, which works fine in both environments:
rpc_user = os.getenv("u1")
rpc_password = os.getenv("key2")
rpc_host = os.getenv("host")
rpc_client = AuthServiceProxy(f"http://{rpc_user}:{rpc_password}@{rpc_host}", timeout=120)
block_count = rpc_client.getblockcount()
print("---------------------------------------------------------------")
print("Block Count:", block_count)
print("---------------------------------------------------------------\n")
output:
---------------------------------------------------------------
Block Count: 773863
---------------------------------------------------------------
This is the code for the connection in the API; it lives in a directory named blockchain_data:
import os
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
import json
client = os.getenv("u1")
key = os.getenv("key2")
host= os.getenv("host")
rpc_client = AuthServiceProxy(f"http://{client}:{key}@{host}", timeout=120)
This is the router for getblockcount, which works fine in my Ubuntu environment but not in Windows.
from fastapi import APIRouter
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from blockchain_data.node import rpc_client
router = APIRouter()
@router.get("/get/block/count")
def get_block_count():
    block_count = rpc_client.getblockcount()
    return {"Last block": block_count}
And the main:
from fastapi import FastAPI
from routers import routers
app = FastAPI()
app.include_router(routers.router)
@app.get("/")
def inicio():
    return {
        "message": "welcome"
    }
ERROR:
"GET /get/block/count HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\uvicorn\protocols\http\h11_impl.py", line 407, in run_asgi
result = await app( # type: ignore[func-returns-value]
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\uvicorn\middleware\proxy_headers.py", line 78, in __call__
return await self.app(scope, receive, send)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\fastapi\applications.py", line 270, in __call__
await super().__call__(scope, receive, send)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\applications.py", line 124, in __call__
await self.middleware_stack(scope, receive, send)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\middleware\errors.py", line 184, in __call__
raise exc
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\middleware\errors.py", line 162, in __call__
await self.app(scope, receive, _send)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\middleware\exceptions.py", line 79, in __call__
raise exc
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\middleware\exceptions.py", line 68, in __call__
await self.app(scope, receive, sender)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\fastapi\middleware\asyncexitstack.py", line 21, in __call__
raise e
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\fastapi\middleware\asyncexitstack.py", line 18, in __call__
await self.app(scope, receive, send)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\routing.py", line 706, in __call__
await route.handle(scope, receive, send)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\routing.py", line 276, in handle
await self.app(scope, receive, send)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\routing.py", line 66, in app
response = await func(request)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\fastapi\routing.py", line 237, in app
raw_response = await run_endpoint_function(
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\fastapi\routing.py", line 165, in run_endpoint_function
return await run_in_threadpool(dependant.call, **values)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\starlette\concurrency.py", line 41, in run_in_threadpool
return await anyio.to_thread.run_sync(func, *args)
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\anyio\to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\anyio\_backends\_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\anyio\_backends\_asyncio.py", line 867, in run
result = context.run(func, *args)
File ".\routers\routers.py", line 17, in get_block_count
block_count = rpc_client.getblockcount()
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\bitcoinrpc\authproxy.py", line 139, in __call__
response = self._get_response()
File "C:\Users\grupo\miniconda3\envs\entorno1\lib\site-packages\bitcoinrpc\authproxy.py", line 186, in _get_response
raise JSONRPCException({
bitcoinrpc.authproxy.JSONRPCException: -342: non-JSON HTTP response with '401 Unauthorized' from server
I tried to run the connection with the node in Jupyter and it works fine, so it must be something related to FastAPI or uvicorn.
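Since the node answers 401 Unauthorized only when the API runs under uvicorn on Windows, one thing worth ruling out is that the environment variables are simply empty in that process (variables set in one shell or conda environment are not visible to processes started elsewhere). A minimal check, reusing the variable names from the question, could be dropped next to the AuthServiceProxy call:

import os

for name in ("u1", "key2", "host"):
    # repr() makes an unset variable show up as None instead of an empty string
    print(name, "->", repr(os.getenv(name)))

If any of these prints None, the RPC URL is built without credentials, which would produce exactly a 401 from the node.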

Lost connection to MySQL server during query with Sanic and Asyncmy (MySQL)

I'm facing an issue that I'm having a hard time identifying.
I made a database context system that wraps requests inside a with block that creates a connection to MySQL. Here's the full code:
custom/database/database.py
# -*- coding:utf-8 -*-
from sqlalchemy import exc, event
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession as SQLAlchemyAsyncSession
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.pool import Pool, QueuePool  # NullPool
from sqlalchemy.exc import OperationalError
from contextvars import ContextVar
from sanic import Sanic

class EngineNotInitialisedError(Exception):
    pass

class DBSessionContext:
    def __init__(self, read_session: Session, write_session: Session, commit_on_exit: bool = True) -> None:
        self.read_session = read_session
        self.write_session = write_session
        self.commit_on_exit = commit_on_exit
        self.token = None
        self._read = None
        self._write = None

    def _disable_flush(self, *args, **kwargs):
        raise NotImplementedError('Unable to flush a read-only session.')

    async def close(self, exc_type=None, exc_value=None, traceback=None):
        if self._write:
            if exc_value and getattr(exc_value, 'status_code', 500) > 300:
                await self._write.rollback()
            else:
                await self._write.commit()
            try:
                await self._write.close()
            except OperationalError as e:
                if e.orig.args[0] != 2013:  # Lost connection to MySQL server during query
                    raise e
        if self._read:
            try:
                await self._read.close()
            except OperationalError as e:
                if e.orig.args[0] != 2013:  # Lost connection to MySQL server during query
                    raise e

    def set_token(self, token):
        self.token = token

    @property
    def read(self) -> Session:
        if not self._read:
            self._read = self.read_session()
            self._read.flush = self._disable_flush
        return self._read

    @property
    def write(self) -> Session:
        if not self._write:
            self._write = self.write_session()
        return self._write

class AsyncSession(SQLAlchemyAsyncSession):
    async def execute(self, statement, **parameters):
        return await super().execute(statement, parameters)

    async def first(self, statement, **parameters):
        executed = await self.execute(statement, **parameters)
        return executed.first()

    async def all(self, statement, **parameters):
        executed = await self.execute(statement, **parameters)
        return executed.all()

class DBSession:
    def __init__(self):
        self.app = None
        self.read_engine = None
        self.read_session = None
        self.write_engine = None
        self.write_session = None
        self._session = None
        self.context = ContextVar("context", default=None)
        self.commit_on_exit = True

    def init_app(self, app: Sanic) -> None:
        self.app = app
        self.commit_on_exit = self.app.config.get('DATABASE_COMMIT_ON_EXIT', cast=bool, default=True)
        engine_args = {
            'echo': self.app.config.get('DATABASE_ECHO', cast=bool, default=False),
            'echo_pool': self.app.config.get('DATABASE_ECHO_POOL', cast=bool, default=False),
            # will be used to create a connection pool instance using the connection parameters given in the URL
            'poolclass': QueuePool,
            # if pool_class is not NullPool:
            # if True will enable the connection pool “pre-ping” feature that tests connections for liveness upon each checkout
            'pool_pre_ping': self.app.config.get('DATABASE_POOL_PRE_PING', cast=bool, default=True),
            # the number of connections to allow in connection pool “overflow”
            'max_overflow': self.app.config.get('DATABASE_MAX_OVERFLOW', cast=int, default=10),
            # the number of connections to keep open inside the connection pool
            'pool_size': self.app.config.get('DATABASE_POOL_SIZE', cast=int, default=100),
            # this setting causes the pool to recycle connections after the given number of seconds has passed
            'pool_recycle': self.app.config.get('DATABASE_POOL_RECYCLE', cast=int, default=3600),
            # number of seconds to wait before giving up on getting a connection from the pool
            'pool_timeout': self.app.config.get('DATABASE_POOL_TIMEOUT', cast=int, default=5),
        }
        self.read_engine = create_async_engine(
            self.app.config.get('DATABASE_READ_URL'),
            connect_args={
                'connect_timeout': self.app.config.get('DATABASE_CONNECT_TIMEOUT', cast=int, default=3)
            },
            **engine_args
        )
        # see https://writeonly.wordpress.com/2009/07/16/simple-read-only-sqlalchemy-sessions/
        self.read_session = sessionmaker(
            bind=self.read_engine,
            expire_on_commit=False,
            class_=AsyncSession,
            autoflush=False,
            autocommit=False
        )
        self.write_engine = create_async_engine(
            self.app.config.get('DATABASE_WRITE_URL'),
            connect_args={
                'connect_timeout': self.app.config.get('DATABASE_CONNECT_TIMEOUT', cast=int, default=3)
            },
            **engine_args
        )
        self.write_session = sessionmaker(
            bind=self.write_engine,
            expire_on_commit=False,
            class_=AsyncSession,
            autoflush=True
        )

    async def __aenter__(self):
        session_ctx = DBSessionContext(self.read_session, self.write_session, self.commit_on_exit)
        session_ctx.set_token(self.context.set(session_ctx))
        return session_ctx

    async def __aexit__(self, exc_type, exc_value, traceback):
        session_ctx = self.context.get()
        await session_ctx.close(exc_type, exc_value, traceback)
        self.context.reset(session_ctx.token)

    @property
    def read(self) -> Session:
        return self.context.get().read

    @property
    def write(self) -> Session:
        return self.context.get().write

@event.listens_for(Pool, "checkout")
def check_connection(dbapi_con, con_record, con_proxy):
    '''Listener for Pool checkout events that pings every connection before using.
    Implements pessimistic disconnect handling strategy. See also:
    http://docs.sqlalchemy.org/en/rel_0_8/core/pooling.html#disconnect-handling-pessimistic'''
    cursor = dbapi_con.cursor()
    try:
        cursor.execute("SELECT 1")
    except exc.OperationalError as ex:
        if ex.args[0] in (2006,   # MySQL server has gone away
                          2013,   # Lost connection to MySQL server during query
                          2055):  # Lost connection to MySQL server at '%s', system error: %d
            raise exc.DisconnectionError()  # caught by pool, which will retry with a new connection
        else:
            raise
    cursor.close()

db = DBSession()
Using it is quite simple. In the router, I made a wrapper that calls the handler with the db initiated:
custom/route.py
class Route:
    async def __call__(self, request: Request, **kwargs):
        async with db:
            response = await self.handler(*args)
            # process the response, such as changing a str to a text response, etc.
        return response
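For context, handlers then reach the database through the context variable. Reconstructed from the stack trace below (the actual query is redacted in the original, so the WHERE clause here is an assumption), the failing call looks roughly like:

from sqlalchemy import text
from custom.database.database import db

async def reset_usage(self):
    # hypothetical reconstruction of the redacted query
    query = text("SELECT id FROM conversions WHERE uuid = :uuid LIMIT 1")
    while await db.read.first(query, uuid=self.uuid):
        ...  # redacted in the original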
Unfortunately, I noticed that I get a lot of
(2013, 'Lost connection to MySQL server during query')
and I don't know how or why this happens. It happens on relatively small queries (containing "LIMIT 1" on indexed columns) that should be fast.
Here's the full stack trace:
[2022-05-19 09:35:25 +0000] [92185] [ERROR] Exception occurred while handling uri: 'https://api.pdfshift.io/redacted'
Traceback (most recent call last):
File "asyncmy/connection.pyx", line 610, in asyncmy.connection.Connection._read_bytes
data = await self._reader.readexactly(num_bytes)
File "/usr/lib/python3.9/asyncio/streams.py", line 721, in readexactly
raise exceptions.IncompleteReadError(incomplete, n)
asyncio.exceptions.IncompleteReadError: 0 bytes read on a total of 4 expected bytes
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1802, in _execute_context
self.dialect.do_execute(
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/default.py", line 732, in do_execute
cursor.execute(statement, parameters)
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/asyncmy.py", line 92, in execute
return self.await_(self._execute_async(operation, parameters))
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.py", line 76, in await_only
return current.driver.switch(awaitable)
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.py", line 129, in greenlet_spawn
value = await result
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/asyncmy.py", line 104, in _execute_async
result = await self._cursor.execute(operation, parameters)
File "asyncmy/cursors.pyx", line 180, in execute
result = await self._query(query)
File "asyncmy/cursors.pyx", line 365, in _query
await conn.query(q)
File "asyncmy/connection.pyx", line 455, in query
await self._read_query_result(unbuffered=unbuffered)
File "asyncmy/connection.pyx", line 636, in _read_query_result
await result.read()
File "asyncmy/connection.pyx", line 1023, in read
first_packet = await self.connection.read_packet()
File "asyncmy/connection.pyx", line 578, in read_packet
packet_header = await self._read_bytes(4)
File "asyncmy/connection.pyx", line 618, in _read_bytes
raise errors.OperationalError(CR_SERVER_LOST, msg) from e
asyncmy.errors.OperationalError: (2013, 'Lost connection to MySQL server during query')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "handle_request", line 83, in handle_request
)
File "/var/www/project/www/custom/route.py", line 162, in __call__
response = await response
File "/var/www/project/www/apps/webhooks/views.py", line 104, in stripe
await account.reset_usage()
File "/var/www/project/www/apps/accounts/models.py", line 133, in reset_usage
while await db.read.first(query, uuid=self.uuid):
File "/var/www/project/www/custom/database/database.py", line 73, in first
executed = await self.execute(statement, **parameters)
File "/var/www/project/www/custom/database/database.py", line 70, in execute
return await super().execute(statement, parameters)
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/ext/asyncio/session.py", line 211, in execute
return await greenlet_spawn(
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.py", line 134, in greenlet_spawn
result = context.throw(*sys.exc_info())
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 1692, in execute
result = conn._execute_20(statement, params or {}, execution_options)
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1614, in _execute_20
return meth(self, args_10style, kwargs_10style, execution_options)
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/sql/elements.py", line 325, in _execute_on_connection
return connection._execute_clauseelement(
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1481, in _execute_clauseelement
ret = self._execute_context(
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1845, in _execute_context
self._handle_dbapi_exception(
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 2026, in _handle_dbapi_exception
util.raise_(
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/util/compat.py", line 207, in raise_
raise exception
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1802, in _execute_context
self.dialect.do_execute(
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/engine/default.py", line 732, in do_execute
cursor.execute(statement, parameters)
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/asyncmy.py", line 92, in execute
return self.await_(self._execute_async(operation, parameters))
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.py", line 76, in await_only
return current.driver.switch(awaitable)
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/util/_concurrency_py3k.py", line 129, in greenlet_spawn
value = await result
File "/var/www/project/env/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/asyncmy.py", line 104, in _execute_async
result = await self._cursor.execute(operation, parameters)
File "asyncmy/cursors.pyx", line 180, in execute
result = await self._query(query)
File "asyncmy/cursors.pyx", line 365, in _query
await conn.query(q)
File "asyncmy/connection.pyx", line 455, in query
await self._read_query_result(unbuffered=unbuffered)
File "asyncmy/connection.pyx", line 636, in _read_query_result
await result.read()
File "asyncmy/connection.pyx", line 1023, in read
first_packet = await self.connection.read_packet()
File "asyncmy/connection.pyx", line 578, in read_packet
packet_header = await self._read_bytes(4)
File "asyncmy/connection.pyx", line 618, in _read_bytes
raise errors.OperationalError(CR_SERVER_LOST, msg) from e
sqlalchemy.exc.OperationalError: (asyncmy.errors.OperationalError) (2013, 'Lost connection to MySQL server during query')
[SQL: SELECT id FROM conversions WHERE [redacted] LIMIT 1]
[parameters: ('redacted',)]
(Background on this error at: https://sqlalche.me/e/14/e3q8)
When connecting to the database, here's the parameter I provide:
DATABASE_POOL_PRE_PING = True
DATABASE_MAX_OVERFLOW = 10
DATABASE_POOL_SIZE = 100
DATABASE_POOL_RECYCLE = 3600
DATABASE_POOL_TIMEOUT = 5
DATABASE_CONNECT_TIMEOUT = 3
(If you need details from the MySQL server side, let me know which command to run and I'll add the output here).
My assumption is that somehow the connection is not properly closed when exiting the async with db block, so when another request comes in the same connection is reused, but ultimately MySQL kills it, causing the above error, Lost connection to MySQL server during query (a sketch for testing this follows the details below).
Further details :
The error is the same but the queries change, showing that the error does not come from a specific part of the code but is related to the connection itself.
I was able to catch this issue when receiving a webhook event from Stripe. The error returned by Stripe is "Expired", which seems to indicate that before being stopped, the connection hangs (probably waiting on the SQL query to finish).
This doesn't happen every time: I was able to run some webhooks successfully and others not, for the same event (Stripe), so again, it doesn't seem to be an error in the request-handling code (but maybe in how the DB is managed).
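A diagnostic sketch for testing that hypothesis (the listeners and the timing idea are mine, not part of the codebase): log how long each connection is held out of the pool, and compare that, plus DATABASE_POOL_RECYCLE, against the server's idle timeout (SHOW VARIABLES LIKE 'wait_timeout'; on the MySQL side). pool_recycle must stay below wait_timeout, or the server kills connections the pool still considers live.

import logging
import time
from sqlalchemy import event
from sqlalchemy.pool import Pool

log = logging.getLogger("pool-debug")

@event.listens_for(Pool, "checkout")
def on_checkout(dbapi_con, con_record, con_proxy):
    # remember when each connection leaves the pool
    con_record.info['checked_out_at'] = time.time()

@event.listens_for(Pool, "checkin")
def on_checkin(dbapi_con, con_record):
    out_at = con_record.info.pop('checked_out_at', None)
    if out_at is not None:
        # connections held longer than wait_timeout are the suspects
        log.info("connection held for %.1fs", time.time() - out_at)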
Thank you for your help !

Setting up Chrome DevTools (Selenium 4) Using Remote WebDriver in Python

I've been trying to set up the use of Chrome DevTools with Selenium 4 and Python. I've been able to get it to run locally (without any of the async stuff), but when I try to use the webdriver.Remote implementation, it crashes.
Here is an example from the Selenium docs: https://www.selenium.dev/de/documentation/support_packages/chrome_devtools/
Below is how I tried to run it.
import asyncio
from selenium import webdriver
import selenium.webdriver.common.devtools.v96 as devtools

async def geo_location_test():
    try:
        chrome_options = webdriver.ChromeOptions()
        driver = webdriver.Remote(
            command_executor='http://D5365900:4444/wd/hub',
            options=chrome_options
        )
        async with driver.bidi_connection() as session:
            cdp_session = session.session
            await cdp_session.execute(devtools.emulation.set_geolocation_override(latitude=41.8781,
                                                                                   longitude=-87.6298,
                                                                                   accuracy=100))
            driver.get("https://my-location.org/")
    finally:
        driver.quit()

async def main():
    await geo_location_test()

if __name__ == "__main__":
    asyncio.run(main())
It runs up to the line async with driver.bidi_connection() as session: (the session is established and the Chrome browser opens), but then it crashes with the following trace.
Traceback (most recent call last):
File "C:\Users\y04082\eclipse-workspace\WWI-Testautomation\TestScripts\Josh\async_sel_4.py", line 54, in <module>
asyncio.run(main())
File "C:\Program Files\Python310\lib\asyncio\runners.py", line 44, in run
return loop.run_until_complete(main)
File "C:\Program Files\Python310\lib\asyncio\base_events.py", line 641, in run_until_complete
return future.result()
File "C:\Users\y04082\eclipse-workspace\WWI-Testautomation\TestScripts\Josh\async_sel_4.py", line 51, in main
await geo_location_test()
File "C:\Users\y04082\eclipse-workspace\WWI-Testautomation\TestScripts\Josh\async_sel_4.py", line 40, in geo_location_test
async with driver.bidi_connection() as session:
File "C:\Program Files\Python310\lib\contextlib.py", line 199, in __aenter__
return await anext(self.gen)
File "C:\Program Files\Python310\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 1576, in bidi_connection
async with cdp.open_cdp(ws_url) as conn:
File "C:\Program Files\Python310\lib\contextlib.py", line 199, in __aenter__
return await anext(self.gen)
File "C:\Program Files\Python310\lib\site-packages\selenium\webdriver\common\bidi\cdp.py", line 457, in open_cdp
async with trio.open_nursery() as nursery:
File "C:\Program Files\Python310\lib\site-packages\trio\_core\_run.py", line 796, in __aenter__
self._scope.__enter__()
File "C:\Program Files\Python310\lib\site-packages\trio\_core\_ki.py", line 159, in wrapper
return fn(*args, **kwargs)
File "C:\Program Files\Python310\lib\site-packages\trio\_core\_run.py", line 449, in __enter__
task = _core.current_task()
File "C:\Program Files\Python310\lib\site-packages\trio\_core\_run.py", line 2285, in current_task
raise RuntimeError("must be called from async context") from None
RuntimeError: must be called from async context
As you can see, I'm using Python 3.10. I also upgraded the Selenium bindings to 4.1.0 and am running a Selenium 4.0.0 Hub/Node configuration to automate Chrome 96.
Any ideas what the problem is here? Am I handling the asynchronous coroutines incorrectly?
Any help is much appreciated!
Update
After trying to run it with trio (as suggested in Henry Ashton-Martyn's comment), I get the following error.
Traceback (most recent call last):
File "C:\Program Files\Python310\lib\site-packages\trio\_highlevel_open_tcp_stream.py", line 332, in attempt_connect
await sock.connect(sockaddr)
File "C:\Program Files\Python310\lib\site-packages\trio\_socket.py", line 682, in connect
raise OSError(err, "Error in connect: " + os.strerror(err))
OSError: [Errno 10049] Error in connect: Unknown error
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\y04082\eclipse-workspace\WWI-Testautomation\TestScripts\Josh\async_sel_4.py", line 58, in <module>
trio.run(main)
File "C:\Program Files\Python310\lib\site-packages\trio\_core\_run.py", line 1932, in run
raise runner.main_task_outcome.error
File "C:\Users\y04082\eclipse-workspace\WWI-Testautomation\TestScripts\Josh\async_sel_4.py", line 55, in main
await geo_location_test()
File "C:\Users\y04082\eclipse-workspace\WWI-Testautomation\TestScripts\Josh\async_sel_4.py", line 44, in geo_location_test
async with driver.bidi_connection() as session:
File "C:\Program Files\Python310\lib\contextlib.py", line 199, in __aenter__
return await anext(self.gen)
File "C:\Program Files\Python310\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 1576, in bidi_connection
async with cdp.open_cdp(ws_url) as conn:
File "C:\Program Files\Python310\lib\contextlib.py", line 199, in __aenter__
return await anext(self.gen)
File "C:\Program Files\Python310\lib\site-packages\selenium\webdriver\common\bidi\cdp.py", line 458, in open_cdp
conn = await connect_cdp(nursery, url)
File "C:\Program Files\Python310\lib\site-packages\selenium\webdriver\common\bidi\cdp.py", line 479, in connect_cdp
ws = await connect_websocket_url(nursery, url,
File "C:\Program Files\Python310\lib\site-packages\trio_websocket\_impl.py", line 262, in connect_websocket_url
return await connect_websocket(nursery, host, port, resource,
File "C:\Program Files\Python310\lib\site-packages\trio_websocket\_impl.py", line 171, in connect_websocket
stream = await trio.open_tcp_stream(host, port)
File "C:\Program Files\Python310\lib\site-packages\trio\_highlevel_open_tcp_stream.py", line 367, in open_tcp_stream
raise OSError(msg) from trio.MultiError(oserrors)
OSError: all attempts to connect to 0.0.0.0:4444 failed
It seems that Selenium doesn't use Python's built-in async framework but a package called trio. You should be able to fix this problem by changing this code from:
if __name__ == "__main__":
    asyncio.run(main())
to:
if __name__ == "__main__":
    import trio
    trio.run(main)
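Put together, a sketch of the script with only that change applied (same hub URL and coordinates as in the question; trio should be importable, since the Selenium 4 Python bindings depend on it):

import trio
from selenium import webdriver
import selenium.webdriver.common.devtools.v96 as devtools

async def geo_location_test():
    chrome_options = webdriver.ChromeOptions()
    driver = webdriver.Remote(
        command_executor='http://D5365900:4444/wd/hub',
        options=chrome_options
    )
    try:
        # bidi_connection() internally opens a trio nursery, hence trio.run below
        async with driver.bidi_connection() as session:
            cdp_session = session.session
            await cdp_session.execute(devtools.emulation.set_geolocation_override(
                latitude=41.8781, longitude=-87.6298, accuracy=100))
            driver.get("https://my-location.org/")
    finally:
        driver.quit()

if __name__ == "__main__":
    trio.run(geo_location_test)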

How can I send a file to an HTTP server and read it?

So, I created the following HTTP server tunneled via ngrok, and I am trying to send a file to the server, then read it and display it on the server's web page.
Here's the code for the server:
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from pyngrok import ngrok
import time

port = os.environ.get("PORT", 80)
server_address = ("127.0.0.1", port)

class MyServer(BaseHTTPRequestHandler):
    def _set_headers(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        self._set_headers()
        self.wfile.write(bytes("<html><head><title>https://pythonbasics.org</title></head>", "utf-8"))
        self.wfile.write(bytes("<p>Request: %s</p>" % self.path, "utf-8"))
        self.wfile.write(bytes("<body>", "utf-8"))
        self.wfile.write(bytes("<p>This is an example web server.</p>", "utf-8"))
        self.wfile.write(bytes("</body></html>", "utf-8"))

    def do_POST(self):
        '''Reads post request body'''
        self._set_headers()
        content_len = int(self.headers.getheader('content-length', 0))
        post_body = self.rfile.read(content_len)
        self.wfile.write("received post request:<br>{}".format(post_body))

    def do_PUT(self):
        self.do_POST()

httpd = HTTPServer(server_address, MyServer)
public_url = ngrok.connect(port).public_url
print("ngrok tunnel \"{}\" -> \"http://127.0.0.1:{}\"".format(public_url, port))

try:
    # Block until CTRL-C or some other terminating event
    httpd.serve_forever()
except KeyboardInterrupt:
    print(" Shutting down server.")
    httpd.socket.close()
And I have been trying to send a file using POST as follows:
>>> url = 'https://httpbin.org/post'
>>> files = {'file': open('report.xls', 'rb')}
>>> r = requests.post(url, files=files)
>>> r.text
I imported requests, of course, and here's what I get:
Exception occurred during processing of request from ('127.0.0.1', 60603)
Traceback (most recent call last):
File "C:\Program Files\Python39\lib\socketserver.py", line 316, in _handle_request_noblock
self.process_request(request, client_address)
File "C:\Program Files\Python39\lib\socketserver.py", line 347, in process_request
self.finish_request(request, client_address)
File "C:\Program Files\Python39\lib\socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "C:\Program Files\Python39\lib\socketserver.py", line 720, in __init__
self.handle()
File "C:\Program Files\Python39\lib\http\server.py", line 427, in handle
self.handle_one_request()
File "C:\Program Files\Python39\lib\http\server.py", line 415, in handle_one_request
method()
File "C:\Users\pierr\OneDrive\Desktop\SpyWare-20210104T124335Z-001\SpyWare\Ngrok_Test.py", line 28, in do_POST
content_len = int(self.headers.getheader('content-length', 0))
AttributeError: 'HTTPMessage' object has no attribute 'getheader'
Could someone please help me fix this error? I don't understand where it comes from.
The syntax has changed. You need to use
content_len = int(self.headers.get('Content-Length'))
Instead of
content_len = int(self.headers.getheader('content-length', 0))
The rest should stay the same.
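Put together, a do_POST along those lines might look like this (a sketch; note the original also passes a str to wfile.write, which raises a TypeError on Python 3, so the body is encoded here):

def do_POST(self):
    '''Reads the POST request body and echoes it back.'''
    self._set_headers()
    # .get() with a default avoids int(None) when the header is missing
    content_len = int(self.headers.get('Content-Length', 0))
    post_body = self.rfile.read(content_len)
    # wfile expects bytes, not str
    self.wfile.write("received post request:<br>{}".format(post_body).encode("utf-8"))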

python telegram bot, "socket.error: [Errno 98] Address already in use"

I'm developing a telegram bot with this api, and I tried to use its webhook example to set up my own bot with the webhook method.
I have an Ubuntu server, and I have set up nginx on it.
Now, when trying to run my Python bot, I get this error:
Traceback (most recent call last):
File "bot.py", line 106, in <module>
router.run(host=WEBHOOK_LISTEN, port=int(WEBHOOK_PORT), ssl_context= (WEBHOOK_SSL_CERT, WEBHOOK_PRIV_CERT), debug=True)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 841, in run
run_simple(host, port, self, **options)
File "/usr/local/lib/python2.7/dist-packages/werkzeug/serving.py", line 720, in run_simple
s.bind((hostname, port))
File "/usr/lib/python2.7/socket.py", line 228, in meth
return getattr(self._sock,name)(*args)
socket.error: [Errno 98] Address already in use
So I checked what is using port 443, and the process is nginx:
root 30734 1 0 Aug21 ? 00:00:00 nginx: master process /usr/sbin/nginx -g daemon on; master_process on;
I can't turn off nginx because my website runs on it, and I need the port to set up the telegram bot.
EDIT: I will put my code here for more clarification:
WEBHOOK_HOST = 'mywebsite.com'
WEBHOOK_PORT = '8443'
WEBHOOK_LISTEN = '0.0.0.0'
WEBHOOK_SSL_CERT = "/etc/letsencrypt/live/mywebsite.com/cert.pem"
WEBHOOK_PRIV_CERT = "/etc/letsencrypt/live/mywebsite.com/privkey.pem"
WEBHOOK_URL_BASE = "https://%s:%s" % (WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/%s/" % (TOKEN.get_token())

router = flask.Flask(__name__)

@router.route('/', methods=['GET', 'HEAD'])
def index():
    return 'OK'

@router.route(WEBHOOK_URL_PATH, methods=['POST'])
def webhook():
    if flask.request.headers.get('content-type') == 'application/json':
        json_string = flask.request.json
        print json_string["message"]["text"]  # here I get the text of the message
        return ''
    else:
        flask.abort(403)

bot.remove_webhook()
time.sleep(3)
bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH, certificate=open(WEBHOOK_SSL_CERT, 'r'))
router.run(host=WEBHOOK_LISTEN, port=int(WEBHOOK_PORT), ssl_context=(WEBHOOK_SSL_CERT, WEBHOOK_PRIV_CERT), debug=True)
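Since the traceback shows the bind itself failing, a quick diagnostic (a sketch, not a fix) is to try binding the same address and port before calling router.run; a False here reproduces the Errno 98 condition and tells you which process to look for:

import socket

def port_free(port, host='0.0.0.0'):
    # attempt the same bind Flask would perform
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, int(port)))
        return True
    except socket.error:
        return False
    finally:
        s.close()

print(port_free(WEBHOOK_PORT))  # WEBHOOK_PORT is '8443' above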
