Qt WebEngine: set a SOCKS5 proxy

I want to set a SOCKS5 proxy for my Qt WebEngine app. I use PyQt 5.8 and Qt 5.8.
I set up a SOCKS5 server with danted v1.4.1. I tested the server on its own and it works fine. But when I use it in my app, danted logs this error:
error after reading 3 bytes in 0 seconds: client offered no acceptable authentication method
This is my code:
def set_proxy():
    from PyQt5.QtNetwork import QNetworkProxy
    from six.moves.urllib import parse as urlparse

    # '@' separates the credentials from the host so urlparse can split them
    string_proxy = "socks5://username:password@ip:port"
    urlinfo = urlparse.urlparse(string_proxy)
    proxy = QNetworkProxy()
    if urlinfo.scheme == 'socks5':
        proxy.setType(QNetworkProxy.Socks5Proxy)
    else:
        proxy.setType(QNetworkProxy.NoProxy)
    if urlinfo.hostname is not None:
        proxy.setHostName(urlinfo.hostname)
    if urlinfo.port is not None:
        proxy.setPort(urlinfo.port)
    if urlinfo.username is not None:
        proxy.setUser(urlinfo.username)
    else:
        proxy.setUser('')
    if urlinfo.password is not None:
        proxy.setPassword(urlinfo.password)
    else:
        proxy.setPassword('')
    QNetworkProxy.setApplicationProxy(proxy)
Can anyone help me?
Update on 2017/03/29: added a proxyAuthenticationRequired signal handler.
from PyQt5 import QtWebEngineWidgets
from PyQt5.QtNetwork import QNetworkProxy
from six.moves.urllib import parse as urlparse

def set_proxy(string_proxy):
    proxy = QNetworkProxy()
    urlinfo = urlparse.urlparse(string_proxy)
    if urlinfo.scheme == 'socks5':
        proxy.setType(QNetworkProxy.Socks5Proxy)
    elif urlinfo.scheme == 'http':
        proxy.setType(QNetworkProxy.HttpProxy)
    else:
        proxy.setType(QNetworkProxy.NoProxy)
    proxy.setHostName(urlinfo.hostname)
    proxy.setPort(urlinfo.port)
    proxy.setUser(urlinfo.username)
    proxy.setPassword(urlinfo.password)
    QNetworkProxy.setApplicationProxy(proxy)

def handleProxyAuthReq(url, auth, proxyhost):
    # username and password are module-level credentials defined elsewhere
    auth.setUser(username)
    auth.setPassword(password)

webView = QtWebEngineWidgets.QWebEngineView()
#proxy_string = "http://username:password@ip:port"
proxy_string = "socks5://username:password@ip:port"
set_proxy(proxy_string)
webView.page().proxyAuthenticationRequired.connect(handleProxyAuthReq)
I tested it with my HTTP proxy and it worked. But when I use the SOCKS5 proxy, the proxyAuthenticationRequired signal is never emitted.

QtWebEngine does not use the username/password information from QNetworkProxy:
All other proxy settings such as QNetworkProxy::rawHeader(), QNetworkProxy::user(), or QNetworkProxy::password() are ignored.
You'll need to connect to the proxyAuthenticationRequired signal and supply the credentials there.
Update on 2017/03/30: it looks like Chromium does not support authentication with SOCKS proxies.

Related

How to implement nginx proxy_pass in GAE standard?

I'm using firebase-ui-web for authentication in my GAE app. There is an annoying issue where Safari blocks third-party cookies, which breaks the login process.
The best solution (described here) seems to be to implement a reverse-proxy config with nginx. Here are the details:
# reverse proxy for signin-helpers for popup/redirect sign in.
location /__/auth {
    proxy_pass https://<project>.firebaseapp.com;
}
Is it possible to accomplish the same thing in GAE where we are not able to add nginx rules? I'm using Python3/Flask if it matters.
With some Googling, I came up with this:
import requests
from flask import Flask, Response, request

app = Flask(__name__)

@app.route('/<path:path>', methods=['GET', 'POST'])
def proxy(path):
    url = f'https://www.example.com/{path}'
    excluded_headers = ['content-encoding', 'content-length',
                        'transfer-encoding', 'connection']
    if request.method == 'GET':
        resp = requests.get(url)
    elif request.method == 'POST':
        resp = requests.post(url, data=request.form)
    headers = [(name, value) for (name, value) in resp.raw.headers.items()
               if name.lower() not in excluded_headers]
    response = Response(resp.content, resp.status_code, headers)
    return response
Though I'm not confident the sources are good, so feedback is welcome.
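One refinement worth considering (a sketch, untested against the Firebase auth helpers; the route, header filtering, and redirect handling are my assumptions) is to forward the query string and the incoming headers too, since the sign-in handshake may depend on them:

import requests
from flask import Flask, Response, request

app = Flask(__name__)

@app.route('/__/auth/<path:path>', methods=['GET', 'POST'])
def auth_proxy(path):
    # Forward the request largely intact, preserving query string and headers.
    url = f'https://<project>.firebaseapp.com/__/auth/{path}'
    resp = requests.request(
        method=request.method,
        url=url,
        params=request.args,
        headers={k: v for k, v in request.headers if k.lower() != 'host'},
        data=request.get_data(),
        allow_redirects=False,
    )
    excluded = ['content-encoding', 'content-length',
                'transfer-encoding', 'connection']
    headers = [(k, v) for k, v in resp.raw.headers.items()
               if k.lower() not in excluded]
    return Response(resp.content, resp.status_code, headers)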

How to run my Flask server so I can reach it with my domain name?

I currently have a very small HTTP server and I would like to run it on my VM in Google Cloud Platform, and then be able to access it with the domain name connected to it. But my server does not have permission to bind to port 443 because that port is already occupied. How can I change this?
ERROR:
* Serving Flask app 'main'
* Debug mode: off
Address already in use
Port 443 is in use by another program. Either identify and stop that program, or start the server with a different port.
SCRIPT:
from flask import Flask, redirect, url_for, request, render_template

app = Flask(__name__)

@app.route("/")  # home page
def home():
    return "Hello! this is the main page <h1>HELLO<h1>"

@app.route("/admin")
def admin():
    return redirect(url_for("home"))

@app.route("/<usr>")
def user(usr):
    return f"<h1>{usr}<h1>"

@app.route('/login', methods=["POST", "GET"])
def login():
    if request.method == "POST":
        user = request.form['id']
        return redirect(url_for("user", usr=user))
    else:
        return 'GET'

if __name__ == "__main__":
    app.run('0.0.0.0', 443)
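The error message itself points at the way out: either stop whatever already holds 443, or start the server on a different port. A minimal sketch of the latter (assuming the service already on 443, e.g. an existing web server, can reverse-proxy to the app):

# Bind to an unprivileged port; whatever already listens on 443
# can forward traffic to http://127.0.0.1:8080.
if __name__ == "__main__":
    app.run('0.0.0.0', 8080)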

How to disable "check_hostname" using Requests library and Python 3.8.5?

Using the latest Requests library and Python 3.8.5, I can't seem to disable certificate checking on my API call. I understand the reasons not to disable it, but I'd like this to work.
When I attempt to use "verify=True", the servers I connect to throw this error:
(Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)')))
When I attempt to use "verify=False", I get:
Error making PS request to [<redacted server name>] at URL https://<redacted server name>/rest/v2/api_endpoint: Cannot set verify_mode to CERT_NONE when check_hostname is enabled.
I don't know how to also disable "check_hostname" as I haven't seen a way to do that with the requests library (which I plan to keep and use).
My code:
self.ps_server = server
self.ps_base_url = 'https://{}/rest/v2/'.format(self.ps_server)
url = self.ps_base_url + endpoint
response = None
try:
    if req_type == 'POST':
        response = requests.post(url, json=post_data, auth=(self.ps_username, self.ps_password), verify=self.verify, timeout=60)
        return json.loads(response.text)
    elif req_type == 'GET':
        response = requests.get(url, auth=(self.ps_username, self.ps_password), verify=self.verify, timeout=60)
        if response.status_code == 200:
            return json.loads(response.text)
        else:
            logging.error("Error making PS request to [{}] at URL {} [{}]".format(server, url, response.status_code))
            return {'status': 'error', 'trace': '{} - {}'.format(response.text, response.status_code)}
    elif req_type == 'DELETE':
        response = requests.delete(url, auth=(self.ps_username, self.ps_password), verify=self.verify, timeout=60)
        return response.text
    elif req_type == 'PUT':
        response = requests.put(url, json=post_data, auth=(self.ps_username, self.ps_password), verify=self.verify, timeout=60)
        return response.text
except Exception as e:
    logging.error("Error making PS request to [{}] at URL {}: {}".format(server, url, e))
    return {'status': 'error', 'trace': '{}'.format(e)}
Can someone shed some light on how I can disable check_hostname as well, so that I can test this without SSL checking?
If you have pip-system-certs, it monkey-patches requests as well. Here's a link to the code: https://gitlab.com/alelec/pip-system-certs/-/blob/master/pip_system_certs/wrapt_requests.py
After digging through the requests and urllib3 source for a while, this is the culprit in pip-system-certs:
ssl_context = ssl.create_default_context()
ssl_context.load_default_certs()
kwargs['ssl_context'] = ssl_context
That kwargs dict is later used to build the urllib3 connection pool, and the ssl_context it injects has .check_hostname set to True.
As far as replacing the utility of the pip-system-certs package, I think forking it and making it only monkey-patch pip would be the right way forward. That or just adding --trusted-host args to any pip install commands.
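For reference, the --trusted-host route looks like this (pypi.org and files.pythonhosted.org are the hosts pip normally talks to; the package name is a placeholder):

pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org some-package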
EDIT:
Here's how it's normally initialized through requests (versions I'm using):
https://github.com/psf/requests/blob/v2.21.0/requests/adapters.py#L163
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
    """Initializes a urllib3 PoolManager.

    This method should not be called from user code, and is only
    exposed for use when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param connections: The number of urllib3 connection pools to cache.
    :param maxsize: The maximum number of connections to save in the pool.
    :param block: Block when no free connections are available.
    :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
    """
    # save these values for pickling
    self._pool_connections = connections
    self._pool_maxsize = maxsize
    self._pool_block = block

    # NOTE: pool_kwargs doesn't have ssl_context in it
    self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                   block=block, strict=True, **pool_kwargs)
And here's how it's monkey-patched:
def init_poolmanager(self, *args, **kwargs):
    import ssl
    ssl_context = ssl.create_default_context()
    ssl_context.load_default_certs()
    kwargs['ssl_context'] = ssl_context
    return super(SslContextHttpAdapter, self).init_poolmanager(*args, **kwargs)
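To get back to the original question: since urllib3 honors an injected ssl_context, the same adapter trick can be turned around to disable both checks for testing. A minimal sketch (the adapter name is mine; note that check_hostname must be cleared before verify_mode is set to CERT_NONE, or Python raises the very error quoted above):

import ssl

import requests
from requests.adapters import HTTPAdapter

class NoVerifyAdapter(HTTPAdapter):
    """Adapter whose SSL context skips hostname checking."""
    def init_poolmanager(self, *args, **kwargs):
        ctx = ssl.create_default_context()
        ctx.check_hostname = False        # must be cleared first
        ctx.verify_mode = ssl.CERT_NONE   # then certificate checks can go
        kwargs['ssl_context'] = ctx
        return super().init_poolmanager(*args, **kwargs)

session = requests.Session()
session.mount('https://', NoVerifyAdapter())
# With check_hostname off, verify=False no longer trips the CERT_NONE error.
# self-signed.badssl.com is just a public test host for illustration.
resp = session.get('https://self-signed.badssl.com/', verify=False)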

Unable to modify request in middleware using Scrapy

I am in the process of scraping public meteorological data for a (data science) project, and in order to do that effectively I need to change the proxy used by my Scrapy requests in the event of a 403 response code.
For this, I have defined a downloader middleware to handle this situation, which is as follows:
class ProxyMiddleware(object):
    def process_response(self, request, response, spider):
        if response.status == 403:
            f = open("Proxies.txt")
            proxy = random_line(f)  # Just returns a random line from the file with a valid structure ("http://IP:port")
            new_request = Request(url=request.url)
            new_request.meta['proxy'] = proxy
            spider.logger.info("[Response 403] Changed proxy to %s" % proxy)
            return new_request
        return response
After adding the class to settings.py, I expected this middleware to deal with 403 responses by generating a new request with a new proxy, eventually ending in a 200 response. The observed behaviour is that the middleware does get executed (I can see the logger info about the changed proxy), but the new request does not seem to be made. Instead, I'm getting this:
2018-12-26 23:33:19 [bot_2] INFO: [Response] Changed proxy to https://154.65.93.126:53281
2018-12-26 23:33:26 [bot_2] INFO: [Response] Changed proxy to https://176.196.84.138:51336
... indefinitely with random proxies, which makes me think that I'm still retrieving 403 errors and the proxy is not changing.
Reading the documentation, regarding process_response, it states:
(...) If it returns a Request object, the middleware chain is halted and the returned request is rescheduled to be downloaded in the future. This is the same behavior as if a request is returned from process_request().
Is it possible that "in the future" is not "right after it is returned"? What should I do to change the proxy for all requests from that moment on?
Scrapy drops duplicate requests to the same URL by default, so that's probably what's happening in your spider. To check whether this is your case, you can set these settings:
DUPEFILTER_DEBUG=True
LOG_LEVEL='DEBUG'
To solve this you should add dont_filter=True:
new_request = Request(url=request.url, dont_filter=True)
Try this:
class ProxyMiddleware(object):
    def process_response(self, request, response, spider):
        if response.status == 403:
            with open("Proxies.txt") as f:
                proxy = random_line(f)
            # dont_filter=True keeps the dupefilter from dropping the retry
            new_request = Request(url=request.url, dont_filter=True)
            new_request.meta['proxy'] = proxy
            spider.logger.info("[Response 403] Changed proxy to %s" % proxy)
            return new_request
        else:
            return response
A better approach would be to use the scrapy-rotating-proxies module instead:

DOWNLOADER_MIDDLEWARES = {
    'rotating_proxies.middlewares.RotatingProxyMiddleware': 610,
    'rotating_proxies.middlewares.BanDetectionMiddleware': 620,
}
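For completeness, the middleware also needs to know where the proxies come from; if the package README is anything to go by, that is configured with one of these settings (so the existing Proxies.txt could be reused; the proxy values below are just the ones from the question's log):

# settings.py: either an inline list ...
ROTATING_PROXY_LIST = [
    'https://154.65.93.126:53281',
    'https://176.196.84.138:51336',
]
# ... or a path to a file with one proxy per line
ROTATING_PROXY_LIST_PATH = 'Proxies.txt'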

Google Firebase SSL Certificate - My certificate has a large number of other websites listed

Problem: Other domains are listed in my Google Firebase SSL certificate.
I created a Firebase project to test Firebase authentication emails from Cloud Functions: firebase.jhanley.com
I have separate code that runs in Cloud Functions that validates SSL certificates for each domain that I own / manage (code below). The primary purpose of this code is to send an email when a domain's SSL certificate is about to expire. Some of our SSL certificates must be renewed manually.
The problem is that my code that checks the SSL certificate is returning a huge number of other domain names for my SSL certificate. When I look at the SSL certificate with Chrome, I also see these other domain names. I do not want my site associated with these other sites.
A reduced list of the domains that I see in my SSL certificate for Firebase:
2r5consultoria.com.br
addlix.com
admin.migrationcover.ae
admin.thermoply.com
akut-med.zeitnachweise.de
...
firebase.jhanley.com
...
Q) Why is this happening with Firebase SSL and is there a solution?
Q) Does Firebase support installing your own SSL certificate?
Python 3.x code that runs in Cloud Functions that processes SSL certificates by connecting to each domain name from a list.
Note: This code does not have any (known) problems. I am including the source code to add value for others in the community.
""" Routines to process SSL certificates """
import sys
import datetime
import socket
import ssl
import time

import myhtml

g_email_required = False  # This is set during processing if a warning or error was detected

def get_email_required():
    return g_email_required

def ssl_get_cert(hostname):
    """ This function returns an SSL certificate from a host """
    context = ssl.create_default_context()
    conn = context.wrap_socket(
        socket.socket(socket.AF_INET),
        server_hostname=hostname)
    # 3 second timeout because Google Cloud Functions has runtime limitations
    conn.settimeout(3.0)
    try:
        conn.connect((hostname, 443))
    except Exception as ex:
        print("{}: Exception: {}".format(hostname, ex), file=sys.stderr)
        return False, str(ex)
    host_ssl_info = conn.getpeercert()
    return host_ssl_info, ''

def get_ssl_info(host):
    """ This function retrieves the SSL certificate for host """
    # If we receive an error, retry up to three times waiting 10 seconds each time.
    retry = 0
    err = ''
    while retry < 3:
        ssl_info, err = ssl_get_cert(host)
        if ssl_info is not False:
            return ssl_info, ''
        retry += 1
        print(' retrying ...')
        time.sleep(10)
    return False, err

def get_ssl_issuer_name(ssl_info):
    """ Return the IssuerName from the SSL certificate """
    issuerName = ''
    issuer = ssl_info['issuer']
    # pylint: disable=line-too-long
    # issuer looks like this:
    # This is a set of a set of a set of key / value pairs.
    # ((('countryName', 'US'),), (('organizationName', "Let's Encrypt"),), (('commonName', "Let's Encrypt Authority X3"),))
    for item in issuer:
        # item will look like this as it goes thru the issuer set
        # Note that this is a set of a set
        #
        # (('countryName', 'US'),)
        # (('organizationName', "Let's Encrypt"),)
        # (('commonName', "Let's Encrypt Authority X3"),)
        s = item[0]
        # s will look like this as it goes thru the issuer set
        # Note that this is now a set
        #
        # ('countryName', 'US')
        # ('organizationName', "Let's Encrypt")
        # ('commonName', "Let's Encrypt Authority X3")
        # break the set into "key" and "value" pairs
        k = s[0]
        v = s[1]
        if k == 'organizationName':
            if v != '':
                issuerName = v
            continue
        if k == 'commonName':
            if v != '':
                issuerName = v
    return issuerName

def get_ssl_subject_alt_names(ssl_info):
    """ Return the Subject Alt Names """
    altNames = ''
    subjectAltNames = ssl_info['subjectAltName']
    index = 0
    for item in subjectAltNames:
        altNames += item[1]
        index += 1
        if index < len(subjectAltNames):
            altNames += ', '
    return altNames

def process_hostnames(msg_body, hostnames, days_left):
    """ Process the SSL certificate for each hostname """
    # pylint: disable=global-statement
    global g_email_required
    ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'
    for host in hostnames:
        f_expired = False
        print('Processing host:', host)
        ssl_info, err = get_ssl_info(host)
        if ssl_info is False:
            msg_body = myhtml.add_row(msg_body, host, err, '', '', '', True)
            g_email_required = True
            continue
        #print(ssl_info)
        issuerName = get_ssl_issuer_name(ssl_info)
        altNames = get_ssl_subject_alt_names(ssl_info)
        l_expires = datetime.datetime.strptime(ssl_info['notAfter'], ssl_date_fmt)
        remaining = l_expires - datetime.datetime.utcnow()
        if remaining < datetime.timedelta(days=0):
            # cert has already expired - uhoh!
            cert_status = "Expired"
            f_expired = True
            g_email_required = True
        elif remaining < datetime.timedelta(days=days_left):
            # expires sooner than the buffer
            cert_status = "Time to Renew"
            f_expired = True
            g_email_required = True
        else:
            # everything is fine
            cert_status = "OK"
            f_expired = False
        msg_body = myhtml.add_row(msg_body, host, cert_status, str(l_expires), issuerName, altNames, f_expired)
    return msg_body
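For context, a minimal driver for these routines might look like the following sketch (myhtml is the author's own helper, so the empty-string starting value for msg_body is an assumption inferred from the add_row calls above; the hostnames and 30-day buffer are examples):

# Hypothetical usage of the routines above.
hostnames = ['firebase.jhanley.com', 'example.com']
msg_body = ''  # assumed: myhtml.add_row() appends HTML table rows to this string
msg_body = process_hostnames(msg_body, hostnames, days_left=30)
if get_email_required():
    print('At least one certificate needs attention')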
This is happening because Firebase will automatically create shared certificates for customers. This does not represent a security risk for your site, as Firebase retains full control of the certificate private keys. Certificates are shared to allow us to offer HTTPS + custom domains without an additional fee for our free plan customers.
If you are on the Blaze (pay-as-you-go) plan for your project, you can send a request to Firebase support and we can migrate you to a dedicated certificate. This is only available for Blaze plan customers.
Firebase Hosting does not currently support uploading custom certificates. If this is a use case that's important to you, I'd recommend filing a feature request (again, through Firebase support) so that we can evaluate it for future improvements to the product.
