Properly forwarding visitor's IP address from flask_restful to nginx - nginx

I'm running a flask_restful API service that is being forwarded traffic via an nginx proxy. While the IP address is being forwarded through the proxy via some variables, flask_restful doesn't seem to be able to see these variables, as indicated by its output which points to 127.0.0.1:
127.0.0.1 - - [25/Oct/2017 21:55:37] "HEAD sne/event/SN2014J/photometry HTTP/1.0" 200 -
While I know I can retrieve the IP address via the request object (nginx forwards X-Forwarded-For and X-Real-IP), I don't know how to make the above output from flask_restful show/use this IP address, which is important if you want to say limit the number of API calls from a given IP address with flask_limiter. Any way to make this happen?

You can use (for older version of werkzeug)
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
For newer version of werkzeug (1.0.0+)
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
This will fix the IP using X-Forwarded-For. If you need an enhanced version you can use
class SaferProxyFix(object):
    """WSGI middleware that adds HTTP-proxy awareness to an application
    that was not written with proxies in mind.

    It rewrites ``REMOTE_ADDR``, ``HTTP_HOST`` and the URL scheme from the
    ``X-Forwarded-*`` headers set by the upstream proxy.  With more than one
    proxy in front of the app, set ``num_proxy_servers`` accordingly.

    Do not use this middleware outside a proxy setup: the forwarded headers
    are client-controlled.  When ``detect_misconfiguration`` is true,
    :meth:`get_remote_addr` raises if a request does not appear to have
    passed through enough proxy servers.

    The pre-rewrite values of ``REMOTE_ADDR`` and ``HTTP_HOST`` are kept in
    the WSGI environ under ``werkzeug.proxy_fix.orig_remote_addr`` and
    ``werkzeug.proxy_fix.orig_http_host``.

    :param app: the WSGI application to wrap
    """

    def __init__(self, app, num_proxy_servers=1, detect_misconfiguration=False):
        self.app = app
        self.num_proxy_servers = num_proxy_servers
        self.detect_misconfiguration = detect_misconfiguration

    def get_remote_addr(self, forwarded_for):
        """Select the client address from the ``X-Forwarded-For`` chain.

        The last entry is picked by default; ``num_proxy_servers=2`` picks
        the second-to-last, and so on.  Returns ``None`` when the chain is
        too short (and misconfiguration detection is off).
        """
        if self.detect_misconfiguration and not forwarded_for:
            raise Exception("SaferProxyFix did not detect a proxy server. Do not use this fixer if you are not behind a proxy.")
        if self.detect_misconfiguration and len(forwarded_for) < self.num_proxy_servers:
            raise Exception("SaferProxyFix did not detect enough proxy servers. Check your num_proxy_servers setting.")
        if forwarded_for and len(forwarded_for) >= self.num_proxy_servers:
            return forwarded_for[-1 * self.num_proxy_servers]
        return None

    def __call__(self, environ, start_response):
        scheme_header = environ.get('HTTP_X_FORWARDED_PROTO', '')
        raw_chain = environ.get('HTTP_X_FORWARDED_FOR', '').split(',')
        host_header = environ.get('HTTP_X_FORWARDED_HOST', '')

        # Stash the originals so the app can still inspect them.
        environ['werkzeug.proxy_fix.orig_wsgi_url_scheme'] = environ.get('wsgi.url_scheme')
        environ['werkzeug.proxy_fix.orig_remote_addr'] = environ.get('REMOTE_ADDR')
        environ['werkzeug.proxy_fix.orig_http_host'] = environ.get('HTTP_HOST')

        # Normalize the chain: strip whitespace and drop empty entries.
        chain = [entry.strip() for entry in raw_chain if entry.strip()]
        client_addr = self.get_remote_addr(chain)
        if client_addr is not None:
            environ['REMOTE_ADDR'] = client_addr
        if host_header:
            environ['HTTP_HOST'] = host_header
        if scheme_header:
            environ['wsgi.url_scheme'] = scheme_header
        return self.app(environ, start_response)
from saferproxyfix import SaferProxyFix
app.wsgi_app = SaferProxyFix(app.wsgi_app)
PS: Code taken from http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html

Related

Equivalent of `curl --connect-to` in Python Requests library

curl has an option connect-to
--connect-to <HOST1:PORT1:HOST2:PORT2>
For a request to the given HOST:PORT pair, connect to CONNECT-TO-HOST:CONNECT-TO-PORT instead. This option is suitable to direct requests at a specific
server, e.g. at a specific cluster node in a cluster of servers. This option is only used to establish the network connection. It does NOT affect the host-
name/port that is used for TLS/SSL (e.g. SNI, certificate verification) or for the application protocols. "host" and "port" may be the empty string, meaning
"any host/port". "connect-to-host" and "connect-to-port" may also be the empty string, meaning "use the request's original host/port".
This option can be used many times to add many connect rules.
What is the equivalent in Python Requests library?
There is no such equivalent but you can patch lower levels to rewrite the remote address when creating the connection.
This works in Python 3:
from unittest.mock import patch
# contextmanager for forcing a connection to a given host, port
def connect_to(host, port):
    """Build a patcher that, while active, forces every new urllib3
    connection to dial ``(host, port)`` regardless of the requested URL.

    The returned object is a ``unittest.mock`` patcher, usable as a
    context manager.  Only the TCP connection target is changed; the URL
    and ``Host:`` header are left untouched.
    """
    from urllib3.util.connection import create_connection as _real_create_connection

    def _redirected_connection(address, *args, **kwargs):
        # Ignore the requested address; dial the forced endpoint instead.
        return _real_create_connection((host, port), *args, **kwargs)

    return patch('urllib3.util.connection.create_connection',
                 _redirected_connection)
# force connections to 127.0.0.1:8080
with connect_to('127.0.0.1', 8080):
res = requests.get('http://service.example.com/')
Other solutions (patching PoolManager, using custom adapter) were not enough because the URL was rewritten as well (and thus the Host: header). When you use curl --connect-to, nothing is touched at HTTP level.
I also needed to optionally force http connections despite the URL scheme. This is the working augmented version for that:
import contextlib
from unittest.mock import patch
# BUG FIX: the decorator was garbled as a comment ("#contextlib.contextmanager"),
# so the function was never turned into a context manager at all.
@contextlib.contextmanager
def connect_to(host, port, force_http=False):
    """Context manager forcing urllib3 connections to ``(host, port)``.

    Only the TCP connection target is rewritten; the URL and ``Host:``
    header are untouched (like ``curl --connect-to``).

    :param host: address to actually connect to
    :param port: port to actually connect to
    :param force_http: when true, also speak plain HTTP even for
        ``https://`` URLs, by swapping the HTTPS pool's connection class.
    """
    from urllib3.connection import HTTPConnection
    from urllib3.util.connection import create_connection as orig_create_connection

    def _forced_address_connection(address, *args, **kwargs):
        # Drop the requested address and dial the forced endpoint instead.
        forced_address = (host, port)
        return orig_create_connection(forced_address, *args, **kwargs)

    class ForcedHTTPConnection(HTTPConnection):
        """Plain-HTTP connection that discards TLS-only keyword arguments."""

        def __init__(self, **kwargs):
            httpconn_kw = ('host', 'port', 'timeout', 'source_address', 'blocksize')
            super().__init__(**{k: kwargs[k] for k in httpconn_kw if k in kwargs})

    patchers = [patch('urllib3.util.connection.create_connection',
                      _forced_address_connection)]
    if force_http:
        patchers.append(patch('urllib3.connectionpool.HTTPSConnectionPool.ConnectionCls',
                              ForcedHTTPConnection))
    for p in patchers:
        p.start()
    try:
        yield
    finally:
        # BUG FIX: always undo the patches, even when the managed block
        # raises; otherwise the global patch would leak past the `with`.
        for p in patchers:
            p.stop()
# force connections to 127.0.0.1:8080 using http
with connect_to('127.0.0.1', 8080, force_http=True):
res = requests.get('https://service.example.com/')
see also Python 'requests' library - define specific DNS?.

How to redirect HTTPS traffic to local HTTP server using mitmproxy?

I am trying to setup mitmproxy so that I can make a request from my browser to https://{my-domain} and have it return a response from my local server running at http://localhost:3000 instead, but I cannot get the https request to reach my local server. I see the debugging statements from mitmproxy. Also, I can get it working for http traffic, but not for https.
I read the mitmproxy addon docs and api docs
I've installed the cert and I can monitor https through the proxy.
I'm using Mitmproxy: 4.0.4 and Python: 3.7.4
This is my addon (local-redirect.py) and how I run mitmproxy:
from mitmproxy import ctx
import mitmproxy.http
class LocalRedirect:
    """mitmproxy addon: reroute requests for the production domain to a
    local HTTP server on port 3000, leaving all other traffic alone."""

    def __init__(self):
        print('Loaded redirect addon')

    def request(self, flow: mitmproxy.http.HTTPFlow):
        host = flow.request.pretty_host
        if 'my-actual-domain-here' not in host:
            return
        ctx.log.info("pretty host is: %s" % host)
        # Rewrite the upstream target: localhost:3000 over plain HTTP.
        flow.request.host = "localhost"
        flow.request.port = 3000
        flow.request.scheme = 'http'
# Module-level hook: mitmproxy discovers addons through this list.
addons = [
LocalRedirect()
]
$ mitmdump -s local-redirect.py | grep pretty
When I visit the URL from my server, I see the logging statement, but my browser hangs on the request and there is no request made to my local server.
The above addon was fine, however my local server did not support HTTP2.
Using the --no-http2 option was a quick fix:
mitmproxy -s local-redirect.py --no-http2 --view-filter localhost
or
mitmdump -s local-redirect.py --no-http2 localhost

Nginx reverse proxy.. dynamic hostname with header key and value or url path

I have got some nginx problem.I hope you will help me to solve this problem.
There are several servers
User PC internet networked;
Nginx proxy, hostnamed "nginxproxy", located in internal network, and it has only server which has Public IP "1.1.1.1" but jumphost, 8090 listen.
server1 hostnamed "tomcat1" located in internal network (only has private IP "70.1.1.1")
server2 hostnamed "tomcat2" located in internal network (only has private IP "70.1.1.2")
and 5, 6, ... There are more servers hostnamed apache1, apache2, redis1 etc...
Now my client wants to send an HTTP request directly to a server located in the internal network, but that is not possible (because those servers don't have public IPs), so the call has to be passed to the nginx proxy first.
I just wonder: when I send a request from the user PC with the destination server's hostname in the request's header or URL, can nginx parse it and route the request to that destination in the internal network?
for example i call like this,
http://nginxproxy:1888/[destination hostname]/[path, files like index.html, some keys and values.&k1=v1. etc....]
i hope nginx pass and convert it and call there destination host like this
http://[destination hostname]:8888/[path, files like index.html, some keys and values.&k1=v1. etc....]
i tried to do this. there were some errors..
error log printed
"localhost could not be resolved (10060: Operation timed out), client: 127.0.0.1, server: localhost, request: "GET /localhost/8080/index"
server {
listen 1888;
server_name localhost;
# Capture /<host>/<port>/<path> from the request URI and proxy the
# request to http://<host>:<port>/<path>.
location ~^\/([a-zA-Z0-9]+)\/([0-9]+)\/([a-zA-Z0-9]+) {
# NOTE(review): when proxy_pass uses variables, nginx resolves the host
# at request time and needs an explicit `resolver` directive in this
# block -- its absence is the likely cause of the quoted
# "could not be resolved (10060)" error. TODO confirm.
proxy_pass http://$1:$2/$3;
}
}
and one more..
in the java code,
i set like this
import org.apache.http.HttpMessage;
HttpMessage request;
request.addHeader("destinationHost", "tomcat2");
request.addHeader("destinationPort", "8888");
and call to this url
http://nginxproxy:1888/[path, files like index.html, some keys and values.&k1=v1. etc....]
can nginx convert url to
http://tomcat2:8888/[path, files like index.html, some keys and values.&k1=v1. etc....]
and pass to there??
if so, how can i set nginx.conf
thank you so much and have a nice day..

Fixing unescaped # in the URL with nginx

A bad HTTP client isn't escaping hash signs and is sending them to nginx, like so:
GET /foo/escaped#stuff
Instead of:
GET /foo/escaped%23stuff
This breaks my nginx configuration, since nginx strips the text after the # in the proxy_pass directive. How do I escape the hash sign?
Using return 200 "$request_uri"; does show me that nginx is reading it, so it seems like it's possible. Nginx, however, ignores it in location blocks, so I can't actually match it with anything.
You can use the below code to send unescaped HTTP GET requests in Python:
import socket
def get(host, port, uri):
    """Issue a raw HTTP/1.0 GET for *uri* and return the response as bytes.

    The request line is sent byte-for-byte, so characters such as '#' are
    NOT percent-encoded the way a high-level HTTP client would do.

    :param host: server hostname or IP (also used for the Host: header)
    :param port: server TCP port
    :param uri: request target, sent unescaped
    :return: the complete raw response (headers + body) as ``bytes``
    """
    request = 'GET {} HTTP/1.0\r\nHost: {}\r\n\r\n'.format(uri, host)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        # BUG FIX: sockets carry bytes, not str (TypeError on Python 3),
        # and send() may transmit only part of the buffer -- encode the
        # request and use sendall().
        sock.sendall(request.encode('latin-1'))
        # BUG FIX: a single recv(1000) truncates longer responses; with
        # HTTP/1.0 the server closes the connection at end of response,
        # so read until EOF.
        chunks = []
        while True:
            data = sock.recv(4096)
            if not data:
                break
            chunks.append(data)
    return b''.join(chunks)

tornadoweb gzip=True not working under supervisord and nginx

While working with tornado, I've discovered gzip=True feature, which works fine while running application from command line, below settings:
# Command-line options understood by this app; parsed below.
define("port", default=settings.LISTEN_PORT, help="run on the given port", type=int)
define("debug", default=True, help="run in debug mode", type=bool)
define("dont_optimize_static_content", default=False,
help="Don't combine static resources", type=bool)
define("dont_embed_static_url", default=False,
help="Don't put embed the static URL in static_url()", type=bool)
tornado.options.parse_command_line()
# Redirect tornado's logging output to a file.
tornado.options.options['log_file_prefix'].set('/var/log/tmp.log')
# Application settings passed to the tornado Application.
app_settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=False,
# gzip=True enables response compression -- but tornado only honors it
# for clients that speak HTTP/1.1 (see the web.py fragment quoted later).
gzip=True,
debug=True,
)
However, deploying app with supervisord/nginx responses from tornado servers are not gziped.
[program:app-8001]
command=python /var/app/server/app.py --port=8001 --logging=debug ----dont_optimize_static_content=False
directory=/var/app/server/
stderr_logfile = /var/log/app-stderr.log
stderr_logfile_backups=10
stdout_logfile = /var/log/app-stdout.log
stdout_logfile_backups=10
process_name=%(program_name)s
loglevel=debug
Any ideas what am i doing wrong?
By default nginx doesn't do HTTP/1.1 requests when it proxies requests to Tornado (or anything for that matter). Tornado requires HTTP/1.1 support for returning gzip'ed content.
Significant code fragment from web.py
def __init__(self, request):
    # Compress only when the client speaks HTTP/1.1 AND advertises gzip
    # in Accept-Encoding -- so an HTTP/1.0 proxy hop disables gzip output.
    self._gzipping = request.supports_http_1_1() and \
"gzip" in request.headers.get("Accept-Encoding", "")
It should be overrideable by adding the following to your config file - however it doesn't work on my instance.
proxy_http_version 1.1;

Resources