 .coveragerc | 6
 doc-src/features/reverseproxy.html | 17
 doc-src/howmitmproxy.html | 9
 doc-src/modes.html | 14
 examples/change_upstream_proxy.py | 47
 examples/dns_spoofing.py | 47
 examples/ignore_websocket.py | 37
 examples/stub.py | 13
 libmproxy/cmdline.py | 248
 libmproxy/console/common.py | 2
 libmproxy/console/flowview.py | 2
 libmproxy/console/statusbar.py | 11
 libmproxy/contrib/README | 4
 libmproxy/contrib/tls/__init__.py | 5
 libmproxy/contrib/tls/_constructs.py | 213
 libmproxy/contrib/tls/utils.py | 26
 libmproxy/exceptions.py | 34
 libmproxy/filt.py | 6
 libmproxy/flow.py | 37
 libmproxy/main.py | 6
 libmproxy/models/__init__.py | 16
 libmproxy/models/connections.py (renamed from libmproxy/proxy/connection.py) | 46
 libmproxy/models/flow.py | 166
 libmproxy/models/http.py (renamed from libmproxy/protocol/http_wrappers.py) | 303
 libmproxy/protocol/__init__.py | 13
 libmproxy/protocol/base.py | 179
 libmproxy/protocol/handle.py | 20
 libmproxy/protocol/http.py | 1179
 libmproxy/protocol/http_replay.py | 96
 libmproxy/protocol/primitives.py | 294
 libmproxy/protocol/rawtcp.py | 66
 libmproxy/protocol/tcp.py | 97
 libmproxy/protocol/tls.py | 298
 libmproxy/proxy/__init__.py | 11
 libmproxy/proxy/config.py | 221
 libmproxy/proxy/modes/__init__.py | 12
 libmproxy/proxy/modes/http_proxy.py | 26
 libmproxy/proxy/modes/reverse_proxy.py | 17
 libmproxy/proxy/modes/socks_proxy.py | 60
 libmproxy/proxy/modes/transparent_proxy.py | 24
 libmproxy/proxy/primitives.py | 178
 libmproxy/proxy/root_context.py | 93
 libmproxy/proxy/server.py | 404
 libmproxy/utils.py | 4
 setup.py | 4
 test/scripts/stream_modify.py | 4
 test/test_cmdline.py | 10
 test/test_dump.py | 10
 test/test_filt.py | 2
 test/test_flow.py | 89
 test/test_protocol_http.py | 3
 test/test_proxy.py | 45
 test/test_server.py | 156
 test/tservers.py | 79
 test/tutils.py | 23
 55 files changed, 2620 insertions(+), 2412 deletions(-)
diff --git a/.coveragerc b/.coveragerc
index 70ff48e7..fef1089b 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,6 +1,10 @@
-[rum]
+[run]
branch = True
[report]
omit = *contrib*, *tnetstring*, *platform*, *console*, *main.py
include = *libmproxy*
+exclude_lines =
+ pragma: nocover
+ pragma: no cover
+ raise NotImplementedError()
\ No newline at end of file
diff --git a/doc-src/features/reverseproxy.html b/doc-src/features/reverseproxy.html
index 5ef4efc5..af5a5c53 100644
--- a/doc-src/features/reverseproxy.html
+++ b/doc-src/features/reverseproxy.html
@@ -7,22 +7,17 @@ mitmproxy forwards HTTP proxy requests to an upstream proxy server.
<table class="table">
<tbody>
<tr>
- <th width="20%">command-line</th> <td>-R <i>schema</i>://hostname[:port]</td>
+ <th width="20%">command-line</th> <td>-R <i>scheme</i>://hostname[:port]</td>
</tr>
</tbody>
</table>
-Here, **schema** is one of http, https, http2https or https2http. The latter
-two extended schema specifications control the use of HTTP and HTTPS on
-mitmproxy and the upstream server. You can indicate that mitmproxy should use
-HTTP, and the upstream server uses HTTPS like this:
+Here, **scheme** signifies if the proxy should use TLS to connect to the server.
+mitmproxy accepts both encrypted and unencrypted requests and transforms them to what the server
+expects.
- http2https://hostname:port
-
-And you can indicate that mitmproxy should use HTTPS while the upstream
-service uses HTTP like this:
-
- https2http://hostname:port
+ mitmdump -R https://httpbin.org -p 80
+ mitmdump -R https://httpbin.org -p 443
### Host Header
diff --git a/doc-src/howmitmproxy.html b/doc-src/howmitmproxy.html
index fabd393a..16b5f722 100644
--- a/doc-src/howmitmproxy.html
+++ b/doc-src/howmitmproxy.html
@@ -145,15 +145,6 @@ passed to us. Now we can pause the conversation, and initiate an upstream
connection using the correct SNI value, which then serves us the correct
upstream certificate, from which we can extract the expected CN and SANs.
-There's another wrinkle here. Due to a limitation of the SSL library mitmproxy
-uses, we can't detect that a connection _hasn't_ sent an SNI request until it's
-too late for upstream certificate sniffing. In practice, we therefore make a
-vanilla SSL connection upstream to sniff non-SNI certificates, and then discard
-the connection if the client sends an SNI notification. If you're watching your
-traffic with a packet sniffer, you'll see two connections to the server when an
-SNI request is made, the first of which is immediately closed after the SSL
-handshake. Luckily, this is almost never an issue in practice.
-
## Putting it all together
Lets put all of this together into the complete explicitly proxied HTTPS flow.
diff --git a/doc-src/modes.html b/doc-src/modes.html
index b5a38696..6bd92167 100644
--- a/doc-src/modes.html
+++ b/doc-src/modes.html
@@ -149,7 +149,7 @@ this:
<h1>Reverse Proxy</h1>
</div>
-Mitmproxy is usually used with a client that uses the proxy to access the
+mitmproxy is usually used with a client that uses the proxy to access the
Internet. Using reverse proxy mode, you can use mitmproxy to act like a normal
HTTP server:
@@ -173,15 +173,15 @@ on port 80. You can test your app on the example.com domain and get all
requests recorded in mitmproxy.
- Say you have some toy project that should get SSL support. Simply set up
-mitmproxy with SSL termination and you're done (<code>mitmdump -p 443 -R
-https2http://localhost:80/</code>). There are better tools for this specific
-task, but mitmproxy is very quick and simple way to set up an SSL-speaking
-server.
+mitmproxy as a reverse proxy on port 443 and you're done (<code>mitmdump -p 443 -R
+http://localhost:80/</code>). mitmproxy auto-detects TLS traffic and intercepts it dynamically.
+There are better tools for this specific task, but mitmproxy is a very quick and simple way to
+set up an SSL-speaking server.
- Want to add a non-SSL-capable compression proxy in front of your server? You
-could even spawn a mitmproxy instance that terminates SSL (https2http://...),
+could even spawn a mitmproxy instance that terminates SSL (-R http://...),
point it to the compression proxy and let the compression proxy point to a
-SSL-initiating mitmproxy (http2https://...), which then points to the real
+SSL-initiating mitmproxy (-R https://...), which then points to the real
server. As you see, it's a fairly flexible thing.
Note that mitmproxy supports either an HTTP or an HTTPS upstream server, not
diff --git a/examples/change_upstream_proxy.py b/examples/change_upstream_proxy.py
index 7782dd84..8f58e1f2 100644
--- a/examples/change_upstream_proxy.py
+++ b/examples/change_upstream_proxy.py
@@ -1,29 +1,34 @@
# This scripts demonstrates how mitmproxy can switch to a second/different upstream proxy
# in upstream proxy mode.
#
-# Usage: mitmdump -U http://default-upstream-proxy.local:8080/ -s
-# "change_upstream_proxy.py host"
-from libmproxy.protocol.http import send_connect_request
-
-alternative_upstream_proxy = ("localhost", 8082)
+# Usage: mitmdump -U http://default-upstream-proxy.local:8080/ -s change_upstream_proxy.py
+#
+# If you want to change the target server, you should modify flow.request.host and flow.request.port
+# flow.live.set_server should only be used by inline scripts to change the upstream proxy.
-def should_redirect(flow):
- return flow.request.host == "example.com"
+def proxy_address(flow):
+ # Poor man's loadbalancing: route every second domain through the alternative proxy.
+ if hash(flow.request.host) % 2 == 1:
+ return ("localhost", 8082)
+ else:
+ return ("localhost", 8081)
def request(context, flow):
- if flow.live and should_redirect(flow):
-
- # If you want to change the target server, you should modify flow.request.host and flow.request.port
- # flow.live.change_server should only be used by inline scripts to change the upstream proxy,
- # unless you are sure that you know what you are doing.
- server_changed = flow.live.change_server(
- alternative_upstream_proxy,
- persistent_change=True)
- if flow.request.scheme == "https" and server_changed:
- send_connect_request(
- flow.live.c.server_conn,
- flow.request.host,
- flow.request.port)
- flow.live.c.establish_ssl(server=True)
+ if flow.request.method == "CONNECT":
+ # If the decision is done by domain, one could also modify the server address here.
+ # We do it after CONNECT here to have the request data available as well.
+ return
+ address = proxy_address(flow)
+ if flow.live:
+ if flow.request.scheme == "http":
+ # For a normal HTTP request, we just change the proxy server and we're done!
+ if address != flow.live.server_conn.address:
+ flow.live.set_server(address, depth=1)
+ else:
+ # If we have CONNECTed (and thereby established "destination state"), the story is
+ # a bit more complex. Now we don't want to change the top level address (which is
+ # the connect destination) but the address below that. (Notice the `.via` and depth=2).
+ if address != flow.live.server_conn.via.address:
+ flow.live.set_server(address, depth=2)
diff --git a/examples/dns_spoofing.py b/examples/dns_spoofing.py
index dddf172c..98495d45 100644
--- a/examples/dns_spoofing.py
+++ b/examples/dns_spoofing.py
@@ -9,29 +9,42 @@ Using transparent mode is the better option most of the time.
Usage:
mitmproxy
- -p 80
- -R http://example.com/ // Used as the target location if no Host header is present
- mitmproxy
-p 443
- -R https://example.com/ // Used as the target locaction if neither SNI nor host header are present.
+ -s dns_spoofing.py
+ # Used as the target location if neither SNI nor host header are present.
+ -R http://example.com/
+ mitmdump
+ -p 80
+ -R http://localhost:443/
-mitmproxy will always connect to the default location first, so it must be reachable.
-As a workaround, you can spawn an arbitrary HTTP server and use that for both endpoints, e.g.
-mitmproxy -p 80 -R http://localhost:8000
-mitmproxy -p 443 -R https2http://localhost:8000
+ (Setting up a single proxy instance and using iptables to redirect to it
+ works as well)
"""
+import re
+
+
+# This regex splits the host header into host and port.
+# Handles the edge case of IPv6 addresses containing colons.
+# https://bugzilla.mozilla.org/show_bug.cgi?id=45891
+parse_host_header = re.compile(r"^(?P<host>[^:]+|\[.+\])(?::(?P<port>\d+))?$")
def request(context, flow):
if flow.client_conn.ssl_established:
- # TLS SNI or Host header
- flow.request.host = flow.client_conn.connection.get_servername(
- ) or flow.request.pretty_host(hostheader=True)
-
- # If you use a https2http location as default destination, these
- # attributes need to be corrected as well:
- flow.request.port = 443
flow.request.scheme = "https"
+ sni = flow.client_conn.connection.get_servername()
+ port = 443
else:
- # Host header
- flow.request.host = flow.request.pretty_host(hostheader=True)
+ flow.request.scheme = "http"
+ sni = None
+ port = 80
+
+ host_header = flow.request.pretty_host(hostheader=True)
+ m = parse_host_header.match(host_header)
+ if m:
+ host_header = m.group("host").strip("[]")
+ if m.group("port"):
+ port = int(m.group("port"))
+
+ flow.request.host = sni or host_header
+ flow.request.port = port
\ No newline at end of file
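
For reference, the parse_host_header regex introduced above keeps bracketed IPv6 literals intact while splitting off an optional port. A minimal sketch of its behaviour (the sample headers are illustrative):

    import re

    parse_host_header = re.compile(r"^(?P<host>[^:]+|\[.+\])(?::(?P<port>\d+))?$")

    for header in ("example.com", "example.com:8080", "[2001:db8::1]:8443"):
        m = parse_host_header.match(header)
        host = m.group("host").strip("[]")
        port = int(m.group("port")) if m.group("port") else None
        print("%s -> host=%s port=%s" % (header, host, port))
    # example.com -> host=example.com port=None
    # example.com:8080 -> host=example.com port=8080
    # [2001:db8::1]:8443 -> host=2001:db8::1 port=8443
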
diff --git a/examples/ignore_websocket.py b/examples/ignore_websocket.py
deleted file mode 100644
index 57e11d5b..00000000
--- a/examples/ignore_websocket.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This script makes mitmproxy switch to passthrough mode for all HTTP
-# responses with "Connection: Upgrade" header. This is useful to make
-# WebSockets work in untrusted environments.
-#
-# Note: Chrome (and possibly other browsers), when explicitly configured
-# to use a proxy (i.e. mitmproxy's regular mode), send a CONNECT request
-# to the proxy before they initiate the websocket connection.
-# To make WebSockets work in these cases, supply
-# `--ignore :80$` as an additional parameter.
-# (see http://mitmproxy.org/doc/features/passthrough.html)
-
-import netlib.http.semantics
-
-from libmproxy.protocol.tcp import TCPHandler
-from libmproxy.protocol import KILL
-from libmproxy.script import concurrent
-
-
-def start(context, argv):
- netlib.http.semantics.Request._headers_to_strip_off.remove("Connection")
- netlib.http.semantics.Request._headers_to_strip_off.remove("Upgrade")
-
-
-def done(context):
- netlib.http.semantics.Request._headers_to_strip_off.append("Connection")
- netlib.http.semantics.Request._headers_to_strip_off.append("Upgrade")
-
-
-@concurrent
-def response(context, flow):
- value = flow.response.headers.get_first("Connection", None)
- if value and value.upper() == "UPGRADE":
- # We need to send the response manually now...
- flow.client_conn.send(flow.client_conn.protocol.assemble(flow.response))
- # ...and then delegate to tcp passthrough.
- TCPHandler(flow.live.c, log=False).handle_messages()
- flow.reply(KILL)
diff --git a/examples/stub.py b/examples/stub.py
index d5502a47..bd3e7cd0 100644
--- a/examples/stub.py
+++ b/examples/stub.py
@@ -10,7 +10,7 @@ def start(context, argv):
context.log("start")
-def clientconnect(context, conn_handler):
+def clientconnect(context, root_layer):
"""
Called when a client initiates a connection to the proxy. Note that a
connection can correspond to multiple HTTP requests
@@ -18,7 +18,7 @@ def clientconnect(context, conn_handler):
context.log("clientconnect")
-def serverconnect(context, conn_handler):
+def serverconnect(context, server_connection):
"""
Called when the proxy initiates a connection to the target server. Note that a
connection can correspond to multiple HTTP requests
@@ -58,7 +58,14 @@ def error(context, flow):
context.log("error")
-def clientdisconnect(context, conn_handler):
+def serverdisconnect(context, server_connection):
+ """
+ Called when the proxy closes the connection to the target server.
+ """
+ context.log("serverdisconnect")
+
+
+def clientdisconnect(context, root_layer):
"""
Called when a client disconnects from the proxy.
"""
diff --git a/libmproxy/cmdline.py b/libmproxy/cmdline.py
index 6125bfbf..7f6f69ef 100644
--- a/libmproxy/cmdline.py
+++ b/libmproxy/cmdline.py
@@ -2,8 +2,8 @@ from __future__ import absolute_import
import os
import re
import configargparse
+from netlib.tcp import Address, sslversion_choices
-from netlib import http
import netlib.utils
from . import filt, utils, version
@@ -108,26 +108,9 @@ def parse_server_spec(url):
raise configargparse.ArgumentTypeError(
"Invalid server specification: %s" % url
)
-
- if p[0].lower() == "https":
- ssl = [True, True]
- else:
- ssl = [False, False]
-
- return ssl + list(p[1:3])
-
-
-def parse_server_spec_special(url):
- """
- Provides additional support for http2https and https2http schemes.
- """
- normalized_url = re.sub("^https?2", "", url)
- ret = parse_server_spec(normalized_url)
- if url.lower().startswith("https2http"):
- ret[0] = True
- elif url.lower().startswith("http2https"):
- ret[0] = False
- return ret
+ address = Address(p[1:3])
+ scheme = p[0].lower()
+ return config.ServerSpec(scheme, address)
def get_common_options(options):
@@ -192,24 +175,24 @@ def get_common_options(options):
outfile=options.outfile,
verbosity=options.verbose,
nopop=options.nopop,
- replay_ignore_content = options.replay_ignore_content,
- replay_ignore_params = options.replay_ignore_params,
- replay_ignore_payload_params = options.replay_ignore_payload_params,
- replay_ignore_host = options.replay_ignore_host
+ replay_ignore_content=options.replay_ignore_content,
+ replay_ignore_params=options.replay_ignore_params,
+ replay_ignore_payload_params=options.replay_ignore_payload_params,
+ replay_ignore_host=options.replay_ignore_host
)
-def common_options(parser):
+def basic_options(parser):
parser.add_argument(
'--version',
- action= 'version',
- version= "%(prog)s" + " " + version.VERSION
+ action='version',
+ version="%(prog)s" + " " + version.VERSION
)
parser.add_argument(
'--shortversion',
- action= 'version',
- help = "show program's short version number and exit",
- version = version.VERSION
+ action='version',
+ help="show program's short version number and exit",
+ version=version.VERSION
)
parser.add_argument(
"--anticache",
@@ -301,11 +284,42 @@ def common_options(parser):
"""
)
+
+def proxy_modes(parser):
+ group = parser.add_argument_group("Proxy Modes").add_mutually_exclusive_group()
+ group.add_argument(
+ "-R", "--reverse",
+ action="store",
+ type=parse_server_spec,
+ dest="reverse_proxy",
+ default=None,
+ help="""
+ Forward all requests to upstream HTTP server:
+ http[s][2http[s]]://host[:port]
+ """
+ )
+ group.add_argument(
+ "--socks",
+ action="store_true", dest="socks_proxy", default=False,
+ help="Set SOCKS5 proxy mode."
+ )
+ group.add_argument(
+ "-T", "--transparent",
+ action="store_true", dest="transparent_proxy", default=False,
+ help="Set transparent proxy mode."
+ )
+ group.add_argument(
+ "-U", "--upstream",
+ action="store",
+ type=parse_server_spec,
+ dest="upstream_proxy",
+ default=None,
+ help="Forward all requests to upstream proxy server: http://host[:port]"
+ )
+
+
+def proxy_options(parser):
group = parser.add_argument_group("Proxy Options")
- # We could make a mutually exclusive group out of -R, -U, -T, but we don't
- # do that because - --upstream-server should be in that group as well, but
- # it's already in a different group. - our own error messages are more
- # helpful
group.add_argument(
"-b", "--bind-address",
action="store", type=str, dest="addr", default='',
@@ -344,70 +358,78 @@ def common_options(parser):
action="store", type=int, dest="port", default=8080,
help="Proxy service port."
)
+
+
+def proxy_ssl_options(parser):
+ # TODO: Agree to consistently either use "upstream" or "server".
+ group = parser.add_argument_group("SSL")
group.add_argument(
- "-R", "--reverse",
- action="store",
- type=parse_server_spec_special,
- dest="reverse_proxy",
- default=None,
- help="""
- Forward all requests to upstream HTTP server:
- http[s][2http[s]]://host[:port]
- """
- )
+ "--cert",
+ dest='certs',
+ default=[],
+ type=str,
+ metavar="SPEC",
+ action="append",
+ help='Add an SSL certificate. SPEC is of the form "[domain=]path". '
+ 'The domain may include a wildcard, and is equal to "*" if not specified. '
+ 'The file at path is a certificate in PEM format. If a private key is included '
+ 'in the PEM, it is used, else the default key in the conf dir is used. '
+ 'The PEM file should contain the full certificate chain, with the leaf certificate '
+ 'as the first entry. Can be passed multiple times.')
group.add_argument(
- "--socks",
- action="store_true", dest="socks_proxy", default=False,
- help="Set SOCKS5 proxy mode."
+ "--ciphers-client", action="store",
+ type=str, dest="ciphers_client", default=config.DEFAULT_CLIENT_CIPHERS,
+ help="Set supported ciphers for client connections. (OpenSSL Syntax)"
)
group.add_argument(
- "-T", "--transparent",
- action="store_true", dest="transparent_proxy", default=False,
- help="Set transparent proxy mode."
+ "--ciphers-server", action="store",
+ type=str, dest="ciphers_server", default=None,
+ help="Set supported ciphers for server connections. (OpenSSL Syntax)"
)
group.add_argument(
- "-U", "--upstream",
- action="store",
- type=parse_server_spec,
- dest="upstream_proxy",
- default=None,
- help="Forward all requests to upstream proxy server: http://host[:port]"
+ "--client-certs", action="store",
+ type=str, dest="clientcerts", default=None,
+ help="Client certificate directory."
)
group.add_argument(
- "--spoof",
- action="store_true", dest="spoof_mode", default=False,
- help="Use Host header to connect to HTTP servers."
+ "--no-upstream-cert", default=False,
+ action="store_true", dest="no_upstream_cert",
+ help="Don't connect to upstream server to look up certificate details."
)
group.add_argument(
- "--ssl-spoof",
- action="store_true", dest="ssl_spoof_mode", default=False,
- help="Use TLS SNI to connect to HTTPS servers."
+ "--verify-upstream-cert", default=False,
+ action="store_true", dest="ssl_verify_upstream_cert",
+ help="Verify upstream server SSL/TLS certificates and fail if invalid "
+ "or not present."
)
group.add_argument(
- "--spoofed-port",
- action="store", dest="spoofed_ssl_port", type=int, default=443,
- help="Port number of upstream HTTPS servers in SSL spoof mode."
+ "--upstream-trusted-cadir", default=None, action="store",
+ dest="ssl_verify_upstream_trusted_cadir",
+ help="Path to a directory of trusted CA certificates for upstream "
+ "server verification prepared using the c_rehash tool."
)
-
- group = parser.add_argument_group(
- "Advanced Proxy Options",
- """
- The following options allow a custom adjustment of the proxy
- behavior. Normally, you don't want to use these options directly and
- use the provided wrappers instead (-R, -U, -T).
- """
+ group.add_argument(
+ "--upstream-trusted-ca", default=None, action="store",
+ dest="ssl_verify_upstream_trusted_ca",
+ help="Path to a PEM formatted trusted CA certificate."
)
group.add_argument(
- "--http-form-in", dest="http_form_in", default=None,
- action="store", choices=("relative", "absolute"),
- help="Override the HTTP request form accepted by the proxy"
+ "--ssl-version-client", dest="ssl_version_client",
+ default="secure", action="store",
+ choices=sslversion_choices.keys(),
+ help="Set supported SSL/TLS versions for client connections. "
+ "SSLv2, SSLv3 and 'all' are INSECURE. Defaults to secure, which is TLS1.0+."
)
group.add_argument(
- "--http-form-out", dest="http_form_out", default=None,
- action="store", choices=("relative", "absolute"),
- help="Override the HTTP request form sent upstream by the proxy"
+ "--ssl-version-server", dest="ssl_version_server",
+ default="secure", action="store",
+ choices=sslversion_choices.keys(),
+ help="Set supported SSL/TLS versions for server connections. "
+ "SSLv2, SSLv3 and 'all' are INSECURE. Defaults to secure, which is TLS1.0+."
)
+
+def onboarding_app(parser):
group = parser.add_argument_group("Onboarding App")
group.add_argument(
"--noapp",
@@ -433,6 +455,8 @@ def common_options(parser):
help="Port to serve the onboarding app from."
)
+
+def client_replay(parser):
group = parser.add_argument_group("Client Replay")
group.add_argument(
"-c", "--client-replay",
@@ -440,6 +464,8 @@ def common_options(parser):
help="Replay client requests from a saved file."
)
+
+def server_replay(parser):
group = parser.add_argument_group("Server Replay")
group.add_argument(
"-S", "--server-replay",
@@ -504,6 +530,8 @@ def common_options(parser):
default=False,
help="Ignore request's destination host while searching for a saved flow to replay")
+
+def replacements(parser):
group = parser.add_argument_group(
"Replacements",
"""
@@ -520,14 +548,16 @@ def common_options(parser):
)
group.add_argument(
"--replace-from-file",
- action = "append", type=str, dest="replace_file", default=[],
- metavar = "PATH",
- help = """
+ action="append", type=str, dest="replace_file", default=[],
+ metavar="PATH",
+ help="""
Replacement pattern, where the replacement clause is a path to a
file.
"""
)
+
+def set_headers(parser):
group = parser.add_argument_group(
"Set Headers",
"""
@@ -543,21 +573,22 @@ def common_options(parser):
help="Header set pattern."
)
+
+def proxy_authentication(parser):
group = parser.add_argument_group(
"Proxy Authentication",
"""
Specify which users are allowed to access the proxy and the method
used for authenticating them.
"""
- )
- user_specification_group = group.add_mutually_exclusive_group()
- user_specification_group.add_argument(
+ ).add_mutually_exclusive_group()
+ group.add_argument(
"--nonanonymous",
action="store_true", dest="auth_nonanonymous",
help="Allow access to any user long as a credentials are specified."
)
- user_specification_group.add_argument(
+ group.add_argument(
"--singleuser",
action="store", dest="auth_singleuser", type=str,
metavar="USER",
@@ -566,14 +597,25 @@ def common_options(parser):
username:password.
"""
)
- user_specification_group.add_argument(
+ group.add_argument(
"--htpasswd",
action="store", dest="auth_htpasswd", type=str,
metavar="PATH",
help="Allow access to users specified in an Apache htpasswd file."
)
- config.ssl_option_group(parser)
+
+def common_options(parser):
+ basic_options(parser)
+ proxy_modes(parser)
+ proxy_options(parser)
+ proxy_ssl_options(parser)
+ onboarding_app(parser)
+ client_replay(parser)
+ server_replay(parser)
+ replacements(parser)
+ set_headers(parser)
+ proxy_authentication(parser)
def mitmproxy():
@@ -583,13 +625,13 @@ def mitmproxy():
parser = configargparse.ArgumentParser(
usage="%(prog)s [options]",
- args_for_setting_config_path = ["--conf"],
- default_config_files = [
+ args_for_setting_config_path=["--conf"],
+ default_config_files=[
os.path.join(config.CA_DIR, "common.conf"),
os.path.join(config.CA_DIR, "mitmproxy.conf")
],
- add_config_file_help = True,
- add_env_var_help = True
+ add_config_file_help=True,
+ add_env_var_help=True
)
common_options(parser)
parser.add_argument(
@@ -633,20 +675,20 @@ def mitmproxy():
def mitmdump():
parser = configargparse.ArgumentParser(
usage="%(prog)s [options] [filter]",
- args_for_setting_config_path = ["--conf"],
- default_config_files = [
+ args_for_setting_config_path=["--conf"],
+ default_config_files=[
os.path.join(config.CA_DIR, "common.conf"),
os.path.join(config.CA_DIR, "mitmdump.conf")
],
- add_config_file_help = True,
- add_env_var_help = True
+ add_config_file_help=True,
+ add_env_var_help=True
)
common_options(parser)
parser.add_argument(
"--keepserving",
- action= "store_true", dest="keepserving", default=False,
- help= """
+ action="store_true", dest="keepserving", default=False,
+ help="""
Continue serving after client playback or file read. We exit by
default.
"""
@@ -663,13 +705,13 @@ def mitmdump():
def mitmweb():
parser = configargparse.ArgumentParser(
usage="%(prog)s [options]",
- args_for_setting_config_path = ["--conf"],
- default_config_files = [
+ args_for_setting_config_path=["--conf"],
+ default_config_files=[
os.path.join(config.CA_DIR, "common.conf"),
os.path.join(config.CA_DIR, "mitmweb.conf")
],
- add_config_file_help = True,
- add_env_var_help = True
+ add_config_file_help=True,
+ add_env_var_help=True
)
group = parser.add_argument_group("Mitmweb")
diff --git a/libmproxy/console/common.py b/libmproxy/console/common.py
index 1940e390..c25f7267 100644
--- a/libmproxy/console/common.py
+++ b/libmproxy/console/common.py
@@ -8,7 +8,7 @@ from netlib.http.semantics import CONTENT_MISSING
import netlib.utils
from .. import utils
-from ..protocol.http import decoded
+from ..models import decoded
from . import signals
diff --git a/libmproxy/console/flowview.py b/libmproxy/console/flowview.py
index 1e0f0c17..8b828653 100644
--- a/libmproxy/console/flowview.py
+++ b/libmproxy/console/flowview.py
@@ -9,7 +9,7 @@ from netlib.http.semantics import CONTENT_MISSING
from . import common, grideditor, contentview, signals, searchable, tabs
from . import flowdetailview
from .. import utils, controller
-from ..protocol.http import HTTPRequest, HTTPResponse, decoded
+from ..models import HTTPRequest, HTTPResponse, decoded
class SearchError(Exception):
diff --git a/libmproxy/console/statusbar.py b/libmproxy/console/statusbar.py
index 7eb2131b..ea2dbfa8 100644
--- a/libmproxy/console/statusbar.py
+++ b/libmproxy/console/statusbar.py
@@ -199,11 +199,12 @@ class StatusBar(urwid.WidgetWrap):
r.append("[%s]" % (":".join(opts)))
if self.master.server.config.mode in ["reverse", "upstream"]:
- dst = self.master.server.config.mode.dst
- scheme = "https" if dst[0] else "http"
- if dst[1] != dst[0]:
- scheme += "2https" if dst[1] else "http"
- r.append("[dest:%s]" % utils.unparse_url(scheme, *dst[2:]))
+ dst = self.master.server.config.upstream_server
+ r.append("[dest:%s]" % netlib.utils.unparse_url(
+ dst.scheme,
+ dst.address.host,
+ dst.address.port
+ ))
if self.master.scripts:
r.append("[")
r.append(("heading_key", "s"))
diff --git a/libmproxy/contrib/README b/libmproxy/contrib/README
index 3b0f7512..e5ce11da 100644
--- a/libmproxy/contrib/README
+++ b/libmproxy/contrib/README
@@ -8,3 +8,7 @@ jsbeautifier, git checkout 25/03/12, MIT license
wbxml
- https://github.com/davidpshaw/PyWBXMLDecoder
+
+tls, BSD license
+ - https://github.com/mhils/tls/tree/mitmproxy
+ - limited to required files.
\ No newline at end of file
diff --git a/libmproxy/contrib/tls/__init__.py b/libmproxy/contrib/tls/__init__.py
new file mode 100644
index 00000000..4b540884
--- /dev/null
+++ b/libmproxy/contrib/tls/__init__.py
@@ -0,0 +1,5 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
diff --git a/libmproxy/contrib/tls/_constructs.py b/libmproxy/contrib/tls/_constructs.py
new file mode 100644
index 00000000..9c57a799
--- /dev/null
+++ b/libmproxy/contrib/tls/_constructs.py
@@ -0,0 +1,213 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from construct import (Array, Bytes, Struct, UBInt16, UBInt32, UBInt8, PascalString, Embed, TunnelAdapter, GreedyRange,
+ Switch, OptionalGreedyRange)
+
+from .utils import UBInt24
+
+ProtocolVersion = Struct(
+ "version",
+ UBInt8("major"),
+ UBInt8("minor"),
+)
+
+TLSPlaintext = Struct(
+ "TLSPlaintext",
+ UBInt8("type"),
+ ProtocolVersion,
+ UBInt16("length"), # TODO: Reject packets with length > 2 ** 14
+ Bytes("fragment", lambda ctx: ctx.length),
+)
+
+TLSCompressed = Struct(
+ "TLSCompressed",
+ UBInt8("type"),
+ ProtocolVersion,
+ UBInt16("length"), # TODO: Reject packets with length > 2 ** 14 + 1024
+ Bytes("fragment", lambda ctx: ctx.length),
+)
+
+TLSCiphertext = Struct(
+ "TLSCiphertext",
+ UBInt8("type"),
+ ProtocolVersion,
+ UBInt16("length"), # TODO: Reject packets with length > 2 ** 14 + 2048
+ Bytes("fragment", lambda ctx: ctx.length),
+)
+
+Random = Struct(
+ "random",
+ UBInt32("gmt_unix_time"),
+ Bytes("random_bytes", 28),
+)
+
+SessionID = Struct(
+ "session_id",
+ UBInt8("length"),
+ Bytes("session_id", lambda ctx: ctx.length),
+)
+
+CipherSuites = Struct(
+ "cipher_suites",
+ UBInt16("length"), # TODO: Reject packets of length 0
+ Array(lambda ctx: ctx.length // 2, UBInt16("cipher_suites")),
+)
+
+CompressionMethods = Struct(
+ "compression_methods",
+ UBInt8("length"), # TODO: Reject packets of length 0
+ Array(lambda ctx: ctx.length, UBInt8("compression_methods")),
+)
+
+ServerName = Struct(
+ "",
+ UBInt8("type"),
+ PascalString("name", length_field=UBInt16("length")),
+)
+
+SNIExtension = Struct(
+ "",
+ TunnelAdapter(
+ PascalString("server_names", length_field=UBInt16("length")),
+ TunnelAdapter(
+ PascalString("", length_field=UBInt16("length")),
+ GreedyRange(ServerName)
+ ),
+ ),
+)
+
+ALPNExtension = Struct(
+ "",
+ TunnelAdapter(
+ PascalString("alpn_protocols", length_field=UBInt16("length")),
+ TunnelAdapter(
+ PascalString("", length_field=UBInt16("length")),
+ GreedyRange(PascalString("name"))
+ ),
+ ),
+)
+
+UnknownExtension = Struct(
+ "",
+ PascalString("bytes", length_field=UBInt16("extensions_length"))
+)
+
+Extension = Struct(
+ "Extension",
+ UBInt16("type"),
+ Embed(
+ Switch(
+ "", lambda ctx: ctx.type,
+ {
+ 0x00: SNIExtension,
+ 0x10: ALPNExtension
+ },
+ default=UnknownExtension
+ )
+ )
+)
+
+extensions = TunnelAdapter(
+ PascalString("extensions", length_field=UBInt16("extensions_length")),
+ OptionalGreedyRange(Extension)
+)
+
+ClientHello = Struct(
+ "ClientHello",
+ ProtocolVersion,
+ Random,
+ SessionID,
+ CipherSuites,
+ CompressionMethods,
+ extensions,
+)
+
+ServerHello = Struct(
+ "ServerHello",
+ ProtocolVersion,
+ Random,
+ SessionID,
+ Bytes("cipher_suite", 2),
+ UBInt8("compression_method"),
+ extensions,
+)
+
+ClientCertificateType = Struct(
+ "certificate_types",
+ UBInt8("length"), # TODO: Reject packets of length 0
+ Array(lambda ctx: ctx.length, UBInt8("certificate_types")),
+)
+
+SignatureAndHashAlgorithm = Struct(
+ "algorithms",
+ UBInt8("hash"),
+ UBInt8("signature"),
+)
+
+SupportedSignatureAlgorithms = Struct(
+ "supported_signature_algorithms",
+ UBInt16("supported_signature_algorithms_length"),
+ # TODO: Reject packets of length 0
+ Array(
+ lambda ctx: ctx.supported_signature_algorithms_length / 2,
+ SignatureAndHashAlgorithm,
+ ),
+)
+
+DistinguishedName = Struct(
+ "certificate_authorities",
+ UBInt16("length"),
+ Bytes("certificate_authorities", lambda ctx: ctx.length),
+)
+
+CertificateRequest = Struct(
+ "CertificateRequest",
+ ClientCertificateType,
+ SupportedSignatureAlgorithms,
+ DistinguishedName,
+)
+
+ServerDHParams = Struct(
+ "ServerDHParams",
+ UBInt16("dh_p_length"),
+ Bytes("dh_p", lambda ctx: ctx.dh_p_length),
+ UBInt16("dh_g_length"),
+ Bytes("dh_g", lambda ctx: ctx.dh_g_length),
+ UBInt16("dh_Ys_length"),
+ Bytes("dh_Ys", lambda ctx: ctx.dh_Ys_length),
+)
+
+PreMasterSecret = Struct(
+ "pre_master_secret",
+ ProtocolVersion,
+ Bytes("random_bytes", 46),
+)
+
+ASN1Cert = Struct(
+ "ASN1Cert",
+ UBInt32("length"), # TODO: Reject packets with length not in 1..2^24-1
+ Bytes("asn1_cert", lambda ctx: ctx.length),
+)
+
+Certificate = Struct(
+ "Certificate", # TODO: Reject packets with length > 2 ** 24 - 1
+ UBInt32("certificates_length"),
+ Bytes("certificates_bytes", lambda ctx: ctx.certificates_length),
+)
+
+Handshake = Struct(
+ "Handshake",
+ UBInt8("msg_type"),
+ UBInt24("length"),
+ Bytes("body", lambda ctx: ctx.length),
+)
+
+Alert = Struct(
+ "Alert",
+ UBInt8("level"),
+ UBInt8("description"),
+)
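
These constructs let mitmproxy peek at a raw TLS ClientHello, e.g. to read the SNI before deciding how to handle a connection. The sketch below shows one plausible way to use them; it is an illustration only, the function name is invented, and it assumes the whole ClientHello fits into a single TLS record:

    # Illustrative sketch; a real ClientHello may span several records.
    from libmproxy.contrib.tls import _constructs

    def extract_sni(raw_record):
        record = _constructs.TLSPlaintext.parse(raw_record)
        handshake = _constructs.Handshake.parse(record.fragment)
        if handshake.msg_type != 0x01:  # 0x01 = client_hello
            return None
        client_hello = _constructs.ClientHello.parse(handshake.body)
        for extension in client_hello.extensions:
            if extension.type == 0x00:  # server_name extension
                return extension.server_names[0].name
        return None
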
diff --git a/libmproxy/contrib/tls/utils.py b/libmproxy/contrib/tls/utils.py
new file mode 100644
index 00000000..4c917303
--- /dev/null
+++ b/libmproxy/contrib/tls/utils.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import construct
+
+import six
+
+
+class _UBInt24(construct.Adapter):
+ def _encode(self, obj, context):
+ return (
+ six.int2byte((obj & 0xFF0000) >> 16) +
+ six.int2byte((obj & 0x00FF00) >> 8) +
+ six.int2byte(obj & 0x0000FF)
+ )
+
+ def _decode(self, obj, context):
+ obj = bytearray(obj)
+ return (obj[0] << 16 | obj[1] << 8 | obj[2])
+
+
+def UBInt24(name): # noqa
+ return _UBInt24(construct.Bytes(name, 3))
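
A quick round-trip check of the 24-bit integer adapter, assuming the legacy construct 2.x parse/build API this vendored code targets (illustrative):

    from libmproxy.contrib.tls.utils import UBInt24

    length = UBInt24("length")
    assert length.parse(b"\x01\x00\x00") == 0x010000   # 65536
    assert length.build(0x010000) == b"\x01\x00\x00"
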
diff --git a/libmproxy/exceptions.py b/libmproxy/exceptions.py
new file mode 100644
index 00000000..f34d9707
--- /dev/null
+++ b/libmproxy/exceptions.py
@@ -0,0 +1,34 @@
+from __future__ import (absolute_import, print_function, division)
+
+
+class ProxyException(Exception):
+ """
+ Base class for all exceptions thrown by libmproxy.
+ """
+ def __init__(self, message, cause=None):
+ """
+ :param message: Error Message
+ :param cause: Exception object that caused this exception to be thrown.
+ """
+ super(ProxyException, self).__init__(message)
+ self.cause = cause
+
+
+class ProtocolException(ProxyException):
+ pass
+
+
+class Socks5Exception(ProtocolException):
+ pass
+
+
+class HttpException(ProtocolException):
+ pass
+
+
+class InvalidCredentials(HttpException):
+ pass
+
+
+class ServerException(ProxyException):
+ pass
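
The cause argument exists so protocol code can wrap a low-level error without losing it. A hedged sketch of the intended pattern (the reading code is purely illustrative):

    from libmproxy.exceptions import ProtocolException

    def read_chunk(connection):
        try:
            return connection.rfile.read(4096)
        except IOError as e:
            # Keep the original exception around for logging/debugging.
            raise ProtocolException("Error reading from server: %s" % e, cause=e)
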
diff --git a/libmproxy/filt.py b/libmproxy/filt.py
index bd17a807..cfd3a1bc 100644
--- a/libmproxy/filt.py
+++ b/libmproxy/filt.py
@@ -35,7 +35,7 @@ from __future__ import absolute_import
import re
import sys
import pyparsing as pp
-from .protocol.http import decoded
+from .models import decoded
class _Token:
@@ -246,14 +246,14 @@ class FSrc(_Rex):
help = "Match source address"
def __call__(self, f):
- return f.client_conn and re.search(self.expr, repr(f.client_conn.address))
+ return f.client_conn.address and re.search(self.expr, repr(f.client_conn.address))
class FDst(_Rex):
code = "dst"
help = "Match destination address"
def __call__(self, f):
- return f.server_conn and re.search(self.expr, repr(f.server_conn.address))
+ return f.server_conn.address and re.search(self.expr, repr(f.server_conn.address))
class _Int(_Action):
def __init__(self, num):
diff --git a/libmproxy/flow.py b/libmproxy/flow.py
index 3d9ef722..5eac8da9 100644
--- a/libmproxy/flow.py
+++ b/libmproxy/flow.py
@@ -8,17 +8,18 @@ import Cookie
import cookielib
import os
import re
+import urlparse
+
-from netlib import odict, wsgi, tcp
+from netlib import odict, wsgi
from netlib.http.semantics import CONTENT_MISSING
import netlib.http
-
-from . import controller, protocol, tnetstring, filt, script, version
+from . import controller, tnetstring, filt, script, version
from .onboarding import app
-from .protocol import http, handle
from .proxy.config import HostMatcher
-from .proxy.connection import ClientConnection, ServerConnection
-import urlparse
+from .protocol.http_replay import RequestReplayThread
+from .protocol import Kill
+from .models import ClientConnection, ServerConnection, HTTPResponse, HTTPFlow, HTTPRequest
class AppRegistry:
@@ -788,7 +789,7 @@ class FlowMaster(controller.Master):
rflow = self.server_playback.next_flow(flow)
if not rflow:
return None
- response = http.HTTPResponse.from_state(rflow.response.get_state())
+ response = HTTPResponse.from_state(rflow.response.get_state())
response.is_replay = True
if self.refresh_server_playback:
response.refresh()
@@ -834,10 +835,10 @@ class FlowMaster(controller.Master):
sni=host,
ssl_established=True
))
- f = http.HTTPFlow(c, s)
+ f = HTTPFlow(c, s)
headers = odict.ODictCaseless()
- req = http.HTTPRequest(
+ req = HTTPRequest(
"absolute",
method,
scheme,
@@ -859,9 +860,9 @@ class FlowMaster(controller.Master):
"""
if self.server and self.server.config.mode == "reverse":
- f.request.host, f.request.port = self.server.config.mode.dst[2:]
- f.request.scheme = "https" if self.server.config.mode.dst[
- 1] else "http"
+ f.request.host = self.server.config.upstream_server.address.host
+ f.request.port = self.server.config.upstream_server.address.port
+ f.request.scheme = re.sub("^https?2", "", self.server.config.upstream_server.scheme)
f.reply = controller.DummyReply()
if f.request:
@@ -934,7 +935,7 @@ class FlowMaster(controller.Master):
f.response = None
f.error = None
self.process_new_request(f)
- rt = http.RequestReplayThread(
+ rt = RequestReplayThread(
self.server.config,
f,
self.masterq if run_scripthooks else False,
@@ -960,6 +961,10 @@ class FlowMaster(controller.Master):
self.run_script_hook("serverconnect", sc)
sc.reply()
+ def handle_serverdisconnect(self, sc):
+ self.run_script_hook("serverdisconnect", sc)
+ sc.reply()
+
def handle_error(self, f):
self.state.update_flow(f)
self.run_script_hook("error", f)
@@ -979,7 +984,7 @@ class FlowMaster(controller.Master):
)
if err:
self.add_event("Error in wsgi app. %s" % err, "error")
- f.reply(protocol.KILL)
+ f.reply(Kill)
return
if f not in self.state.flows: # don't add again on replay
self.state.add_flow(f)
@@ -996,7 +1001,7 @@ class FlowMaster(controller.Master):
if self.stream_large_bodies:
self.stream_large_bodies.run(f, False)
except netlib.http.HttpError:
- f.reply(protocol.KILL)
+ f.reply(Kill)
return
f.reply()
@@ -1089,7 +1094,7 @@ class FlowReader:
"Incompatible serialized data version: %s" % v
)
off = self.fo.tell()
- yield handle.protocols[data["type"]]["flow"].from_state(data)
+ yield HTTPFlow.from_state(data)
except ValueError as v:
# Error is due to EOF
if self.fo.tell() == off and self.fo.read() == '':
diff --git a/libmproxy/main.py b/libmproxy/main.py
index abf3fb9c..23cb487c 100644
--- a/libmproxy/main.py
+++ b/libmproxy/main.py
@@ -2,11 +2,11 @@ from __future__ import print_function, absolute_import
import os
import signal
import sys
-import netlib.version
from netlib.version_check import check_pyopenssl_version, check_mitmproxy_version
from . import version, cmdline
-from .proxy import process_proxy_options, ProxyServerError
+from .exceptions import ServerException
from .proxy.server import DummyServer, ProxyServer
+from .proxy.config import process_proxy_options
def assert_utf8_env():
@@ -31,7 +31,7 @@ def get_server(dummy_server, options):
else:
try:
return ProxyServer(options)
- except ProxyServerError as v:
+ except ServerException as v:
print(str(v), file=sys.stderr)
sys.exit(1)
diff --git a/libmproxy/models/__init__.py b/libmproxy/models/__init__.py
new file mode 100644
index 00000000..a54f305f
--- /dev/null
+++ b/libmproxy/models/__init__.py
@@ -0,0 +1,16 @@
+from __future__ import (absolute_import, print_function, division)
+
+from .http import (
+ HTTPFlow, HTTPRequest, HTTPResponse, decoded,
+ make_error_response, make_connect_request, make_connect_response
+)
+from .connections import ClientConnection, ServerConnection
+from .flow import Flow, Error
+
+__all__ = [
+ "HTTPFlow", "HTTPRequest", "HTTPResponse", "decoded",
+ "make_error_response", "make_connect_request",
+ "make_connect_response",
+ "ClientConnection", "ServerConnection",
+ "Flow", "Error",
+]
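
With the new models package, inline scripts can import the HTTP primitives from a single place. A small sketch, reusing the decoded example from this change set:

    from libmproxy.models import decoded

    def response(context, flow):
        with decoded(flow.response):  # decode, edit, then re-encode transparently
            flow.response.content = flow.response.content.replace("foo", "bar")
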
diff --git a/libmproxy/proxy/connection.py b/libmproxy/models/connections.py
index 9e03157a..f1e10de9 100644
--- a/libmproxy/proxy/connection.py
+++ b/libmproxy/models/connections.py
@@ -1,6 +1,8 @@
-from __future__ import absolute_import
+from __future__ import (absolute_import, print_function, division)
+
import copy
import os
+
from netlib import tcp, certutils
from .. import stateobject, utils
@@ -10,7 +12,7 @@ class ClientConnection(tcp.BaseHandler, stateobject.StateObject):
# Eventually, this object is restored from state. We don't have a
# connection then.
if client_connection:
- tcp.BaseHandler.__init__(self, client_connection, address, server)
+ super(ClientConnection, self).__init__(client_connection, address, server)
else:
self.connection = None
self.server = None
@@ -25,6 +27,9 @@ class ClientConnection(tcp.BaseHandler, stateobject.StateObject):
self.timestamp_ssl_setup = None
self.protocol = None
+ def __nonzero__(self):
+ return bool(self.connection) and not self.finished
+
def __repr__(self):
return "<ClientConnection: {ssl}{host}:{port}>".format(
ssl="[ssl] " if self.ssl_established else "",
@@ -32,6 +37,10 @@ class ClientConnection(tcp.BaseHandler, stateobject.StateObject):
port=self.address.port
)
+ @property
+ def tls_established(self):
+ return self.ssl_established
+
_stateobject_attributes = dict(
ssl_established=bool,
timestamp_start=float,
@@ -71,20 +80,11 @@ class ClientConnection(tcp.BaseHandler, stateobject.StateObject):
return f
def convert_to_ssl(self, *args, **kwargs):
- # TODO: read ALPN from server and select same proto for client conn
- # alpn_select = 'h2'
- # def alpn_select_callback(conn_, options):
- # if alpn_select in options:
- # return bytes(alpn_select)
- # else: # pragma no cover
- # return options[0]
- # tcp.BaseHandler.convert_to_ssl(self, alpn_select=alpn_select_callback, *args, **kwargs)
-
- tcp.BaseHandler.convert_to_ssl(self, *args, **kwargs)
+ super(ClientConnection, self).convert_to_ssl(*args, **kwargs)
self.timestamp_ssl_setup = utils.timestamp()
def finish(self):
- tcp.BaseHandler.finish(self)
+ super(ClientConnection, self).finish()
self.timestamp_end = utils.timestamp()
@@ -92,13 +92,16 @@ class ServerConnection(tcp.TCPClient, stateobject.StateObject):
def __init__(self, address):
tcp.TCPClient.__init__(self, address)
- self.state = [] # a list containing (conntype, state) tuples
+ self.via = None
self.timestamp_start = None
self.timestamp_end = None
self.timestamp_tcp_setup = None
self.timestamp_ssl_setup = None
self.protocol = None
+ def __nonzero__(self):
+ return bool(self.connection) and not self.finished
+
def __repr__(self):
if self.ssl_established and self.sni:
ssl = "[ssl: {0}] ".format(self.sni)
@@ -112,8 +115,11 @@ class ServerConnection(tcp.TCPClient, stateobject.StateObject):
port=self.address.port
)
+ @property
+ def tls_established(self):
+ return self.ssl_established
+
_stateobject_attributes = dict(
- state=list,
timestamp_start=float,
timestamp_end=float,
timestamp_tcp_setup=float,
@@ -131,8 +137,8 @@ class ServerConnection(tcp.TCPClient, stateobject.StateObject):
d.update(
address={"address": self.address(),
"use_ipv6": self.address.use_ipv6},
- source_address= ({"address": self.source_address(),
- "use_ipv6": self.source_address.use_ipv6} if self.source_address else None),
+ source_address=({"address": self.source_address(),
+ "use_ipv6": self.source_address.use_ipv6} if self.source_address else None),
cert=self.cert.to_pem() if self.cert else None
)
return d
@@ -176,9 +182,6 @@ class ServerConnection(tcp.TCPClient, stateobject.StateObject):
if os.path.exists(path):
clientcert = path
- # TODO: read ALPN from client and use same list for server conn
- # self.convert_to_ssl(cert=clientcert, sni=sni, alpn_protos=[netlib.http.http2.HTTP2Protocol.ALPN_PROTO_H2], **kwargs)
-
self.convert_to_ssl(cert=clientcert, sni=sni, **kwargs)
self.sni = sni
self.timestamp_ssl_setup = utils.timestamp()
@@ -186,3 +189,6 @@ class ServerConnection(tcp.TCPClient, stateobject.StateObject):
def finish(self):
tcp.TCPClient.finish(self)
self.timestamp_end = utils.timestamp()
+
+
+ServerConnection._stateobject_attributes["via"] = ServerConnection
diff --git a/libmproxy/models/flow.py b/libmproxy/models/flow.py
new file mode 100644
index 00000000..8eff18f4
--- /dev/null
+++ b/libmproxy/models/flow.py
@@ -0,0 +1,166 @@
+from __future__ import (absolute_import, print_function, division)
+import copy
+import uuid
+
+from .. import stateobject, utils, version
+from .connections import ClientConnection, ServerConnection
+
+
+class Error(stateobject.StateObject):
+ """
+ An Error.
+
+ This is distinct from a protocol error response (say, an HTTP 500),
+ which is represented by a normal HTTPResponse object. This class is
+ responsible for indicating errors that fall outside of normal protocol
+ communications, like interrupted connections, timeouts, protocol errors.
+
+ Exposes the following attributes:
+
+ flow: Flow object
+ msg: Message describing the error
+ timestamp: Seconds since the epoch
+ """
+
+ def __init__(self, msg, timestamp=None):
+ """
+ @type msg: str
+ @type timestamp: float
+ """
+ self.flow = None # will usually be set by the flow backref mixin
+ self.msg = msg
+ self.timestamp = timestamp or utils.timestamp()
+
+ _stateobject_attributes = dict(
+ msg=str,
+ timestamp=float
+ )
+
+ def __str__(self):
+ return self.msg
+
+ @classmethod
+ def from_state(cls, state):
+ # the default implementation assumes an empty constructor. Override
+ # accordingly.
+ f = cls(None)
+ f.load_state(state)
+ return f
+
+ def copy(self):
+ c = copy.copy(self)
+ return c
+
+
+class Flow(stateobject.StateObject):
+ """
+ A Flow is a collection of objects representing a single transaction.
+ This class is usually subclassed for each protocol, e.g. HTTPFlow.
+ """
+
+ def __init__(self, type, client_conn, server_conn, live=None):
+ self.type = type
+ self.id = str(uuid.uuid4())
+ self.client_conn = client_conn
+ """@type: ClientConnection"""
+ self.server_conn = server_conn
+ """@type: ServerConnection"""
+ self.live = live
+ """@type: LiveConnection"""
+
+ self.error = None
+ """@type: Error"""
+ self.intercepted = False
+ """@type: bool"""
+ self._backup = None
+ self.reply = None
+
+ _stateobject_attributes = dict(
+ id=str,
+ error=Error,
+ client_conn=ClientConnection,
+ server_conn=ServerConnection,
+ type=str,
+ intercepted=bool
+ )
+
+ def get_state(self, short=False):
+ d = super(Flow, self).get_state(short)
+ d.update(version=version.IVERSION)
+ if self._backup and self._backup != d:
+ if short:
+ d.update(modified=True)
+ else:
+ d.update(backup=self._backup)
+ return d
+
+ def __eq__(self, other):
+ return self is other
+
+ def copy(self):
+ f = copy.copy(self)
+
+ f.id = str(uuid.uuid4())
+ f.live = False
+ f.client_conn = self.client_conn.copy()
+ f.server_conn = self.server_conn.copy()
+
+ if self.error:
+ f.error = self.error.copy()
+ return f
+
+ def modified(self):
+ """
+ Has this Flow been modified?
+ """
+ if self._backup:
+ return self._backup != self.get_state()
+ else:
+ return False
+
+ def backup(self, force=False):
+ """
+ Save a backup of this Flow, which can be reverted to using a
+ call to .revert().
+ """
+ if not self._backup:
+ self._backup = self.get_state()
+
+ def revert(self):
+ """
+ Revert to the last backed up state.
+ """
+ if self._backup:
+ self.load_state(self._backup)
+ self._backup = None
+
+ def kill(self, master):
+ """
+ Kill this request.
+ """
+ from ..protocol import Kill
+
+ self.error = Error("Connection killed")
+ self.intercepted = False
+ self.reply(Kill)
+ master.handle_error(self)
+
+ def intercept(self, master):
+ """
+ Intercept this Flow. Processing will stop until accept_intercept is
+ called.
+ """
+ if self.intercepted:
+ return
+ self.intercepted = True
+ master.handle_intercept(self)
+
+ def accept_intercept(self, master):
+ """
+ Continue with the flow - called after an intercept().
+ """
+ if not self.intercepted:
+ return
+ self.intercepted = False
+ self.reply()
+ master.handle_accept_intercept(self)
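
The backup/modified/revert trio is meant to be used together; a brief sketch of how an inline script might use it (the header name is made up for illustration):

    def request(context, flow):
        flow.backup()                            # snapshot the current state
        flow.request.headers["X-Debug"] = ["1"]  # headers are an ODictCaseless; values are lists
        if flow.modified():
            context.log("flow was changed after backup()")
        # flow.revert()  # would restore the snapshot taken above
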
diff --git a/libmproxy/protocol/http_wrappers.py b/libmproxy/models/http.py
index ed5759ea..fb2f305b 100644
--- a/libmproxy/protocol/http_wrappers.py
+++ b/libmproxy/models/http.py
@@ -1,47 +1,16 @@
-from __future__ import absolute_import
+from __future__ import (absolute_import, print_function, division)
import Cookie
import copy
-import threading
-import time
-import urllib
-import urlparse
from email.utils import parsedate_tz, formatdate, mktime_tz
+import time
-import netlib
-from netlib import http, tcp, odict, utils, encoding
-from netlib.http import cookies, semantics, http1
-
-from .tcp import TCPHandler
-from .primitives import KILL, ProtocolHandler, Flow, Error
-from ..proxy.connection import ServerConnection
-from .. import utils, controller, stateobject, proxy
-
-
-class decoded(object):
- """
- A context manager that decodes a request or response, and then
- re-encodes it with the same encoding after execution of the block.
-
- Example:
- with decoded(request):
- request.content = request.content.replace("foo", "bar")
- """
-
- def __init__(self, o):
- self.o = o
- ce = o.headers.get_first("content-encoding")
- if ce in encoding.ENCODINGS:
- self.ce = ce
- else:
- self.ce = None
-
- def __enter__(self):
- if self.ce:
- self.o.decode()
-
- def __exit__(self, type, value, tb):
- if self.ce:
- self.o.encode(self.ce)
+from libmproxy import utils
+from netlib import odict, encoding
+from netlib.http import status_codes
+from netlib.tcp import Address
+from netlib.http.semantics import Request, Response, CONTENT_MISSING
+from .. import version, stateobject
+from .flow import Flow
class MessageMixin(stateobject.StateObject):
@@ -124,7 +93,7 @@ class MessageMixin(stateobject.StateObject):
return c
-class HTTPRequest(MessageMixin, semantics.Request):
+class HTTPRequest(MessageMixin, Request):
"""
An HTTP request.
@@ -170,21 +139,21 @@ class HTTPRequest(MessageMixin, semantics.Request):
"""
def __init__(
- self,
- form_in,
- method,
- scheme,
- host,
- port,
- path,
- httpversion,
- headers,
- body,
- timestamp_start=None,
- timestamp_end=None,
- form_out=None,
+ self,
+ form_in,
+ method,
+ scheme,
+ host,
+ port,
+ path,
+ httpversion,
+ headers,
+ body,
+ timestamp_start=None,
+ timestamp_end=None,
+ form_out=None,
):
- semantics.Request.__init__(
+ Request.__init__(
self,
form_in,
method,
@@ -240,31 +209,15 @@ class HTTPRequest(MessageMixin, semantics.Request):
def from_protocol(
self,
protocol,
- include_body=True,
- body_size_limit=None,
+ *args,
+ **kwargs
):
- req = protocol.read_request(
- include_body = include_body,
- body_size_limit = body_size_limit,
- )
-
- return HTTPRequest(
- req.form_in,
- req.method,
- req.scheme,
- req.host,
- req.port,
- req.path,
- req.httpversion,
- req.headers,
- req.body,
- req.timestamp_start,
- req.timestamp_end,
- )
+ req = protocol.read_request(*args, **kwargs)
+ return self.wrap(req)
@classmethod
def wrap(self, request):
- return HTTPRequest(
+ req = HTTPRequest(
form_in=request.form_in,
method=request.method,
scheme=request.scheme,
@@ -278,6 +231,9 @@ class HTTPRequest(MessageMixin, semantics.Request):
timestamp_end=request.timestamp_end,
form_out=(request.form_out if hasattr(request, 'form_out') else None),
)
+ if hasattr(request, 'stream_id'):
+ req.stream_id = request.stream_id
+ return req
def __hash__(self):
return id(self)
@@ -298,7 +254,7 @@ class HTTPRequest(MessageMixin, semantics.Request):
return c
-class HTTPResponse(MessageMixin, semantics.Response):
+class HTTPResponse(MessageMixin, Response):
"""
An HTTP response.
@@ -331,7 +287,7 @@ class HTTPResponse(MessageMixin, semantics.Response):
timestamp_start=None,
timestamp_end=None,
):
- semantics.Response.__init__(
+ Response.__init__(
self,
httpversion,
status_code,
@@ -362,29 +318,15 @@ class HTTPResponse(MessageMixin, semantics.Response):
def from_protocol(
self,
protocol,
- request_method,
- include_body=True,
- body_size_limit=None
+ *args,
+ **kwargs
):
- resp = protocol.read_response(
- request_method,
- body_size_limit,
- include_body=include_body
- )
-
- return HTTPResponse(
- resp.httpversion,
- resp.status_code,
- resp.msg,
- resp.headers,
- resp.body,
- resp.timestamp_start,
- resp.timestamp_end,
- )
+ resp = protocol.read_response(*args, **kwargs)
+ return self.wrap(resp)
@classmethod
def wrap(self, response):
- return HTTPResponse(
+ resp = HTTPResponse(
httpversion=response.httpversion,
status_code=response.status_code,
msg=response.msg,
@@ -393,6 +335,9 @@ class HTTPResponse(MessageMixin, semantics.Response):
timestamp_start=response.timestamp_start,
timestamp_end=response.timestamp_end,
)
+ if hasattr(response, 'stream_id'):
+ resp.stream_id = response.stream_id
+ return resp
def _refresh_cookie(self, c, delta):
"""
@@ -443,3 +388,167 @@ class HTTPResponse(MessageMixin, semantics.Response):
c.append(self._refresh_cookie(i, delta))
if c:
self.headers["set-cookie"] = c
+
+
+class HTTPFlow(Flow):
+ """
+ A HTTPFlow is a collection of objects representing a single HTTP
+ transaction. The main attributes are:
+
+ request: HTTPRequest object
+ response: HTTPResponse object
+ error: Error object
+ server_conn: ServerConnection object
+ client_conn: ClientConnection object
+
+ Note that it's possible for a Flow to have both a response and an error
+ object. This might happen, for instance, when a response was received
+ from the server, but there was an error sending it back to the client.
+
+ The following additional attributes are exposed:
+
+ intercepted: Is this flow currently being intercepted?
+ live: Does this flow have a live client connection?
+ """
+
+ def __init__(self, client_conn, server_conn, live=None):
+ super(HTTPFlow, self).__init__("http", client_conn, server_conn, live)
+ self.request = None
+ """@type: HTTPRequest"""
+ self.response = None
+ """@type: HTTPResponse"""
+
+ _stateobject_attributes = Flow._stateobject_attributes.copy()
+ _stateobject_attributes.update(
+ request=HTTPRequest,
+ response=HTTPResponse
+ )
+
+ @classmethod
+ def from_state(cls, state):
+ f = cls(None, None)
+ f.load_state(state)
+ return f
+
+ def __repr__(self):
+ s = "<HTTPFlow"
+ for a in ("request", "response", "error", "client_conn", "server_conn"):
+ if getattr(self, a, False):
+ s += "\r\n %s = {flow.%s}" % (a, a)
+ s += ">"
+ return s.format(flow=self)
+
+ def copy(self):
+ f = super(HTTPFlow, self).copy()
+ if self.request:
+ f.request = self.request.copy()
+ if self.response:
+ f.response = self.response.copy()
+ return f
+
+ def match(self, f):
+ """
+ Match this flow against a compiled filter expression. Returns True
+ if matched, False if not.
+
+ If f is a string, it will be compiled as a filter expression. If
+ the expression is invalid, ValueError is raised.
+ """
+ if isinstance(f, basestring):
+ from .. import filt
+
+ f = filt.parse(f)
+ if not f:
+ raise ValueError("Invalid filter expression.")
+ if f:
+ return f(self)
+ return True
+
+ def replace(self, pattern, repl, *args, **kwargs):
+ """
+ Replaces a regular expression pattern with repl in both request and
+ response of the flow. Encoded content will be decoded before
+ replacement, and re-encoded afterwards.
+
+ Returns the number of replacements made.
+ """
+ c = self.request.replace(pattern, repl, *args, **kwargs)
+ if self.response:
+ c += self.response.replace(pattern, repl, *args, **kwargs)
+ return c
+
+
+class decoded(object):
+ """
+ A context manager that decodes a request or response, and then
+ re-encodes it with the same encoding after execution of the block.
+
+ Example:
+ with decoded(request):
+ request.content = request.content.replace("foo", "bar")
+ """
+
+ def __init__(self, o):
+ self.o = o
+ ce = o.headers.get_first("content-encoding")
+ if ce in encoding.ENCODINGS:
+ self.ce = ce
+ else:
+ self.ce = None
+
+ def __enter__(self):
+ if self.ce:
+ self.o.decode()
+
+ def __exit__(self, type, value, tb):
+ if self.ce:
+ self.o.encode(self.ce)
+
+
+def make_error_response(status_code, message, headers=None):
+ response = status_codes.RESPONSES.get(status_code, "Unknown")
+ body = """
+ <html>
+ <head>
+ <title>%d %s</title>
+ </head>
+ <body>%s</body>
+ </html>
+ """.strip() % (status_code, response, message)
+
+ if not headers:
+ headers = odict.ODictCaseless()
+ headers["Server"] = [version.NAMEVERSION]
+ headers["Connection"] = ["close"]
+ headers["Content-Length"] = [len(body)]
+ headers["Content-Type"] = ["text/html"]
+
+ return HTTPResponse(
+ (1, 1), # FIXME: Should be a string.
+ status_code,
+ response,
+ headers,
+ body,
+ )
+
+
+def make_connect_request(address):
+ address = Address.wrap(address)
+ return HTTPRequest(
+ "authority", "CONNECT", None, address.host, address.port, None, (1, 1),
+ odict.ODictCaseless(), ""
+ )
+
+
+def make_connect_response(httpversion):
+ headers = odict.ODictCaseless([
+ ["Content-Length", "0"],
+ ["Proxy-Agent", version.NAMEVERSION]
+ ])
+ return HTTPResponse(
+ httpversion,
+ 200,
+ "Connection established",
+ headers,
+ "",
+ )
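+
+
+# Editorial note (not part of this patch): the factory helpers above are
+# consumed by libmproxy.protocol.http; the calls below are illustrative only:
+#
+#     make_error_response(502, "upstream connect failed")
+#     make_connect_request(("example.com", 443))
+#     make_connect_response((1, 1))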
diff --git a/libmproxy/protocol/__init__.py b/libmproxy/protocol/__init__.py
index bbc20dba..c582592b 100644
--- a/libmproxy/protocol/__init__.py
+++ b/libmproxy/protocol/__init__.py
@@ -1 +1,12 @@
-from .primitives import *
+from __future__ import (absolute_import, print_function, division)
+from .base import Layer, ServerConnectionMixin, Log, Kill
+from .http import Http1Layer, Http2Layer
+from .tls import TlsLayer, is_tls_record_magic
+from .rawtcp import RawTCPLayer
+
+__all__ = [
+ "Layer", "ServerConnectionMixin", "Log", "Kill",
+ "Http1Layer", "Http2Layer",
+ "TlsLayer", "is_tls_record_magic",
+ "RawTCPLayer"
+]
diff --git a/libmproxy/protocol/base.py b/libmproxy/protocol/base.py
new file mode 100644
index 00000000..40ec0536
--- /dev/null
+++ b/libmproxy/protocol/base.py
@@ -0,0 +1,179 @@
+"""
+mitmproxy protocol architecture
+
+In mitmproxy, protocols are implemented as a set of layers, which are composed on top of each other.
+For example, the following scenarios depict possible settings (lowest layer first):
+
+Transparent HTTP proxy, no SSL:
+ TransparentProxy
+ Http1Layer
+ HttpLayer
+
+Regular proxy, CONNECT request with WebSockets over SSL:
+ HttpProxy
+ Http1Layer
+ HttpLayer
+    TlsLayer
+    WebsocketLayer (or RawTCPLayer)
+
+Automated protocol detection by peeking into the buffer:
+ TransparentProxy
+    TlsLayer
+ Http2Layer
+ HttpLayer
+
+Communication between layers is done as follows:
+ - lower layers provide context information to higher layers
+ - higher layers can call functions provided by lower layers,
+ which are propagated until they reach a suitable layer.
+
+Further goals:
+ - Connections should always be peekable to make automatic protocol detection work.
+ - Upstream connections should be established as late as possible;
+ inline scripts shall have a chance to handle everything locally.
+"""
+from __future__ import (absolute_import, print_function, division)
+from netlib import tcp
+from ..models import ServerConnection
+from ..exceptions import ProtocolException
+
+
+class _LayerCodeCompletion(object):
+ """
+ Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
+ """
+
+ def __init__(self, *args, **kwargs): # pragma: nocover
+ super(_LayerCodeCompletion, self).__init__(*args, **kwargs)
+ if True:
+ return
+ self.config = None
+ """@type: libmproxy.proxy.ProxyConfig"""
+ self.client_conn = None
+ """@type: libmproxy.models.ClientConnection"""
+ self.server_conn = None
+ """@type: libmproxy.models.ServerConnection"""
+ self.channel = None
+ """@type: libmproxy.controller.Channel"""
+
+
+class Layer(_LayerCodeCompletion):
+ def __init__(self, ctx, *args, **kwargs):
+ """
+ Args:
+ ctx: The (read-only) higher layer.
+ """
+ self.ctx = ctx
+ """@type: libmproxy.protocol.Layer"""
+ super(Layer, self).__init__(*args, **kwargs)
+
+ def __call__(self):
+ """
+ Logic of the layer.
+ Raises:
+ ProtocolException in case of protocol exceptions.
+ """
+ raise NotImplementedError()
+
+ def __getattr__(self, name):
+ """
+ Attributes not present on the current layer may exist on a higher layer.
+ """
+ return getattr(self.ctx, name)
+
+ def log(self, msg, level, subs=()):
+ full_msg = [
+ "{}: {}".format(repr(self.client_conn.address), msg)
+ ]
+ for i in subs:
+ full_msg.append(" -> " + i)
+ full_msg = "\n".join(full_msg)
+ self.channel.tell("log", Log(full_msg, level))
+
+ @property
+ def layers(self):
+ return [self] + self.ctx.layers
+
+ def __repr__(self):
+ return type(self).__name__
+
+
+class ServerConnectionMixin(object):
+ """
+ Mixin that provides a layer with the capabilities to manage a server connection.
+ """
+
+ def __init__(self, server_address=None):
+ super(ServerConnectionMixin, self).__init__()
+ self.server_conn = ServerConnection(server_address)
+ self._check_self_connect()
+
+ def reconnect(self):
+ address = self.server_conn.address
+ self._disconnect()
+ self.server_conn.address = address
+ self.connect()
+
+ def _check_self_connect(self):
+ """
+ We try to protect the proxy from _accidentally_ connecting to itself,
+ e.g. because of a failed transparent lookup or an invalid configuration.
+ """
+ address = self.server_conn.address
+ if address:
+ self_connect = (
+ address.port == self.config.port and
+ address.host in ("localhost", "127.0.0.1", "::1")
+ )
+ if self_connect:
+ raise ProtocolException(
+ "Invalid server address: {}\r\n"
+ "The proxy shall not connect to itself.".format(repr(address))
+ )
+
+ def set_server(self, address, server_tls=None, sni=None, depth=1):
+ if depth == 1:
+ if self.server_conn:
+ self._disconnect()
+ self.log("Set new server address: " + repr(address), "debug")
+ self.server_conn.address = address
+ self._check_self_connect()
+ if server_tls:
+ raise ProtocolException(
+ "Cannot upgrade to TLS, no TLS layer on the protocol stack."
+ )
+ else:
+ self.ctx.set_server(address, server_tls, sni, depth - 1)
+
+ def _disconnect(self):
+ """
+ Deletes (and closes) an existing server connection.
+ """
+ self.log("serverdisconnect", "debug", [repr(self.server_conn.address)])
+ self.server_conn.finish()
+ self.server_conn.close()
+ self.channel.tell("serverdisconnect", self.server_conn)
+ self.server_conn = ServerConnection(None)
+
+ def connect(self):
+ if not self.server_conn.address:
+ raise ProtocolException("Cannot connect to server, no server address given.")
+ self.log("serverconnect", "debug", [repr(self.server_conn.address)])
+ self.channel.ask("serverconnect", self.server_conn)
+ try:
+ self.server_conn.connect()
+ except tcp.NetLibError as e:
+ raise ProtocolException(
+ "Server connection to %s failed: %s" % (repr(self.server_conn.address), e), e)
+
+
+class Log(object):
+ def __init__(self, msg, level="info"):
+ self.msg = msg
+ self.level = level
+
+
+class Kill(Exception):
+ """
+ Kill a connection.
+ """
diff --git a/libmproxy/protocol/handle.py b/libmproxy/protocol/handle.py
deleted file mode 100644
index 49cb3c1b..00000000
--- a/libmproxy/protocol/handle.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from __future__ import absolute_import
-from . import http, tcp
-
-protocols = {
- 'http': dict(handler=http.HTTPHandler, flow=http.HTTPFlow),
- 'tcp': dict(handler=tcp.TCPHandler)
-}
-
-
-def protocol_handler(protocol):
- """
- @type protocol: str
- @returns: libmproxy.protocol.primitives.ProtocolHandler
- """
- if protocol in protocols:
- return protocols[protocol]["handler"]
-
- raise NotImplementedError(
- "Unknown Protocol: %s" %
- protocol) # pragma: nocover
diff --git a/libmproxy/protocol/http.py b/libmproxy/protocol/http.py
index 4c15c80d..7f57d17c 100644
--- a/libmproxy/protocol/http.py
+++ b/libmproxy/protocol/http.py
@@ -1,606 +1,347 @@
-from __future__ import absolute_import
-import Cookie
-import copy
-import threading
-import time
-import urllib
-import urlparse
-from email.utils import parsedate_tz, formatdate, mktime_tz
-
-import netlib
-from netlib import http, tcp, odict, utils, encoding
-from netlib.http import cookies, http1, http2
+from __future__ import (absolute_import, print_function, division)
+
+from netlib import tcp
+from netlib.http import http1, HttpErrorConnClosed, HttpError
from netlib.http.semantics import CONTENT_MISSING
+from netlib import odict
+from netlib.tcp import NetLibError, Address
+from netlib.http.http1 import HTTP1Protocol
+from netlib.http.http2 import HTTP2Protocol
+from netlib.http.http2.frame import WindowUpdateFrame
-from .tcp import TCPHandler
-from .primitives import KILL, ProtocolHandler, Flow, Error
-from ..proxy.connection import ServerConnection
-from .. import utils, controller, stateobject, proxy
-
-from .http_wrappers import decoded, HTTPRequest, HTTPResponse
-
-
-class KillSignal(Exception):
- pass
-
-
-def send_connect_request(conn, host, port, update_state=True):
- upstream_request = HTTPRequest(
- "authority",
- "CONNECT",
- None,
- host,
- port,
- None,
- (1, 1),
- odict.ODictCaseless(),
- ""
- )
-
- # we currently only support HTTP/1 CONNECT requests
- protocol = http1.HTTP1Protocol(conn)
-
- conn.send(protocol.assemble(upstream_request))
- resp = HTTPResponse.from_protocol(protocol, upstream_request.method)
- if resp.status_code != 200:
- raise proxy.ProxyError(resp.status_code,
- "Cannot establish SSL " +
- "connection with upstream proxy: \r\n" +
- repr(resp))
- if update_state:
- conn.state.append(("http", {
- "state": "connect",
- "host": host,
- "port": port}
- ))
- return resp
-
-
-class HTTPFlow(Flow):
- """
- A HTTPFlow is a collection of objects representing a single HTTP
- transaction. The main attributes are:
+from .. import utils
+from ..exceptions import InvalidCredentials, HttpException, ProtocolException
+from ..models import (
+ HTTPFlow, HTTPRequest, HTTPResponse, make_error_response, make_connect_response, Error
+)
+from .base import Layer, Kill
- request: HTTPRequest object
- response: HTTPResponse object
- error: Error object
- server_conn: ServerConnection object
- client_conn: ClientConnection object
- Note that it's possible for a Flow to have both a response and an error
- object. This might happen, for instance, when a response was received
- from the server, but there was an error sending it back to the client.
+class _HttpLayer(Layer):
+ supports_streaming = False
- The following additional attributes are exposed:
+ def read_request(self):
+ raise NotImplementedError()
- intercepted: Is this flow currently being intercepted?
- live: Does this flow have a live client connection?
- """
+ def send_request(self, request):
+ raise NotImplementedError()
- def __init__(self, client_conn, server_conn, live=None):
- super(HTTPFlow, self).__init__("http", client_conn, server_conn, live)
- self.request = None
- """@type: HTTPRequest"""
- self.response = None
- """@type: HTTPResponse"""
-
- _stateobject_attributes = Flow._stateobject_attributes.copy()
- _stateobject_attributes.update(
- request=HTTPRequest,
- response=HTTPResponse
- )
-
- @classmethod
- def from_state(cls, state):
- f = cls(None, None)
- f.load_state(state)
- return f
-
- def __repr__(self):
- s = "<HTTPFlow"
- for a in ("request", "response", "error", "client_conn", "server_conn"):
- if getattr(self, a, False):
- s += "\r\n %s = {flow.%s}" % (a, a)
- s += ">"
- return s.format(flow=self)
-
- def copy(self):
- f = super(HTTPFlow, self).copy()
- if self.request:
- f.request = self.request.copy()
- if self.response:
- f.response = self.response.copy()
- return f
-
- def match(self, f):
- """
- Match this flow against a compiled filter expression. Returns True
- if matched, False if not.
+ def read_response(self, request_method):
+ raise NotImplementedError()
- If f is a string, it will be compiled as a filter expression. If
- the expression is invalid, ValueError is raised.
- """
- if isinstance(f, basestring):
- from .. import filt
+ def send_response(self, response):
+ raise NotImplementedError()
- f = filt.parse(f)
- if not f:
- raise ValueError("Invalid filter expression.")
- if f:
- return f(self)
- return True
- def replace(self, pattern, repl, *args, **kwargs):
- """
- Replaces a regular expression pattern with repl in both request and
- response of the flow. Encoded content will be decoded before
- replacement, and re-encoded afterwards.
+class _StreamingHttpLayer(_HttpLayer):
+ supports_streaming = True
- Returns the number of replacements made.
- """
- c = self.request.replace(pattern, repl, *args, **kwargs)
- if self.response:
- c += self.response.replace(pattern, repl, *args, **kwargs)
- return c
+ def read_response_headers(self):
+ raise NotImplementedError
+ def read_response_body(self, headers, request_method, response_code, max_chunk_size=None):
+ raise NotImplementedError()
+ yield "this is a generator" # pragma: no cover
-class HTTPHandler(ProtocolHandler):
- """
- HTTPHandler implements mitmproxys understanding of the HTTP protocol.
+ def send_response_headers(self, response):
+ raise NotImplementedError
- """
+ def send_response_body(self, response, chunks):
+ raise NotImplementedError()
- def __init__(self, c):
- super(HTTPHandler, self).__init__(c)
- self.expected_form_in = c.config.mode.http_form_in
- self.expected_form_out = c.config.mode.http_form_out
- self.skip_authentication = False
- def handle_messages(self):
- while self.handle_flow():
- pass
+class Http1Layer(_StreamingHttpLayer):
+ def __init__(self, ctx, mode):
+ super(Http1Layer, self).__init__(ctx)
+ self.mode = mode
+ self.client_protocol = HTTP1Protocol(self.client_conn)
+ self.server_protocol = HTTP1Protocol(self.server_conn)
- def get_response_from_server(self, flow):
- self.c.establish_server_connection()
+ def read_request(self):
+ return HTTPRequest.from_protocol(
+ self.client_protocol,
+ body_size_limit=self.config.body_size_limit
+ )
- for attempt in (0, 1):
- try:
- if not self.c.server_conn.protocol:
- # instantiate new protocol if connection does not have one yet
- # TODO: select correct protocol based on ALPN (?)
- self.c.server_conn.protocol = http1.HTTP1Protocol(self.c.server_conn)
- # self.c.server_conn.protocol = http2.HTTP2Protocol(self.c.server_conn)
- # self.c.server_conn.protocol.perform_connection_preface()
-
- self.c.server_conn.send(self.c.server_conn.protocol.assemble(flow.request))
-
- # Only get the headers at first...
- flow.response = HTTPResponse.from_protocol(
- self.c.server_conn.protocol,
- flow.request.method,
- body_size_limit=self.c.config.body_size_limit,
- include_body=False,
- )
- break
- except (tcp.NetLibError, http.HttpErrorConnClosed) as v:
- self.c.log(
- "error in server communication: %s" % repr(v),
- level="debug"
- )
- if attempt == 0:
- # In any case, we try to reconnect at least once. This is
- # necessary because it might be possible that we already
- # initiated an upstream connection after clientconnect that
- # has already been expired, e.g consider the following event
- # log:
- # > clientconnect (transparent mode destination known)
- # > serverconnect
- # > read n% of large request
- # > server detects timeout, disconnects
- # > read (100-n)% of large request
- # > send large request upstream
- self.c.server_reconnect()
- else:
- raise
+ def send_request(self, request):
+ self.server_conn.send(self.server_protocol.assemble(request))
- # call the appropriate script hook - this is an opportunity for an
- # inline script to set flow.stream = True
- flow = self.c.channel.ask("responseheaders", flow)
- if flow is None or flow == KILL:
- raise KillSignal()
- else:
- # now get the rest of the request body, if body still needs to be
- # read but not streaming this response
- if flow.response.stream:
- flow.response.content = CONTENT_MISSING
- else:
- if isinstance(self.c.server_conn.protocol, http1.HTTP1Protocol):
- # streaming is only supported with HTTP/1 at the moment
- flow.response.content = self.c.server_conn.protocol.read_http_body(
- flow.response.headers,
- self.c.config.body_size_limit,
- flow.request.method,
- flow.response.code,
- False
- )
- flow.response.timestamp_end = utils.timestamp()
-
- def handle_flow(self):
- flow = HTTPFlow(self.c.client_conn, self.c.server_conn, self.live)
+ def read_response(self, request_method):
+ return HTTPResponse.from_protocol(
+ self.server_protocol,
+ request_method=request_method,
+ body_size_limit=self.config.body_size_limit,
+ include_body=True
+ )
- try:
- try:
- if not flow.client_conn.protocol:
- # instantiate new protocol if connection does not have one yet
- # the first request might be a CONNECT - which is currently only supported with HTTP/1
- flow.client_conn.protocol = http1.HTTP1Protocol(self.c.client_conn)
-
- req = HTTPRequest.from_protocol(
- flow.client_conn.protocol,
- body_size_limit=self.c.config.body_size_limit
- )
- except tcp.NetLibError:
- # don't throw an error for disconnects that happen
- # before/between requests.
- return False
-
- self.c.log(
- "request",
- "debug",
- [repr(req)]
- )
- ret = self.process_request(flow, req)
- if ret:
- # instantiate new protocol if connection does not have one yet
- # TODO: select correct protocol based on ALPN (?)
- flow.client_conn.protocol = http1.HTTP1Protocol(self.c.client_conn)
- # flow.client_conn.protocol = http2.HTTP2Protocol(self.c.client_conn, is_server=True)
- if ret is not None:
- return ret
-
- # Be careful NOT to assign the request to the flow before
- # process_request completes. This is because the call can raise an
- # exception. If the request object is already attached, this results
- # in an Error object that has an attached request that has not been
- # sent through to the Master.
- flow.request = req
- request_reply = self.c.channel.ask("request", flow)
- if request_reply is None or request_reply == KILL:
- raise KillSignal()
-
- # The inline script may have changed request.host
- self.process_server_address(flow)
-
- if isinstance(request_reply, HTTPResponse):
- flow.response = request_reply
- else:
- self.get_response_from_server(flow)
+ def send_response(self, response):
+ self.client_conn.send(self.client_protocol.assemble(response))
- # no further manipulation of self.c.server_conn beyond this point
- # we can safely set it as the final attribute value here.
- flow.server_conn = self.c.server_conn
+ def read_response_headers(self):
+ return HTTPResponse.from_protocol(
+ self.server_protocol,
+ request_method=None, # does not matter if we don't read the body.
+ body_size_limit=self.config.body_size_limit,
+ include_body=False
+ )
- self.c.log(
- "response",
- "debug",
- [repr(flow.response)]
- )
- response_reply = self.c.channel.ask("response", flow)
- if response_reply is None or response_reply == KILL:
- raise KillSignal()
-
- self.send_response_to_client(flow)
-
- if self.check_close_connection(flow):
- return False
-
- # We sent a CONNECT request to an upstream proxy.
- if flow.request.form_in == "authority" and flow.response.code == 200:
- # TODO: Possibly add headers (memory consumption/usefulness
- # tradeoff) Make sure to add state info before the actual
- # processing of the CONNECT request happens. During an SSL
- # upgrade, we may receive an SNI indication from the client,
- # which resets the upstream connection. If this is the case, we
- # must already re-issue the CONNECT request at this point.
- self.c.server_conn.state.append(
- (
- "http", {
- "state": "connect",
- "host": flow.request.host,
- "port": flow.request.port
- }
- )
- )
- if not self.process_connect_request(
- (flow.request.host, flow.request.port)):
- return False
-
- # If the user has changed the target server on this connection,
- # restore the original target server
- flow.live.restore_server()
-
- return True # Next flow please.
- except (
- http.HttpAuthenticationError,
- http.HttpError,
- proxy.ProxyError,
- tcp.NetLibError,
- ) as e:
- self.handle_error(e, flow)
- except KillSignal:
- self.c.log("Connection killed", "info")
- finally:
- flow.live = None # Connection is not live anymore.
- return False
-
- def handle_server_reconnect(self, state):
- if state["state"] == "connect":
- send_connect_request(
- self.c.server_conn,
- state["host"],
- state["port"],
- update_state=False
- )
- else: # pragma: nocover
- raise RuntimeError("Unknown State: %s" % state["state"])
-
- def handle_error(self, error, flow=None):
- message = repr(error)
- message_debug = None
-
- if isinstance(error, tcp.NetLibError):
- message = None
- message_debug = "TCP connection closed unexpectedly."
- elif "tlsv1 alert unknown ca" in message:
- message = "TLSv1 Alert Unknown CA: The client does not trust the proxy's certificate."
- elif "handshake error" in message:
- message_debug = message
- message = "SSL handshake error: The client may not trust the proxy's certificate."
-
- if message:
- self.c.log(message, level="info")
- if message_debug:
- self.c.log(message_debug, level="debug")
-
- if flow:
- # TODO: no flows without request or with both request and response
- # at the moment.
- if flow.request and not flow.response:
- flow.error = Error(message or message_debug)
- self.c.channel.ask("error", flow)
- try:
- status_code = getattr(error, "code", 502)
- headers = getattr(error, "headers", None)
-
- html_message = message or ""
- if message_debug:
- html_message += "<pre>%s</pre>" % message_debug
- self.send_error(status_code, html_message, headers)
- except:
- pass
-
- def send_error(self, status_code, message, headers):
- response = http.status_codes.RESPONSES.get(status_code, "Unknown")
- body = """
- <html>
- <head>
- <title>%d %s</title>
- </head>
- <body>%s</body>
- </html>
- """ % (status_code, response, message)
-
- if not headers:
- headers = odict.ODictCaseless()
- assert isinstance(headers, odict.ODictCaseless)
-
- headers["Server"] = [self.c.config.server_version]
- headers["Connection"] = ["close"]
- headers["Content-Length"] = [len(body)]
- headers["Content-Type"] = ["text/html"]
-
- resp = HTTPResponse(
- (1, 1), # if HTTP/2 is used, this value is ignored anyway
- status_code,
- response,
+ def read_response_body(self, headers, request_method, response_code, max_chunk_size=None):
+ return self.server_protocol.read_http_body_chunked(
headers,
- body,
+ self.config.body_size_limit,
+ request_method,
+ response_code,
+ False,
+ max_chunk_size
)
- # if no protocol is assigned yet - just assume HTTP/1
- # TODO: maybe check ALPN and use HTTP/2 if required?
- protocol = self.c.client_conn.protocol or http1.HTTP1Protocol(self.c.client_conn)
- self.c.client_conn.send(protocol.assemble(resp))
-
- def process_request(self, flow, request):
- """
- @returns:
- True, if the request should not be sent upstream
- False, if the connection should be aborted
- None, if the request should be sent upstream
- (a status code != None should be returned directly by handle_flow)
- """
-
- if not self.skip_authentication:
- self.authenticate(request)
+ def send_response_headers(self, response):
+ h = self.client_protocol._assemble_response_first_line(response)
+ self.client_conn.wfile.write(h + "\r\n")
+ h = self.client_protocol._assemble_response_headers(
+ response,
+ preserve_transfer_encoding=True
+ )
+ self.client_conn.send(h + "\r\n")
- # Determine .scheme, .host and .port attributes
- # For absolute-form requests, they are directly given in the request.
- # For authority-form requests, we only need to determine the request scheme.
- # For relative-form requests, we need to determine host and port as
- # well.
- if not request.scheme:
- request.scheme = "https" if flow.server_conn and flow.server_conn.ssl_established else "http"
- if not request.host:
- # Host/Port Complication: In upstream mode, use the server we CONNECTed to,
- # not the upstream proxy.
- if flow.server_conn:
- for s in flow.server_conn.state:
- if s[0] == "http" and s[1]["state"] == "connect":
- request.host, request.port = s[1]["host"], s[1]["port"]
- if not request.host and flow.server_conn:
- request.host, request.port = flow.server_conn.address.host, flow.server_conn.address.port
-
-
- # Now we can process the request.
- if request.form_in == "authority":
- if self.c.client_conn.ssl_established:
- raise http.HttpError(
- 400,
- "Must not CONNECT on already encrypted connection"
- )
-
- if self.c.config.mode == "regular":
- self.c.set_server_address((request.host, request.port))
- # Update server_conn attribute on the flow
- flow.server_conn = self.c.server_conn
-
- # since we currently only support HTTP/1 CONNECT requests
- # the response must be HTTP/1 as well
- self.c.client_conn.send(
- ('HTTP/%s.%s 200 ' % (request.httpversion[0], request.httpversion[1])) +
- 'Connection established\r\n' +
- 'Content-Length: 0\r\n' +
- ('Proxy-agent: %s\r\n' % self.c.config.server_version) +
- '\r\n'
- )
- return self.process_connect_request(self.c.server_conn.address)
- elif self.c.config.mode == "upstream":
- return None
- else:
- # CONNECT should never occur if we don't expect absolute-form
- # requests
- pass
-
- elif request.form_in == self.expected_form_in:
- request.form_out = self.expected_form_out
- if request.form_in == "absolute":
- if request.scheme != "http":
- raise http.HttpError(
- 400,
- "Invalid request scheme: %s" % request.scheme
- )
- if self.c.config.mode == "regular":
- # Update info so that an inline script sees the correct
- # value at flow.server_conn
- self.c.set_server_address((request.host, request.port))
- flow.server_conn = self.c.server_conn
-
- elif request.form_in == "relative":
- if self.c.config.mode == "spoof":
- # Host header
- h = request.pretty_host(hostheader=True)
- if h is None:
- raise http.HttpError(
- 400,
- "Invalid request: No host information"
- )
- p = netlib.utils.parse_url("http://" + h)
- request.scheme = p[0]
- request.host = p[1]
- request.port = p[2]
- self.c.set_server_address((request.host, request.port))
- flow.server_conn = self.c.server_conn
-
- if self.c.config.mode == "sslspoof":
- # SNI is processed in server.py
- if not (flow.server_conn and flow.server_conn.ssl_established):
- raise http.HttpError(
- 400,
- "Invalid request: No host information"
- )
-
- return None
-
- raise http.HttpError(
- 400, "Invalid HTTP request form (expected: %s, got: %s)" % (
- self.expected_form_in, request.form_in
+ def send_response_body(self, response, chunks):
+ if self.client_protocol.has_chunked_encoding(response.headers):
+ chunks = (
+ "%d\r\n%s\r\n" % (len(chunk), chunk)
+ for chunk in chunks
)
+ for chunk in chunks:
+ self.client_conn.send(chunk)
+
+ def connect(self):
+ self.ctx.connect()
+ self.server_protocol = HTTP1Protocol(self.server_conn)
+
+ def reconnect(self):
+ self.ctx.reconnect()
+ self.server_protocol = HTTP1Protocol(self.server_conn)
+
+ def set_server(self, *args, **kwargs):
+ self.ctx.set_server(*args, **kwargs)
+ self.server_protocol = HTTP1Protocol(self.server_conn)
+
+ def __call__(self):
+ layer = HttpLayer(self, self.mode)
+ layer()
+
+
+# TODO: The HTTP2 layer is missing multiplexing, which requires a major rewrite.
+class Http2Layer(_HttpLayer):
+ def __init__(self, ctx, mode):
+ super(Http2Layer, self).__init__(ctx)
+ self.mode = mode
+ self.client_protocol = HTTP2Protocol(self.client_conn, is_server=True,
+ unhandled_frame_cb=self.handle_unexpected_frame)
+ self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
+ unhandled_frame_cb=self.handle_unexpected_frame)
+
+ def read_request(self):
+ request = HTTPRequest.from_protocol(
+ self.client_protocol,
+ body_size_limit=self.config.body_size_limit
+ )
+ self._stream_id = request.stream_id
+ return request
+
+ def send_request(self, message):
+ # TODO: implement flow control and WINDOW_UPDATE frames
+ self.server_conn.send(self.server_protocol.assemble(message))
+
+ def read_response(self, request_method):
+ return HTTPResponse.from_protocol(
+ self.server_protocol,
+ request_method=request_method,
+ body_size_limit=self.config.body_size_limit,
+ include_body=True,
+ stream_id=self._stream_id
)
- def process_server_address(self, flow):
- # Depending on the proxy mode, server handling is entirely different
- # We provide a mostly unified API to the user, which needs to be
- # unfiddled here
- # ( See also: https://github.com/mitmproxy/mitmproxy/issues/337 )
- address = tcp.Address((flow.request.host, flow.request.port))
+ def send_response(self, message):
+ # TODO: implement flow control and WINDOW_UPDATE frames
+ self.client_conn.send(self.client_protocol.assemble(message))
+
+ def connect(self):
+ self.ctx.connect()
+ self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
+ unhandled_frame_cb=self.handle_unexpected_frame)
+ self.server_protocol.perform_connection_preface()
+
+ def reconnect(self):
+ self.ctx.reconnect()
+ self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
+ unhandled_frame_cb=self.handle_unexpected_frame)
+ self.server_protocol.perform_connection_preface()
+
+ def set_server(self, *args, **kwargs):
+ self.ctx.set_server(*args, **kwargs)
+ self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
+ unhandled_frame_cb=self.handle_unexpected_frame)
+ self.server_protocol.perform_connection_preface()
+
+ def __call__(self):
+ self.server_protocol.perform_connection_preface()
+ layer = HttpLayer(self, self.mode)
+ layer()
+
+ def handle_unexpected_frame(self, frame):
+ if isinstance(frame, WindowUpdateFrame):
+            # Clients send WindowUpdate frames as part of their flow control
+            # algorithm. Since we cannot predict these frames and do not need
+            # to respond to them, we simply accept them and hide them from the
+            # log. Ideally, we would keep track of our own flow control window
+            # and stall transmission if the outgoing flow control buffer is full.
+ return
+ self.log("Unexpected HTTP2 Frame: %s" % frame.human_readable(), "info")
+
+
+class ConnectServerConnection(object):
+ """
+ "Fake" ServerConnection to represent state after a CONNECT request to an upstream proxy.
+ """
- ssl = (flow.request.scheme == "https")
-
- if self.c.config.mode == "upstream":
- # The connection to the upstream proxy may have a state we may need
- # to take into account.
- connected_to = None
- for s in flow.server_conn.state:
- if s[0] == "http" and s[1]["state"] == "connect":
- connected_to = tcp.Address((s[1]["host"], s[1]["port"]))
-
- # We need to reconnect if the current flow either requires a
- # (possibly impossible) change to the connection state, e.g. the
- # host has changed but we already CONNECTed somewhere else.
- needs_server_change = (
- ssl != self.c.server_conn.ssl_established
- or
- # HTTP proxying is "stateless", CONNECT isn't.
- (connected_to and address != connected_to)
- )
+ def __init__(self, address, ctx):
+ self.address = tcp.Address.wrap(address)
+ self._ctx = ctx
- if needs_server_change:
- # force create new connection to the proxy server to reset
- # state
- self.live.change_server(self.c.server_conn.address, force=True)
- if ssl:
- send_connect_request(
- self.c.server_conn,
- address.host,
- address.port
- )
- self.c.establish_ssl(server=True)
- else:
- # If we're not in upstream mode, we just want to update the host
- # and possibly establish TLS. This is a no op if the addresses
- # match.
- self.live.change_server(address, ssl=ssl)
+ @property
+ def via(self):
+ return self._ctx.server_conn
- flow.server_conn = self.c.server_conn
+ def __getattr__(self, item):
+ return getattr(self.via, item)
- def send_response_to_client(self, flow):
- if not flow.response.stream:
- # no streaming:
- # we already received the full response from the server and can
- # send it to the client straight away.
- self.c.client_conn.send(self.c.client_conn.protocol.assemble(flow.response))
+ def __nonzero__(self):
+ return bool(self.via)
+
+
+class UpstreamConnectLayer(Layer):
+ def __init__(self, ctx, connect_request):
+ super(UpstreamConnectLayer, self).__init__(ctx)
+ self.connect_request = connect_request
+ self.server_conn = ConnectServerConnection(
+ (connect_request.host, connect_request.port),
+ self.ctx
+ )
+
+ def __call__(self):
+ layer = self.ctx.next_layer(self)
+ layer()
+
+ def _send_connect_request(self):
+ self.send_request(self.connect_request)
+ resp = self.read_response("CONNECT")
+ if resp.code != 200:
+ raise ProtocolException("Reconnect: Upstream server refuses CONNECT request")
+
+ def connect(self):
+ if not self.server_conn:
+ self.ctx.connect()
+ self._send_connect_request()
else:
- if isinstance(self.c.client_conn.protocol, http2.HTTP2Protocol):
- raise NotImplementedError("HTTP streaming with HTTP/2 is currently not supported.")
+ pass # swallow the message
+
+ def reconnect(self):
+ self.ctx.reconnect()
+ self._send_connect_request()
+
+ def set_server(self, address, server_tls=None, sni=None, depth=1):
+ if depth == 1:
+ if self.ctx.server_conn:
+ self.ctx.reconnect()
+ address = Address.wrap(address)
+ self.connect_request.host = address.host
+ self.connect_request.port = address.port
+ self.server_conn.address = address
+ else:
+ self.ctx.set_server(address, server_tls, sni, depth - 1)
- # streaming:
- # First send the headers and then transfer the response
- # incrementally:
- h = self.c.client_conn.protocol._assemble_response_first_line(flow.response)
- self.c.client_conn.send(h + "\r\n")
- h = self.c.client_conn.protocol._assemble_response_headers(flow.response, preserve_transfer_encoding=True)
- self.c.client_conn.send(h + "\r\n")
-
- chunks = self.c.server_conn.protocol.read_http_body_chunked(
- flow.response.headers,
- self.c.config.body_size_limit,
- flow.request.method,
- flow.response.code,
- False,
- 4096
- )
+class HttpLayer(Layer):
+ def __init__(self, ctx, mode):
+ super(HttpLayer, self).__init__(ctx)
+ self.mode = mode
+ self.__original_server_conn = None
+ "Contains the original destination in transparent mode, which needs to be restored"
+ "if an inline script modified the target server for a single http request"
- if callable(flow.response.stream):
- chunks = flow.response.stream(chunks)
+ def __call__(self):
+ if self.mode == "transparent":
+ self.__original_server_conn = self.server_conn
+ while True:
+ try:
+ flow = HTTPFlow(self.client_conn, self.server_conn, live=self)
+
+ try:
+ request = self.read_request()
+ except tcp.NetLibError:
+ # don't throw an error for disconnects that happen
+ # before/between requests.
+ return
+
+ self.log("request", "debug", [repr(request)])
+
+ # Handle Proxy Authentication
+ self.authenticate(request)
+
+ # Regular Proxy Mode: Handle CONNECT
+ if self.mode == "regular" and request.form_in == "authority":
+ self.handle_regular_mode_connect(request)
+ return
+
+ # Make sure that the incoming request matches our expectations
+ self.validate_request(request)
+
+ flow.request = request
+ self.process_request_hook(flow)
+
+ if not flow.response:
+ self.establish_server_connection(flow)
+ self.get_response_from_server(flow)
+
+ self.send_response_to_client(flow)
+
+ if self.check_close_connection(flow):
+ return
+
+ # TODO: Implement HTTP Upgrade
+
+ # Upstream Proxy Mode: Handle CONNECT
+ if flow.request.form_in == "authority" and flow.response.code == 200:
+ self.handle_upstream_mode_connect(flow.request.copy())
+ return
+
+ except (HttpErrorConnClosed, NetLibError, HttpError, ProtocolException) as e:
+ if flow.request and not flow.response:
+ flow.error = Error(repr(e))
+ self.channel.ask("error", flow)
+ try:
+ self.send_response(make_error_response(
+ getattr(e, "code", 502),
+ repr(e)
+ ))
+ except NetLibError:
+ pass
+ if isinstance(e, ProtocolException):
+ raise e
+ else:
+ raise ProtocolException("Error in HTTP connection: %s" % repr(e), e)
+ finally:
+ flow.live = False
- for chunk in chunks:
- for part in chunk:
- self.c.client_conn.wfile.write(part)
- self.c.client_conn.wfile.flush()
+ def handle_regular_mode_connect(self, request):
+ self.set_server((request.host, request.port))
+ self.send_response(make_connect_response(request.httpversion))
+ layer = self.ctx.next_layer(self)
+ layer()
- flow.response.timestamp_end = utils.timestamp()
+ def handle_upstream_mode_connect(self, connect_request):
+ layer = UpstreamConnectLayer(self, connect_request)
+ layer()
def check_close_connection(self, flow):
"""
@@ -622,157 +363,183 @@ class HTTPHandler(ProtocolHandler):
False,
flow.request.method,
flow.response.code) == -1
- )
- if close_connection:
- if flow.request.form_in == "authority" and flow.response.code == 200:
- # Workaround for
- # https://github.com/mitmproxy/mitmproxy/issues/313: Some
- # proxies (e.g. Charles) send a CONNECT response with HTTP/1.0
- # and no Content-Length header
- pass
- else:
- return True
- return False
+ )
+ if flow.request.form_in == "authority" and flow.response.code == 200:
+ # Workaround for
+ # https://github.com/mitmproxy/mitmproxy/issues/313: Some
+ # proxies (e.g. Charles) send a CONNECT response with HTTP/1.0
+ # and no Content-Length header
- def process_connect_request(self, address):
- """
- Process a CONNECT request.
- Returns True if the CONNECT request has been processed successfully.
- Returns False, if the connection should be closed immediately.
- """
- address = tcp.Address.wrap(address)
- if self.c.config.check_ignore(address):
- self.c.log("Ignore host: %s:%s" % address(), "info")
- TCPHandler(self.c, log=False).handle_messages()
return False
+ return close_connection
+
+ def send_response_to_client(self, flow):
+ if not (self.supports_streaming and flow.response.stream):
+ # no streaming:
+ # we already received the full response from the server and can
+ # send it to the client straight away.
+ self.send_response(flow.response)
else:
- self.expected_form_in = "relative"
- self.expected_form_out = "relative"
- self.skip_authentication = True
-
- # In practice, nobody issues a CONNECT request to send unencrypted
- # HTTP requests afterwards. If we don't delegate to TCP mode, we
- # should always negotiate a SSL connection.
- #
- # FIXME: Turns out the previous statement isn't entirely true.
- # Chrome on Windows CONNECTs to :80 if an explicit proxy is
- # configured and a websocket connection should be established. We
- # don't support websocket at the moment, so it fails anyway, but we
- # should come up with a better solution to this if we start to
- # support WebSockets.
- should_establish_ssl = (
- address.port in self.c.config.ssl_ports
- or
- not self.c.config.check_tcp(address)
+ # streaming:
+ # First send the headers and then transfer the response incrementally
+ self.send_response_headers(flow.response)
+ chunks = self.read_response_body(
+ flow.response.headers,
+ flow.request.method,
+ flow.response.code,
+ max_chunk_size=4096
)
+ if callable(flow.response.stream):
+ chunks = flow.response.stream(chunks)
+ self.send_response_body(flow.response, chunks)
+ flow.response.timestamp_end = utils.timestamp()
- if should_establish_ssl:
- self.c.log(
- "Received CONNECT request to SSL port. "
- "Upgrading to SSL...", "debug"
- )
- server_ssl = not self.c.config.no_upstream_cert
- if server_ssl:
- self.c.establish_server_connection()
- self.c.establish_ssl(server=server_ssl, client=True)
- self.c.log("Upgrade to SSL completed.", "debug")
-
- if self.c.config.check_tcp(address):
- self.c.log(
- "Generic TCP mode for host: %s:%s" % address(),
- "info"
- )
- TCPHandler(self.c).handle_messages()
- return False
-
- return True
+ def get_response_from_server(self, flow):
+ def get_response():
+ self.send_request(flow.request)
+ if self.supports_streaming:
+ flow.response = self.read_response_headers()
+ else:
+ flow.response = self.read_response(flow.request.method)
- def authenticate(self, request):
- if self.c.config.authenticator:
- if self.c.config.authenticator.authenticate(request.headers):
- self.c.config.authenticator.clean(request.headers)
+ try:
+ get_response()
+ except (tcp.NetLibError, HttpErrorConnClosed) as v:
+ self.log(
+ "server communication error: %s" % repr(v),
+ level="debug"
+ )
+            # In any case, we try to reconnect at least once. This is
+            # necessary because we may already have initiated an upstream
+            # connection after clientconnect that has since expired, e.g.
+            # consider the following event log:
+ # > clientconnect (transparent mode destination known)
+ # > serverconnect (required for client tls handshake)
+ # > read n% of large request
+ # > server detects timeout, disconnects
+ # > read (100-n)% of large request
+ # > send large request upstream
+ self.reconnect()
+ get_response()
+
+ # call the appropriate script hook - this is an opportunity for an
+ # inline script to set flow.stream = True
+ flow = self.channel.ask("responseheaders", flow)
+ if flow == Kill:
+ raise Kill()
+
+ if self.supports_streaming:
+ if flow.response.stream:
+ flow.response.content = CONTENT_MISSING
else:
- raise http.HttpAuthenticationError(
- self.c.config.authenticator.auth_challenge_headers())
- return request.headers
+ flow.response.content = "".join(self.read_response_body(
+ flow.response.headers,
+ flow.request.method,
+ flow.response.code
+ ))
+ flow.response.timestamp_end = utils.timestamp()
+ # no further manipulation of self.server_conn beyond this point
+ # we can safely set it as the final attribute value here.
+ flow.server_conn = self.server_conn
-class RequestReplayThread(threading.Thread):
- name = "RequestReplayThread"
+ self.log(
+ "response",
+ "debug",
+ [repr(flow.response)]
+ )
+ response_reply = self.channel.ask("response", flow)
+ if response_reply == Kill:
+ raise Kill()
- def __init__(self, config, flow, masterq, should_exit):
- """
- masterqueue can be a queue or None, if no scripthooks should be
- processed.
- """
- self.config, self.flow = config, flow
- if masterq:
- self.channel = controller.Channel(masterq, should_exit)
+ def process_request_hook(self, flow):
+ # Determine .scheme, .host and .port attributes for inline scripts.
+ # For absolute-form requests, they are directly given in the request.
+ # For authority-form requests, we only need to determine the request scheme.
+ # For relative-form requests, we need to determine host and port as
+ # well.
+ if self.mode == "regular":
+ pass # only absolute-form at this point, nothing to do here.
+ elif self.mode == "upstream":
+ if flow.request.form_in == "authority":
+ flow.request.scheme = "http" # pseudo value
+ else:
+ flow.request.host = self.__original_server_conn.address.host
+ flow.request.port = self.__original_server_conn.address.port
+ flow.request.scheme = "https" if self.__original_server_conn.tls_established else "http"
+
+ request_reply = self.channel.ask("request", flow)
+ if request_reply == Kill:
+ raise Kill()
+ if isinstance(request_reply, HTTPResponse):
+ flow.response = request_reply
+ return
+
+ def establish_server_connection(self, flow):
+ address = tcp.Address((flow.request.host, flow.request.port))
+ tls = (flow.request.scheme == "https")
+
+ if self.mode == "regular" or self.mode == "transparent":
+ # If there's an existing connection that doesn't match our expectations, kill it.
+ if address != self.server_conn.address or tls != self.server_conn.ssl_established:
+ self.set_server(address, tls, address.host)
+            # Establish connection if necessary.
+ if not self.server_conn:
+ self.connect()
else:
- self.channel = None
- super(RequestReplayThread, self).__init__()
+ if not self.server_conn:
+ self.connect()
+ if tls:
+ raise HttpException("Cannot change scheme in upstream proxy mode.")
+ """
+ # This is a very ugly (untested) workaround to solve a very ugly problem.
+ if self.server_conn and self.server_conn.tls_established and not ssl:
+ self.reconnect()
+ elif ssl and not hasattr(self, "connected_to") or self.connected_to != address:
+ if self.server_conn.tls_established:
+ self.reconnect()
+
+ self.send_request(make_connect_request(address))
+ tls_layer = TlsLayer(self, False, True)
+ tls_layer._establish_tls_with_server()
+ """
+
+ def validate_request(self, request):
+ if request.form_in == "absolute" and request.scheme != "http":
+ self.send_response(
+ make_error_response(400, "Invalid request scheme: %s" % request.scheme))
+ raise HttpException("Invalid request scheme: %s" % request.scheme)
+
+ expected_request_forms = {
+ "regular": ("absolute",), # an authority request would already be handled.
+ "upstream": ("authority", "absolute"),
+ "transparent": ("relative",)
+ }
+
+ allowed_request_forms = expected_request_forms[self.mode]
+ if request.form_in not in allowed_request_forms:
+ err_message = "Invalid HTTP request form (expected: %s, got: %s)" % (
+ " or ".join(allowed_request_forms), request.form_in
+ )
+ self.send_response(make_error_response(400, err_message))
+ raise HttpException(err_message)
- def run(self):
- r = self.flow.request
- form_out_backup = r.form_out
- try:
- self.flow.response = None
-
- # If we have a channel, run script hooks.
- if self.channel:
- request_reply = self.channel.ask("request", self.flow)
- if request_reply is None or request_reply == KILL:
- raise KillSignal()
- elif isinstance(request_reply, HTTPResponse):
- self.flow.response = request_reply
-
- if not self.flow.response:
- # In all modes, we directly connect to the server displayed
- if self.config.mode == "upstream":
- server_address = self.config.mode.get_upstream_server(
- self.flow.client_conn
- )[2:]
- server = ServerConnection(server_address)
- server.connect()
- if r.scheme == "https":
- send_connect_request(server, r.host, r.port)
- server.establish_ssl(
- self.config.clientcerts,
- sni=self.flow.server_conn.sni
- )
- r.form_out = "relative"
- else:
- r.form_out = "absolute"
- else:
- server_address = (r.host, r.port)
- server = ServerConnection(server_address)
- server.connect()
- if r.scheme == "https":
- server.establish_ssl(
- self.config.clientcerts,
- sni=self.flow.server_conn.sni
- )
- r.form_out = "relative"
-
- server.send(self.flow.server_conn.protocol.assemble(r))
- self.flow.server_conn = server
- self.flow.server_conn.protocol = http1.HTTP1Protocol(self.flow.server_conn)
- self.flow.response = HTTPResponse.from_protocol(
- self.flow.server_conn.protocol,
- r.method,
- body_size_limit=self.config.body_size_limit,
- )
- if self.channel:
- response_reply = self.channel.ask("response", self.flow)
- if response_reply is None or response_reply == KILL:
- raise KillSignal()
- except (proxy.ProxyError, http.HttpError, tcp.NetLibError) as v:
- self.flow.error = Error(repr(v))
- if self.channel:
- self.channel.ask("error", self.flow)
- except KillSignal:
- # KillSignal should only be raised if there's a channel in the
- # first place.
- self.channel.tell("log", proxy.Log("Connection killed", "info"))
- finally:
- r.form_out = form_out_backup
+ if self.mode == "regular":
+ request.form_out = "relative"
+
+ def authenticate(self, request):
+ if self.config.authenticator:
+ if self.config.authenticator.authenticate(request.headers):
+ self.config.authenticator.clean(request.headers)
+ else:
+ self.send_response(make_error_response(
+ 407,
+ "Proxy Authentication Required",
+ odict.ODictCaseless(
+ [
+ [k, v] for k, v in
+ self.config.authenticator.auth_challenge_headers().items()
+ ])
+ ))
+ raise InvalidCredentials("Proxy Authentication Required")
diff --git a/libmproxy/protocol/http_replay.py b/libmproxy/protocol/http_replay.py
new file mode 100644
index 00000000..2759a019
--- /dev/null
+++ b/libmproxy/protocol/http_replay.py
@@ -0,0 +1,96 @@
+from __future__ import (absolute_import, print_function, division)
+import threading
+
+from netlib.http import HttpError
+from netlib.http.http1 import HTTP1Protocol
+from netlib.tcp import NetLibError
+from ..controller import Channel
+from ..models import Error, HTTPResponse, ServerConnection, make_connect_request
+from .base import Log, Kill
+
+
+# TODO: Doesn't really belong into libmproxy.protocol...
+
+
+class RequestReplayThread(threading.Thread):
+ name = "RequestReplayThread"
+
+ def __init__(self, config, flow, masterq, should_exit):
+ """
+        masterq may be a queue or None if no script hooks should be
+        processed.
+ """
+ self.config, self.flow = config, flow
+ if masterq:
+ self.channel = Channel(masterq, should_exit)
+ else:
+ self.channel = None
+ super(RequestReplayThread, self).__init__()
+
+ def run(self):
+ r = self.flow.request
+ form_out_backup = r.form_out
+ try:
+ self.flow.response = None
+
+ # If we have a channel, run script hooks.
+ if self.channel:
+ request_reply = self.channel.ask("request", self.flow)
+ if request_reply == Kill:
+ raise Kill()
+ elif isinstance(request_reply, HTTPResponse):
+ self.flow.response = request_reply
+
+ if not self.flow.response:
+ # In all modes, we directly connect to the server displayed
+ if self.config.mode == "upstream":
+ server_address = self.config.upstream_server.address
+ server = ServerConnection(server_address)
+ server.connect()
+ protocol = HTTP1Protocol(server)
+ if r.scheme == "https":
+ connect_request = make_connect_request((r.host, r.port))
+ server.send(protocol.assemble(connect_request))
+ resp = protocol.read_response("CONNECT")
+ if resp.code != 200:
+ raise HttpError(502, "Upstream server refuses CONNECT request")
+ server.establish_ssl(
+ self.config.clientcerts,
+ sni=self.flow.server_conn.sni
+ )
+ r.form_out = "relative"
+ else:
+ r.form_out = "absolute"
+ else:
+ server_address = (r.host, r.port)
+ server = ServerConnection(server_address)
+ server.connect()
+ protocol = HTTP1Protocol(server)
+ if r.scheme == "https":
+ server.establish_ssl(
+ self.config.clientcerts,
+ sni=self.flow.server_conn.sni
+ )
+ r.form_out = "relative"
+
+ server.send(protocol.assemble(r))
+ self.flow.server_conn = server
+ self.flow.response = HTTPResponse.from_protocol(
+ protocol,
+ r.method,
+ body_size_limit=self.config.body_size_limit,
+ )
+ if self.channel:
+ response_reply = self.channel.ask("response", self.flow)
+ if response_reply == Kill:
+ raise Kill()
+ except (HttpError, NetLibError) as v:
+ self.flow.error = Error(repr(v))
+ if self.channel:
+ self.channel.ask("error", self.flow)
+ except Kill:
+            # Kill should only be raised if there's a channel in the
+            # first place.
+ self.channel.tell("log", Log("Connection killed", "info"))
+ finally:
+ r.form_out = form_out_backup
diff --git a/libmproxy/protocol/primitives.py b/libmproxy/protocol/primitives.py
deleted file mode 100644
index 92fc95e5..00000000
--- a/libmproxy/protocol/primitives.py
+++ /dev/null
@@ -1,294 +0,0 @@
-from __future__ import absolute_import
-import copy
-import uuid
-import netlib.tcp
-from .. import stateobject, utils, version
-from ..proxy.connection import ClientConnection, ServerConnection
-
-
-KILL = 0 # const for killed requests
-
-
-class Error(stateobject.StateObject):
- """
- An Error.
-
- This is distinct from an protocol error response (say, a HTTP code 500),
- which is represented by a normal HTTPResponse object. This class is
- responsible for indicating errors that fall outside of normal protocol
- communications, like interrupted connections, timeouts, protocol errors.
-
- Exposes the following attributes:
-
- flow: Flow object
- msg: Message describing the error
- timestamp: Seconds since the epoch
- """
-
- def __init__(self, msg, timestamp=None):
- """
- @type msg: str
- @type timestamp: float
- """
- self.flow = None # will usually be set by the flow backref mixin
- self.msg = msg
- self.timestamp = timestamp or utils.timestamp()
-
- _stateobject_attributes = dict(
- msg=str,
- timestamp=float
- )
-
- def __str__(self):
- return self.msg
-
- @classmethod
- def from_state(cls, state):
- # the default implementation assumes an empty constructor. Override
- # accordingly.
- f = cls(None)
- f.load_state(state)
- return f
-
- def copy(self):
- c = copy.copy(self)
- return c
-
-
-class Flow(stateobject.StateObject):
- """
- A Flow is a collection of objects representing a single transaction.
- This class is usually subclassed for each protocol, e.g. HTTPFlow.
- """
-
- def __init__(self, type, client_conn, server_conn, live=None):
- self.type = type
- self.id = str(uuid.uuid4())
- self.client_conn = client_conn
- """@type: ClientConnection"""
- self.server_conn = server_conn
- """@type: ServerConnection"""
- self.live = live
- """@type: LiveConnection"""
-
- self.error = None
- """@type: Error"""
- self.intercepted = False
- """@type: bool"""
- self._backup = None
- self.reply = None
-
- _stateobject_attributes = dict(
- id=str,
- error=Error,
- client_conn=ClientConnection,
- server_conn=ServerConnection,
- type=str,
- intercepted=bool
- )
-
- def get_state(self, short=False):
- d = super(Flow, self).get_state(short)
- d.update(version=version.IVERSION)
- if self._backup and self._backup != d:
- if short:
- d.update(modified=True)
- else:
- d.update(backup=self._backup)
- return d
-
- def __eq__(self, other):
- return self is other
-
- def copy(self):
- f = copy.copy(self)
-
- f.id = str(uuid.uuid4())
- f.live = False
- f.client_conn = self.client_conn.copy()
- f.server_conn = self.server_conn.copy()
-
- if self.error:
- f.error = self.error.copy()
- return f
-
- def modified(self):
- """
- Has this Flow been modified?
- """
- if self._backup:
- return self._backup != self.get_state()
- else:
- return False
-
- def backup(self, force=False):
- """
- Save a backup of this Flow, which can be reverted to using a
- call to .revert().
- """
- if not self._backup:
- self._backup = self.get_state()
-
- def revert(self):
- """
- Revert to the last backed up state.
- """
- if self._backup:
- self.load_state(self._backup)
- self._backup = None
-
- def kill(self, master):
- """
- Kill this request.
- """
- self.error = Error("Connection killed")
- self.intercepted = False
- self.reply(KILL)
- master.handle_error(self)
-
- def intercept(self, master):
- """
- Intercept this Flow. Processing will stop until accept_intercept is
- called.
- """
- if self.intercepted:
- return
- self.intercepted = True
- master.handle_intercept(self)
-
- def accept_intercept(self, master):
- """
- Continue with the flow - called after an intercept().
- """
- if not self.intercepted:
- return
- self.intercepted = False
- self.reply()
- master.handle_accept_intercept(self)
-
-
-
-class ProtocolHandler(object):
- """
- A ProtocolHandler implements an application-layer protocol, e.g. HTTP.
- See: libmproxy.protocol.http.HTTPHandler
- """
-
- def __init__(self, c):
- self.c = c
- """@type: libmproxy.proxy.server.ConnectionHandler"""
- self.live = LiveConnection(c)
- """@type: LiveConnection"""
-
- def handle_messages(self):
- """
- This method gets called if a client connection has been made. Depending
- on the proxy settings, a server connection might already exist as well.
- """
- raise NotImplementedError # pragma: nocover
-
- def handle_server_reconnect(self, state):
- """
- This method gets called if a server connection needs to reconnect and
- there's a state associated with the server connection (e.g. a
- previously-sent CONNECT request or a SOCKS proxy request). This method
- gets called after the connection has been restablished but before SSL is
- established.
- """
- raise NotImplementedError # pragma: nocover
-
- def handle_error(self, error):
- """
- This method gets called should there be an uncaught exception during the
- connection. This might happen outside of handle_messages, e.g. if the
- initial SSL handshake fails in transparent mode.
- """
- raise error # pragma: nocover
-
-
-class LiveConnection(object):
- """
- This facade allows interested parties (FlowMaster, inline scripts) to
- interface with a live connection, without exposing the internals
- of the ConnectionHandler.
- """
-
- def __init__(self, c):
- self.c = c
- """@type: libmproxy.proxy.server.ConnectionHandler"""
- self._backup_server_conn = None
- """@type: libmproxy.proxy.connection.ServerConnection"""
-
- def change_server(
- self,
- address,
- ssl=None,
- sni=None,
- force=False,
- persistent_change=False):
- """
- Change the server connection to the specified address.
- @returns:
- True, if a new connection has been established,
- False, if an existing connection has been used
- """
- address = netlib.tcp.Address.wrap(address)
-
- ssl_mismatch = (
- ssl is not None and
- (
- (self.c.server_conn.connection and ssl != self.c.server_conn.ssl_established)
- or
- (sni is not None and sni != self.c.server_conn.sni)
- )
- )
- address_mismatch = (address != self.c.server_conn.address)
-
- if persistent_change:
- self._backup_server_conn = None
-
- if ssl_mismatch or address_mismatch or force:
-
- self.c.log(
- "Change server connection: %s:%s -> %s:%s [persistent: %s]" % (
- self.c.server_conn.address.host,
- self.c.server_conn.address.port,
- address.host,
- address.port,
- persistent_change
- ),
- "debug"
- )
-
- if not self._backup_server_conn and not persistent_change:
- self._backup_server_conn = self.c.server_conn
- self.c.server_conn = None
- else:
- # This is at least the second temporary change. We can kill the
- # current connection.
- self.c.del_server_connection()
-
- self.c.set_server_address(address)
- self.c.establish_server_connection(ask=False)
- if ssl:
- self.c.establish_ssl(server=True, sni=sni)
- return True
- return False
-
- def restore_server(self):
- # TODO: Similar to _backup_server_conn, introduce _cache_server_conn,
- # which keeps the changed connection open. This may be beneficial if a
- # user is rewriting all requests from http to https or similar.
- if not self._backup_server_conn:
- return
-
- self.c.log("Restore original server connection: %s:%s -> %s:%s" % (
- self.c.server_conn.address.host,
- self.c.server_conn.address.port,
- self._backup_server_conn.address.host,
- self._backup_server_conn.address.port
- ), "debug")
-
- self.c.del_server_connection()
- self.c.server_conn = self._backup_server_conn
- self._backup_server_conn = None
diff --git a/libmproxy/protocol/rawtcp.py b/libmproxy/protocol/rawtcp.py
new file mode 100644
index 00000000..86468773
--- /dev/null
+++ b/libmproxy/protocol/rawtcp.py
@@ -0,0 +1,66 @@
+from __future__ import (absolute_import, print_function, division)
+import socket
+import select
+
+from OpenSSL import SSL
+
+from netlib.tcp import NetLibError
+from netlib.utils import cleanBin
+from ..exceptions import ProtocolException
+from .base import Layer
+
+
+class RawTCPLayer(Layer):
+ chunk_size = 4096
+
+ def __init__(self, ctx, logging=True):
+ self.logging = logging
+ super(RawTCPLayer, self).__init__(ctx)
+
+ def __call__(self):
+ self.connect()
+
+ buf = memoryview(bytearray(self.chunk_size))
+
+ client = self.client_conn.connection
+ server = self.server_conn.connection
+ conns = [client, server]
+
+ try:
+ while True:
+ r, _, _ = select.select(conns, [], [], 10)
+ for conn in r:
+ dst = server if conn == client else client
+
+ size = conn.recv_into(buf, self.chunk_size)
+ if not size:
+ conns.remove(conn)
+ # Shutdown connection to the other peer
+ if isinstance(conn, SSL.Connection):
+ # We can't half-close a connection, so we just close everything here.
+ # Sockets will be cleaned up on a higher level.
+ return
+ else:
+ dst.shutdown(socket.SHUT_WR)
+
+ if len(conns) == 0:
+ return
+ continue
+
+ dst.sendall(buf[:size])
+
+ if self.logging:
+ # log messages are prepended with the client address,
+ # hence the "weird" direction string.
+ if dst == server:
+ direction = "-> tcp -> {}".format(repr(self.server_conn.address))
+ else:
+ direction = "<- tcp <- {}".format(repr(self.server_conn.address))
+ data = cleanBin(buf[:size].tobytes())
+ self.log(
+ "{}\r\n{}".format(direction, data),
+ "info"
+ )
+
+ except (socket.error, NetLibError, SSL.Error) as e:
+ raise ProtocolException("TCP connection closed unexpectedly: {}".format(repr(e)), e)
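
The half-close handling in RawTCPLayer above follows plain BSD socket semantics: when one peer stops sending, only the write direction towards the other peer is shut down, so data can still drain in the opposite direction. A minimal sketch of that behaviour with a raw socket (the host is illustrative only and not part of this commit):

    import socket

    client = socket.create_connection(("example.com", 80))
    client.sendall(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
    client.shutdown(socket.SHUT_WR)   # signal EOF to the peer, as RawTCPLayer does
    data = client.recv(4096)          # the read direction is still usable
    client.close()
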
diff --git a/libmproxy/protocol/tcp.py b/libmproxy/protocol/tcp.py
deleted file mode 100644
index 0feb77c6..00000000
--- a/libmproxy/protocol/tcp.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from __future__ import absolute_import
-import select
-import socket
-from .primitives import ProtocolHandler
-from netlib.utils import cleanBin
-from netlib.tcp import NetLibError
-
-
-class TCPHandler(ProtocolHandler):
- """
- TCPHandler acts as a generic TCP forwarder.
- Data will be .log()ed, but not stored any further.
- """
-
- chunk_size = 4096
-
- def __init__(self, c, log=True):
- super(TCPHandler, self).__init__(c)
- self.log = log
-
- def handle_messages(self):
- self.c.establish_server_connection()
-
- server = "%s:%s" % self.c.server_conn.address()[:2]
- buf = memoryview(bytearray(self.chunk_size))
- conns = [self.c.client_conn.rfile, self.c.server_conn.rfile]
-
- try:
- while True:
- r, _, _ = select.select(conns, [], [], 10)
- for rfile in r:
- if self.c.client_conn.rfile == rfile:
- src, dst = self.c.client_conn, self.c.server_conn
- direction = "-> tcp ->"
- src_str, dst_str = "client", server
- else:
- dst, src = self.c.client_conn, self.c.server_conn
- direction = "<- tcp <-"
- dst_str, src_str = "client", server
-
- closed = False
- if src.ssl_established:
- # Unfortunately, pyOpenSSL lacks a recv_into function.
- # We need to read a single byte before .pending()
- # becomes usable
- contents = src.rfile.read(1)
- contents += src.rfile.read(src.connection.pending())
- if not contents:
- closed = True
- else:
- size = src.connection.recv_into(buf)
- if not size:
- closed = True
-
- if closed:
- conns.remove(src.rfile)
- # Shutdown connection to the other peer
- if dst.ssl_established:
- # We can't half-close a connection, so we just close everything here.
- # Sockets will be cleaned up on a higher level.
- return
- else:
- dst.connection.shutdown(socket.SHUT_WR)
-
- if len(conns) == 0:
- return
- continue
-
- if src.ssl_established or dst.ssl_established:
- # if one of the peers is over SSL, we need to send
- # bytes/strings
- if not src.ssl_established:
- # we revc'd into buf but need bytes/string now.
- contents = buf[:size].tobytes()
- if self.log:
- self.c.log(
- "%s %s\r\n%s" % (
- direction, dst_str, cleanBin(contents)
- ),
- "info"
- )
- # Do not use dst.connection.send here, which may raise
- # OpenSSL-specific errors.
- dst.send(contents)
- else:
- # socket.socket.send supports raw bytearrays/memoryviews
- if self.log:
- self.c.log(
- "%s %s\r\n%s" % (
- direction, dst_str, cleanBin(buf.tobytes())
- ),
- "info"
- )
- dst.connection.send(buf[:size])
- except (socket.error, NetLibError) as e:
- self.c.log("TCP connection closed unexpectedly.", "debug")
- return
diff --git a/libmproxy/protocol/tls.py b/libmproxy/protocol/tls.py
new file mode 100644
index 00000000..a8dc8bb2
--- /dev/null
+++ b/libmproxy/protocol/tls.py
@@ -0,0 +1,298 @@
+from __future__ import (absolute_import, print_function, division)
+
+import struct
+
+from construct import ConstructError
+
+from netlib.tcp import NetLibError, NetLibInvalidCertificateError
+from netlib.http.http1 import HTTP1Protocol
+from ..contrib.tls._constructs import ClientHello
+from ..exceptions import ProtocolException
+from .base import Layer
+
+
+def is_tls_record_magic(d):
+ """
+ Returns:
+ True, if the passed bytes start with the TLS record magic bytes.
+ False, otherwise.
+ """
+ d = d[:3]
+
+ # TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2
+ # http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
+ return (
+ len(d) == 3 and
+ d[0] == '\x16' and
+ d[1] == '\x03' and
+ d[2] in ('\x00', '\x01', '\x02', '\x03')
+ )
+
+
+class TlsLayer(Layer):
+ def __init__(self, ctx, client_tls, server_tls):
+ self.client_sni = None
+ self.client_alpn_protocols = None
+
+ super(TlsLayer, self).__init__(ctx)
+ self._client_tls = client_tls
+ self._server_tls = server_tls
+
+ self._sni_from_server_change = None
+
+ def __call__(self):
+ """
+ The strategy for establishing SSL is as follows:
+ First, we determine whether we need the server cert to establish SSL with the client.
+ If so, we first connect to the server and then to the client.
+ If not, we only connect to the client and establish SSL with the server lazily (on a Connect message).
+
+ An additional complexity is that establishing SSL with the server may require an SNI value from the client.
+ In an ideal world, we'd do the following:
+ 1. Start the SSL handshake with the client
+ 2. Check if the client sends a SNI.
+ 3. Pause the client handshake, establish SSL with the server.
+ 4. Finish the client handshake with the certificate from the server.
+ There's just one issue: We cannot get a callback from OpenSSL if the client doesn't send a SNI. :(
+ Thus, we manually peek into the connection and parse the ClientHello message to obtain both SNI and ALPN values.
+
+ Further notes:
+ - OpenSSL 1.0.2 introduces a callback that would help here:
+ https://www.openssl.org/docs/ssl/SSL_CTX_set_cert_cb.html
+ - The original mitmproxy issue is https://github.com/mitmproxy/mitmproxy/issues/427
+ """
+
+ client_tls_requires_server_cert = (
+ self._client_tls and self._server_tls and not self.config.no_upstream_cert
+ )
+
+ if self._client_tls:
+ self._parse_client_hello()
+
+ if client_tls_requires_server_cert:
+ self._establish_tls_with_client_and_server()
+ elif self._client_tls:
+ self._establish_tls_with_client()
+
+ layer = self.ctx.next_layer(self)
+ layer()
+
+ def __repr__(self):
+ if self._client_tls and self._server_tls:
+ return "TlsLayer(client and server)"
+ elif self._client_tls:
+ return "TlsLayer(client)"
+ elif self._server_tls:
+ return "TlsLayer(server)"
+ else:
+ return "TlsLayer(inactive)"
+
+ def _get_client_hello(self):
+ """
+ Peek into the socket and read all records that contain the initial client hello message.
+
+ Returns:
+ The raw handshake packet bytes, without TLS record header(s).
+ """
+ client_hello = ""
+ client_hello_size = 1
+ offset = 0
+ while len(client_hello) < client_hello_size:
+ record_header = self.client_conn.rfile.peek(offset + 5)[offset:]
+ if not is_tls_record_magic(record_header) or len(record_header) != 5:
+ raise ProtocolException('Expected TLS record, got "%s" instead.' % record_header)
+ record_size = struct.unpack("!H", record_header[3:])[0] + 5
+ record_body = self.client_conn.rfile.peek(offset + record_size)[offset + 5:]
+ if len(record_body) != record_size - 5:
+ raise ProtocolException("Unexpected EOF in TLS handshake: %s" % record_body)
+ client_hello += record_body
+ offset += record_size
+ client_hello_size = struct.unpack("!I", '\x00' + client_hello[1:4])[0] + 4
+ return client_hello
+
+ def _parse_client_hello(self):
+ """
+ Peek into the connection, read the initial client hello and parse it to obtain SNI and ALPN values.
+ """
+ try:
+ raw_client_hello = self._get_client_hello()[4:] # exclude handshake header.
+ except ProtocolException as e:
+ self.log("Cannot parse Client Hello: %s" % repr(e), "error")
+ return
+
+ try:
+ client_hello = ClientHello.parse(raw_client_hello)
+ except ConstructError as e:
+ self.log("Cannot parse Client Hello: %s" % repr(e), "error")
+ self.log("Raw Client Hello:\r\n:%s" % raw_client_hello.encode("hex"), "debug")
+ return
+
+ for extension in client_hello.extensions:
+ if extension.type == 0x00:
+ if len(extension.server_names) != 1 or extension.server_names[0].type != 0:
+ self.log("Unknown Server Name Indication: %s" % extension.server_names, "error")
+ self.client_sni = extension.server_names[0].name
+ elif extension.type == 0x10:
+ self.client_alpn_protocols = list(extension.alpn_protocols)
+
+ self.log(
+ "Parsed Client Hello: sni=%s, alpn=%s" % (self.client_sni, self.client_alpn_protocols),
+ "debug"
+ )
+
+ def connect(self):
+ if not self.server_conn:
+ self.ctx.connect()
+ if self._server_tls and not self.server_conn.tls_established:
+ self._establish_tls_with_server()
+
+ def reconnect(self):
+ self.ctx.reconnect()
+ if self._server_tls and not self.server_conn.tls_established:
+ self._establish_tls_with_server()
+
+ def set_server(self, address, server_tls=None, sni=None, depth=1):
+ if depth == 1 and server_tls is not None:
+ self.ctx.set_server(address, None, None, 1)
+ self._sni_from_server_change = sni
+ self._server_tls = server_tls
+ else:
+ self.ctx.set_server(address, server_tls, sni, depth)
+
+ @property
+ def sni_for_server_connection(self):
+ if self._sni_from_server_change is False:
+ return None
+ else:
+ return self._sni_from_server_change or self.client_sni
+
+ @property
+ def alpn_for_client_connection(self):
+ return self.server_conn.get_alpn_proto_negotiated()
+
+ def __alpn_select_callback(self, conn_, options):
+ """
+ Once the client signals the alternate protocols it supports,
+ we reconnect upstream with the same list and pass the server's choice down to the client.
+ """
+
+ # This gets triggered if we haven't established an upstream connection yet.
+ default_alpn = HTTP1Protocol.ALPN_PROTO_HTTP1
+ # alpn_preference = netlib.http.http2.HTTP2Protocol.ALPN_PROTO_H2
+
+ if self.alpn_for_client_connection in options:
+ choice = bytes(self.alpn_for_client_connection)
+ elif default_alpn in options:
+ choice = bytes(default_alpn)
+ else:
+ choice = options[0]
+ self.log("ALPN for client: %s" % choice, "debug")
+ return choice
+
+ def _establish_tls_with_client_and_server(self):
+ self.ctx.connect()
+
+ # If establishing TLS with the server fails, we try to establish TLS with the client nonetheless
+ # to send an error message over TLS.
+ try:
+ self._establish_tls_with_server()
+ except Exception as e:
+ try:
+ self._establish_tls_with_client()
+ except:
+ pass
+ raise e
+
+ self._establish_tls_with_client()
+
+ def _establish_tls_with_client(self):
+ self.log("Establish TLS with client", "debug")
+ cert, key, chain_file = self._find_cert()
+
+ try:
+ self.client_conn.convert_to_ssl(
+ cert, key,
+ method=self.config.openssl_method_client,
+ options=self.config.openssl_options_client,
+ cipher_list=self.config.ciphers_client,
+ dhparams=self.config.certstore.dhparams,
+ chain_file=chain_file,
+ alpn_select_callback=self.__alpn_select_callback,
+ )
+ except NetLibError as e:
+ raise ProtocolException("Cannot establish TLS with client: %s" % repr(e), e)
+
+ def _establish_tls_with_server(self):
+ self.log("Establish TLS with server", "debug")
+ try:
+ # We only support http/1.1 and h2.
+ # If the server only supports spdy (next to http/1.1), it may select that
+ # and mitmproxy would enter TCP passthrough mode, which we want to avoid.
+ deprecated_http2_variant = lambda x: x.startswith("h2-") or x.startswith("spdy")
+ if self.client_alpn_protocols:
+ alpn = filter(lambda x: not deprecated_http2_variant(x), self.client_alpn_protocols)
+ else:
+ alpn = None
+
+ self.server_conn.establish_ssl(
+ self.config.clientcerts,
+ self.sni_for_server_connection,
+ method=self.config.openssl_method_server,
+ options=self.config.openssl_options_server,
+ verify_options=self.config.openssl_verification_mode_server,
+ ca_path=self.config.openssl_trusted_cadir_server,
+ ca_pemfile=self.config.openssl_trusted_ca_server,
+ cipher_list=self.config.ciphers_server,
+ alpn_protos=alpn,
+ )
+ tls_cert_err = self.server_conn.ssl_verification_error
+ if tls_cert_err is not None:
+ self.log(
+ "TLS verification failed for upstream server at depth %s with error: %s" %
+ (tls_cert_err['depth'], tls_cert_err['errno']),
+ "error")
+ self.log("Ignoring server verification error, continuing with connection", "error")
+ except NetLibInvalidCertificateError as e:
+ tls_cert_err = self.server_conn.ssl_verification_error
+ self.log(
+ "TLS verification failed for upstream server at depth %s with error: %s" %
+ (tls_cert_err['depth'], tls_cert_err['errno']),
+ "error")
+ self.log("Aborting connection attempt", "error")
+ raise ProtocolException("Cannot establish TLS with {address} (sni: {sni}): {e}".format(
+ address=repr(self.server_conn.address),
+ sni=self.sni_for_server_connection,
+ e=repr(e),
+ ), e)
+ except NetLibError as e:
+ raise ProtocolException("Cannot establish TLS with {address} (sni: {sni}): {e}".format(
+ address=repr(self.server_conn.address),
+ sni=self.sni_for_server_connection,
+ e=repr(e),
+ ), e)
+
+ self.log("ALPN selected by server: %s" % self.alpn_for_client_connection, "debug")
+
+ def _find_cert(self):
+ host = self.server_conn.address.host
+ sans = set()
+ # Incorporate upstream certificate
+ use_upstream_cert = (
+ self.server_conn and
+ self.server_conn.tls_established and
+ (not self.config.no_upstream_cert)
+ )
+ if use_upstream_cert:
+ upstream_cert = self.server_conn.cert
+ sans.update(upstream_cert.altnames)
+ if upstream_cert.cn:
+ sans.add(host)
+ host = upstream_cert.cn.decode("utf8").encode("idna")
+ # Also add SNI values.
+ if self.client_sni:
+ sans.add(self.client_sni)
+ if self._sni_from_server_change:
+ sans.add(self._sni_from_server_change)
+
+ sans.discard(host)
+ return self.config.certstore.get_cert(host, list(sans))
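
A quick illustration of the record detection that tls.py introduces: is_tls_record_magic() only looks at the first three peeked bytes, so it can tell a TLS ClientHello apart from a plaintext request without consuming anything. This is a sketch (assuming the module path libmproxy.protocol.tls from the diff above), not part of the commit:

    >>> from libmproxy.protocol.tls import is_tls_record_magic
    >>> is_tls_record_magic('\x16\x03\x01')   # 0x16 0x03 0x01: handshake record, record-layer TLS 1.0
    True
    >>> is_tls_record_magic('GET')            # plaintext HTTP never starts with 0x16
    False
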
diff --git a/libmproxy/proxy/__init__.py b/libmproxy/proxy/__init__.py
index f33d323b..d5297cb1 100644
--- a/libmproxy/proxy/__init__.py
+++ b/libmproxy/proxy/__init__.py
@@ -1,2 +1,9 @@
-from .primitives import *
-from .config import ProxyConfig, process_proxy_options
+from __future__ import (absolute_import, print_function, division)
+
+from .server import ProxyServer, DummyServer
+from .config import ProxyConfig
+
+__all__ = [
+ "ProxyServer", "DummyServer",
+ "ProxyConfig",
+]
diff --git a/libmproxy/proxy/config.py b/libmproxy/proxy/config.py
index ec91a6e0..2a1b84cb 100644
--- a/libmproxy/proxy/config.py
+++ b/libmproxy/proxy/config.py
@@ -1,26 +1,31 @@
-from __future__ import absolute_import
+from __future__ import (absolute_import, print_function, division)
+import collections
import os
import re
from OpenSSL import SSL
-import netlib
-from netlib import http, certutils, tcp
+from netlib import certutils, tcp
from netlib.http import authentication
+from netlib.tcp import Address, sslversion_choices
-from .. import utils, platform, version
-from .primitives import RegularProxyMode, SpoofMode, SSLSpoofMode, TransparentProxyMode, UpstreamProxyMode, ReverseProxyMode, Socks5ProxyMode
+from .. import utils, platform
-TRANSPARENT_SSL_PORTS = [443, 8443]
CONF_BASENAME = "mitmproxy"
CA_DIR = "~/.mitmproxy"
+# We need to specify this manually, otherwise OpenSSL may select a non-HTTP2 cipher by default.
+# https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=apache-2.2.15&openssl=1.0.2&hsts=yes&profile=old
+DEFAULT_CLIENT_CIPHERS = "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
+
class HostMatcher(object):
- def __init__(self, patterns=[]):
+ def __init__(self, patterns=tuple()):
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
+ if not address:
+ return False
address = tcp.Address.wrap(address)
host = "%s:%s" % (address.host, address.port)
if any(rex.search(host) for rex in self.regexes):
@@ -32,61 +37,44 @@ class HostMatcher(object):
return bool(self.patterns)
+ServerSpec = collections.namedtuple("ServerSpec", "scheme address")
+
+
class ProxyConfig:
def __init__(
self,
host='',
port=8080,
- server_version=version.NAMEVERSION,
cadir=CA_DIR,
clientcerts=None,
no_upstream_cert=False,
body_size_limit=None,
- mode=None,
+ mode="regular",
upstream_server=None,
- http_form_in=None,
- http_form_out=None,
authenticator=None,
- ignore_hosts=[],
- tcp_hosts=[],
+ ignore_hosts=tuple(),
+ tcp_hosts=tuple(),
ciphers_client=None,
ciphers_server=None,
- certs=[],
- ssl_version_client=tcp.SSL_DEFAULT_METHOD,
- ssl_version_server=tcp.SSL_DEFAULT_METHOD,
- ssl_ports=TRANSPARENT_SSL_PORTS,
- spoofed_ssl_port=None,
+ certs=tuple(),
+ ssl_version_client="secure",
+ ssl_version_server="secure",
ssl_verify_upstream_cert=False,
- ssl_upstream_trusted_cadir=None,
- ssl_upstream_trusted_ca=None
+ ssl_verify_upstream_trusted_cadir=None,
+ ssl_verify_upstream_trusted_ca=None,
):
self.host = host
self.port = port
- self.server_version = server_version
self.ciphers_client = ciphers_client
self.ciphers_server = ciphers_server
self.clientcerts = clientcerts
self.no_upstream_cert = no_upstream_cert
self.body_size_limit = body_size_limit
-
- if mode == "transparent":
- self.mode = TransparentProxyMode(platform.resolver(), ssl_ports)
- elif mode == "socks5":
- self.mode = Socks5ProxyMode(ssl_ports)
- elif mode == "reverse":
- self.mode = ReverseProxyMode(upstream_server)
- elif mode == "upstream":
- self.mode = UpstreamProxyMode(upstream_server)
- elif mode == "spoof":
- self.mode = SpoofMode()
- elif mode == "sslspoof":
- self.mode = SSLSpoofMode(spoofed_ssl_port)
+ self.mode = mode
+ if upstream_server:
+ self.upstream_server = ServerSpec(upstream_server[0], Address.wrap(upstream_server[1]))
else:
- self.mode = RegularProxyMode()
-
- # Handle manual overrides of the http forms
- self.mode.http_form_in = http_form_in or self.mode.http_form_in
- self.mode.http_form_out = http_form_out or self.mode.http_form_out
+ self.upstream_server = None
self.check_ignore = HostMatcher(ignore_hosts)
self.check_tcp = HostMatcher(tcp_hosts)
@@ -94,41 +82,33 @@ class ProxyConfig:
self.cadir = os.path.expanduser(cadir)
self.certstore = certutils.CertStore.from_store(
self.cadir,
- CONF_BASENAME)
+ CONF_BASENAME
+ )
for spec, cert in certs:
self.certstore.add_cert_file(spec, cert)
- self.ssl_ports = ssl_ports
- if isinstance(ssl_version_client, int):
- self.openssl_method_client = ssl_version_client
- else:
- self.openssl_method_client = tcp.SSL_VERSIONS[ssl_version_client]
- if isinstance(ssl_version_server, int):
- self.openssl_method_server = ssl_version_server
- else:
- self.openssl_method_server = tcp.SSL_VERSIONS[ssl_version_server]
+ self.openssl_method_client, self.openssl_options_client = \
+ sslversion_choices[ssl_version_client]
+ self.openssl_method_server, self.openssl_options_server = \
+ sslversion_choices[ssl_version_server]
if ssl_verify_upstream_cert:
self.openssl_verification_mode_server = SSL.VERIFY_PEER
else:
self.openssl_verification_mode_server = SSL.VERIFY_NONE
- self.openssl_trusted_cadir_server = ssl_upstream_trusted_cadir
- self.openssl_trusted_ca_server = ssl_upstream_trusted_ca
-
- self.openssl_options_client = tcp.SSL_DEFAULT_OPTIONS
- self.openssl_options_server = tcp.SSL_DEFAULT_OPTIONS
+ self.openssl_trusted_cadir_server = ssl_verify_upstream_trusted_cadir
+ self.openssl_trusted_ca_server = ssl_verify_upstream_trusted_ca
def process_proxy_options(parser, options):
body_size_limit = utils.parse_size(options.body_size_limit)
c = 0
- mode, upstream_server, spoofed_ssl_port = None, None, None
+ mode, upstream_server = "regular", None
if options.transparent_proxy:
c += 1
if not platform.resolver:
- return parser.error(
- "Transparent mode not supported on this platform.")
+ return parser.error("Transparent mode not supported on this platform.")
mode = "transparent"
if options.socks_proxy:
c += 1
@@ -141,32 +121,33 @@ def process_proxy_options(parser, options):
c += 1
mode = "upstream"
upstream_server = options.upstream_proxy
- if options.spoof_mode:
- c += 1
- mode = "spoof"
- if options.ssl_spoof_mode:
- c += 1
- mode = "sslspoof"
- spoofed_ssl_port = options.spoofed_ssl_port
if c > 1:
return parser.error(
"Transparent, SOCKS5, reverse and upstream proxy mode "
- "are mutually exclusive.")
+ "are mutually exclusive. Read the docs on proxy modes to understand why."
+ )
if options.clientcerts:
options.clientcerts = os.path.expanduser(options.clientcerts)
- if not os.path.exists(
- options.clientcerts) or not os.path.isdir(
- options.clientcerts):
+ if not os.path.exists(options.clientcerts) or not os.path.isdir(options.clientcerts):
return parser.error(
"Client certificate directory does not exist or is not a directory: %s" %
- options.clientcerts)
+ options.clientcerts
+ )
+
+ if options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd:
+
+ if options.socks_proxy:
+ return parser.error(
+ "Proxy Authentication not supported in SOCKS mode. "
+ "https://github.com/mitmproxy/mitmproxy/issues/738"
+ )
- if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd):
if options.auth_singleuser:
if len(options.auth_singleuser.split(':')) != 2:
return parser.error(
- "Invalid single-user specification. Please use the format username:password")
+ "Invalid single-user specification. Please use the format username:password"
+ )
username, password = options.auth_singleuser.split(':')
password_manager = authentication.PassManSingleUser(username, password)
elif options.auth_nonanonymous:
@@ -191,12 +172,6 @@ def process_proxy_options(parser, options):
parser.error("Certificate file does not exist: %s" % parts[1])
certs.append(parts)
- ssl_ports = options.ssl_ports
- if options.ssl_ports != TRANSPARENT_SSL_PORTS:
- # argparse appends to the default value by default, strip that off.
- # see http://bugs.python.org/issue16399
- ssl_ports = ssl_ports[len(TRANSPARENT_SSL_PORTS):]
-
return ProxyConfig(
host=options.addr,
port=options.port,
@@ -206,99 +181,15 @@ def process_proxy_options(parser, options):
body_size_limit=body_size_limit,
mode=mode,
upstream_server=upstream_server,
- http_form_in=options.http_form_in,
- http_form_out=options.http_form_out,
ignore_hosts=options.ignore_hosts,
tcp_hosts=options.tcp_hosts,
authenticator=authenticator,
ciphers_client=options.ciphers_client,
ciphers_server=options.ciphers_server,
- certs=certs,
+ certs=tuple(certs),
ssl_version_client=options.ssl_version_client,
ssl_version_server=options.ssl_version_server,
- ssl_ports=ssl_ports,
- spoofed_ssl_port=spoofed_ssl_port,
ssl_verify_upstream_cert=options.ssl_verify_upstream_cert,
- ssl_upstream_trusted_cadir=options.ssl_upstream_trusted_cadir,
- ssl_upstream_trusted_ca=options.ssl_upstream_trusted_ca
- )
-
-
-def ssl_option_group(parser):
- group = parser.add_argument_group("SSL")
- group.add_argument(
- "--cert",
- dest='certs',
- default=[],
- type=str,
- metavar="SPEC",
- action="append",
- help='Add an SSL certificate. SPEC is of the form "[domain=]path". '
- 'The domain may include a wildcard, and is equal to "*" if not specified. '
- 'The file at path is a certificate in PEM format. If a private key is included in the PEM, '
- 'it is used, else the default key in the conf dir is used. '
- 'The PEM file should contain the full certificate chain, with the leaf certificate as the first entry. '
- 'Can be passed multiple times.')
- group.add_argument(
- "--ciphers-client", action="store",
- type=str, dest="ciphers_client", default=None,
- help="Set supported ciphers for client connections. (OpenSSL Syntax)"
- )
- group.add_argument(
- "--ciphers-server", action="store",
- type=str, dest="ciphers_server", default=None,
- help="Set supported ciphers for server connections. (OpenSSL Syntax)"
- )
- group.add_argument(
- "--client-certs", action="store",
- type=str, dest="clientcerts", default=None,
- help="Client certificate directory."
- )
- group.add_argument(
- "--no-upstream-cert", default=False,
- action="store_true", dest="no_upstream_cert",
- help="Don't connect to upstream server to look up certificate details."
- )
- group.add_argument(
- "--verify-upstream-cert", default=False,
- action="store_true", dest="ssl_verify_upstream_cert",
- help="Verify upstream server SSL/TLS certificates and fail if invalid "
- "or not present."
- )
- group.add_argument(
- "--upstream-trusted-cadir", default=None, action="store",
- dest="ssl_upstream_trusted_cadir",
- help="Path to a directory of trusted CA certificates for upstream "
- "server verification prepared using the c_rehash tool."
- )
- group.add_argument(
- "--upstream-trusted-ca", default=None, action="store",
- dest="ssl_upstream_trusted_ca",
- help="Path to a PEM formatted trusted CA certificate."
- )
- group.add_argument(
- "--ssl-port",
- action="append",
- type=int,
- dest="ssl_ports",
- default=list(TRANSPARENT_SSL_PORTS),
- metavar="PORT",
- help="Can be passed multiple times. Specify destination ports which are assumed to be SSL. "
- "Defaults to %s." %
- str(TRANSPARENT_SSL_PORTS))
- group.add_argument(
- "--ssl-version-client", dest="ssl_version_client", type=str, default=tcp.SSL_DEFAULT_VERSION,
- choices=tcp.SSL_VERSIONS.keys(),
- help=""""
- Use a specified protocol for client connections:
- TLSv1.2, TLSv1.1, TLSv1, SSLv3, SSLv2, SSLv23.
- Default to SSLv23."""
- )
- group.add_argument(
- "--ssl-version-server", dest="ssl_version_server", type=str, default=tcp.SSL_DEFAULT_VERSION,
- choices=tcp.SSL_VERSIONS.keys(),
- help=""""
- Use a specified protocol for server connections:
- TLSv1.2, TLSv1.1, TLSv1, SSLv3, SSLv2, SSLv23.
- Default to SSLv23."""
- )
+ ssl_verify_upstream_trusted_cadir=options.ssl_verify_upstream_trusted_cadir,
+ ssl_verify_upstream_trusted_ca=options.ssl_verify_upstream_trusted_ca
+ ) \ No newline at end of file
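
Since upstream/reverse targets are now stored as the ServerSpec named tuple introduced above (a scheme plus a netlib Address), downstream code such as ConnectionHandler._create_root_layer can read .scheme and .address directly. A small sketch of the new shape (host and port are made up for illustration):

    from netlib.tcp import Address
    from libmproxy.proxy.config import ServerSpec

    # what ProxyConfig.upstream_server now holds for -U / -R targets
    spec = ServerSpec("https", Address.wrap(("example.com", 443)))
    assert spec.scheme == "https"
    assert spec.address.host == "example.com"
    assert spec.address.port == 443
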
diff --git a/libmproxy/proxy/modes/__init__.py b/libmproxy/proxy/modes/__init__.py
new file mode 100644
index 00000000..f014ed98
--- /dev/null
+++ b/libmproxy/proxy/modes/__init__.py
@@ -0,0 +1,12 @@
+from __future__ import (absolute_import, print_function, division)
+from .http_proxy import HttpProxy, HttpUpstreamProxy
+from .reverse_proxy import ReverseProxy
+from .socks_proxy import Socks5Proxy
+from .transparent_proxy import TransparentProxy
+
+__all__ = [
+ "HttpProxy", "HttpUpstreamProxy",
+ "ReverseProxy",
+ "Socks5Proxy",
+ "TransparentProxy"
+]
diff --git a/libmproxy/proxy/modes/http_proxy.py b/libmproxy/proxy/modes/http_proxy.py
new file mode 100644
index 00000000..90c54cc6
--- /dev/null
+++ b/libmproxy/proxy/modes/http_proxy.py
@@ -0,0 +1,26 @@
+from __future__ import (absolute_import, print_function, division)
+
+from ...protocol import Layer, ServerConnectionMixin
+
+
+class HttpProxy(Layer, ServerConnectionMixin):
+ def __call__(self):
+ layer = self.ctx.next_layer(self)
+ try:
+ layer()
+ finally:
+ if self.server_conn:
+ self._disconnect()
+
+
+class HttpUpstreamProxy(Layer, ServerConnectionMixin):
+ def __init__(self, ctx, server_address):
+ super(HttpUpstreamProxy, self).__init__(ctx, server_address=server_address)
+
+ def __call__(self):
+ layer = self.ctx.next_layer(self)
+ try:
+ layer()
+ finally:
+ if self.server_conn:
+ self._disconnect()
diff --git a/libmproxy/proxy/modes/reverse_proxy.py b/libmproxy/proxy/modes/reverse_proxy.py
new file mode 100644
index 00000000..b57ac5eb
--- /dev/null
+++ b/libmproxy/proxy/modes/reverse_proxy.py
@@ -0,0 +1,17 @@
+from __future__ import (absolute_import, print_function, division)
+
+from ...protocol import Layer, ServerConnectionMixin
+
+
+class ReverseProxy(Layer, ServerConnectionMixin):
+ def __init__(self, ctx, server_address, server_tls):
+ super(ReverseProxy, self).__init__(ctx, server_address=server_address)
+ self.server_tls = server_tls
+
+ def __call__(self):
+ layer = self.ctx.next_layer(self)
+ try:
+ layer()
+ finally:
+ if self.server_conn:
+ self._disconnect()
diff --git a/libmproxy/proxy/modes/socks_proxy.py b/libmproxy/proxy/modes/socks_proxy.py
new file mode 100644
index 00000000..ebaf939e
--- /dev/null
+++ b/libmproxy/proxy/modes/socks_proxy.py
@@ -0,0 +1,60 @@
+from __future__ import (absolute_import, print_function, division)
+
+from netlib import socks
+from netlib.tcp import NetLibError
+
+from ...exceptions import Socks5Exception
+from ...protocol import Layer, ServerConnectionMixin
+
+
+class Socks5Proxy(Layer, ServerConnectionMixin):
+ def __call__(self):
+ try:
+ # Parse Client Greeting
+ client_greet = socks.ClientGreeting.from_file(self.client_conn.rfile, fail_early=True)
+ client_greet.assert_socks5()
+ if socks.METHOD.NO_AUTHENTICATION_REQUIRED not in client_greet.methods:
+ raise socks.SocksError(
+ socks.METHOD.NO_ACCEPTABLE_METHODS,
+ "mitmproxy only supports SOCKS without authentication"
+ )
+
+ # Send Server Greeting
+ server_greet = socks.ServerGreeting(
+ socks.VERSION.SOCKS5,
+ socks.METHOD.NO_AUTHENTICATION_REQUIRED
+ )
+ server_greet.to_file(self.client_conn.wfile)
+ self.client_conn.wfile.flush()
+
+ # Parse Connect Request
+ connect_request = socks.Message.from_file(self.client_conn.rfile)
+ connect_request.assert_socks5()
+ if connect_request.msg != socks.CMD.CONNECT:
+ raise socks.SocksError(
+ socks.REP.COMMAND_NOT_SUPPORTED,
+ "mitmproxy only supports SOCKS5 CONNECT."
+ )
+
+ # We always connect lazily, but we need to pretend to the client that we connected.
+ connect_reply = socks.Message(
+ socks.VERSION.SOCKS5,
+ socks.REP.SUCCEEDED,
+ connect_request.atyp,
+ # dummy value, we don't have an upstream connection yet.
+ connect_request.addr
+ )
+ connect_reply.to_file(self.client_conn.wfile)
+ self.client_conn.wfile.flush()
+
+ except (socks.SocksError, NetLibError) as e:
+ raise Socks5Exception("SOCKS5 mode failure: %s" % repr(e), e)
+
+ self.server_conn.address = connect_request.addr
+
+ layer = self.ctx.next_layer(self)
+ try:
+ layer()
+ finally:
+ if self.server_conn:
+ self._disconnect()
diff --git a/libmproxy/proxy/modes/transparent_proxy.py b/libmproxy/proxy/modes/transparent_proxy.py
new file mode 100644
index 00000000..96ad86c4
--- /dev/null
+++ b/libmproxy/proxy/modes/transparent_proxy.py
@@ -0,0 +1,24 @@
+from __future__ import (absolute_import, print_function, division)
+
+from ... import platform
+from ...exceptions import ProtocolException
+from ...protocol import Layer, ServerConnectionMixin
+
+
+class TransparentProxy(Layer, ServerConnectionMixin):
+ def __init__(self, ctx):
+ super(TransparentProxy, self).__init__(ctx)
+ self.resolver = platform.resolver()
+
+ def __call__(self):
+ try:
+ self.server_conn.address = self.resolver.original_addr(self.client_conn.connection)
+ except Exception as e:
+ raise ProtocolException("Transparent mode failure: %s" % repr(e), e)
+
+ layer = self.ctx.next_layer(self)
+ try:
+ layer()
+ finally:
+ if self.server_conn:
+ self._disconnect()
diff --git a/libmproxy/proxy/primitives.py b/libmproxy/proxy/primitives.py
deleted file mode 100644
index 923f84ca..00000000
--- a/libmproxy/proxy/primitives.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from __future__ import absolute_import
-from netlib import socks, tcp
-
-
-class ProxyError(Exception):
- def __init__(self, code, message, headers=None):
- super(ProxyError, self).__init__(message)
- self.code, self.headers = code, headers
-
-
-class ProxyServerError(Exception):
- pass
-
-
-class ProxyMode(object):
- http_form_in = None
- http_form_out = None
-
- def get_upstream_server(self, client_conn):
- """
- Returns the address of the server to connect to.
- Returns None if the address needs to be determined on the protocol level (regular proxy mode)
- """
- raise NotImplementedError() # pragma: nocover
-
- @property
- def name(self):
- return self.__class__.__name__.replace("ProxyMode", "").lower()
-
- def __str__(self):
- return self.name
-
- def __eq__(self, other):
- """
- Allow comparisons with "regular" etc.
- """
- if isinstance(other, ProxyMode):
- return self is other
- else:
- return self.name == other
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class RegularProxyMode(ProxyMode):
- http_form_in = "absolute"
- http_form_out = "relative"
-
- def get_upstream_server(self, client_conn):
- return None
-
-
-class SpoofMode(ProxyMode):
- http_form_in = "relative"
- http_form_out = "relative"
-
- def get_upstream_server(self, client_conn):
- return None
-
- @property
- def name(self):
- return "spoof"
-
-
-class SSLSpoofMode(ProxyMode):
- http_form_in = "relative"
- http_form_out = "relative"
-
- def __init__(self, sslport):
- self.sslport = sslport
-
- def get_upstream_server(self, client_conn):
- return None
-
- @property
- def name(self):
- return "sslspoof"
-
-
-class TransparentProxyMode(ProxyMode):
- http_form_in = "relative"
- http_form_out = "relative"
-
- def __init__(self, resolver, sslports):
- self.resolver = resolver
- self.sslports = sslports
-
- def get_upstream_server(self, client_conn):
- try:
- dst = self.resolver.original_addr(client_conn.connection)
- except Exception as e:
- raise ProxyError(502, "Transparent mode failure: %s" % str(e))
-
- if dst[1] in self.sslports:
- ssl = True
- else:
- ssl = False
- return [ssl, ssl] + list(dst)
-
-
-class Socks5ProxyMode(ProxyMode):
- http_form_in = "relative"
- http_form_out = "relative"
-
- def __init__(self, sslports):
- self.sslports = sslports
-
- def get_upstream_server(self, client_conn):
- try:
- # Parse Client Greeting
- client_greet = socks.ClientGreeting.from_file(client_conn.rfile, fail_early=True)
- client_greet.assert_socks5()
- if socks.METHOD.NO_AUTHENTICATION_REQUIRED not in client_greet.methods:
- raise socks.SocksError(
- socks.METHOD.NO_ACCEPTABLE_METHODS,
- "mitmproxy only supports SOCKS without authentication"
- )
-
- # Send Server Greeting
- server_greet = socks.ServerGreeting(
- socks.VERSION.SOCKS5,
- socks.METHOD.NO_AUTHENTICATION_REQUIRED
- )
- server_greet.to_file(client_conn.wfile)
- client_conn.wfile.flush()
-
- # Parse Connect Request
- connect_request = socks.Message.from_file(client_conn.rfile)
- connect_request.assert_socks5()
- if connect_request.msg != socks.CMD.CONNECT:
- raise socks.SocksError(
- socks.REP.COMMAND_NOT_SUPPORTED,
- "mitmproxy only supports SOCKS5 CONNECT."
- )
-
- # We do not connect here yet, as the clientconnect event has not
- # been handled yet.
-
- connect_reply = socks.Message(
- socks.VERSION.SOCKS5,
- socks.REP.SUCCEEDED,
- connect_request.atyp,
- # dummy value, we don't have an upstream connection yet.
- connect_request.addr
- )
- connect_reply.to_file(client_conn.wfile)
- client_conn.wfile.flush()
-
- ssl = bool(connect_request.addr.port in self.sslports)
- return ssl, ssl, connect_request.addr.host, connect_request.addr.port
-
- except (socks.SocksError, tcp.NetLibError) as e:
- raise ProxyError(502, "SOCKS5 mode failure: %s" % str(e))
-
-
-class _ConstDestinationProxyMode(ProxyMode):
- def __init__(self, dst):
- self.dst = dst
-
- def get_upstream_server(self, client_conn):
- return self.dst
-
-
-class ReverseProxyMode(_ConstDestinationProxyMode):
- http_form_in = "relative"
- http_form_out = "relative"
-
-
-class UpstreamProxyMode(_ConstDestinationProxyMode):
- http_form_in = "absolute"
- http_form_out = "absolute"
-
-
-class Log:
- def __init__(self, msg, level="info"):
- self.msg = msg
- self.level = level
diff --git a/libmproxy/proxy/root_context.py b/libmproxy/proxy/root_context.py
new file mode 100644
index 00000000..35909612
--- /dev/null
+++ b/libmproxy/proxy/root_context.py
@@ -0,0 +1,93 @@
+from __future__ import (absolute_import, print_function, division)
+
+from netlib.http.http1 import HTTP1Protocol
+from netlib.http.http2 import HTTP2Protocol
+
+from ..protocol import (
+ RawTCPLayer, TlsLayer, Http1Layer, Http2Layer, is_tls_record_magic, ServerConnectionMixin
+)
+from .modes import HttpProxy, HttpUpstreamProxy, ReverseProxy
+
+
+class RootContext(object):
+ """
+ The outermost context provided to the root layer.
+ As a consequence, every layer has .client_conn, .channel, .next_layer() and .config.
+ """
+
+ def __init__(self, client_conn, config, channel):
+ self.client_conn = client_conn # Client Connection
+ self.channel = channel # provides .ask() method to communicate with FlowMaster
+ self.config = config # Proxy Configuration
+
+ def next_layer(self, top_layer):
+ """
+ This function determines the next layer in the protocol stack.
+
+ Arguments:
+ top_layer: the current top layer.
+
+ Returns:
+ The next layer
+ """
+
+ # 1. Check for --ignore.
+ if self.config.check_ignore(top_layer.server_conn.address):
+ return RawTCPLayer(top_layer, logging=False)
+
+ d = top_layer.client_conn.rfile.peek(3)
+ client_tls = is_tls_record_magic(d)
+
+ # 2. Always insert a TLS layer, even if there's neither client nor server tls.
+ # An inline script may upgrade from http to https,
+ # in which case we need some form of TLS layer.
+ if isinstance(top_layer, ReverseProxy):
+ return TlsLayer(top_layer, client_tls, top_layer.server_tls)
+ if isinstance(top_layer, ServerConnectionMixin):
+ return TlsLayer(top_layer, client_tls, client_tls)
+
+ # 3. In Http Proxy mode and Upstream Proxy mode, the next layer is fixed.
+ if isinstance(top_layer, TlsLayer):
+ if isinstance(top_layer.ctx, HttpProxy):
+ return Http1Layer(top_layer, "regular")
+ if isinstance(top_layer.ctx, HttpUpstreamProxy):
+ return Http1Layer(top_layer, "upstream")
+
+ # 4. Check for other TLS cases (e.g. after CONNECT).
+ if client_tls:
+ return TlsLayer(top_layer, True, True)
+
+ # 5. Check for --tcp
+ if self.config.check_tcp(top_layer.server_conn.address):
+ return RawTCPLayer(top_layer)
+
+ # 6. Check for TLS ALPN (HTTP1/HTTP2)
+ if isinstance(top_layer, TlsLayer):
+ alpn = top_layer.client_conn.get_alpn_proto_negotiated()
+ if alpn == HTTP2Protocol.ALPN_PROTO_H2:
+ return Http2Layer(top_layer, 'transparent')
+ if alpn == HTTP1Protocol.ALPN_PROTO_HTTP1:
+ return Http1Layer(top_layer, 'transparent')
+
+ # 7. Assume HTTP1 by default
+ return Http1Layer(top_layer, 'transparent')
+
+ # In a future version, we want to implement TCP passthrough as the last fallback,
+ # but we don't have the UI part ready for that.
+ #
+ # d = top_layer.client_conn.rfile.peek(3)
+ # is_ascii = (
+ # len(d) == 3 and
+ # # better be safe here and don't expect uppercase...
+ # all(x in string.ascii_letters for x in d)
+ # )
+ # # TODO: This could block if there are not enough bytes available?
+ # d = top_layer.client_conn.rfile.peek(len(HTTP2Protocol.CLIENT_CONNECTION_PREFACE))
+ # is_http2_magic = (d == HTTP2Protocol.CLIENT_CONNECTION_PREFACE)
+
+ @property
+ def layers(self):
+ return []
+
+ def __repr__(self):
+ return "RootContext"
diff --git a/libmproxy/proxy/server.py b/libmproxy/proxy/server.py
index 2f6ee061..e9e8df09 100644
--- a/libmproxy/proxy/server.py
+++ b/libmproxy/proxy/server.py
@@ -1,13 +1,17 @@
-from __future__ import absolute_import
+from __future__ import (absolute_import, print_function, division)
+import traceback
+import sys
import socket
-from OpenSSL import SSL
from netlib import tcp
-from .primitives import ProxyServerError, Log, ProxyError
-from .connection import ClientConnection, ServerConnection
-from ..protocol.handle import protocol_handler
-from .. import version
+from netlib.http.http1 import HTTP1Protocol
+from netlib.tcp import NetLibError
+from ..exceptions import ProtocolException, ServerException
+from ..protocol import Log, Kill
+from ..models import ClientConnection, make_error_response
+from .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy
+from .root_context import RootContext
class DummyServer:
@@ -33,9 +37,9 @@ class ProxyServer(tcp.TCPServer):
"""
self.config = config
try:
- tcp.TCPServer.__init__(self, (config.host, config.port))
- except socket.error as v:
- raise ProxyServerError('Error starting proxy server: ' + repr(v))
+ super(ProxyServer, self).__init__((config.host, config.port))
+ except socket.error as e:
+ raise ServerException('Error starting proxy server: ' + repr(e), e)
self.channel = None
def start_slave(self, klass, channel):
@@ -47,340 +51,90 @@ class ProxyServer(tcp.TCPServer):
def handle_client_connection(self, conn, client_address):
h = ConnectionHandler(
- self.config,
conn,
client_address,
- self,
- self.channel)
+ self.config,
+ self.channel
+ )
h.handle()
- h.finish()
-class ConnectionHandler:
- def __init__(
- self,
- config,
- client_connection,
- client_address,
- server,
- channel):
+class ConnectionHandler(object):
+ def __init__(self, client_conn, client_address, config, channel):
self.config = config
"""@type: libmproxy.proxy.config.ProxyConfig"""
self.client_conn = ClientConnection(
- client_connection,
+ client_conn,
client_address,
- server)
+ None)
"""@type: libmproxy.proxy.connection.ClientConnection"""
- self.server_conn = None
- """@type: libmproxy.proxy.connection.ServerConnection"""
self.channel = channel
+ """@type: libmproxy.controller.Channel"""
- self.conntype = "http"
+ def _create_root_layer(self):
+ root_context = RootContext(
+ self.client_conn,
+ self.config,
+ self.channel
+ )
+
+ mode = self.config.mode
+ if mode == "upstream":
+ return HttpUpstreamProxy(
+ root_context,
+ self.config.upstream_server.address
+ )
+ elif mode == "transparent":
+ return TransparentProxy(root_context)
+ elif mode == "reverse":
+ server_tls = self.config.upstream_server.scheme == "https"
+ return ReverseProxy(
+ root_context,
+ self.config.upstream_server.address,
+ server_tls
+ )
+ elif mode == "socks5":
+ return Socks5Proxy(root_context)
+ elif mode == "regular":
+ return HttpProxy(root_context)
+ elif callable(mode): # pragma: nocover
+ return mode(root_context)
+ else: # pragma: nocover
+ raise ValueError("Unknown proxy mode: %s" % mode)
def handle(self):
- try:
- self.log("clientconnect", "info")
-
- # Can we already identify the target server and connect to it?
- client_ssl, server_ssl = False, False
- conn_kwargs = dict()
- upstream_info = self.config.mode.get_upstream_server(
- self.client_conn)
- if upstream_info:
- self.set_server_address(upstream_info[2:])
- client_ssl, server_ssl = upstream_info[:2]
- if self.config.check_ignore(self.server_conn.address):
- self.log(
- "Ignore host: %s:%s" %
- self.server_conn.address(),
- "info")
- self.conntype = "tcp"
- conn_kwargs["log"] = False
- client_ssl, server_ssl = False, False
- else:
- # No upstream info from the metadata: upstream info in the
- # protocol (e.g. HTTP absolute-form)
- pass
-
- self.channel.ask("clientconnect", self)
-
- # Check for existing connection: If an inline script already established a
- # connection, do not apply client_ssl or server_ssl.
- if self.server_conn and not self.server_conn.connection:
- self.establish_server_connection()
- if client_ssl or server_ssl:
- self.establish_ssl(client=client_ssl, server=server_ssl)
-
- if self.config.check_tcp(self.server_conn.address):
- self.log(
- "Generic TCP mode for host: %s:%s" %
- self.server_conn.address(),
- "info")
- self.conntype = "tcp"
-
- elif not self.server_conn and self.config.mode == "sslspoof":
- port = self.config.mode.sslport
- self.set_server_address(("-", port))
- self.establish_ssl(client=True)
- host = self.client_conn.connection.get_servername()
- if host:
- self.set_server_address((host, port))
- self.establish_server_connection()
- self.establish_ssl(server=True, sni=host)
-
- # Delegate handling to the protocol handler
- protocol_handler(
- self.conntype)(
- self,
- **conn_kwargs).handle_messages()
-
- self.log("clientdisconnect", "info")
- self.channel.tell("clientdisconnect", self)
-
- except ProxyError as e:
- protocol_handler(self.conntype)(self, **conn_kwargs).handle_error(e)
- except Exception:
- import traceback
- import sys
-
- self.log(traceback.format_exc(), "error")
- print >> sys.stderr, traceback.format_exc()
- print >> sys.stderr, "mitmproxy has crashed!"
- print >> sys.stderr, "Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy"
- finally:
- # Make sure that we close the server connection in any case.
- # The client connection is closed by the ProxyServer and does not
- # have be handled here.
- self.del_server_connection()
-
- def del_server_connection(self):
- """
- Deletes (and closes) an existing server connection.
- """
- if self.server_conn and self.server_conn.connection:
- self.server_conn.finish()
- self.server_conn.close()
- self.log(
- "serverdisconnect", "debug", [
- "%s:%s" %
- (self.server_conn.address.host, self.server_conn.address.port)])
- self.channel.tell("serverdisconnect", self)
- self.server_conn = None
-
- def set_server_address(self, addr):
- """
- Sets a new server address with the given priority.
- Does not re-establish either connection or SSL handshake.
- """
- address = tcp.Address.wrap(addr)
-
- # Don't reconnect to the same destination.
- if self.server_conn and self.server_conn.address == address:
- return
+ self.log("clientconnect", "info")
- if self.server_conn:
- self.del_server_connection()
+ root_layer = self._create_root_layer()
+ root_layer = self.channel.ask("clientconnect", root_layer)
+ if root_layer == Kill:
+ def root_layer():
+ raise Kill()
- self.log(
- "Set new server address: %s:%s" %
- (address.host, address.port), "debug")
- self.server_conn = ServerConnection(address)
-
- def establish_server_connection(self, ask=True):
- """
- Establishes a new server connection.
- If there is already an existing server connection, the function returns immediately.
-
- By default, this function ".ask"s the proxy master. This is deadly if this function is already called from the
- master (e.g. via change_server), because this runs us straight into a deadlock (the master is single-threaded).
- In these scenarios, ask=False can be passed to suppress the call to the master.
- """
- if self.server_conn.connection:
- return
- self.log(
- "serverconnect", "debug", [
- "%s:%s" %
- self.server_conn.address()[
- :2]])
- if ask:
- self.channel.ask("serverconnect", self)
try:
- self.server_conn.connect()
- except tcp.NetLibError as v:
- raise ProxyError(502, v)
-
- def establish_ssl(self, client=False, server=False, sni=None):
- """
- Establishes SSL on the existing connection(s) to the server or the client,
- as specified by the parameters.
- """
-
- # Logging
- if client or server:
- subs = []
- if client:
- subs.append("with client")
- if server:
- subs.append("with server (sni: %s)" % sni)
- self.log("Establish SSL", "debug", subs)
-
- if server:
- if not self.server_conn or not self.server_conn.connection:
- raise ProxyError(502, "No server connection.")
- if self.server_conn.ssl_established:
- raise ProxyError(502, "SSL to Server already established.")
+ root_layer()
+ except Kill:
+ self.log("Connection killed", "info")
+ except ProtocolException as e:
+ self.log(e, "info")
+ # If an error propagates to the topmost level,
+ # we send an HTTP error response, which is
+ # understandable to both HTTP clients and humans.
try:
- self.server_conn.establish_ssl(
- self.config.clientcerts,
- sni,
- method=self.config.openssl_method_server,
- options=self.config.openssl_options_server,
- verify_options=self.config.openssl_verification_mode_server,
- ca_path=self.config.openssl_trusted_cadir_server,
- ca_pemfile=self.config.openssl_trusted_ca_server,
- cipher_list=self.config.ciphers_server,
- )
- ssl_cert_err = self.server_conn.ssl_verification_error
- if ssl_cert_err is not None:
- self.log(
- "SSL verification failed for upstream server at depth %s with error: %s" %
- (ssl_cert_err['depth'], ssl_cert_err['errno']),
- "error")
- self.log("Ignoring server verification error, continuing with connection", "error")
- except tcp.NetLibError as v:
- e = ProxyError(502, repr(v))
- # Workaround for https://github.com/mitmproxy/mitmproxy/issues/427
- # The upstream server may reject connections without SNI, which means we need to
- # establish SSL with the client first, hope for a SNI (which triggers a reconnect which replaces the
- # ServerConnection object) and see whether that worked.
- if client and "handshake failure" in e.message:
- self.server_conn.may_require_sni = e
- else:
- ssl_cert_err = self.server_conn.ssl_verification_error
- if ssl_cert_err is not None:
- self.log(
- "SSL verification failed for upstream server at depth %s with error: %s" %
- (ssl_cert_err['depth'], ssl_cert_err['errno']),
- "error")
- self.log("Aborting connection attempt", "error")
- raise e
- if client:
- if self.client_conn.ssl_established:
- raise ProxyError(502, "SSL to Client already established.")
- cert, key, chain_file = self.find_cert()
- try:
- self.client_conn.convert_to_ssl(
- cert, key,
- method=self.config.openssl_method_client,
- options=self.config.openssl_options_client,
- handle_sni=self.handle_sni,
- cipher_list=self.config.ciphers_client,
- dhparams=self.config.certstore.dhparams,
- chain_file=chain_file
- )
- except tcp.NetLibError as v:
- raise ProxyError(400, repr(v))
-
- # Workaround for #427 part 2
- if server and hasattr(self.server_conn, "may_require_sni"):
- raise self.server_conn.may_require_sni
-
- def server_reconnect(self, new_sni=False):
- address = self.server_conn.address
- had_ssl = self.server_conn.ssl_established
- state = self.server_conn.state
- sni = new_sni or self.server_conn.sni
- self.log("(server reconnect follows)", "debug")
- self.del_server_connection()
- self.set_server_address(address)
- self.establish_server_connection()
-
- for s in state:
- protocol_handler(s[0])(self).handle_server_reconnect(s[1])
- self.server_conn.state = state
-
- # Receiving new_sni where had_ssl is False is a weird case that happens when the workaround for
- # https://github.com/mitmproxy/mitmproxy/issues/427 is active. In this
- # case, we want to establish SSL as well.
- if had_ssl or new_sni:
- self.establish_ssl(server=True, sni=sni)
+ error_response = make_error_response(502, repr(e))
+ self.client_conn.send(HTTP1Protocol().assemble(error_response))
+ except NetLibError:
+ pass
+ except Exception:
+ self.log(traceback.format_exc(), "error")
+ print(traceback.format_exc(), file=sys.stderr)
+ print("mitmproxy has crashed!", file=sys.stderr)
+ print("Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy", file=sys.stderr)
- def finish(self):
+ self.log("clientdisconnect", "info")
+ self.channel.tell("clientdisconnect", root_layer)
self.client_conn.finish()
- def log(self, msg, level, subs=()):
- full_msg = [
- "%s:%s: %s" %
- (self.client_conn.address.host,
- self.client_conn.address.port,
- msg)]
- for i in subs:
- full_msg.append(" -> " + i)
- full_msg = "\n".join(full_msg)
- self.channel.tell("log", Log(full_msg, level))
-
- def find_cert(self):
- host = self.server_conn.address.host
- sans = []
- if self.server_conn.ssl_established and (
- not self.config.no_upstream_cert):
- upstream_cert = self.server_conn.cert
- sans.extend(upstream_cert.altnames)
- if upstream_cert.cn:
- sans.append(host)
- host = upstream_cert.cn.decode("utf8").encode("idna")
- if self.server_conn.sni:
- sans.append(self.server_conn.sni)
- # for ssl spoof mode
- if hasattr(self.client_conn, "sni"):
- sans.append(self.client_conn.sni)
-
- ret = self.config.certstore.get_cert(host, sans)
- if not ret:
- raise ProxyError(502, "Unable to generate dummy cert.")
- return ret
-
- def handle_sni(self, connection):
- """
- This callback gets called during the SSL handshake with the client.
- The client has just sent the Server Name Indication (SNI). We now connect upstream to
- figure out which certificate needs to be served.
- """
- try:
- sn = connection.get_servername()
- if not sn:
- return
- sni = sn.decode("utf8").encode("idna")
- # for ssl spoof mode
- self.client_conn.sni = sni
-
- if sni != self.server_conn.sni:
- self.log("SNI received: %s" % sni, "debug")
- # We should only re-establish upstream SSL if one of the following conditions is true:
- # - We established SSL with the server previously
- # - We initially wanted to establish SSL with the server,
- # but the server refused to negotiate without SNI.
- if self.server_conn.ssl_established or hasattr(
- self.server_conn,
- "may_require_sni"):
- # reconnect to upstream server with SNI
- self.server_reconnect(sni)
- # Now, change client context to reflect changed certificate:
- cert, key, chain_file = self.find_cert()
- new_context = self.client_conn.create_ssl_context(
- cert, key,
- method=self.config.openssl_method_client,
- options=self.config.openssl_options_client,
- cipher_list=self.config.ciphers_client,
- dhparams=self.config.certstore.dhparams,
- chain_file=chain_file
- )
- connection.set_context(new_context)
- # An unhandled exception in this method will core dump PyOpenSSL, so
- # make dang sure it doesn't happen.
- except: # pragma: no cover
- import traceback
- self.log(
- "Error in handle_sni:\r\n" +
- traceback.format_exc(),
- "error")
+ def log(self, msg, level):
+ msg = "{}: {}".format(repr(self.client_conn.address), msg)
+ self.channel.tell("log", Log(msg, level)) \ No newline at end of file
diff --git a/libmproxy/utils.py b/libmproxy/utils.py
index 3ac3cc01..a6ca55f7 100644
--- a/libmproxy/utils.py
+++ b/libmproxy/utils.py
@@ -1,14 +1,10 @@
from __future__ import absolute_import
import os
import datetime
-import urllib
import re
import time
-import functools
-import cgi
import json
-import netlib.utils
def timestamp():
"""
diff --git a/setup.py b/setup.py
index 847a17f9..e28033ad 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,9 @@ deps = {
"pyperclip>=1.5.8",
"blinker>=1.3",
"pyparsing>=1.5.2",
- "html2text>=2015.4.14"
+ "html2text>=2015.4.14",
+ "construct>=2.5.2",
+ "six>=1.9.0",
}
# A script -> additional dependencies dict.
scripts = {
diff --git a/test/scripts/stream_modify.py b/test/scripts/stream_modify.py
index e5c323be..e26d83f1 100644
--- a/test/scripts/stream_modify.py
+++ b/test/scripts/stream_modify.py
@@ -1,6 +1,6 @@
def modify(chunks):
- for prefix, content, suffix in chunks:
- yield prefix, content.replace("foo", "bar"), suffix
+ for chunk in chunks:
+ yield chunk.replace("foo", "bar")
def responseheaders(context, flow):
diff --git a/test/test_cmdline.py b/test/test_cmdline.py
index eafcbde4..bb54d011 100644
--- a/test/test_cmdline.py
+++ b/test/test_cmdline.py
@@ -38,15 +38,11 @@ def test_parse_replace_hook():
def test_parse_server_spec():
tutils.raises("Invalid server specification", cmdline.parse_server_spec, "")
assert cmdline.parse_server_spec(
- "http://foo.com:88") == [False, False, "foo.com", 88]
+ "http://foo.com:88") == ("http", ("foo.com", 88))
assert cmdline.parse_server_spec(
- "http://foo.com") == [False, False, "foo.com", 80]
+ "http://foo.com") == ("http", ("foo.com", 80))
assert cmdline.parse_server_spec(
- "https://foo.com") == [True, True, "foo.com", 443]
- assert cmdline.parse_server_spec_special(
- "https2http://foo.com") == [True, False, "foo.com", 80]
- assert cmdline.parse_server_spec_special(
- "http2https://foo.com") == [False, True, "foo.com", 443]
+ "https://foo.com") == ("https", ("foo.com", 443))
tutils.raises(
"Invalid server specification",
cmdline.parse_server_spec,
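
As the updated assertions show, parse_server_spec now yields a (scheme, (host, port)) pair instead of a four-element list, and the https2http/http2https variants are gone. A minimal sketch of a parser with that shape, not the actual cmdline implementation:

    from urlparse import urlparse   # Python 2, matching the rest of the code base

    DEFAULT_PORTS = {"http": 80, "https": 443}

    def parse_server_spec(spec):
        # Sketch only: produce ("scheme", ("host", port)) like the new helper.
        parsed = urlparse(spec)
        if parsed.scheme not in DEFAULT_PORTS or not parsed.hostname:
            raise ValueError("Invalid server specification: %s" % spec)
        port = parsed.port or DEFAULT_PORTS[parsed.scheme]
        return parsed.scheme, (parsed.hostname, port)
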
diff --git a/test/test_dump.py b/test/test_dump.py
index b3d724a5..a0ad6cb4 100644
--- a/test/test_dump.py
+++ b/test/test_dump.py
@@ -1,18 +1,18 @@
import os
from cStringIO import StringIO
+from libmproxy.models import HTTPResponse
import netlib.tutils
from netlib.http.semantics import CONTENT_MISSING
from libmproxy import dump, flow
-from libmproxy.protocol import http, http_wrappers
-from libmproxy.proxy.primitives import Log
+from libmproxy.protocol import Log
import tutils
import mock
def test_strfuncs():
- t = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ t = HTTPResponse.wrap(netlib.tutils.tresp())
t.is_replay = True
dump.str_response(t)
@@ -34,7 +34,7 @@ class TestDumpMaster:
m.handle_clientconnect(f.client_conn)
m.handle_serverconnect(f.server_conn)
m.handle_request(f)
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp(content))
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp(content))
f = m.handle_response(f)
m.handle_clientdisconnect(f.client_conn)
return f
@@ -71,7 +71,7 @@ class TestDumpMaster:
f = tutils.tflow()
f.request.content = CONTENT_MISSING
m.handle_request(f)
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp())
f.response.content = CONTENT_MISSING
m.handle_response(f)
assert "content missing" in cs.getvalue()
diff --git a/test/test_filt.py b/test/test_filt.py
index bcdf6e4c..aeec2485 100644
--- a/test/test_filt.py
+++ b/test/test_filt.py
@@ -2,7 +2,7 @@ import cStringIO
from netlib import odict
from libmproxy import filt, flow
from libmproxy.protocol import http
-from libmproxy.protocol.primitives import Error
+from libmproxy.models import Error
import tutils
diff --git a/test/test_flow.py b/test/test_flow.py
index 711688da..9cce26b3 100644
--- a/test/test_flow.py
+++ b/test/test_flow.py
@@ -3,20 +3,18 @@ import time
import os.path
from cStringIO import StringIO
import email.utils
+
import mock
import netlib.utils
from netlib import odict
-from netlib.http.semantics import CONTENT_MISSING, HDR_FORM_URLENCODED, HDR_FORM_MULTIPART
-
-from libmproxy import filt, protocol, controller, utils, tnetstring, flow
-from libmproxy.protocol import http_wrappers
-from libmproxy.protocol.primitives import Error, Flow
-from libmproxy.protocol.http import decoded
+from netlib.http.semantics import CONTENT_MISSING, HDR_FORM_URLENCODED
+from libmproxy import filt, protocol, controller, tnetstring, flow
+from libmproxy.models import Error, Flow, HTTPRequest, HTTPResponse, HTTPFlow, decoded
from libmproxy.proxy.config import HostMatcher
from libmproxy.proxy import ProxyConfig
from libmproxy.proxy.server import DummyServer
-from libmproxy.proxy.connection import ClientConnection
+from libmproxy.models.connections import ClientConnection
import tutils
@@ -24,7 +22,7 @@ def test_app_registry():
ar = flow.AppRegistry()
ar.add("foo", "domain", 80)
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.host = "domain"
r.port = 80
assert ar.get(r)
@@ -32,7 +30,7 @@ def test_app_registry():
r.port = 81
assert not ar.get(r)
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.host = "domain2"
r.port = 80
assert not ar.get(r)
@@ -385,7 +383,7 @@ class TestFlow:
def test_backup(self):
f = tutils.tflow()
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp())
f.request.content = "foo"
assert not f.modified()
f.backup()
@@ -404,13 +402,13 @@ class TestFlow:
def test_getset_state(self):
f = tutils.tflow(resp=True)
state = f.get_state()
- assert f.get_state() == protocol.http.HTTPFlow.from_state(
+ assert f.get_state() == HTTPFlow.from_state(
state).get_state()
f.response = None
f.error = Error("error")
state = f.get_state()
- assert f.get_state() == protocol.http.HTTPFlow.from_state(
+ assert f.get_state() == HTTPFlow.from_state(
state).get_state()
f2 = f.copy()
@@ -518,16 +516,16 @@ class TestState:
assert c.add_flow(newf)
assert c.active_flow_count() == 2
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp())
assert c.update_flow(f)
assert c.flow_count() == 2
assert c.active_flow_count() == 1
- _ = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ _ = HTTPResponse.wrap(netlib.tutils.tresp())
assert not c.update_flow(None)
assert c.active_flow_count() == 1
- newf.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ newf.response = HTTPResponse.wrap(netlib.tutils.tresp())
assert c.update_flow(newf)
assert c.active_flow_count() == 0
@@ -559,7 +557,7 @@ class TestState:
c.set_limit("~s")
assert c.limit_txt == "~s"
assert len(c.view) == 0
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp())
c.update_flow(f)
assert len(c.view) == 1
c.set_limit(None)
@@ -591,7 +589,7 @@ class TestState:
def _add_response(self, state):
f = tutils.tflow()
state.add_flow(f)
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp())
state.update_flow(f)
def _add_error(self, state):
@@ -672,11 +670,8 @@ class TestSerialize:
s = flow.State()
conf = ProxyConfig(
mode="reverse",
- upstream_server=[
- True,
- True,
- "use-this-domain",
- 80])
+ upstream_server=("https", ("use-this-domain", 80))
+ )
fm = flow.FlowMaster(DummyServer(conf), s)
fm.load_flows(r)
assert s.flows[0].request.host == "use-this-domain"
@@ -809,11 +804,11 @@ class TestFlowMaster:
fm.anticomp = True
f = tutils.tflow(req=None)
fm.handle_clientconnect(f.client_conn)
- f.request = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ f.request = HTTPRequest.wrap(netlib.tutils.treq())
fm.handle_request(f)
assert s.flow_count() == 1
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp())
fm.handle_response(f)
assert not fm.handle_response(None)
assert s.flow_count() == 1
@@ -858,7 +853,7 @@ class TestFlowMaster:
s = flow.State()
f = tutils.tflow()
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp(f.request))
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp(f.request))
pb = [f]
fm = flow.FlowMaster(None, s)
@@ -912,7 +907,7 @@ class TestFlowMaster:
def test_server_playback_kill(self):
s = flow.State()
f = tutils.tflow()
- f.response = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp(f.request))
+ f.response = HTTPResponse.wrap(netlib.tutils.tresp(f.request))
pb = [f]
fm = flow.FlowMaster(None, s)
fm.refresh_server_playback = True
@@ -1011,7 +1006,7 @@ class TestRequest:
assert r.get_state() == r2.get_state()
def test_get_url(self):
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
assert r.url == "http://address:22/path"
@@ -1032,7 +1027,7 @@ class TestRequest:
assert r.pretty_url(True) == "https://foo.com:22/path"
def test_path_components(self):
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.path = "/"
assert r.get_path_components() == []
r.path = "/foo/bar"
@@ -1052,7 +1047,7 @@ class TestRequest:
def test_getset_form_urlencoded(self):
d = odict.ODict([("one", "two"), ("three", "four")])
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq(content=netlib.utils.urlencode(d.lst)))
+ r = HTTPRequest.wrap(netlib.tutils.treq(content=netlib.utils.urlencode(d.lst)))
r.headers["content-type"] = [HDR_FORM_URLENCODED]
assert r.get_form_urlencoded() == d
@@ -1066,7 +1061,7 @@ class TestRequest:
def test_getset_query(self):
h = odict.ODictCaseless()
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.path = "/foo?x=y&a=b"
q = r.get_query()
assert q.lst == [("x", "y"), ("a", "b")]
@@ -1089,7 +1084,7 @@ class TestRequest:
def test_anticache(self):
h = odict.ODictCaseless()
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.headers = h
h["if-modified-since"] = ["test"]
h["if-none-match"] = ["test"]
@@ -1098,7 +1093,7 @@ class TestRequest:
assert not "if-none-match" in r.headers
def test_replace(self):
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.path = "path/foo"
r.headers["Foo"] = ["fOo"]
r.content = "afoob"
@@ -1108,31 +1103,31 @@ class TestRequest:
assert r.headers["boo"] == ["boo"]
def test_constrain_encoding(self):
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.headers["accept-encoding"] = ["gzip", "oink"]
r.constrain_encoding()
assert "oink" not in r.headers["accept-encoding"]
def test_decodeencode(self):
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.decode()
assert not r.headers["content-encoding"]
assert r.content == "falafel"
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.content = "falafel"
assert not r.decode()
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("identity")
assert r.headers["content-encoding"] == ["identity"]
assert r.content == "falafel"
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("gzip")
@@ -1143,7 +1138,7 @@ class TestRequest:
assert r.content == "falafel"
def test_get_decoded_content(self):
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
r.content = None
r.headers["content-encoding"] = ["identity"]
assert r.get_decoded_content() == None
@@ -1155,7 +1150,7 @@ class TestRequest:
def test_get_content_type(self):
h = odict.ODictCaseless()
h["Content-Type"] = ["text/plain"]
- resp = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.headers = h
assert resp.headers.get_first("content-type") == "text/plain"
@@ -1168,7 +1163,7 @@ class TestResponse:
assert resp2.get_state() == resp.get_state()
def test_refresh(self):
- r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ r = HTTPResponse.wrap(netlib.tutils.tresp())
n = time.time()
r.headers["date"] = [email.utils.formatdate(n)]
pre = r.headers["date"]
@@ -1186,7 +1181,7 @@ class TestResponse:
r.refresh()
def test_refresh_cookie(self):
- r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ r = HTTPResponse.wrap(netlib.tutils.tresp())
# Invalid expires format, sent to us by Reddit.
c = "rfoo=bar; Domain=reddit.com; expires=Thu, 31 Dec 2037 23:59:59 GMT; Path=/"
@@ -1196,7 +1191,7 @@ class TestResponse:
assert "00:21:38" in r._refresh_cookie(c, 60)
def test_replace(self):
- r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ r = HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["Foo"] = ["fOo"]
r.content = "afoob"
assert r.replace("foo(?i)", "boo") == 3
@@ -1204,21 +1199,21 @@ class TestResponse:
assert r.headers["boo"] == ["boo"]
def test_decodeencode(self):
- r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ r = HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
assert r.decode()
assert not r.headers["content-encoding"]
assert r.content == "falafel"
- r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ r = HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("identity")
assert r.headers["content-encoding"] == ["identity"]
assert r.content == "falafel"
- r = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ r = HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["content-encoding"] = ["identity"]
r.content = "falafel"
r.encode("gzip")
@@ -1235,7 +1230,7 @@ class TestResponse:
def test_get_content_type(self):
h = odict.ODictCaseless()
h["Content-Type"] = ["text/plain"]
- resp = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.headers = h
assert resp.headers.get_first("content-type") == "text/plain"
@@ -1279,7 +1274,7 @@ class TestClientConnection:
def test_decoded():
- r = http_wrappers.HTTPRequest.wrap(netlib.tutils.treq())
+ r = HTTPRequest.wrap(netlib.tutils.treq())
assert r.content == "content"
assert not r.headers["content-encoding"]
r.encode("gzip")
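
The decoded helper exercised by test_decoded above is now exported from libmproxy.models. Its usual idiom, shown here as a small usage sketch, is to decode the body for the duration of a with-block and transparently re-encode it on exit:

    import netlib.tutils
    from libmproxy.models import HTTPRequest, decoded

    r = HTTPRequest.wrap(netlib.tutils.treq())
    r.encode("gzip")                          # body is now gzip-compressed
    with decoded(r):                          # decoded while inside the block
        r.content = r.content.replace("content", "body")
    assert r.headers["content-encoding"]      # re-encoded as gzip on exit
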
diff --git a/test/test_protocol_http.py b/test/test_protocol_http.py
index 2da54093..cd0f77fa 100644
--- a/test/test_protocol_http.py
+++ b/test/test_protocol_http.py
@@ -4,6 +4,7 @@ from cStringIO import StringIO
from mock import MagicMock
from libmproxy.protocol.http import *
+import netlib.http
from netlib import odict
from netlib.http import http1
from netlib.http.semantics import CONTENT_MISSING
@@ -56,7 +57,7 @@ class TestInvalidRequests(tservers.HTTPProxTest):
p = self.pathoc()
r = p.request("connect:'%s:%s'" % ("127.0.0.1", self.server2.port))
assert r.status_code == 400
- assert "Must not CONNECT on already encrypted connection" in r.body
+ assert "Invalid HTTP request form" in r.body
def test_relative_request(self):
p = self.pathoc_raw()
diff --git a/test/test_proxy.py b/test/test_proxy.py
index 6ab19e02..3707fabe 100644
--- a/test/test_proxy.py
+++ b/test/test_proxy.py
@@ -1,20 +1,14 @@
-import argparse
+import mock
+from OpenSSL import SSL
+
from libmproxy import cmdline
-from libmproxy.proxy import ProxyConfig, process_proxy_options
-from libmproxy.proxy.connection import ServerConnection
-from libmproxy.proxy.primitives import ProxyError
+from libmproxy.proxy import ProxyConfig
+from libmproxy.proxy.config import process_proxy_options
+from libmproxy.models.connections import ServerConnection
from libmproxy.proxy.server import DummyServer, ProxyServer, ConnectionHandler
import tutils
from libpathod import test
from netlib import http, tcp
-import mock
-
-from OpenSSL import SSL
-
-
-def test_proxy_error():
- p = ProxyError(111, "msg")
- assert str(p)
class TestServerConnection:
@@ -97,13 +91,10 @@ class TestProcessProxyOptions:
self.assert_err("expected one argument", "-U")
self.assert_err("Invalid server specification", "-U", "upstream")
- self.assert_noerr("--spoof")
- self.assert_noerr("--ssl-spoof")
-
- self.assert_noerr("--spoofed-port", "443")
- self.assert_err("expected one argument", "--spoofed-port")
+ self.assert_err("not allowed with", "-R", "http://localhost", "-T")
- self.assert_err("mutually exclusive", "-R", "http://localhost", "-T")
+ def test_socks_auth(self):
+ self.assert_err("Proxy Authentication not supported in SOCKS mode.", "--socks", "--nonanonymous")
def test_client_certs(self):
with tutils.tmpdir() as cadir:
@@ -181,13 +172,19 @@ class TestDummyServer:
class TestConnectionHandler:
def test_fatal_error(self):
config = mock.Mock()
- config.mode.get_upstream_server.side_effect = RuntimeError
+ root_layer = mock.Mock()
+ root_layer.side_effect = RuntimeError
+ config.mode.return_value = root_layer
+ channel = mock.Mock()
+
+ def ask(_, x):
+ return x
+ channel.ask = ask
c = ConnectionHandler(
- config,
mock.MagicMock(),
- ("127.0.0.1",
- 8080),
- None,
- mock.MagicMock())
+ ("127.0.0.1", 8080),
+ config,
+ channel
+ )
with tutils.capture_stderr(c.handle) as output:
assert "mitmproxy has crashed" in output
diff --git a/test/test_server.py b/test/test_server.py
index 77ba4576..a1259b7f 100644
--- a/test/test_server.py
+++ b/test/test_server.py
@@ -1,6 +1,7 @@
import socket
import time
from OpenSSL import SSL
+from netlib.tcp import Address
import netlib.tutils
from netlib import tcp, http, socks
@@ -10,7 +11,9 @@ from netlib.http.semantics import CONTENT_MISSING
from libpathod import pathoc, pathod
from libmproxy.proxy.config import HostMatcher
-from libmproxy.protocol import KILL, Error, http_wrappers
+from libmproxy.protocol import Kill
+from libmproxy.models import Error, HTTPResponse
+
import tutils
import tservers
@@ -67,7 +70,7 @@ class CommonMixin:
# SSL with the upstream proxy.
rt = self.master.replay_request(l, block=True)
assert not rt
- if isinstance(self, tservers.HTTPUpstreamProxTest) and not self.ssl:
+ if isinstance(self, tservers.HTTPUpstreamProxTest):
assert l.response.code == 502
else:
assert l.error
@@ -319,17 +322,6 @@ class TestHTTPAuth(tservers.HTTPProxTest):
assert ret.status_code == 202
-class TestHTTPConnectSSLError(tservers.HTTPProxTest):
- certfile = True
-
- def test_go(self):
- self.config.ssl_ports.append(self.proxy.port)
- p = self.pathoc_raw()
- dst = ("localhost", self.proxy.port)
- p.connect(connect_to=dst)
- tutils.raises("502 - Bad Gateway", p.http_connect, dst)
-
-
class TestHTTPS(tservers.HTTPProxTest, CommonMixin, TcpMixin):
ssl = True
ssloptions = pathod.SSLOptions(request_client_cert=True)
@@ -390,26 +382,31 @@ class TestHTTPSUpstreamServerVerificationWBadCert(tservers.HTTPProxTest):
("untrusted-cert", tutils.test_data.path("data/untrusted-server.crt"))
])
+ def _request(self):
+ p = self.pathoc()
+ # We need to make an actual request because the upstream connection is lazy-loaded.
+ return p.request("get:/p/242")
+
def test_default_verification_w_bad_cert(self):
"""Should use no verification."""
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
- self.pathoc()
+ assert self._request().status_code == 242
def test_no_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_NONE
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
- self.pathoc()
+ assert self._request().status_code == 242
def test_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
- tutils.raises("SSL handshake error", self.pathoc)
+ assert self._request().status_code == 502
class TestHTTPSNoCommonName(tservers.HTTPProxTest):
@@ -469,60 +466,11 @@ class TestSocks5(tservers.SocksModeTest):
assert "SOCKS5 mode failure" in f.content
-class TestSpoof(tservers.SpoofModeTest):
- def test_http(self):
- alist = (
- ("localhost", self.server.port),
- ("127.0.0.1", self.server.port)
- )
- for a in alist:
- self.server.clear_log()
- p = self.pathoc()
- f = p.request("get:/p/304:h'Host'='%s:%s'" % a)
- assert self.server.last_log()
- assert f.status_code == 304
- l = self.master.state.view[-1]
- assert l.server_conn.address
- assert l.server_conn.address.host == a[0]
- assert l.server_conn.address.port == a[1]
-
- def test_http_without_host(self):
- p = self.pathoc()
- f = p.request("get:/p/304:r")
- assert f.status_code == 400
-
-
-class TestSSLSpoof(tservers.SSLSpoofModeTest):
- def test_https(self):
- alist = (
- ("localhost", self.server.port),
- ("127.0.0.1", self.server.port)
- )
- for a in alist:
- self.server.clear_log()
- self.config.mode.sslport = a[1]
- p = self.pathoc(sni=a[0])
- f = p.request("get:/p/304")
- assert self.server.last_log()
- assert f.status_code == 304
- l = self.master.state.view[-1]
- assert l.server_conn.address
- assert l.server_conn.address.host == a[0]
- assert l.server_conn.address.port == a[1]
-
- def test_https_without_sni(self):
- a = ("localhost", self.server.port)
- self.config.mode.sslport = a[1]
- p = self.pathoc(sni=None)
- f = p.request("get:/p/304")
- assert f.status_code == 400
-
-
class TestHttps2Http(tservers.ReverseProxTest):
@classmethod
def get_proxy_config(cls):
d = super(TestHttps2Http, cls).get_proxy_config()
- d["upstream_server"][0] = True
+ d["upstream_server"] = ("http", d["upstream_server"][1])
return d
def pathoc(self, ssl, sni=None):
@@ -530,7 +478,7 @@ class TestHttps2Http(tservers.ReverseProxTest):
Returns a connected Pathoc instance.
"""
p = pathoc.Pathoc(
- ("localhost", self.proxy.port), ssl=ssl, sni=sni, fp=None
+ ("localhost", self.proxy.port), ssl=True, sni=sni, fp=None
)
p.connect()
return p
@@ -546,7 +494,7 @@ class TestHttps2Http(tservers.ReverseProxTest):
def test_http(self):
p = self.pathoc(ssl=False)
- assert p.request("get:'/p/200'").status_code == 400
+ assert p.request("get:'/p/200'").status_code == 200
class TestTransparent(tservers.TransparentProxTest, CommonMixin, TcpMixin):
@@ -560,7 +508,7 @@ class TestTransparentSSL(tservers.TransparentProxTest, CommonMixin, TcpMixin):
p = pathoc.Pathoc(("localhost", self.proxy.port), fp=None)
p.connect()
r = p.request("get:/")
- assert r.status_code == 400
+ assert r.status_code == 502
class TestProxy(tservers.HTTPProxTest):
@@ -661,63 +609,65 @@ class MasterRedirectRequest(tservers.TestMaster):
redirect_port = None # Set by TestRedirectRequest
def handle_request(self, f):
- request = f.request
- if request.path == "/p/201":
- addr = f.live.c.server_conn.address
- assert f.live.change_server(
- ("127.0.0.1", self.redirect_port), ssl=False)
- assert not f.live.change_server(
- ("127.0.0.1", self.redirect_port), ssl=False)
- tutils.raises(
- "SSL handshake error",
- f.live.change_server,
- ("127.0.0.1",
- self.redirect_port),
- ssl=True)
- assert f.live.change_server(addr, ssl=False)
- request.url = "http://127.0.0.1:%s/p/201" % self.redirect_port
- tservers.TestMaster.handle_request(self, f)
+ if f.request.path == "/p/201":
+
+ # This part should have no impact, but it should also not cause any exceptions.
+ addr = f.live.server_conn.address
+ addr2 = Address(("127.0.0.1", self.redirect_port))
+ f.live.set_server(addr2)
+ f.live.set_server(addr)
+
+ # This is the actual redirection.
+ f.request.port = self.redirect_port
+ super(MasterRedirectRequest, self).handle_request(f)
def handle_response(self, f):
f.response.content = str(f.client_conn.address.port)
f.response.headers[
"server-conn-id"] = [str(f.server_conn.source_address.port)]
- tservers.TestMaster.handle_response(self, f)
+ super(MasterRedirectRequest, self).handle_response(f)
class TestRedirectRequest(tservers.HTTPProxTest):
masterclass = MasterRedirectRequest
+ ssl = True
def test_redirect(self):
+ """
+ Imagine a single HTTPS connection with three requests:
+
+ 1. First request should pass through unmodified
+ 2. Second request will be redirected to a different host by an inline script
+ 3. Third request should pass through unmodified
+
+ This test verifies that the original destination is restored for the third request.
+ """
self.master.redirect_port = self.server2.port
p = self.pathoc()
self.server.clear_log()
self.server2.clear_log()
- r1 = p.request("get:'%s/p/200'" % self.server.urlbase)
+ r1 = p.request("get:'/p/200'")
assert r1.status_code == 200
assert self.server.last_log()
assert not self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
- r2 = p.request("get:'%s/p/201'" % self.server.urlbase)
+ r2 = p.request("get:'/p/201'")
assert r2.status_code == 201
assert not self.server.last_log()
assert self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
- r3 = p.request("get:'%s/p/202'" % self.server.urlbase)
+ r3 = p.request("get:'/p/202'")
assert r3.status_code == 202
assert self.server.last_log()
assert not self.server2.last_log()
assert r1.content == r2.content == r3.content
- assert r1.headers.get_first(
- "server-conn-id") == r3.headers.get_first("server-conn-id")
- # Make sure that we actually use the same connection in this test case
class MasterStreamRequest(tservers.TestMaster):
@@ -774,9 +724,9 @@ class TestStreamRequest(tservers.HTTPProxTest):
assert resp.headers["Transfer-Encoding"][0] == 'chunked'
assert resp.status_code == 200
- chunks = list(
- content for _, content, _ in protocol.read_http_body_chunked(
- resp.headers, None, "GET", 200, False))
+ chunks = list(protocol.read_http_body_chunked(
+ resp.headers, None, "GET", 200, False
+ ))
assert chunks == ["this", "isatest", ""]
connection.close()
@@ -784,7 +734,7 @@ class TestStreamRequest(tservers.HTTPProxTest):
class MasterFakeResponse(tservers.TestMaster):
def handle_request(self, f):
- resp = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ resp = HTTPResponse.wrap(netlib.tutils.tresp())
f.reply(resp)
@@ -809,7 +759,7 @@ class TestServerConnect(tservers.HTTPProxTest):
class MasterKillRequest(tservers.TestMaster):
def handle_request(self, f):
- f.reply(KILL)
+ f.reply(Kill)
class TestKillRequest(tservers.HTTPProxTest):
@@ -823,7 +773,7 @@ class TestKillRequest(tservers.HTTPProxTest):
class MasterKillResponse(tservers.TestMaster):
def handle_response(self, f):
- f.reply(KILL)
+ f.reply(Kill)
class TestKillResponse(tservers.HTTPProxTest):
@@ -849,7 +799,7 @@ class TestTransparentResolveError(tservers.TransparentProxTest):
class MasterIncomplete(tservers.TestMaster):
def handle_request(self, f):
- resp = http_wrappers.HTTPResponse.wrap(netlib.tutils.tresp())
+ resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.content = CONTENT_MISSING
f.reply(resp)
@@ -988,7 +938,7 @@ class TestProxyChainingSSLReconnect(tservers.HTTPUpstreamProxTest):
if not (k[0] in exclude):
f.client_conn.finish()
f.error = Error("terminated")
- f.reply(KILL)
+ f.reply(Kill)
return _func(f)
setattr(master, attr, handler)
@@ -1009,6 +959,9 @@ class TestProxyChainingSSLReconnect(tservers.HTTPUpstreamProxTest):
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
+ assert req.content == "content"
+ assert req.status_code == 418
+
assert self.proxy.tmaster.state.flow_count() == 2 # CONNECT and request
# CONNECT, failing request,
assert self.chain[0].tmaster.state.flow_count() == 4
@@ -1017,8 +970,7 @@ class TestProxyChainingSSLReconnect(tservers.HTTPUpstreamProxTest):
assert self.chain[1].tmaster.state.flow_count() == 2
# (doesn't store (repeated) CONNECTs from chain[0]
# as it is a regular proxy)
- assert req.content == "content"
- assert req.status_code == 418
+
assert not self.chain[1].tmaster.state.flows[0].response # killed
assert self.chain[1].tmaster.state.flows[1].response
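
Two details worth noting from the upstream-verification tests above: verification is configured directly on the proxy config, and with VERIFY_PEER a bad upstream certificate now surfaces as a 502 response rather than an exception during the handshake. A small sketch of that configuration, mirroring TestHTTPSUpstreamServerVerificationWBadCert (the CA path is illustrative):

    from OpenSSL import SSL
    from libmproxy.proxy import ProxyConfig

    config = ProxyConfig()
    config.openssl_verification_mode_server = SSL.VERIFY_PEER
    config.openssl_trusted_ca_server = "/path/to/trusted-ca.pem"
    # With an untrusted upstream certificate the client now receives a 502
    # from mitmproxy instead of an "SSL handshake error" raised client-side.
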
diff --git a/test/tservers.py b/test/tservers.py
index 3c73b262..c5256e53 100644
--- a/test/tservers.py
+++ b/test/tservers.py
@@ -1,6 +1,5 @@
import os.path
import threading
-import Queue
import shutil
import tempfile
import flask
@@ -8,7 +7,6 @@ import mock
from libmproxy.proxy.config import ProxyConfig
from libmproxy.proxy.server import ProxyServer
-from libmproxy.proxy.primitives import TransparentProxyMode
import libpathod.test
import libpathod.pathoc
from libmproxy import flow, controller
@@ -130,7 +128,6 @@ class ProxTestBase(object):
no_upstream_cert = cls.no_upstream_cert,
cadir = cls.cadir,
authenticator = cls.authenticator,
- ssl_ports=([cls.server.port, cls.server2.port] if cls.ssl else []),
clientcerts = tutils.test_data.path("data/clientcert") if cls.clientcerts else None
)
@@ -183,22 +180,24 @@ class TResolver:
def original_addr(self, sock):
return ("127.0.0.1", self.port)
-
class TransparentProxTest(ProxTestBase):
ssl = None
resolver = TResolver
@classmethod
- @mock.patch("libmproxy.platform.resolver")
- def setupAll(cls, _):
+ def setupAll(cls):
super(TransparentProxTest, cls).setupAll()
- if cls.ssl:
- ports = [cls.server.port, cls.server2.port]
- else:
- ports = []
- cls.config.mode = TransparentProxyMode(
- cls.resolver(cls.server.port),
- ports)
+
+ cls._resolver = mock.patch(
+ "libmproxy.platform.resolver",
+ new=lambda: cls.resolver(cls.server.port)
+ )
+ cls._resolver.start()
+
+ @classmethod
+ def teardownAll(cls):
+ cls._resolver.stop()
+ super(TransparentProxTest, cls).teardownAll()
@classmethod
def get_proxy_config(cls):
@@ -235,12 +234,10 @@ class ReverseProxTest(ProxTestBase):
@classmethod
def get_proxy_config(cls):
d = ProxTestBase.get_proxy_config()
- d["upstream_server"] = [
- True if cls.ssl else False,
- True if cls.ssl else False,
- "127.0.0.1",
- cls.server.port
- ]
+ d["upstream_server"] = (
+ "https" if cls.ssl else "http",
+ ("127.0.0.1", cls.server.port)
+ )
d["mode"] = "reverse"
return d
@@ -274,48 +271,6 @@ class SocksModeTest(HTTPProxTest):
d["mode"] = "socks5"
return d
-class SpoofModeTest(ProxTestBase):
- ssl = None
-
- @classmethod
- def get_proxy_config(cls):
- d = ProxTestBase.get_proxy_config()
- d["upstream_server"] = None
- d["mode"] = "spoof"
- return d
-
- def pathoc(self, sni=None):
- """
- Returns a connected Pathoc instance.
- """
- p = libpathod.pathoc.Pathoc(
- ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
- )
- p.connect()
- return p
-
-
-class SSLSpoofModeTest(ProxTestBase):
- ssl = True
-
- @classmethod
- def get_proxy_config(cls):
- d = ProxTestBase.get_proxy_config()
- d["upstream_server"] = None
- d["mode"] = "sslspoof"
- d["spoofed_ssl_port"] = 443
- return d
-
- def pathoc(self, sni=None):
- """
- Returns a connected Pathoc instance.
- """
- p = libpathod.pathoc.Pathoc(
- ("localhost", self.proxy.port), ssl=self.ssl, sni=sni, fp=None
- )
- p.connect()
- return p
-
class ChainProxTest(ProxTestBase):
"""
@@ -360,7 +315,7 @@ class ChainProxTest(ProxTestBase):
if cls.chain: # First proxy is in normal mode.
d.update(
mode="upstream",
- upstream_server=(False, False, "127.0.0.1", cls.chain[0].port)
+ upstream_server=("http", ("127.0.0.1", cls.chain[0].port))
)
return d
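
The same (scheme, (host, port)) tuple is used for reverse and upstream modes throughout the test helpers. Constructing a reverse-proxy config with the new format looks roughly like this, mirroring the ProxyConfig call already used in test_flow.py:

    from libmproxy.proxy import ProxyConfig

    # Sketch: the scheme alone now decides whether mitmproxy uses TLS upstream;
    # the old [ssl_to_client, ssl_to_server, host, port] list is gone.
    conf = ProxyConfig(
        mode="reverse",
        upstream_server=("https", ("127.0.0.1", 8443)),
    )
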
diff --git a/test/tutils.py b/test/tutils.py
index 61b1154c..d64388f3 100644
--- a/test/tutils.py
+++ b/test/tutils.py
@@ -3,22 +3,19 @@ import shutil
import tempfile
import argparse
import sys
-import mock_urwid
from cStringIO import StringIO
from contextlib import contextmanager
+
from nose.plugins.skip import SkipTest
from mock import Mock
-from time import time
-from netlib import certutils, odict
import netlib.tutils
-
-from libmproxy import flow, utils, controller
-from libmproxy.protocol import http, http_wrappers
-from libmproxy.proxy.connection import ClientConnection, ServerConnection
+from libmproxy import utils, controller
+from libmproxy.models import (
+ ClientConnection, ServerConnection, Error, HTTPRequest, HTTPResponse, HTTPFlow
+)
from libmproxy.console.flowview import FlowView
from libmproxy.console import ConsoleState
-from libmproxy.protocol.primitives import Error
def _SkipWindows():
@@ -53,11 +50,11 @@ def tflow(client_conn=True, server_conn=True, req=True, resp=None, err=None):
err = terr()
if req:
- req = http_wrappers.HTTPRequest.wrap(req)
+ req = HTTPRequest.wrap(req)
if resp:
- resp = http_wrappers.HTTPResponse.wrap(resp)
+ resp = HTTPResponse.wrap(resp)
- f = http.HTTPFlow(client_conn, server_conn)
+ f = HTTPFlow(client_conn, server_conn)
f.request = req
f.response = resp
f.error = err
@@ -91,7 +88,6 @@ def tserver_conn():
return c
-
def terr(content="error"):
"""
@return: libmproxy.protocol.primitives.Error
@@ -106,7 +102,7 @@ def tflowview(request_contents=None):
if request_contents is None:
flow = tflow()
else:
- flow = tflow(req=treq(request_contents))
+ flow = tflow(req=netlib.tutils.treq(request_contents))
fv = FlowView(m, cs, flow)
return fv
@@ -184,4 +180,5 @@ def capture_stderr(command, *args, **kwargs):
yield sys.stderr.getvalue()
sys.stderr = out
+
test_data = utils.Data(__name__)
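
With the wrappers consolidated in libmproxy.models, the tflow factory above builds complete HTTPFlow fixtures in one call. Typical usage in a test, as a small sketch:

    import tutils

    f = tutils.tflow(resp=True)      # flow with request and response attached
    assert f.request and f.response

    f2 = tutils.tflow(err=True)      # flow carrying an Error instead of a response
    assert f2.error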