author     Maximilian Hils <git@maximilianhils.com>  2016-02-15 14:58:46 +0100
committer  Maximilian Hils <git@maximilianhils.com>  2016-02-15 14:58:46 +0100
commit     33fa49277a821b9d38e8c9bf0bcf2adcfa2f6f04 (patch)
tree       31914a601302579ff817504019296fd7e9e46765 /libmproxy/protocol
parent     36f34f701991b5d474c005ec45e3b66e20f326a8 (diff)
download   mitmproxy-33fa49277a821b9d38e8c9bf0bcf2adcfa2f6f04.tar.gz
           mitmproxy-33fa49277a821b9d38e8c9bf0bcf2adcfa2f6f04.tar.bz2
           mitmproxy-33fa49277a821b9d38e8c9bf0bcf2adcfa2f6f04.zip
move mitmproxy
Diffstat (limited to 'libmproxy/protocol')
-rw-r--r--  libmproxy/protocol/__init__.py       45
-rw-r--r--  libmproxy/protocol/base.py          198
-rw-r--r--  libmproxy/protocol/http.py          416
-rw-r--r--  libmproxy/protocol/http1.py          67
-rw-r--r--  libmproxy/protocol/http2.py         437
-rw-r--r--  libmproxy/protocol/http_replay.py   105
-rw-r--r--  libmproxy/protocol/rawtcp.py         88
-rw-r--r--  libmproxy/protocol/tls.py           566
8 files changed, 0 insertions, 1922 deletions
diff --git a/libmproxy/protocol/__init__.py b/libmproxy/protocol/__init__.py
deleted file mode 100644
index ea958d06..00000000
--- a/libmproxy/protocol/__init__.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-In mitmproxy, protocols are implemented as a set of layers, which are composed on top of each other.
-The first layer is usually the proxy mode, e.g. transparent proxy or normal HTTP proxy. Next,
-various protocol layers are stacked on top of each other - imagine WebSockets on top of an HTTP
-Upgrade request. An actual mitmproxy connection may look as follows (outermost layer first):
-
- Transparent HTTP proxy, no TLS:
- - TransparentProxy
- - Http1Layer
- - HttpLayer
-
- Regular proxy, CONNECT request with WebSockets over SSL:
- - ReverseProxy
- - Http1Layer
- - HttpLayer
- - TLSLayer
- - WebsocketLayer (or TCPLayer)
-
-Every layer acts as a read-only context for its inner layers (see :py:class:`Layer`). To communicate
-with an outer layer, a layer can use functions provided in the context. The next layer is always
-determined by a call to :py:meth:`.next_layer() <libmproxy.proxy.RootContext.next_layer>`,
-which is provided by the root context.
-
-Another subtle design goal of this architecture is that upstream connections should be established
-as late as possible; this makes server replay without any outgoing connections possible.
-"""
-
-from __future__ import (absolute_import, print_function, division)
-from .base import Layer, ServerConnectionMixin, Kill
-from .tls import TlsLayer
-from .tls import is_tls_record_magic
-from .tls import TlsClientHello
-from .http import UpstreamConnectLayer
-from .http1 import Http1Layer
-from .http2 import Http2Layer
-from .rawtcp import RawTCPLayer
-
-__all__ = [
- "Layer", "ServerConnectionMixin", "Kill",
- "TlsLayer", "is_tls_record_magic", "TlsClientHello",
- "UpstreamConnectLayer",
- "Http1Layer",
- "Http2Layer",
- "RawTCPLayer",
-]
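
For readers unfamiliar with this layering scheme, here is a minimal, self-contained sketch of the delegation pattern the module docstring above describes; the names (ToyLayer, ToyRootContext) are illustrative only and are not part of libmproxy:

    class ToyLayer(object):
        def __init__(self, ctx):
            self.ctx = ctx  # the (read-only) parent layer / context

        def __getattr__(self, name):
            # attributes missing on this layer are looked up on the context
            return getattr(self.ctx, name)


    class ToyRootContext(object):
        client_conn = "<client connection>"

        def next_layer(self, top_layer):
            # decide the next layer based on the current protocol state
            return ToyLayer(top_layer)


    root = ToyRootContext()
    http1 = ToyLayer(root)         # e.g. Http1Layer
    http = root.next_layer(http1)  # e.g. HttpLayer stacked on top
    print(http.client_conn)        # "<client connection>", resolved via the context chain
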
diff --git a/libmproxy/protocol/base.py b/libmproxy/protocol/base.py
deleted file mode 100644
index 40fcaf65..00000000
--- a/libmproxy/protocol/base.py
+++ /dev/null
@@ -1,198 +0,0 @@
-from __future__ import (absolute_import, print_function, division)
-import sys
-
-import six
-
-from ..models import ServerConnection
-from ..exceptions import ProtocolException
-from netlib.exceptions import TcpException
-
-
-class _LayerCodeCompletion(object):
-
- """
- Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
- """
-
- def __init__(self, **mixin_args): # pragma: no cover
- super(_LayerCodeCompletion, self).__init__(**mixin_args)
- if True:
- return
- self.config = None
- """@type: libmproxy.proxy.ProxyConfig"""
- self.client_conn = None
- """@type: libmproxy.models.ClientConnection"""
- self.server_conn = None
- """@type: libmproxy.models.ServerConnection"""
- self.channel = None
- """@type: libmproxy.controller.Channel"""
- self.ctx = None
- """@type: libmproxy.protocol.Layer"""
-
-
-class Layer(_LayerCodeCompletion):
-
- """
- Base class for all layers. All other protocol layers should inherit from this class.
- """
-
- def __init__(self, ctx, **mixin_args):
- """
- Each layer usually passes itself to its child layers as a context. Properties of the
- context are transparently mapped to the layer, so that the following works:
-
- .. code-block:: python
-
- root_layer = Layer(None)
- root_layer.client_conn = 42
- sub_layer = Layer(root_layer)
- print(sub_layer.client_conn) # 42
-
- The root layer is passed a :py:class:`libmproxy.proxy.RootContext` object,
- which provides access to :py:attr:`.client_conn <libmproxy.proxy.RootContext.client_conn>`,
- :py:attr:`.next_layer <libmproxy.proxy.RootContext.next_layer>` and other basic attributes.
-
- Args:
- ctx: The (read-only) parent layer / context.
- """
- self.ctx = ctx
- """
- The parent layer.
-
- :type: :py:class:`Layer`
- """
- super(Layer, self).__init__(**mixin_args)
-
- def __call__(self):
- """Logic of the layer.
-
- Returns:
- Once the protocol has finished without exceptions.
-
- Raises:
- ~libmproxy.exceptions.ProtocolException: if an exception occurs. No other exceptions must be raised.
- """
- raise NotImplementedError()
-
- def __getattr__(self, name):
- """
- Attributes not present on the current layer are looked up on the context.
- """
- return getattr(self.ctx, name)
-
- @property
- def layers(self):
- """
- List of all layers, including the current layer (``[self, self.ctx, self.ctx.ctx, ...]``)
- """
- return [self] + self.ctx.layers
-
- def __repr__(self):
- return type(self).__name__
-
-
-class ServerConnectionMixin(object):
-
- """
- Mixin that provides a layer with the capabilities to manage a server connection.
- The server address can be passed in the constructor or set by calling :py:meth:`set_server`.
- Subclasses are responsible for calling :py:meth:`disconnect` before returning.
-
- Recommended Usage:
-
- .. code-block:: python
-
- class MyLayer(Layer, ServerConnectionMixin):
- def __call__(self):
- try:
-                        # Do something.
-                        pass
- finally:
- if self.server_conn:
- self.disconnect()
- """
-
- def __init__(self, server_address=None):
- super(ServerConnectionMixin, self).__init__()
- self.server_conn = ServerConnection(server_address, (self.config.host, 0))
- self.__check_self_connect()
-
- def __check_self_connect(self):
- """
- We try to protect the proxy from _accidentally_ connecting to itself,
- e.g. because of a failed transparent lookup or an invalid configuration.
- """
- address = self.server_conn.address
- if address:
- self_connect = (
- address.port == self.config.port and
- address.host in ("localhost", "127.0.0.1", "::1")
- )
- if self_connect:
- raise ProtocolException(
- "Invalid server address: {}\r\n"
- "The proxy shall not connect to itself.".format(repr(address))
- )
-
- def set_server(self, address, server_tls=None, sni=None):
- """
- Sets a new server address. If there is an existing connection, it will be closed.
-
- Raises:
- ~libmproxy.exceptions.ProtocolException:
- if ``server_tls`` is ``True``, but there was no TLS layer on the
- protocol stack which could have processed this.
- """
- if self.server_conn:
- self.disconnect()
- self.log("Set new server address: " + repr(address), "debug")
- self.server_conn.address = address
- self.__check_self_connect()
- if server_tls:
- raise ProtocolException(
- "Cannot upgrade to TLS, no TLS layer on the protocol stack."
- )
-
- def disconnect(self):
- """
- Deletes (and closes) an existing server connection.
- Must not be called if there is no existing connection.
- """
- self.log("serverdisconnect", "debug", [repr(self.server_conn.address)])
- address = self.server_conn.address
- source_address = self.server_conn.source_address
- self.server_conn.finish()
- self.server_conn.close()
- self.channel.tell("serverdisconnect", self.server_conn)
- self.server_conn = ServerConnection(address, source_address)
-
- def connect(self):
- """
- Establishes a server connection.
- Must not be called if there is an existing connection.
-
- Raises:
- ~libmproxy.exceptions.ProtocolException: if the connection could not be established.
- """
- if not self.server_conn.address:
- raise ProtocolException("Cannot connect to server, no server address given.")
- self.log("serverconnect", "debug", [repr(self.server_conn.address)])
- self.channel.ask("serverconnect", self.server_conn)
- try:
- self.server_conn.connect()
- except TcpException as e:
- six.reraise(
- ProtocolException,
- ProtocolException(
- "Server connection to {} failed: {}".format(
- repr(self.server_conn.address), str(e)
- )
- ),
- sys.exc_info()[2]
- )
-
-
-class Kill(Exception):
-
- """
- Signal that both client and server connection(s) should be killed immediately.
- """
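
A side note on the error handling in connect() above: six.reraise converts a low-level TcpException into a ProtocolException while keeping the original traceback, the usual Python 2 idiom for exception chaining. A minimal standalone sketch of that idiom, using made-up exception names:

    import sys

    import six


    class LowLevelError(Exception):
        pass


    class HighLevelError(Exception):
        pass


    try:
        raise LowLevelError("connection refused")
    except LowLevelError as e:
        # re-raise as a higher-level error, preserving the original traceback
        six.reraise(
            HighLevelError,
            HighLevelError("wrapped: %s" % e),
            sys.exc_info()[2]
        )
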
diff --git a/libmproxy/protocol/http.py b/libmproxy/protocol/http.py
deleted file mode 100644
index 13d7903b..00000000
--- a/libmproxy/protocol/http.py
+++ /dev/null
@@ -1,416 +0,0 @@
-from __future__ import (absolute_import, print_function, division)
-
-import sys
-import traceback
-import six
-
-from netlib import tcp
-from netlib.exceptions import HttpException, HttpReadDisconnect, NetlibException
-from netlib.http import Headers, CONTENT_MISSING
-
-from h2.exceptions import H2Error
-
-from .. import utils
-from ..exceptions import HttpProtocolException, ProtocolException
-from ..models import (
- HTTPFlow,
- HTTPResponse,
- make_error_response,
- make_connect_response,
- Error,
- expect_continue_response
-)
-
-from .base import Layer, Kill
-
-
-class _HttpTransmissionLayer(Layer):
-
- def read_request(self):
- raise NotImplementedError()
-
- def read_request_body(self, request):
- raise NotImplementedError()
-
- def send_request(self, request):
- raise NotImplementedError()
-
- def read_response(self, request):
- response = self.read_response_headers()
- response.data.content = b"".join(
- self.read_response_body(request, response)
- )
- return response
-
- def read_response_headers(self):
- raise NotImplementedError()
-
- def read_response_body(self, request, response):
- raise NotImplementedError()
- yield "this is a generator" # pragma: no cover
-
- def send_response(self, response):
- if response.content == CONTENT_MISSING:
- raise HttpException("Cannot assemble flow with CONTENT_MISSING")
- self.send_response_headers(response)
- self.send_response_body(response, [response.content])
-
- def send_response_headers(self, response):
- raise NotImplementedError()
-
- def send_response_body(self, response, chunks):
- raise NotImplementedError()
-
- def check_close_connection(self, flow):
- raise NotImplementedError()
-
-
-class ConnectServerConnection(object):
-
- """
- "Fake" ServerConnection to represent state after a CONNECT request to an upstream proxy.
- """
-
- def __init__(self, address, ctx):
- self.address = tcp.Address.wrap(address)
- self._ctx = ctx
-
- @property
- def via(self):
- return self._ctx.server_conn
-
- def __getattr__(self, item):
- return getattr(self.via, item)
-
- def __nonzero__(self):
- return bool(self.via)
-
-
-class UpstreamConnectLayer(Layer):
-
- def __init__(self, ctx, connect_request):
- super(UpstreamConnectLayer, self).__init__(ctx)
- self.connect_request = connect_request
- self.server_conn = ConnectServerConnection(
- (connect_request.host, connect_request.port),
- self.ctx
- )
-
- def __call__(self):
- layer = self.ctx.next_layer(self)
- layer()
-
- def _send_connect_request(self):
- self.send_request(self.connect_request)
- resp = self.read_response(self.connect_request)
- if resp.status_code != 200:
- raise ProtocolException("Reconnect: Upstream server refuses CONNECT request")
-
- def connect(self):
- if not self.server_conn:
- self.ctx.connect()
- self._send_connect_request()
- else:
- pass # swallow the message
-
- def change_upstream_proxy_server(self, address):
- if address != self.server_conn.via.address:
- self.ctx.set_server(address)
-
- def set_server(self, address, server_tls=None, sni=None):
- if self.ctx.server_conn:
- self.ctx.disconnect()
- address = tcp.Address.wrap(address)
- self.connect_request.host = address.host
- self.connect_request.port = address.port
- self.server_conn.address = address
-
- if server_tls:
- raise ProtocolException(
- "Cannot upgrade to TLS, no TLS layer on the protocol stack."
- )
-
-
-class HttpLayer(Layer):
-
- def __init__(self, ctx, mode):
- super(HttpLayer, self).__init__(ctx)
- self.mode = mode
-
- self.__initial_server_conn = None
- "Contains the original destination in transparent mode, which needs to be restored"
- "if an inline script modified the target server for a single http request"
- # We cannot rely on server_conn.tls_established,
- # see https://github.com/mitmproxy/mitmproxy/issues/925
- self.__initial_server_tls = None
-
- def __call__(self):
- if self.mode == "transparent":
- self.__initial_server_tls = self._server_tls
- self.__initial_server_conn = self.server_conn
- while True:
- try:
- request = self.get_request_from_client()
- self.log("request", "debug", [repr(request)])
-
- # Handle Proxy Authentication
- # Proxy Authentication conceptually does not work in transparent mode.
- # We catch this misconfiguration on startup. Here, we sort out requests
- # after a successful CONNECT request (which do not need to be validated anymore)
- if self.mode != "transparent" and not self.authenticate(request):
- return
-
- # Make sure that the incoming request matches our expectations
- self.validate_request(request)
-
- # Regular Proxy Mode: Handle CONNECT
- if self.mode == "regular" and request.form_in == "authority":
- self.handle_regular_mode_connect(request)
- return
-
- except HttpReadDisconnect:
- # don't throw an error for disconnects that happen before/between requests.
- return
- except NetlibException as e:
- self.send_error_response(400, repr(e))
- six.reraise(ProtocolException, ProtocolException(
- "Error in HTTP connection: %s" % repr(e)), sys.exc_info()[2])
-
- try:
- flow = HTTPFlow(self.client_conn, self.server_conn, live=self)
- flow.request = request
- self.process_request_hook(flow)
-
- if not flow.response:
- self.establish_server_connection(flow)
- self.get_response_from_server(flow)
- else:
- # response was set by an inline script.
- # we now need to emulate the responseheaders hook.
- flow = self.channel.ask("responseheaders", flow)
- if flow == Kill:
- raise Kill()
-
- self.log("response", "debug", [repr(flow.response)])
- flow = self.channel.ask("response", flow)
- if flow == Kill:
- raise Kill()
- self.send_response_to_client(flow)
-
- if self.check_close_connection(flow):
- return
-
- # Handle 101 Switching Protocols
- # It may be useful to pass additional args (such as the upgrade header)
- # to next_layer in the future
- if flow.response.status_code == 101:
- layer = self.ctx.next_layer(self)
- layer()
- return
-
- # Upstream Proxy Mode: Handle CONNECT
- if flow.request.form_in == "authority" and flow.response.status_code == 200:
- self.handle_upstream_mode_connect(flow.request.copy())
- return
-
- except (ProtocolException, NetlibException) as e:
- self.send_error_response(502, repr(e))
-
- if not flow.response:
- flow.error = Error(str(e))
- self.channel.ask("error", flow)
- self.log(traceback.format_exc(), "debug")
- return
- else:
- six.reraise(ProtocolException, ProtocolException(
- "Error in HTTP connection: %s" % repr(e)), sys.exc_info()[2])
- finally:
- flow.live = False
-
- def get_request_from_client(self):
- request = self.read_request()
- if request.headers.get("expect", "").lower() == "100-continue":
- # TODO: We may have to use send_response_headers for HTTP2 here.
- self.send_response(expect_continue_response)
- request.headers.pop("expect")
- request.body = b"".join(self.read_request_body(request))
- return request
-
- def send_error_response(self, code, message):
- try:
- response = make_error_response(code, message)
- self.send_response(response)
- except (NetlibException, H2Error):
- self.log(traceback.format_exc(), "debug")
-
- def change_upstream_proxy_server(self, address):
-        # Make change_upstream_proxy_server always available,
- # even if there's no UpstreamConnectLayer
- if address != self.server_conn.address:
- return self.set_server(address)
-
- def handle_regular_mode_connect(self, request):
- self.set_server((request.host, request.port))
- self.send_response(make_connect_response(request.http_version))
- layer = self.ctx.next_layer(self)
- layer()
-
- def handle_upstream_mode_connect(self, connect_request):
- layer = UpstreamConnectLayer(self, connect_request)
- layer()
-
- def send_response_to_client(self, flow):
- if not flow.response.stream:
- # no streaming:
- # we already received the full response from the server and can
- # send it to the client straight away.
- self.send_response(flow.response)
- else:
- # streaming:
- # First send the headers and then transfer the response incrementally
- self.send_response_headers(flow.response)
- chunks = self.read_response_body(
- flow.request,
- flow.response
- )
- if callable(flow.response.stream):
- chunks = flow.response.stream(chunks)
- self.send_response_body(flow.response, chunks)
- flow.response.timestamp_end = utils.timestamp()
-
- def get_response_from_server(self, flow):
- def get_response():
- self.send_request(flow.request)
- flow.response = self.read_response_headers()
-
- try:
- get_response()
- except NetlibException as v:
- self.log(
- "server communication error: %s" % repr(v),
- level="debug"
- )
- # In any case, we try to reconnect at least once. This is
- # necessary because it might be possible that we already
- # initiated an upstream connection after clientconnect that
-            # has already been expired, e.g. consider the following event
- # log:
- # > clientconnect (transparent mode destination known)
- # > serverconnect (required for client tls handshake)
- # > read n% of large request
- # > server detects timeout, disconnects
- # > read (100-n)% of large request
- # > send large request upstream
- self.disconnect()
- self.connect()
- get_response()
-
- # call the appropriate script hook - this is an opportunity for an
- # inline script to set flow.stream = True
- flow = self.channel.ask("responseheaders", flow)
- if flow == Kill:
- raise Kill()
-
- if flow.response.stream:
- flow.response.data.content = CONTENT_MISSING
- else:
- flow.response.data.content = b"".join(self.read_response_body(
- flow.request,
- flow.response
- ))
- flow.response.timestamp_end = utils.timestamp()
-
- # no further manipulation of self.server_conn beyond this point
- # we can safely set it as the final attribute value here.
- flow.server_conn = self.server_conn
-
- def process_request_hook(self, flow):
- # Determine .scheme, .host and .port attributes for inline scripts.
- # For absolute-form requests, they are directly given in the request.
- # For authority-form requests, we only need to determine the request scheme.
- # For relative-form requests, we need to determine host and port as
- # well.
- if self.mode == "regular":
- pass # only absolute-form at this point, nothing to do here.
- elif self.mode == "upstream":
- if flow.request.form_in == "authority":
- flow.request.scheme = "http" # pseudo value
- else:
- # Setting request.host also updates the host header, which we want to preserve
- host_header = flow.request.headers.get("host", None)
- flow.request.host = self.__initial_server_conn.address.host
- flow.request.port = self.__initial_server_conn.address.port
- if host_header:
- flow.request.headers["host"] = host_header
- flow.request.scheme = "https" if self.__initial_server_tls else "http"
-
- request_reply = self.channel.ask("request", flow)
- if request_reply == Kill:
- raise Kill()
- if isinstance(request_reply, HTTPResponse):
- flow.response = request_reply
- return
-
- def establish_server_connection(self, flow):
- address = tcp.Address((flow.request.host, flow.request.port))
- tls = (flow.request.scheme == "https")
-
- if self.mode == "regular" or self.mode == "transparent":
- # If there's an existing connection that doesn't match our expectations, kill it.
- if address != self.server_conn.address or tls != self.server_conn.tls_established:
- self.set_server(address, tls, address.host)
-            # Establish connection if necessary.
- if not self.server_conn:
- self.connect()
- else:
- if not self.server_conn:
- self.connect()
- if tls:
- raise HttpProtocolException("Cannot change scheme in upstream proxy mode.")
- """
- # This is a very ugly (untested) workaround to solve a very ugly problem.
- if self.server_conn and self.server_conn.tls_established and not ssl:
- self.disconnect()
- self.connect()
- elif ssl and not hasattr(self, "connected_to") or self.connected_to != address:
- if self.server_conn.tls_established:
- self.disconnect()
- self.connect()
-
- self.send_request(make_connect_request(address))
- tls_layer = TlsLayer(self, False, True)
- tls_layer._establish_tls_with_server()
- """
-
- def validate_request(self, request):
- if request.form_in == "absolute" and request.scheme != "http":
- raise HttpException("Invalid request scheme: %s" % request.scheme)
-
- expected_request_forms = {
- "regular": ("authority", "absolute",),
- "upstream": ("authority", "absolute"),
- "transparent": ("relative",)
- }
-
- allowed_request_forms = expected_request_forms[self.mode]
- if request.form_in not in allowed_request_forms:
- err_message = "Invalid HTTP request form (expected: %s, got: %s)" % (
- " or ".join(allowed_request_forms), request.form_in
- )
- raise HttpException(err_message)
-
- if self.mode == "regular" and request.form_in == "absolute":
- request.form_out = "relative"
-
- def authenticate(self, request):
- if self.config.authenticator:
- if self.config.authenticator.authenticate(request.headers):
- self.config.authenticator.clean(request.headers)
- else:
- self.send_response(make_error_response(
- 407,
- "Proxy Authentication Required",
- Headers(**self.config.authenticator.auth_challenge_headers())
- ))
- return False
- return True
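
For context on the request forms that validate_request() distinguishes (libmproxy's "relative" corresponds to RFC 7230's origin-form), here are example request lines for each form:

    GET /index.html HTTP/1.1              relative (origin-form), expected in transparent mode
    GET http://example.com/ HTTP/1.1      absolute-form, expected at a regular or upstream proxy
    CONNECT example.com:443 HTTP/1.1      authority-form, used to open a tunnel through the proxy
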
diff --git a/libmproxy/protocol/http1.py b/libmproxy/protocol/http1.py
deleted file mode 100644
index a4cd8801..00000000
--- a/libmproxy/protocol/http1.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from __future__ import (absolute_import, print_function, division)
-
-
-from netlib.http import http1
-
-from .http import _HttpTransmissionLayer, HttpLayer
-from ..models import HTTPRequest, HTTPResponse
-
-
-class Http1Layer(_HttpTransmissionLayer):
-
- def __init__(self, ctx, mode):
- super(Http1Layer, self).__init__(ctx)
- self.mode = mode
-
- def read_request(self):
- req = http1.read_request(self.client_conn.rfile, body_size_limit=self.config.body_size_limit)
- return HTTPRequest.wrap(req)
-
- def read_request_body(self, request):
- expected_size = http1.expected_http_body_size(request)
- return http1.read_body(self.client_conn.rfile, expected_size, self.config.body_size_limit)
-
- def send_request(self, request):
- self.server_conn.wfile.write(http1.assemble_request(request))
- self.server_conn.wfile.flush()
-
- def read_response_headers(self):
- resp = http1.read_response_head(self.server_conn.rfile)
- return HTTPResponse.wrap(resp)
-
- def read_response_body(self, request, response):
- expected_size = http1.expected_http_body_size(request, response)
- return http1.read_body(self.server_conn.rfile, expected_size, self.config.body_size_limit)
-
- def send_response_headers(self, response):
- raw = http1.assemble_response_head(response)
- self.client_conn.wfile.write(raw)
- self.client_conn.wfile.flush()
-
- def send_response_body(self, response, chunks):
- for chunk in http1.assemble_body(response.headers, chunks):
- self.client_conn.wfile.write(chunk)
- self.client_conn.wfile.flush()
-
- def check_close_connection(self, flow):
- request_close = http1.connection_close(
- flow.request.http_version,
- flow.request.headers
- )
- response_close = http1.connection_close(
- flow.response.http_version,
- flow.response.headers
- )
- read_until_eof = http1.expected_http_body_size(flow.request, flow.response) == -1
- close_connection = request_close or response_close or read_until_eof
- if flow.request.form_in == "authority" and flow.response.status_code == 200:
- # Workaround for https://github.com/mitmproxy/mitmproxy/issues/313:
- # Charles Proxy sends a CONNECT response with HTTP/1.0
- # and no Content-Length header
-
- return False
- return close_connection
-
- def __call__(self):
- layer = HttpLayer(self, self.mode)
- layer()
diff --git a/libmproxy/protocol/http2.py b/libmproxy/protocol/http2.py
deleted file mode 100644
index c121637c..00000000
--- a/libmproxy/protocol/http2.py
+++ /dev/null
@@ -1,437 +0,0 @@
-from __future__ import (absolute_import, print_function, division)
-
-import threading
-import time
-import Queue
-
-from netlib.tcp import ssl_read_select
-from netlib.exceptions import HttpException
-from netlib.http import Headers
-from netlib.utils import http2_read_raw_frame
-
-import hyperframe
-import h2
-from h2.connection import H2Connection
-from h2.events import *
-
-from .base import Layer
-from .http import _HttpTransmissionLayer, HttpLayer
-from .. import utils
-from ..models import HTTPRequest, HTTPResponse
-
-
-class SafeH2Connection(H2Connection):
-
- def __init__(self, conn, *args, **kwargs):
- super(SafeH2Connection, self).__init__(*args, **kwargs)
- self.conn = conn
- self.lock = threading.RLock()
-
- def safe_close_connection(self, error_code):
- with self.lock:
- self.close_connection(error_code)
- self.conn.send(self.data_to_send())
-
- def safe_increment_flow_control(self, stream_id, length):
- if length == 0:
- return
-
- with self.lock:
- self.increment_flow_control_window(length)
- self.conn.send(self.data_to_send())
- with self.lock:
- if stream_id in self.streams and not self.streams[stream_id].closed:
- self.increment_flow_control_window(length, stream_id=stream_id)
- self.conn.send(self.data_to_send())
-
- def safe_reset_stream(self, stream_id, error_code):
- with self.lock:
- try:
- self.reset_stream(stream_id, error_code)
- except h2.exceptions.StreamClosedError:
- # stream is already closed - good
- pass
- self.conn.send(self.data_to_send())
-
- def safe_update_settings(self, new_settings):
- with self.lock:
- self.update_settings(new_settings)
- self.conn.send(self.data_to_send())
-
- def safe_send_headers(self, is_zombie, stream_id, headers):
- with self.lock:
- if is_zombie():
- return
- self.send_headers(stream_id, headers)
- self.conn.send(self.data_to_send())
-
- def safe_send_body(self, is_zombie, stream_id, chunks):
- for chunk in chunks:
- position = 0
- while position < len(chunk):
- self.lock.acquire()
- if is_zombie():
- self.lock.release()
- return
- max_outbound_frame_size = self.max_outbound_frame_size
- frame_chunk = chunk[position:position + max_outbound_frame_size]
- if self.local_flow_control_window(stream_id) < len(frame_chunk):
- self.lock.release()
- time.sleep(0)
- continue
- self.send_data(stream_id, frame_chunk)
- self.conn.send(self.data_to_send())
- self.lock.release()
- position += max_outbound_frame_size
- with self.lock:
- if is_zombie():
- return
- self.end_stream(stream_id)
- self.conn.send(self.data_to_send())
-
-
-class Http2Layer(Layer):
-
- def __init__(self, ctx, mode):
- super(Http2Layer, self).__init__(ctx)
- self.mode = mode
- self.streams = dict()
- self.client_reset_streams = []
- self.server_reset_streams = []
- self.server_to_client_stream_ids = dict([(0, 0)])
- self.client_conn.h2 = SafeH2Connection(self.client_conn, client_side=False)
-
- # make sure that we only pass actual SSL.Connection objects in here,
- # because otherwise ssl_read_select fails!
- self.active_conns = [self.client_conn.connection]
-
- def _initiate_server_conn(self):
- self.server_conn.h2 = SafeH2Connection(self.server_conn, client_side=True)
- self.server_conn.h2.initiate_connection()
- self.server_conn.send(self.server_conn.h2.data_to_send())
- self.active_conns.append(self.server_conn.connection)
-
- def connect(self): # pragma: no cover
- raise ValueError("CONNECT inside an HTTP2 stream is not supported.")
- # self.ctx.connect()
- # self.server_conn.connect()
- # self._initiate_server_conn()
-
- def set_server(self): # pragma: no cover
- raise NotImplementedError("Cannot change server for HTTP2 connections.")
-
- def disconnect(self): # pragma: no cover
- raise NotImplementedError("Cannot dis- or reconnect in HTTP2 connections.")
-
- def next_layer(self): # pragma: no cover
- # WebSockets over HTTP/2?
- # CONNECT for proxying?
- raise NotImplementedError()
-
- def _handle_event(self, event, source_conn, other_conn, is_server):
- self.log(
- "HTTP2 Event from {}".format("server" if is_server else "client"),
- "debug",
- [repr(event)]
- )
-
- if hasattr(event, 'stream_id'):
- if is_server and event.stream_id % 2 == 1:
- eid = self.server_to_client_stream_ids[event.stream_id]
- else:
- eid = event.stream_id
-
- if isinstance(event, RequestReceived):
- headers = Headers([[str(k), str(v)] for k, v in event.headers])
- self.streams[eid] = Http2SingleStreamLayer(self, eid, headers)
- self.streams[eid].timestamp_start = time.time()
- self.streams[eid].start()
- elif isinstance(event, ResponseReceived):
- headers = Headers([[str(k), str(v)] for k, v in event.headers])
- self.streams[eid].queued_data_length = 0
- self.streams[eid].timestamp_start = time.time()
- self.streams[eid].response_headers = headers
- self.streams[eid].response_arrived.set()
- elif isinstance(event, DataReceived):
- if self.config.body_size_limit and self.streams[eid].queued_data_length > self.config.body_size_limit:
- raise HttpException("HTTP body too large. Limit is {}.".format(self.config.body_size_limit))
- self.streams[eid].data_queue.put(event.data)
- self.streams[eid].queued_data_length += len(event.data)
- source_conn.h2.safe_increment_flow_control(event.stream_id, event.flow_controlled_length)
- elif isinstance(event, StreamEnded):
- self.streams[eid].timestamp_end = time.time()
- self.streams[eid].data_finished.set()
- elif isinstance(event, StreamReset):
- self.streams[eid].zombie = time.time()
- self.client_reset_streams.append(self.streams[eid].client_stream_id)
- if self.streams[eid].server_stream_id:
- self.server_reset_streams.append(self.streams[eid].server_stream_id)
- if eid in self.streams and event.error_code == 0x8:
- if is_server:
- other_stream_id = self.streams[eid].client_stream_id
- else:
- other_stream_id = self.streams[eid].server_stream_id
- if other_stream_id is not None:
- other_conn.h2.safe_reset_stream(other_stream_id, event.error_code)
- elif isinstance(event, RemoteSettingsChanged):
- new_settings = dict([(id, cs.new_value) for (id, cs) in event.changed_settings.iteritems()])
- other_conn.h2.safe_update_settings(new_settings)
- elif isinstance(event, ConnectionTerminated):
- # Do not immediately terminate the other connection.
- # Some streams might be still sending data to the client.
- return False
- elif isinstance(event, PushedStreamReceived):
-            # pushed stream ids should be unique and not dependent on race conditions
- # only the parent stream id must be looked up first
- parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
- with self.client_conn.h2.lock:
- self.client_conn.h2.push_stream(parent_eid, event.pushed_stream_id, event.headers)
-
- headers = Headers([[str(k), str(v)] for k, v in event.headers])
- headers['x-mitmproxy-pushed'] = 'true'
- self.streams[event.pushed_stream_id] = Http2SingleStreamLayer(self, event.pushed_stream_id, headers)
- self.streams[event.pushed_stream_id].timestamp_start = time.time()
- self.streams[event.pushed_stream_id].pushed = True
- self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
- self.streams[event.pushed_stream_id].timestamp_end = time.time()
- self.streams[event.pushed_stream_id].request_data_finished.set()
- self.streams[event.pushed_stream_id].start()
- elif isinstance(event, TrailersReceived):
- raise NotImplementedError()
-
- return True
-
- def _cleanup_streams(self):
- death_time = time.time() - 10
- for stream_id in self.streams.keys():
- zombie = self.streams[stream_id].zombie
- if zombie and zombie <= death_time:
- self.streams.pop(stream_id, None)
-
- def __call__(self):
- if self.server_conn:
- self._initiate_server_conn()
-
- preamble = self.client_conn.rfile.read(24)
- self.client_conn.h2.initiate_connection()
- self.client_conn.h2.receive_data(preamble)
- self.client_conn.send(self.client_conn.h2.data_to_send())
-
- while True:
- r = ssl_read_select(self.active_conns, 1)
- for conn in r:
- source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
- other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
- is_server = (conn == self.server_conn.connection)
-
- with source_conn.h2.lock:
- try:
- raw_frame = b''.join(http2_read_raw_frame(source_conn.rfile))
- except:
- for stream in self.streams.values():
- stream.zombie = time.time()
- return
-
- frame, _ = hyperframe.frame.Frame.parse_frame_header(raw_frame[:9])
-
- if is_server:
- list = self.server_reset_streams
- else:
- list = self.client_reset_streams
- if frame.stream_id in list:
- # this frame belongs to a reset stream - just ignore it
- if isinstance(frame, hyperframe.frame.HeadersFrame) or isinstance(frame, hyperframe.frame.ContinuationFrame):
- # we need to keep the hpack-decoder happy too
- source_conn.h2.decoder.decode(raw_frame[9:])
- continue
-
- events = source_conn.h2.receive_data(raw_frame)
- source_conn.send(source_conn.h2.data_to_send())
-
- for event in events:
- if not self._handle_event(event, source_conn, other_conn, is_server):
- return
-
- self._cleanup_streams()
-
-
-class Http2SingleStreamLayer(_HttpTransmissionLayer, threading.Thread):
-
- def __init__(self, ctx, stream_id, request_headers):
- super(Http2SingleStreamLayer, self).__init__(ctx)
- self.zombie = None
- self.client_stream_id = stream_id
- self.server_stream_id = None
- self.request_headers = request_headers
- self.response_headers = None
- self.pushed = False
-
- self.request_data_queue = Queue.Queue()
- self.request_queued_data_length = 0
- self.request_data_finished = threading.Event()
-
- self.response_arrived = threading.Event()
- self.response_data_queue = Queue.Queue()
- self.response_queued_data_length = 0
- self.response_data_finished = threading.Event()
-
- @property
- def data_queue(self):
- if self.response_arrived.is_set():
- return self.response_data_queue
- else:
- return self.request_data_queue
-
- @property
- def queued_data_length(self):
- if self.response_arrived.is_set():
- return self.response_queued_data_length
- else:
- return self.request_queued_data_length
-
- @property
- def data_finished(self):
- if self.response_arrived.is_set():
- return self.response_data_finished
- else:
- return self.request_data_finished
-
- @queued_data_length.setter
- def queued_data_length(self, v):
-        if self.response_arrived.is_set():
-            self.response_queued_data_length = v
-        else:
-            self.request_queued_data_length = v
-
- def is_zombie(self):
- return self.zombie is not None
-
- def read_request(self):
- self.request_data_finished.wait()
-
- authority = self.request_headers.get(':authority', '')
- method = self.request_headers.get(':method', 'GET')
- scheme = self.request_headers.get(':scheme', 'https')
- path = self.request_headers.get(':path', '/')
- host = None
- port = None
-
- if path == '*' or path.startswith("/"):
- form_in = "relative"
- elif method == 'CONNECT': # pragma: no cover
- raise NotImplementedError("CONNECT over HTTP/2 is not implemented.")
- else: # pragma: no cover
- form_in = "absolute"
- # FIXME: verify if path or :host contains what we need
- scheme, host, port, _ = utils.parse_url(path)
-
- if authority:
- host, _, port = authority.partition(':')
-
- if not host:
- host = 'localhost'
- if not port:
- port = 443 if scheme == 'https' else 80
- port = int(port)
-
- data = []
- while self.request_data_queue.qsize() > 0:
- data.append(self.request_data_queue.get())
- data = b"".join(data)
-
- return HTTPRequest(
- form_in,
- method,
- scheme,
- host,
- port,
- path,
- b"HTTP/2.0",
- self.request_headers,
- data,
- timestamp_start=self.timestamp_start,
- timestamp_end=self.timestamp_end,
- )
-
- def send_request(self, message):
- if self.pushed:
- # nothing to do here
- return
-
- with self.server_conn.h2.lock:
- # We must not assign a stream id if we are already a zombie.
- if self.zombie:
- return
-
- self.server_stream_id = self.server_conn.h2.get_next_available_stream_id()
- self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id
-
- self.server_conn.h2.safe_send_headers(
- self.is_zombie,
- self.server_stream_id,
- message.headers
- )
- self.server_conn.h2.safe_send_body(
- self.is_zombie,
- self.server_stream_id,
- message.body
- )
-
- def read_response_headers(self):
- self.response_arrived.wait()
-
- status_code = int(self.response_headers.get(':status', 502))
-
- return HTTPResponse(
- http_version=b"HTTP/2.0",
- status_code=status_code,
- reason='',
- headers=self.response_headers,
- content=None,
- timestamp_start=self.timestamp_start,
- timestamp_end=self.timestamp_end,
- )
-
- def read_response_body(self, request, response):
- while True:
- try:
- yield self.response_data_queue.get(timeout=1)
- except Queue.Empty:
- pass
- if self.response_data_finished.is_set():
- while self.response_data_queue.qsize() > 0:
- yield self.response_data_queue.get()
- return
- if self.zombie:
- return
-
- def send_response_headers(self, response):
- self.client_conn.h2.safe_send_headers(
- self.is_zombie,
- self.client_stream_id,
- response.headers
- )
-
- def send_response_body(self, _response, chunks):
- self.client_conn.h2.safe_send_body(
- self.is_zombie,
- self.client_stream_id,
- chunks
- )
-
- def check_close_connection(self, flow):
- # This layer only handles a single stream.
- # RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
- return True
-
- def connect(self): # pragma: no cover
- raise ValueError("CONNECT inside an HTTP2 stream is not supported.")
-
- def set_server(self, *args, **kwargs): # pragma: no cover
- # do not mess with the server connection - all streams share it.
- pass
-
- def run(self):
- layer = HttpLayer(self, self.mode)
- layer()
- self.zombie = time.time()
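
The classes above are built around h2's sans-IO API: bytes read from the socket are fed into receive_data(), which returns events, and whatever data_to_send() returns is written back to the peer. A rough sketch of that loop, assuming the h2 2.x API used at the time and a socket-like conn object (recv/send) supplied by the caller:

    from h2.connection import H2Connection


    def serve(conn):
        h2_conn = H2Connection(client_side=False)
        h2_conn.initiate_connection()
        conn.send(h2_conn.data_to_send())        # send the server preface / SETTINGS
        while True:
            data = conn.recv(65535)
            if not data:
                return
            events = h2_conn.receive_data(data)  # parse frames into event objects
            for event in events:
                pass                             # dispatch on the event type, as _handle_event() does
            conn.send(h2_conn.data_to_send())    # flush whatever frames h2 queued in response
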
diff --git a/libmproxy/protocol/http_replay.py b/libmproxy/protocol/http_replay.py
deleted file mode 100644
index 63870dfb..00000000
--- a/libmproxy/protocol/http_replay.py
+++ /dev/null
@@ -1,105 +0,0 @@
-from __future__ import (absolute_import, print_function, division)
-import threading
-import traceback
-from libmproxy.exceptions import ReplayException
-from netlib.exceptions import HttpException, TcpException
-from netlib.http import http1
-
-from ..controller import Channel
-from ..models import Error, HTTPResponse, ServerConnection, make_connect_request
-from .base import Kill
-
-
-# TODO: Doesn't really belong into libmproxy.protocol...
-
-
-class RequestReplayThread(threading.Thread):
- name = "RequestReplayThread"
-
- def __init__(self, config, flow, masterq, should_exit):
- """
-        masterq can be a queue or None, if no script hooks should be
- processed.
- """
- self.config, self.flow = config, flow
- if masterq:
- self.channel = Channel(masterq, should_exit)
- else:
- self.channel = None
- super(RequestReplayThread, self).__init__()
-
- def run(self):
- r = self.flow.request
- form_out_backup = r.form_out
- try:
- self.flow.response = None
-
- # If we have a channel, run script hooks.
- if self.channel:
- request_reply = self.channel.ask("request", self.flow)
- if request_reply == Kill:
- raise Kill()
- elif isinstance(request_reply, HTTPResponse):
- self.flow.response = request_reply
-
- if not self.flow.response:
- # In all modes, we directly connect to the server displayed
- if self.config.mode == "upstream":
- server_address = self.config.upstream_server.address
- server = ServerConnection(server_address, (self.config.host, 0))
- server.connect()
- if r.scheme == "https":
- connect_request = make_connect_request((r.host, r.port))
- server.wfile.write(http1.assemble_request(connect_request))
- server.wfile.flush()
- resp = http1.read_response(
- server.rfile,
- connect_request,
- body_size_limit=self.config.body_size_limit
- )
- if resp.status_code != 200:
- raise ReplayException("Upstream server refuses CONNECT request")
- server.establish_ssl(
- self.config.clientcerts,
- sni=self.flow.server_conn.sni
- )
- r.form_out = "relative"
- else:
- r.form_out = "absolute"
- else:
- server_address = (r.host, r.port)
- server = ServerConnection(server_address, (self.config.host, 0))
- server.connect()
- if r.scheme == "https":
- server.establish_ssl(
- self.config.clientcerts,
- sni=self.flow.server_conn.sni
- )
- r.form_out = "relative"
-
- server.wfile.write(http1.assemble_request(r))
- server.wfile.flush()
- self.flow.server_conn = server
- self.flow.response = HTTPResponse.wrap(http1.read_response(
- server.rfile,
- r,
- body_size_limit=self.config.body_size_limit
- ))
- if self.channel:
- response_reply = self.channel.ask("response", self.flow)
- if response_reply == Kill:
- raise Kill()
- except (ReplayException, HttpException, TcpException) as e:
- self.flow.error = Error(str(e))
- if self.channel:
- self.channel.ask("error", self.flow)
- except Kill:
- # Kill should only be raised if there's a channel in the
- # first place.
- from ..proxy.root_context import Log
- self.channel.tell("log", Log("Connection killed", "info"))
- except Exception:
- from ..proxy.root_context import Log
- self.channel.tell("log", Log(traceback.format_exc(), "error"))
- finally:
- r.form_out = form_out_backup
diff --git a/libmproxy/protocol/rawtcp.py b/libmproxy/protocol/rawtcp.py
deleted file mode 100644
index b87899e4..00000000
--- a/libmproxy/protocol/rawtcp.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import (absolute_import, print_function, division)
-import socket
-import six
-import sys
-
-from OpenSSL import SSL
-from netlib.exceptions import TcpException
-
-from netlib.tcp import ssl_read_select
-from netlib.utils import clean_bin
-from ..exceptions import ProtocolException
-from .base import Layer
-
-
-class TcpMessage(object):
-
- def __init__(self, client_conn, server_conn, sender, receiver, message):
- self.client_conn = client_conn
- self.server_conn = server_conn
- self.sender = sender
- self.receiver = receiver
- self.message = message
-
-
-class RawTCPLayer(Layer):
- chunk_size = 4096
-
- def __init__(self, ctx, logging=True):
- self.logging = logging
- super(RawTCPLayer, self).__init__(ctx)
-
- def __call__(self):
- self.connect()
-
- buf = memoryview(bytearray(self.chunk_size))
-
- client = self.client_conn.connection
- server = self.server_conn.connection
- conns = [client, server]
-
- try:
- while True:
- r = ssl_read_select(conns, 10)
- for conn in r:
- dst = server if conn == client else client
-
- size = conn.recv_into(buf, self.chunk_size)
- if not size:
- conns.remove(conn)
- # Shutdown connection to the other peer
- if isinstance(conn, SSL.Connection):
- # We can't half-close a connection, so we just close everything here.
- # Sockets will be cleaned up on a higher level.
- return
- else:
- dst.shutdown(socket.SHUT_WR)
-
- if len(conns) == 0:
- return
- continue
-
- tcp_message = TcpMessage(
- self.client_conn, self.server_conn,
- self.client_conn if dst == server else self.server_conn,
- self.server_conn if dst == server else self.client_conn,
- buf[:size].tobytes())
- self.channel.ask("tcp_message", tcp_message)
- dst.sendall(tcp_message.message)
-
- if self.logging:
- # log messages are prepended with the client address,
- # hence the "weird" direction string.
- if dst == server:
- direction = "-> tcp -> {}".format(repr(self.server_conn.address))
- else:
- direction = "<- tcp <- {}".format(repr(self.server_conn.address))
- data = clean_bin(tcp_message.message)
- self.log(
- "{}\r\n{}".format(direction, data),
- "info"
- )
-
- except (socket.error, TcpException, SSL.Error) as e:
- six.reraise(
- ProtocolException,
- ProtocolException("TCP connection closed unexpectedly: {}".format(repr(e))),
- sys.exc_info()[2]
- )
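
RawTCPLayer above is essentially a bidirectional pipe driven by ssl_read_select (netlib's select wrapper, which also accounts for data OpenSSL may already have buffered). A plain stdlib version of the same piping loop, without the TLS handling and mitmproxy hooks, might look like this:

    import select
    import socket


    def pipe(a, b, chunk_size=4096):
        conns = [a, b]
        while conns:
            readable, _, _ = select.select(conns, [], [], 10)
            for conn in readable:
                dst = b if conn is a else a
                data = conn.recv(chunk_size)
                if not data:
                    # peer closed its sending side; shut down the other direction
                    dst.shutdown(socket.SHUT_WR)
                    conns.remove(conn)
                    if not conns:
                        return
                    continue
                dst.sendall(data)
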
diff --git a/libmproxy/protocol/tls.py b/libmproxy/protocol/tls.py
deleted file mode 100644
index 378dd7d4..00000000
--- a/libmproxy/protocol/tls.py
+++ /dev/null
@@ -1,566 +0,0 @@
-from __future__ import (absolute_import, print_function, division)
-
-import struct
-import sys
-
-from construct import ConstructError
-import six
-from netlib.exceptions import InvalidCertificateException
-from netlib.exceptions import TlsException
-
-from ..contrib.tls._constructs import ClientHello
-from ..exceptions import ProtocolException, TlsProtocolException, ClientHandshakeException
-from .base import Layer
-
-
-# taken from https://testssl.sh/openssl-rfc.mappping.html
-CIPHER_ID_NAME_MAP = {
- 0x00: 'NULL-MD5',
- 0x01: 'NULL-MD5',
- 0x02: 'NULL-SHA',
- 0x03: 'EXP-RC4-MD5',
- 0x04: 'RC4-MD5',
- 0x05: 'RC4-SHA',
- 0x06: 'EXP-RC2-CBC-MD5',
- 0x07: 'IDEA-CBC-SHA',
- 0x08: 'EXP-DES-CBC-SHA',
- 0x09: 'DES-CBC-SHA',
- 0x0a: 'DES-CBC3-SHA',
- 0x0b: 'EXP-DH-DSS-DES-CBC-SHA',
- 0x0c: 'DH-DSS-DES-CBC-SHA',
- 0x0d: 'DH-DSS-DES-CBC3-SHA',
- 0x0e: 'EXP-DH-RSA-DES-CBC-SHA',
- 0x0f: 'DH-RSA-DES-CBC-SHA',
- 0x10: 'DH-RSA-DES-CBC3-SHA',
- 0x11: 'EXP-EDH-DSS-DES-CBC-SHA',
- 0x12: 'EDH-DSS-DES-CBC-SHA',
- 0x13: 'EDH-DSS-DES-CBC3-SHA',
- 0x14: 'EXP-EDH-RSA-DES-CBC-SHA',
- 0x15: 'EDH-RSA-DES-CBC-SHA',
- 0x16: 'EDH-RSA-DES-CBC3-SHA',
- 0x17: 'EXP-ADH-RC4-MD5',
- 0x18: 'ADH-RC4-MD5',
- 0x19: 'EXP-ADH-DES-CBC-SHA',
- 0x1a: 'ADH-DES-CBC-SHA',
- 0x1b: 'ADH-DES-CBC3-SHA',
- # 0x1c: ,
- # 0x1d: ,
- 0x1e: 'KRB5-DES-CBC-SHA',
- 0x1f: 'KRB5-DES-CBC3-SHA',
- 0x20: 'KRB5-RC4-SHA',
- 0x21: 'KRB5-IDEA-CBC-SHA',
- 0x22: 'KRB5-DES-CBC-MD5',
- 0x23: 'KRB5-DES-CBC3-MD5',
- 0x24: 'KRB5-RC4-MD5',
- 0x25: 'KRB5-IDEA-CBC-MD5',
- 0x26: 'EXP-KRB5-DES-CBC-SHA',
- 0x27: 'EXP-KRB5-RC2-CBC-SHA',
- 0x28: 'EXP-KRB5-RC4-SHA',
- 0x29: 'EXP-KRB5-DES-CBC-MD5',
- 0x2a: 'EXP-KRB5-RC2-CBC-MD5',
- 0x2b: 'EXP-KRB5-RC4-MD5',
- 0x2f: 'AES128-SHA',
- 0x30: 'DH-DSS-AES128-SHA',
- 0x31: 'DH-RSA-AES128-SHA',
- 0x32: 'DHE-DSS-AES128-SHA',
- 0x33: 'DHE-RSA-AES128-SHA',
- 0x34: 'ADH-AES128-SHA',
- 0x35: 'AES256-SHA',
- 0x36: 'DH-DSS-AES256-SHA',
- 0x37: 'DH-RSA-AES256-SHA',
- 0x38: 'DHE-DSS-AES256-SHA',
- 0x39: 'DHE-RSA-AES256-SHA',
- 0x3a: 'ADH-AES256-SHA',
- 0x3b: 'NULL-SHA256',
- 0x3c: 'AES128-SHA256',
- 0x3d: 'AES256-SHA256',
- 0x3e: 'DH-DSS-AES128-SHA256',
- 0x3f: 'DH-RSA-AES128-SHA256',
- 0x40: 'DHE-DSS-AES128-SHA256',
- 0x41: 'CAMELLIA128-SHA',
- 0x42: 'DH-DSS-CAMELLIA128-SHA',
- 0x43: 'DH-RSA-CAMELLIA128-SHA',
- 0x44: 'DHE-DSS-CAMELLIA128-SHA',
- 0x45: 'DHE-RSA-CAMELLIA128-SHA',
- 0x46: 'ADH-CAMELLIA128-SHA',
- 0x62: 'EXP1024-DES-CBC-SHA',
- 0x63: 'EXP1024-DHE-DSS-DES-CBC-SHA',
- 0x64: 'EXP1024-RC4-SHA',
- 0x65: 'EXP1024-DHE-DSS-RC4-SHA',
- 0x66: 'DHE-DSS-RC4-SHA',
- 0x67: 'DHE-RSA-AES128-SHA256',
- 0x68: 'DH-DSS-AES256-SHA256',
- 0x69: 'DH-RSA-AES256-SHA256',
- 0x6a: 'DHE-DSS-AES256-SHA256',
- 0x6b: 'DHE-RSA-AES256-SHA256',
- 0x6c: 'ADH-AES128-SHA256',
- 0x6d: 'ADH-AES256-SHA256',
- 0x80: 'GOST94-GOST89-GOST89',
- 0x81: 'GOST2001-GOST89-GOST89',
- 0x82: 'GOST94-NULL-GOST94',
- 0x83: 'GOST2001-GOST89-GOST89',
- 0x84: 'CAMELLIA256-SHA',
- 0x85: 'DH-DSS-CAMELLIA256-SHA',
- 0x86: 'DH-RSA-CAMELLIA256-SHA',
- 0x87: 'DHE-DSS-CAMELLIA256-SHA',
- 0x88: 'DHE-RSA-CAMELLIA256-SHA',
- 0x89: 'ADH-CAMELLIA256-SHA',
- 0x8a: 'PSK-RC4-SHA',
- 0x8b: 'PSK-3DES-EDE-CBC-SHA',
- 0x8c: 'PSK-AES128-CBC-SHA',
- 0x8d: 'PSK-AES256-CBC-SHA',
- # 0x8e: ,
- # 0x8f: ,
- # 0x90: ,
- # 0x91: ,
- # 0x92: ,
- # 0x93: ,
- # 0x94: ,
- # 0x95: ,
- 0x96: 'SEED-SHA',
- 0x97: 'DH-DSS-SEED-SHA',
- 0x98: 'DH-RSA-SEED-SHA',
- 0x99: 'DHE-DSS-SEED-SHA',
- 0x9a: 'DHE-RSA-SEED-SHA',
- 0x9b: 'ADH-SEED-SHA',
- 0x9c: 'AES128-GCM-SHA256',
- 0x9d: 'AES256-GCM-SHA384',
- 0x9e: 'DHE-RSA-AES128-GCM-SHA256',
- 0x9f: 'DHE-RSA-AES256-GCM-SHA384',
- 0xa0: 'DH-RSA-AES128-GCM-SHA256',
- 0xa1: 'DH-RSA-AES256-GCM-SHA384',
- 0xa2: 'DHE-DSS-AES128-GCM-SHA256',
- 0xa3: 'DHE-DSS-AES256-GCM-SHA384',
- 0xa4: 'DH-DSS-AES128-GCM-SHA256',
- 0xa5: 'DH-DSS-AES256-GCM-SHA384',
- 0xa6: 'ADH-AES128-GCM-SHA256',
- 0xa7: 'ADH-AES256-GCM-SHA384',
- 0x5600: 'TLS_FALLBACK_SCSV',
- 0xc001: 'ECDH-ECDSA-NULL-SHA',
- 0xc002: 'ECDH-ECDSA-RC4-SHA',
- 0xc003: 'ECDH-ECDSA-DES-CBC3-SHA',
- 0xc004: 'ECDH-ECDSA-AES128-SHA',
- 0xc005: 'ECDH-ECDSA-AES256-SHA',
- 0xc006: 'ECDHE-ECDSA-NULL-SHA',
- 0xc007: 'ECDHE-ECDSA-RC4-SHA',
- 0xc008: 'ECDHE-ECDSA-DES-CBC3-SHA',
- 0xc009: 'ECDHE-ECDSA-AES128-SHA',
- 0xc00a: 'ECDHE-ECDSA-AES256-SHA',
- 0xc00b: 'ECDH-RSA-NULL-SHA',
- 0xc00c: 'ECDH-RSA-RC4-SHA',
- 0xc00d: 'ECDH-RSA-DES-CBC3-SHA',
- 0xc00e: 'ECDH-RSA-AES128-SHA',
- 0xc00f: 'ECDH-RSA-AES256-SHA',
- 0xc010: 'ECDHE-RSA-NULL-SHA',
- 0xc011: 'ECDHE-RSA-RC4-SHA',
- 0xc012: 'ECDHE-RSA-DES-CBC3-SHA',
- 0xc013: 'ECDHE-RSA-AES128-SHA',
- 0xc014: 'ECDHE-RSA-AES256-SHA',
- 0xc015: 'AECDH-NULL-SHA',
- 0xc016: 'AECDH-RC4-SHA',
- 0xc017: 'AECDH-DES-CBC3-SHA',
- 0xc018: 'AECDH-AES128-SHA',
- 0xc019: 'AECDH-AES256-SHA',
- 0xc01a: 'SRP-3DES-EDE-CBC-SHA',
- 0xc01b: 'SRP-RSA-3DES-EDE-CBC-SHA',
- 0xc01c: 'SRP-DSS-3DES-EDE-CBC-SHA',
- 0xc01d: 'SRP-AES-128-CBC-SHA',
- 0xc01e: 'SRP-RSA-AES-128-CBC-SHA',
- 0xc01f: 'SRP-DSS-AES-128-CBC-SHA',
- 0xc020: 'SRP-AES-256-CBC-SHA',
- 0xc021: 'SRP-RSA-AES-256-CBC-SHA',
- 0xc022: 'SRP-DSS-AES-256-CBC-SHA',
- 0xc023: 'ECDHE-ECDSA-AES128-SHA256',
- 0xc024: 'ECDHE-ECDSA-AES256-SHA384',
- 0xc025: 'ECDH-ECDSA-AES128-SHA256',
- 0xc026: 'ECDH-ECDSA-AES256-SHA384',
- 0xc027: 'ECDHE-RSA-AES128-SHA256',
- 0xc028: 'ECDHE-RSA-AES256-SHA384',
- 0xc029: 'ECDH-RSA-AES128-SHA256',
- 0xc02a: 'ECDH-RSA-AES256-SHA384',
- 0xc02b: 'ECDHE-ECDSA-AES128-GCM-SHA256',
- 0xc02c: 'ECDHE-ECDSA-AES256-GCM-SHA384',
- 0xc02d: 'ECDH-ECDSA-AES128-GCM-SHA256',
- 0xc02e: 'ECDH-ECDSA-AES256-GCM-SHA384',
- 0xc02f: 'ECDHE-RSA-AES128-GCM-SHA256',
- 0xc030: 'ECDHE-RSA-AES256-GCM-SHA384',
- 0xc031: 'ECDH-RSA-AES128-GCM-SHA256',
- 0xc032: 'ECDH-RSA-AES256-GCM-SHA384',
- 0xcc13: 'ECDHE-RSA-CHACHA20-POLY1305',
- 0xcc14: 'ECDHE-ECDSA-CHACHA20-POLY1305',
- 0xcc15: 'DHE-RSA-CHACHA20-POLY1305',
- 0xff00: 'GOST-MD5',
- 0xff01: 'GOST-GOST94',
- 0xff02: 'GOST-GOST89MAC',
- 0xff03: 'GOST-GOST89STREAM',
- 0x010080: 'RC4-MD5',
- 0x020080: 'EXP-RC4-MD5',
- 0x030080: 'RC2-CBC-MD5',
- 0x040080: 'EXP-RC2-CBC-MD5',
- 0x050080: 'IDEA-CBC-MD5',
- 0x060040: 'DES-CBC-MD5',
- 0x0700c0: 'DES-CBC3-MD5',
- 0x080080: 'RC4-64-MD5',
-}
-
-
-def is_tls_record_magic(d):
- """
- Returns:
- True, if the passed bytes start with the TLS record magic bytes.
- False, otherwise.
- """
- d = d[:3]
-
- # TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2
- # http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
- return (
- len(d) == 3 and
- d[0] == '\x16' and
- d[1] == '\x03' and
- d[2] in ('\x00', '\x01', '\x02', '\x03')
- )
-
-
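
As a quick illustration of the check above (under the Python 2 byte-string semantics this module targets): a TLS handshake record starts with content type 0x16 followed by the 0x03 0x0X record version, while anything else fails the test:

    is_tls_record_magic(b"\x16\x03\x01\x00\xc5")   # handshake record, TLS 1.0 -> True
    is_tls_record_magic(b"GET / HTTP/1.1\r\n")     # plaintext HTTP -> False
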
-def get_client_hello(client_conn):
- """
- Peek into the socket and read all records that contain the initial client hello message.
-
- client_conn:
- The :py:class:`client connection <libmproxy.models.ClientConnection>`.
-
- Returns:
- The raw handshake packet bytes, without TLS record header(s).
- """
- client_hello = ""
- client_hello_size = 1
- offset = 0
- while len(client_hello) < client_hello_size:
- record_header = client_conn.rfile.peek(offset + 5)[offset:]
- if not is_tls_record_magic(record_header) or len(record_header) != 5:
- raise TlsProtocolException('Expected TLS record, got "%s" instead.' % record_header)
- record_size = struct.unpack("!H", record_header[3:])[0] + 5
- record_body = client_conn.rfile.peek(offset + record_size)[offset + 5:]
- if len(record_body) != record_size - 5:
- raise TlsProtocolException("Unexpected EOF in TLS handshake: %s" % record_body)
- client_hello += record_body
- offset += record_size
- client_hello_size = struct.unpack("!I", '\x00' + client_hello[1:4])[0] + 4
- return client_hello
-
-
-class TlsClientHello(object):
-
- def __init__(self, raw_client_hello):
- self._client_hello = ClientHello.parse(raw_client_hello)
-
- def raw(self):
- return self._client_hello
-
- @property
- def client_cipher_suites(self):
- return self._client_hello.cipher_suites.cipher_suites
-
- @property
- def client_sni(self):
- for extension in self._client_hello.extensions:
- if (extension.type == 0x00 and len(extension.server_names) == 1
- and extension.server_names[0].type == 0):
- return extension.server_names[0].name
-
- @property
- def client_alpn_protocols(self):
- for extension in self._client_hello.extensions:
- if extension.type == 0x10:
- return list(extension.alpn_protocols)
-
- @classmethod
- def from_client_conn(cls, client_conn):
- """
- Peek into the connection, read the initial client hello and parse it to obtain ALPN values.
- client_conn:
- The :py:class:`client connection <libmproxy.models.ClientConnection>`.
- Returns:
- :py:class:`client hello <libmproxy.protocol.tls.TlsClientHello>`.
- """
- try:
- raw_client_hello = get_client_hello(client_conn)[4:] # exclude handshake header.
- except ProtocolException as e:
- raise TlsProtocolException('Cannot read raw Client Hello: %s' % repr(e))
-
- try:
- return cls(raw_client_hello)
- except ConstructError as e:
- raise TlsProtocolException('Cannot parse Client Hello: %s, Raw Client Hello: %s' %
- (repr(e), raw_client_hello.encode("hex")))
-
- def __repr__(self):
- return "TlsClientHello( sni: %s alpn_protocols: %s, cipher_suites: %s)" % \
- (self.client_sni, self.client_alpn_protocols, self.client_cipher_suites)
-
-
-class TlsLayer(Layer):
-
- def __init__(self, ctx, client_tls, server_tls):
- self.client_sni = None
- self.client_alpn_protocols = None
- self.client_ciphers = []
-
- super(TlsLayer, self).__init__(ctx)
- self._client_tls = client_tls
- self._server_tls = server_tls
-
- self._sni_from_server_change = None
-
- def __call__(self):
- """
- The strategy for establishing SSL is as follows:
- First, we determine whether we need the server cert to establish ssl with the client.
- If so, we first connect to the server and then to the client.
- If not, we only connect to the client and do the server_ssl lazily on a Connect message.
-
-        An additional complexity is that establishing ssl with the server may require an SNI value from
- the client. In an ideal world, we'd do the following:
- 1. Start the SSL handshake with the client
- 2. Check if the client sends a SNI.
- 3. Pause the client handshake, establish SSL with the server.
- 4. Finish the client handshake with the certificate from the server.
- There's just one issue: We cannot get a callback from OpenSSL if the client doesn't send a SNI. :(
- Thus, we manually peek into the connection and parse the ClientHello message to obtain both SNI and ALPN values.
-
- Further notes:
- - OpenSSL 1.0.2 introduces a callback that would help here:
- https://www.openssl.org/docs/ssl/SSL_CTX_set_cert_cb.html
- - The original mitmproxy issue is https://github.com/mitmproxy/mitmproxy/issues/427
- """
-
- client_tls_requires_server_cert = (
- self._client_tls and self._server_tls and not self.config.no_upstream_cert
- )
-
- if self._client_tls:
- self._parse_client_hello()
-
- if client_tls_requires_server_cert:
- self._establish_tls_with_client_and_server()
- elif self._client_tls:
- self._establish_tls_with_client()
-
- layer = self.ctx.next_layer(self)
- layer()
-
- def __repr__(self): # pragma: no cover
- if self._client_tls and self._server_tls:
- return "TlsLayer(client and server)"
- elif self._client_tls:
- return "TlsLayer(client)"
- elif self._server_tls:
- return "TlsLayer(server)"
- else:
- return "TlsLayer(inactive)"
-
- def _parse_client_hello(self):
- """
- Peek into the connection, read the initial client hello and parse it to obtain ALPN values.
- """
- try:
- parsed = TlsClientHello.from_client_conn(self.client_conn)
- self.client_sni = parsed.client_sni
- self.client_alpn_protocols = parsed.client_alpn_protocols
- self.client_ciphers = parsed.client_cipher_suites
- except TlsProtocolException as e:
- self.log("Cannot parse Client Hello: %s" % repr(e), "error")
-
- def connect(self):
- if not self.server_conn:
- self.ctx.connect()
- if self._server_tls and not self.server_conn.tls_established:
- self._establish_tls_with_server()
-
- def set_server(self, address, server_tls=None, sni=None):
- if server_tls is not None:
- self._sni_from_server_change = sni
- self._server_tls = server_tls
- self.ctx.set_server(address, None, None)
-
- @property
- def sni_for_server_connection(self):
- if self._sni_from_server_change is False:
- return None
- else:
- return self._sni_from_server_change or self.client_sni
-
- @property
- def alpn_for_client_connection(self):
- return self.server_conn.get_alpn_proto_negotiated()
-
- def __alpn_select_callback(self, conn_, options):
- """
- Once the client signals the alternate protocols it supports,
- we reconnect upstream with the same list and pass the server's choice down to the client.
- """
-
- # This gets triggered if we haven't established an upstream connection yet.
- default_alpn = b'http/1.1'
- # alpn_preference = b'h2'
-
- if self.alpn_for_client_connection in options:
- choice = bytes(self.alpn_for_client_connection)
- elif default_alpn in options:
- choice = bytes(default_alpn)
- else:
- choice = options[0]
- self.log("ALPN for client: %s" % choice, "debug")
- return choice
-
- def _establish_tls_with_client_and_server(self):
- # If establishing TLS with the server fails, we try to establish TLS with the client nonetheless
- # to send an error message over TLS.
- try:
- self.ctx.connect()
- self._establish_tls_with_server()
- except Exception:
- try:
- self._establish_tls_with_client()
- except:
- pass
- six.reraise(*sys.exc_info())
-
- self._establish_tls_with_client()
-
- def _establish_tls_with_client(self):
- self.log("Establish TLS with client", "debug")
- cert, key, chain_file = self._find_cert()
-
- try:
- self.client_conn.convert_to_ssl(
- cert, key,
- method=self.config.openssl_method_client,
- options=self.config.openssl_options_client,
- cipher_list=self.config.ciphers_client,
- dhparams=self.config.certstore.dhparams,
- chain_file=chain_file,
- alpn_select_callback=self.__alpn_select_callback,
- )
- # Some TLS clients will not fail the handshake,
- # but will immediately throw an "unexpected eof" error on the first read.
- # The reason for this might be difficult to find, so we try to peek here to see if it
-            # raises an error.
- self.client_conn.rfile.peek(1)
- except TlsException as e:
- six.reraise(
- ClientHandshakeException,
- ClientHandshakeException(
- "Cannot establish TLS with client (sni: {sni}): {e}".format(
- sni=self.client_sni, e=repr(e)
- ),
- self.client_sni or repr(self.server_conn.address)
- ),
- sys.exc_info()[2]
- )
-
- def _establish_tls_with_server(self):
- self.log("Establish TLS with server", "debug")
- try:
- # We only support http/1.1 and h2.
- # If the server only supports spdy (next to http/1.1), it may select that
- # and mitmproxy would enter TCP passthrough mode, which we want to avoid.
- deprecated_http2_variant = lambda x: x.startswith("h2-") or x.startswith("spdy")
- if self.client_alpn_protocols:
- alpn = [x for x in self.client_alpn_protocols if not deprecated_http2_variant(x)]
- else:
- alpn = None
- if alpn and "h2" in alpn and not self.config.http2:
- alpn.remove("h2")
-
- ciphers_server = self.config.ciphers_server
- if not ciphers_server:
- ciphers_server = []
- for id in self.client_ciphers:
- if id in CIPHER_ID_NAME_MAP.keys():
- ciphers_server.append(CIPHER_ID_NAME_MAP[id])
- ciphers_server = ':'.join(ciphers_server)
-
- self.server_conn.establish_ssl(
- self.config.clientcerts,
- self.sni_for_server_connection,
- method=self.config.openssl_method_server,
- options=self.config.openssl_options_server,
- verify_options=self.config.openssl_verification_mode_server,
- ca_path=self.config.openssl_trusted_cadir_server,
- ca_pemfile=self.config.openssl_trusted_ca_server,
- cipher_list=ciphers_server,
- alpn_protos=alpn,
- )
- tls_cert_err = self.server_conn.ssl_verification_error
- if tls_cert_err is not None:
- self.log(
- "TLS verification failed for upstream server at depth %s with error: %s" %
- (tls_cert_err['depth'], tls_cert_err['errno']),
- "error")
- self.log("Ignoring server verification error, continuing with connection", "error")
- except InvalidCertificateException as e:
- tls_cert_err = self.server_conn.ssl_verification_error
- self.log(
- "TLS verification failed for upstream server at depth %s with error: %s" %
- (tls_cert_err['depth'], tls_cert_err['errno']),
- "error")
- self.log("Aborting connection attempt", "error")
- six.reraise(
- TlsProtocolException,
- TlsProtocolException("Cannot establish TLS with {address} (sni: {sni}): {e}".format(
- address=repr(self.server_conn.address),
- sni=self.sni_for_server_connection,
- e=repr(e),
- )),
- sys.exc_info()[2]
- )
- except TlsException as e:
- six.reraise(
- TlsProtocolException,
- TlsProtocolException("Cannot establish TLS with {address} (sni: {sni}): {e}".format(
- address=repr(self.server_conn.address),
- sni=self.sni_for_server_connection,
- e=repr(e),
- )),
- sys.exc_info()[2]
- )
-
- self.log("ALPN selected by server: %s" % self.alpn_for_client_connection, "debug")
-
- def _find_cert(self):
- """
- This function determines the Common Name (CN) and Subject Alternative Names (SANs)
- our certificate should have and then fetches a matching cert from the certstore.
- """
- host = None
- sans = set()
-
- # In normal operation, the server address should always be known at this point.
- # However, we may just want to establish TLS so that we can send an error message to the client,
- # in which case the address can be None.
- if self.server_conn.address:
- host = self.server_conn.address.host
-
- # Should we incorporate information from the server certificate?
- use_upstream_cert = (
- self.server_conn and
- self.server_conn.tls_established and
- (not self.config.no_upstream_cert)
- )
- if use_upstream_cert:
- upstream_cert = self.server_conn.cert
- sans.update(upstream_cert.altnames)
- if upstream_cert.cn:
- sans.add(host)
- host = upstream_cert.cn.decode("utf8").encode("idna")
- # Also add SNI values.
- if self.client_sni:
- sans.add(self.client_sni)
- if self._sni_from_server_change:
- sans.add(self._sni_from_server_change)
-
- # Some applications don't consider the CN and expect the hostname to be in the SANs.
- # For example, Thunderbird 38 will display a warning if the remote host is only the CN.
- sans.add(host)
- return self.config.certstore.get_cert(host, list(sans))