aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/imx6
Commit message (Expand)AuthorAgeFilesLines
* kernel: move some disabled symbols to genericAleksander Jan Bajkowski2021-04-111-1/+0
* Revert "imx6: bootscript-apalis: make it working on v2021.01 release"Petr Štetiar2021-02-141-4/+2
* imx6: bootscript-apalis: make it working on v2021.01 releasePetr Štetiar2021-02-141-2/+4
* target: use SPDX license identifiers on MakefilesAdrian Schmutzler2021-02-103-12/+5
* kernel: bump 5.4 to 5.4.95John Audia2021-02-041-37/+0
* imx6: disable unrequired pcie host driverKoen Vandeputte2021-01-071-2/+0
* imx6: refresh kernel configKoen Vandeputte2021-01-071-73/+2
* imx6: gw52xx: fix duplicate regulator namingKoen Vandeputte2021-01-071-0/+37
* kernel: move some disabled symbols to genericAleksander Jan Bajkowski2020-12-221-2/+0
* kernel: bump 5.4 to 5.4.77John Audia2020-11-183-8/+8
* treewide: remove default-state off for LEDsAdrian Schmutzler2020-11-164-10/+4
* kernel: remove support for kernel 4.19Adrian Schmutzler2020-10-308-2758/+0
* kernel: move CONFIG_F2FS_CHECK_FS to generic kernel configHauke Mehrtens2020-10-111-1/+0
* kernel: Move CONFIG_F2FS_FS_SECURITY to generic kernel configHauke Mehrtens2020-10-111-1/+0
* kernel: move F2FS_FS_XATTR and F2FS_STAT_FS symbols to genericDaniel Golle2020-10-092-3/+0
* kernel: clean up XATTR config symbolsPaul Spooren2020-10-091-2/+0
* kernel: unify CONFIG_GPIO_SYSFS in kernel configsFelix Fietkau2020-08-062-2/+0
* imx6: use device-tree compatible for board nameAdrian Schmutzler2020-07-275-134/+96
* treewide: drop shebang from non-executable target filesAdrian Schmutzler2020-06-163-5/+0
* kernel: bump 5.4 to 5.4.45Petr Štetiar2020-06-097-39/+0
* imx6: image: increase max ubifs file-system sizeTim Harvey2020-06-031-1/+1
* imx6: backport v5.8 imx6qdl-gw dt patchesTim Harvey2020-06-037-0/+455
* imx6: add back perf monitor related config symbolPetr Štetiar2020-04-251-0/+1
* imx6: refresh kernel configKoen Vandeputte2020-04-211-170/+5
* kernel: bump 4.19 to 4.19.115Koen Vandeputte2020-04-164-64/+8
* imx6: bootscript: use partition UUID for rootfs if possibleTim Harvey2020-04-031-14/+20
* kernel: bump 5.4 to 5.4.28Petr Štetiar2020-03-284-64/+8
* treewide: remove maintainer variable from targetsPetr Štetiar2020-03-161-1/+0
* imx6: switch to 5.4 kernelPetr Štetiar2020-03-161-1/+1
* imx6: add support for GW5907/GW5910/GW5912/GW5913Tim Harvey2020-03-1211-2/+4248
* imx6: remove unnecessary wildcard from board name matchingTim Harvey2020-03-121-7/+7
* imx6: Remove kernel 4.14 supportHauke Mehrtens2020-03-125-716/+0
* kernel: 5.4: move some kconfig options to genericYousong Zhou2020-03-101-6/+0
* kernel: bump 5.4 to 5.4.24Koen Vandeputte2020-03-091-5/+0
* imx6: 5.4: add missing kernel perf monitor symbolPetr Štetiar2020-03-081-0/+1
* imx6: apalis: move set_blkcnt variable into recovery scriptPetr Štetiar2020-03-081-0/+1
* imx6: apalis: fix variables in bootscriptPetr Štetiar2020-03-081-3/+4
* imx6: 5.4: dts: backport lsm9ds1 imu support for GW553xTim Harvey2020-03-011-0/+73
* treewide: move commonly disabled symbols to generic configDavid Bauer2020-03-011-5/+0
* imx6: add support for kernel 5.4Koen Vandeputte2020-02-285-0/+904
* kernel: Deactivate CONFIG_SFP in generic configHauke Mehrtens2019-12-231-1/+0
* imx6: install-dtb as separate IMAGEYousong Zhou2019-09-151-4/+6
* imx6: split up DEVICE_TITLEMoritz Warning2019-09-111-5/+12
* treewide: sysupgrade: use $UPGRADE_BACKUP to check for backupRafał Miłecki2019-09-111-1/+1
* treewide: use new procd sysupgrade $UPGRADE_BACKUP variableRafał Miłecki2019-09-051-1/+1
* treewide: when copying a backup file always specify dest nameRafał Miłecki2019-09-051-1/+1
* treewide: don't hardcode "sysupgrade.tgz" file nameRafał Miłecki2019-09-051-1/+2
* treewide: replace remaining (not working now) $SAVE_CONFIG usesRafał Miłecki2019-09-051-1/+1
* imx6: bump SDMA firmware to 3.5Koen Vandeputte2019-07-311-0/+0
* imx6: apalis: add missing jffs2reset to ramfs during sysupgradePetr Štetiar2019-07-171-1/+1
from netlib.exceptions import HttpException
from netlib.http import Headers
from netlib.utils import http2_read_raw_frame
from .base import Layer
from .http import _HttpTransmissionLayer, HttpLayer
from ..exceptions import ProtocolException, Http2ProtocolException
from .. import utils
from ..models import HTTPRequest, HTTPResponse


class SafeH2Connection(H2Connection):
    """A thread-safe wrapper around hyper-h2's H2Connection.

    All state-mutating h2 calls are serialized through one RLock and
    immediately flushed to the underlying socket via ``conn.send``, so
    multiple Http2SingleStreamLayer threads can share one connection.
    """

    def __init__(self, conn, *args, **kwargs):
        super(SafeH2Connection, self).__init__(*args, **kwargs)
        self.conn = conn
        # Re-entrant: safe_send_headers may be called while the lock is
        # already held (e.g. from send_request).
        self.lock = threading.RLock()

    def safe_increment_flow_control(self, stream_id, length):
        """Acknowledge *length* received bytes on the connection window and,
        if the stream is still open, on the per-stream window as well."""
        if length == 0:
            return

        with self.lock:
            self.increment_flow_control_window(length)
            self.conn.send(self.data_to_send())
        # Separate critical section: the stream may have closed in between,
        # in which case only the connection-level window is updated.
        with self.lock:
            if stream_id in self.streams and not self.streams[stream_id].closed:
                self.increment_flow_control_window(length, stream_id=stream_id)
                self.conn.send(self.data_to_send())

    def safe_reset_stream(self, stream_id, error_code):
        """Send RST_STREAM; ignore the race where the stream already closed."""
        with self.lock:
            try:
                self.reset_stream(stream_id, error_code)
            except h2.exceptions.StreamClosedError:  # pragma: no cover
                # stream is already closed - good
                pass
            self.conn.send(self.data_to_send())

    def safe_update_settings(self, new_settings):
        with self.lock:
            self.update_settings(new_settings)
            self.conn.send(self.data_to_send())

    def safe_send_headers(self, is_zombie, stream_id, headers):
        """Send a HEADERS frame unless the stream has been zombified."""
        with self.lock:
            if is_zombie():  # pragma: no cover
                raise Http2ProtocolException("Zombie Stream")
            self.send_headers(stream_id, headers.fields)
            self.conn.send(self.data_to_send())

    def safe_send_body(self, is_zombie, stream_id, chunks):
        """Send *chunks* as DATA frames, respecting max frame size and the
        peer's flow-control window, then half-close the stream.

        The lock is taken and released per frame so other streams are not
        starved while this one waits for window updates.
        """
        for chunk in chunks:
            position = 0
            while position < len(chunk):
                self.lock.acquire()
                if is_zombie():  # pragma: no cover
                    self.lock.release()
                    raise Http2ProtocolException("Zombie Stream")
                max_outbound_frame_size = self.max_outbound_frame_size
                frame_chunk = chunk[position:position + max_outbound_frame_size]
                if self.local_flow_control_window(stream_id) < len(frame_chunk):
                    # No window available: yield the lock (and the GIL) and
                    # retry, so WINDOW_UPDATE frames can be processed.
                    self.lock.release()
                    time.sleep(0)
                    continue
                self.send_data(stream_id, frame_chunk)
                self.conn.send(self.data_to_send())
                self.lock.release()
                position += max_outbound_frame_size

        with self.lock:
            if is_zombie():  # pragma: no cover
                raise Http2ProtocolException("Zombie Stream")
            self.end_stream(stream_id)
            self.conn.send(self.data_to_send())


class Http2Layer(Layer):
    """Connection-level HTTP/2 proxy layer.

    Owns the client and server h2 connections, demultiplexes incoming
    frames into per-stream Http2SingleStreamLayer threads, and maps
    server-side stream ids back to their client-side counterparts.
    """

    def __init__(self, ctx, mode):
        super(Http2Layer, self).__init__(ctx)
        self.mode = mode
        # eid (client-side stream id) -> Http2SingleStreamLayer
        self.streams = dict()
        # server stream id -> client stream id (0 maps to 0: connection-level)
        self.server_to_client_stream_ids = dict([(0, 0)])
        self.client_conn.h2 = SafeH2Connection(self.client_conn, client_side=False, header_encoding=False)

        # make sure that we only pass actual SSL.Connection objects in here,
        # because otherwise ssl_read_select fails!
        self.active_conns = [self.client_conn.connection]

    def _initiate_server_conn(self):
        # Wrap the upstream connection and perform the h2 handshake.
        self.server_conn.h2 = SafeH2Connection(self.server_conn, client_side=True, header_encoding=False)
        self.server_conn.h2.initiate_connection()
        self.server_conn.send(self.server_conn.h2.data_to_send())
        self.active_conns.append(self.server_conn.connection)

    def connect(self):  # pragma: no cover
        raise Http2ProtocolException("HTTP2 layer should already have a connection.")

    def set_server(self):  # pragma: no cover
        raise NotImplementedError("Cannot change server for HTTP2 connections.")

    def disconnect(self):  # pragma: no cover
        raise NotImplementedError("Cannot dis- or reconnect in HTTP2 connections.")

    def next_layer(self):  # pragma: no cover
        # WebSockets over HTTP/2?
        # CONNECT for proxying?
        raise NotImplementedError()

    def _handle_event(self, event, source_conn, other_conn, is_server):
        """Dispatch a single h2 event to the affected stream.

        Returns False when the connection should be torn down
        (ConnectionTerminated), True otherwise.
        """
        self.log(
            "HTTP2 Event from {}".format("server" if is_server else "client"),
            "debug",
            [repr(event)]
        )

        if hasattr(event, 'stream_id'):
            # Server-initiated odd ids are really client stream ids that were
            # re-numbered upstream; translate them back.
            if is_server and event.stream_id % 2 == 1:
                eid = self.server_to_client_stream_ids[event.stream_id]
            else:
                eid = event.stream_id

        if isinstance(event, h2.events.RequestReceived):
            headers = Headers([[k, v] for k, v in event.headers])
            self.streams[eid] = Http2SingleStreamLayer(self, eid, headers)
            self.streams[eid].timestamp_start = time.time()
            self.streams[eid].start()
        elif isinstance(event, h2.events.ResponseReceived):
            headers = Headers([[k, v] for k, v in event.headers])
            self.streams[eid].queued_data_length = 0
            self.streams[eid].timestamp_start = time.time()
            self.streams[eid].response_headers = headers
            self.streams[eid].response_arrived.set()
        elif isinstance(event, h2.events.DataReceived):
            if self.config.body_size_limit and self.streams[eid].queued_data_length > self.config.body_size_limit:
                raise HttpException("HTTP body too large. Limit is {}.".format(self.config.body_size_limit))
            self.streams[eid].data_queue.put(event.data)
            self.streams[eid].queued_data_length += len(event.data)
            # Acknowledge the consumed bytes so the sender's window reopens.
            source_conn.h2.safe_increment_flow_control(event.stream_id, event.flow_controlled_length)
        elif isinstance(event, h2.events.StreamEnded):
            self.streams[eid].timestamp_end = time.time()
            self.streams[eid].data_finished.set()
        elif isinstance(event, h2.events.StreamReset):
            self.streams[eid].zombie = time.time()
            # 0x8 == CANCEL: forward the reset to the paired stream.
            if eid in self.streams and event.error_code == 0x8:
                if is_server:
                    other_stream_id = self.streams[eid].client_stream_id
                else:
                    other_stream_id = self.streams[eid].server_stream_id
                if other_stream_id is not None:
                    other_conn.h2.safe_reset_stream(other_stream_id, event.error_code)
        elif isinstance(event, h2.events.RemoteSettingsChanged):
            # Mirror the peer's settings onto the other connection.
            new_settings = dict([(id, cs.new_value) for (id, cs) in six.iteritems(event.changed_settings)])
            other_conn.h2.safe_update_settings(new_settings)
        elif isinstance(event, h2.events.ConnectionTerminated):
            # Do not immediately terminate the other connection.
            # Some streams might be still sending data to the client.
            return False
        elif isinstance(event, h2.events.PushedStreamReceived):
            # pushed stream ids should be uniq and not dependent on race conditions
            # only the parent stream id must be looked up first
            parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
            with self.client_conn.h2.lock:
                self.client_conn.h2.push_stream(parent_eid, event.pushed_stream_id, event.headers)

            headers = Headers([[str(k), str(v)] for k, v in event.headers])
            headers['x-mitmproxy-pushed'] = 'true'
            self.streams[event.pushed_stream_id] = Http2SingleStreamLayer(self, event.pushed_stream_id, headers)
            self.streams[event.pushed_stream_id].timestamp_start = time.time()
            self.streams[event.pushed_stream_id].pushed = True
            self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
            # A pushed request carries no body: mark it finished immediately.
            self.streams[event.pushed_stream_id].timestamp_end = time.time()
            self.streams[event.pushed_stream_id].request_data_finished.set()
            self.streams[event.pushed_stream_id].start()
        elif isinstance(event, h2.events.TrailersReceived):
            raise NotImplementedError()

        return True

    def _cleanup_streams(self):
        """Drop stream layers that have been zombies for more than 10s."""
        death_time = time.time() - 10
        # Iterate over a snapshot: we pop from self.streams while looping.
        for stream_id in list(self.streams.keys()):
            zombie = self.streams[stream_id].zombie
            if zombie and zombie <= death_time:
                self.streams.pop(stream_id, None)

    def __call__(self):
        if self.server_conn:
            self._initiate_server_conn()

        # Consume the 24-byte client connection preface before handing
        # bytes to h2.
        preamble = self.client_conn.rfile.read(24)
        self.client_conn.h2.initiate_connection()
        self.client_conn.h2.receive_data(preamble)
        self.client_conn.send(self.client_conn.h2.data_to_send())

        while True:
            r = ssl_read_select(self.active_conns, 1)
            for conn in r:
                source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
                other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
                is_server = (conn == self.server_conn.connection)

                with source_conn.h2.lock:
                    try:
                        raw_frame = b''.join(http2_read_raw_frame(source_conn.rfile))
                    except:
                        # read frame failed: connection closed
                        # kill all streams
                        for stream in self.streams.values():
                            stream.zombie = time.time()
                        return

                    events = source_conn.h2.receive_data(raw_frame)
                    source_conn.send(source_conn.h2.data_to_send())

                    for event in events:
                        if not self._handle_event(event, source_conn, other_conn, is_server):
                            return

            self._cleanup_streams()


class Http2SingleStreamLayer(_HttpTransmissionLayer, threading.Thread):
    """One proxied HTTP/2 stream, run on its own thread.

    Request data is queued by Http2Layer's event loop; this thread drives
    the generic HttpLayer state machine on top of it. The request_*/
    response_* attribute pairs are selected via properties depending on
    whether the response has arrived yet.
    """

    def __init__(self, ctx, stream_id, request_headers):
        super(Http2SingleStreamLayer, self).__init__(
            ctx, name="Thread-Http2SingleStreamLayer-{}".format(stream_id)
        )
        # Timestamp of death, or None while the stream is alive.
        self.zombie = None
        self.client_stream_id = stream_id
        self.server_stream_id = None
        self.request_headers = request_headers
        self.response_headers = None
        self.pushed = False

        self.request_data_queue = queue.Queue()
        self.request_queued_data_length = 0
        self.request_data_finished = threading.Event()

        self.response_arrived = threading.Event()
        self.response_data_queue = queue.Queue()
        self.response_queued_data_length = 0
        self.response_data_finished = threading.Event()

    @property
    def data_queue(self):
        # Before the response arrives, queued data belongs to the request.
        if self.response_arrived.is_set():
            return self.response_data_queue
        else:
            return self.request_data_queue

    @property
    def queued_data_length(self):
        if self.response_arrived.is_set():
            return self.response_queued_data_length
        else:
            return self.request_queued_data_length

    @property
    def data_finished(self):
        if self.response_arrived.is_set():
            return self.response_data_finished
        else:
            return self.request_data_finished

    @queued_data_length.setter
    def queued_data_length(self, v):
        # BUGFIX: the setter previously *returned* the current value instead
        # of assigning, so writes (including the += in _handle_event) were
        # silently discarded and body_size_limit could never trigger.
        if self.response_arrived.is_set():
            self.response_queued_data_length = v
        else:
            self.request_queued_data_length = v

    def is_zombie(self):
        return self.zombie is not None

    def read_request(self):
        """Block until the full request has been queued, then build an
        HTTPRequest from the pseudo-headers and buffered body data."""
        self.request_data_finished.wait()

        authority = self.request_headers.get(':authority', '')
        method = self.request_headers.get(':method', 'GET')
        scheme = self.request_headers.get(':scheme', 'https')
        path = self.request_headers.get(':path', '/')
        host = None
        port = None

        if path == '*' or path.startswith("/"):
            first_line_format = "relative"
        elif method == 'CONNECT':  # pragma: no cover
            raise NotImplementedError("CONNECT over HTTP/2 is not implemented.")
        else:  # pragma: no cover
            first_line_format = "absolute"
            # FIXME: verify if path or :host contains what we need
            scheme, host, port, _ = utils.parse_url(path)

        if authority:
            host, _, port = authority.partition(':')

        if not host:
            host = 'localhost'
        if not port:
            port = 443 if scheme == 'https' else 80
        port = int(port)

        data = []
        while self.request_data_queue.qsize() > 0:
            data.append(self.request_data_queue.get())
        data = b"".join(data)

        return HTTPRequest(
            first_line_format,
            method,
            scheme,
            host,
            port,
            path,
            b"HTTP/2.0",
            self.request_headers,
            data,
            timestamp_start=self.timestamp_start,
            timestamp_end=self.timestamp_end,
        )

    def send_request(self, message):
        if self.pushed:
            # nothing to do here
            return

        with self.server_conn.h2.lock:
            # We must not assign a stream id if we are already a zombie.
            if self.zombie:  # pragma: no cover
                raise Http2ProtocolException("Zombie Stream")

            self.server_stream_id = self.server_conn.h2.get_next_available_stream_id()
            self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id

            self.server_conn.h2.safe_send_headers(
                self.is_zombie,
                self.server_stream_id,
                message.headers
            )
        self.server_conn.h2.safe_send_body(
            self.is_zombie,
            self.server_stream_id,
            message.body
        )

        if self.zombie:  # pragma: no cover
            raise Http2ProtocolException("Zombie Stream")

    def read_response_headers(self):
        self.response_arrived.wait()

        status_code = int(self.response_headers.get(':status', 502))

        return HTTPResponse(
            http_version=b"HTTP/2.0",
            status_code=status_code,
            reason='',
            headers=self.response_headers,
            content=None,
            timestamp_start=self.timestamp_start,
            timestamp_end=self.timestamp_end,
        )

    def read_response_body(self, request, response):
        """Yield response body chunks as they arrive; drain the queue and
        stop once the stream has ended."""
        while True:
            try:
                yield self.response_data_queue.get(timeout=1)
            except queue.Empty:
                # Timeout is only used to periodically re-check the
                # finished/zombie flags below.
                pass
            if self.response_data_finished.is_set():
                while self.response_data_queue.qsize() > 0:
                    yield self.response_data_queue.get()
                return
            if self.zombie:  # pragma: no cover
                raise Http2ProtocolException("Zombie Stream")

    def send_response_headers(self, response):
        self.client_conn.h2.safe_send_headers(
            self.is_zombie,
            self.client_stream_id,
            response.headers
        )
        if self.zombie:  # pragma: no cover
            raise Http2ProtocolException("Zombie Stream")

    def send_response_body(self, _response, chunks):
        self.client_conn.h2.safe_send_body(
            self.is_zombie,
            self.client_stream_id,
            chunks
        )
        if self.zombie:  # pragma: no cover
            raise Http2ProtocolException("Zombie Stream")

    def check_close_connection(self, flow):
        # This layer only handles a single stream.
        # RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
        return True

    def set_server(self, *args, **kwargs):  # pragma: no cover
        # do not mess with the server connection - all streams share it.
        pass

    def run(self):
        layer = HttpLayer(self, self.mode)
        try:
            layer()
        except ProtocolException as e:
            self.log(repr(e), "info")
            self.log(traceback.format_exc(), "debug")
        # Mark ourselves dead so Http2Layer can garbage-collect this stream.
        self.zombie = time.time()