path: root/examples/complex/mitmproxywrapper.py
#!/usr/bin/env python
#
# Helper tool to enable/disable the OS X system proxy and wrap mitmproxy
#
# Get usage information with:
#
# mitmproxywrapper.py -h
#
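# Example invocations (illustrative):
#
#   ./mitmproxywrapper.py             # wrap mitmproxy on the default port 8080
#   ./mitmproxywrapper.py -p 9090     # wrap mitmproxy on port 9090
#   ./mitmproxywrapper.py -t          # only toggle the proxy configuration
#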

import subprocess
import re
import argparse
import contextlib
import os
import sys


class Wrapper:
    def __init__(self, port, extra_arguments=None):
        self.port = port
        self.extra_arguments = extra_arguments

    def run_networksetup_command(self, *arguments):
        # universal_newlines=True makes check_output return str rather than
        # bytes, so the regex parsing below also works under Python 3.
        return subprocess.check_output(
            ['sudo', 'networksetup'] + list(arguments),
            universal_newlines=True)

    def proxy_state_for_service(self, service):
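        # Parse the `networksetup -getwebproxy <service>` output (lines of the
        # form "Enabled: Yes") into a dict such as {'Enabled': 'Yes', ...}.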
        state = self.run_networksetup_command(
            '-getwebproxy',
            service).splitlines()
        return dict([re.findall(r'([^:]+): (.*)', line)[0] for line in state])

    def enable_proxy_for_service(self, service):
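        # Point both the HTTP (-setwebproxy) and HTTPS (-setsecurewebproxy)
        # proxies of this network service at the local mitmproxy port.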
        print('Enabling proxy on {}...'.format(service))
        for subcommand in ['-setwebproxy', '-setsecurewebproxy']:
            self.run_networksetup_command(
                subcommand, service, '127.0.0.1', str(
                    self.port))

    def disable_proxy_for_service(self, service):
        print('Disabling proxy on {}...'.format(service))
        for subcommand in ['-setwebproxystate', '-setsecurewebproxystate']:
            self.run_networksetup_command(subcommand, service, 'Off')

    def interface_name_to_service_name_map(self):
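        # Build a mapping from device name to network service name (e.g.
        # 'en0' -> 'Wi-Fi') by parsing `networksetup -listnetworkserviceorder`.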
        order = self.run_networksetup_command('-listnetworkserviceorder')
        mapping = re.findall(
            r'\(\d+\)\s(.*)$\n\(.*Device: (.+)\)$',
            order,
            re.MULTILINE)
        return {device: service for (service, device) in mapping}

    def run_command_with_input(self, command, input):
        # universal_newlines=True lets communicate() accept and return str
        # rather than bytes on Python 3.
        popen = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True)
        (stdout, stderr) = popen.communicate(input)
        return stdout

    def primary_interface_name(self):
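        # Ask scutil for the global IPv4 state and read the PrimaryInterface
        # key, which names the interface currently carrying default traffic.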
        scutil_script = 'get State:/Network/Global/IPv4\nd.show\n'
        stdout = self.run_command_with_input('/usr/sbin/scutil', scutil_script)
        interface, = re.findall(r'PrimaryInterface\s*:\s*(.+)', stdout)
        return interface

    def primary_service_name(self):
        return self.interface_name_to_service_name_map()[
            self.primary_interface_name()]

    def proxy_enabled_for_service(self, service):
        return self.proxy_state_for_service(service)['Enabled'] == 'Yes'

    def toggle_proxy(self):
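        # Decide the new state from the primary service, then apply it to every
        # connected service so they all end up consistently on or off.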
        new_state = not self.proxy_enabled_for_service(
            self.primary_service_name())
        for service_name in self.connected_service_names():
            if self.proxy_enabled_for_service(service_name) and not new_state:
                self.disable_proxy_for_service(service_name)
            elif not self.proxy_enabled_for_service(service_name) and new_state:
                self.enable_proxy_for_service(service_name)

    def connected_service_names(self):
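        # List the scutil State:/Network/Service/<id>/IPv4 keys to find the
        # active services, then resolve each service ID to its user-visible name.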
        scutil_script = 'list\n'
        stdout = self.run_command_with_input('/usr/sbin/scutil', scutil_script)
        service_ids = re.findall(r'State:/Network/Service/(.+)/IPv4', stdout)

        service_names = []
        for service_id in service_ids:
            scutil_script = 'show Setup:/Network/Service/{}\n'.format(
                service_id)
            stdout = self.run_command_with_input(
                '/usr/sbin/scutil',
                scutil_script)
            service_name, = re.findall(r'UserDefinedName\s*:\s*(.+)', stdout)
            service_names.append(service_name)

        return service_names

    def wrap_mitmproxy(self):
        with self.wrap_proxy():
            cmd = ['mitmproxy', '-p', str(self.port)]
            if self.extra_arguments:
                cmd.extend(self.extra_arguments)
            subprocess.check_call(cmd)

    def wrap_honeyproxy(self):
        with self.wrap_proxy():
            popen = subprocess.Popen('honeyproxy.sh')
            try:
                popen.wait()
            except KeyboardInterrupt:
                popen.terminate()

    @contextlib.contextmanager
    def wrap_proxy(self):
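        # Context manager: enable the proxy on every connected service that does
        # not already have it on, yield to the caller, then switch it off again.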
        connected_service_names = self.connected_service_names()
        for service_name in connected_service_names:
            if not self.proxy_enabled_for_service(service_name):
                self.enable_proxy_for_service(service_name)

        try:
            yield
        finally:
            # Switch the proxy off again even if the wrapped process fails or
            # is interrupted.
            for service_name in connected_service_names:
                if self.proxy_enabled_for_service(service_name):
                    self.disable_proxy_for_service(service_name)

    @classmethod
    def ensure_superuser(cls):
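        # Changing the proxy settings requires administrator privileges, so
        # re-launch the whole script under sudo if not already running as root.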
        if os.getuid() != 0:
            print('Relaunching with sudo...')
            os.execv('/usr/bin/sudo', ['/usr/bin/sudo'] + sys.argv)

    @classmethod
    def main(cls):
        parser = argparse.ArgumentParser(
            description='Helper tool for OS X proxy configuration and mitmproxy.',
            epilog='Any additional arguments will be passed on unchanged to mitmproxy.')
        parser.add_argument(
            '-t',
            '--toggle',
            action='store_true',
            help='just toggle the proxy configuration')
        # parser.add_argument('--honeyproxy', action='store_true', help='run honeyproxy instead of mitmproxy')
        parser.add_argument(
            '-p',
            '--port',
            type=int,
            help='override the default port of 8080',
            default=8080)
        args, extra_arguments = parser.parse_known_args()

        wrapper = cls(port=args.port, extra_arguments=extra_arguments)

        if args.toggle:
            wrapper.toggle_proxy()
        # elif args.honeyproxy:
        #     wrapper.wrap_honeyproxy()
        else:
            wrapper.wrap_mitmproxy()


if __name__ == '__main__':
    Wrapper.ensure_superuser()
    Wrapper.main()