/****************************************************************************** * common/compat/grant_table.c * */ #include #define xen_grant_entry grant_entry CHECK_grant_entry; #undef xen_grant_entry #define xen_gnttab_map_grant_ref gnttab_map_grant_ref CHECK_gnttab_map_grant_ref; #undef xen_gnttab_map_grant_ref #define xen_gnttab_unmap_grant_ref gnttab_unmap_grant_ref CHECK_gnttab_unmap_grant_ref; #undef xen_gnttab_unmap_grant_ref #define xen_gnttab_unmap_and_replace gnttab_unmap_and_replace CHECK_gnttab_unmap_and_replace; #undef xen_gnttab_unmap_and_replace DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_compat_t); DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_compat_t); DEFINE_XEN_GUEST_HANDLE(gnttab_copy_compat_t); #define xen_gnttab_dump_table gnttab_dump_table CHECK_gnttab_dump_table; #undef xen_gnttab_dump_table int compat_grant_table_op(unsigned int cmd, XEN_GUEST_HANDLE(void) cmp_uop, unsigned int count) { int rc = 0; unsigned int i; XEN_GUEST_HANDLE(void) cnt_uop; set_xen_guest_handle(cnt_uop, NULL); switch ( cmd ) { #define CASE(name) \ case GNTTABOP_##name: \ if ( unlikely(!guest_handle_okay(guest_handle_cast(cmp_uop, \ gnttab_##name##_compat_t), \ count)) ) \ rc = -EFAULT; \ break #ifndef CHECK_gnttab_map_grant_ref CASE(map_grant_ref); #endif #ifndef CHECK_gnttab_unmap_grant_ref CASE(unmap_grant_ref); #endif #ifndef CHECK_gnttab_unmap_and_replace CASE(unmap_and_replace); #endif #ifndef CHECK_gnttab_setup_table CASE(setup_table); #endif #ifndef CHECK_gnttab_transfer CASE(transfer); #endif #ifndef CHECK_gnttab_copy CASE(copy); #endif #ifndef CHECK_gnttab_dump_table CASE(dump_table); #endif #undef CASE default: return do_grant_table_op(cmd, cmp_uop, count); } if ( (int)count < 0 ) rc = -EINVAL; for ( i = 0; i < count && rc == 0; ) { unsigned int n; union { XEN_GUEST_HANDLE(void) uop; struct gnttab_setup_table *setup; struct gnttab_transfer *xfer; struct gnttab_copy *copy; } nat; union { struct compat_gnttab_setup_table setup; struct compat_gnttab_transfer xfer; 
struct compat_gnttab_copy copy; } cmp; set_xen_guest_handle(nat.uop, COMPAT_ARG_XLAT_VIRT_BASE); switch ( cmd ) { case GNTTABOP_setup_table: if ( unlikely(count > 1) ) rc = -EINVAL; else if ( unlikely(__copy_from_guest(&cmp.setup, cmp_uop, 1)) ) rc = -EFAULT; else if ( unlikely(!compat_handle_okay(cmp.setup.frame_list, cmp.setup.nr_frames)) ) rc = -EFAULT; else { unsigned int max_frame_list_size_in_page = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.setup)) / sizeof(*nat.setup->frame_list.p); if ( max_frame_list_size_in_page < max_nr_grant_frames ) { gdprintk(XENLOG_WARNING, "max_nr_grant_frames is too large (%u,%u)\n", max_nr_grant_frames, max_frame_list_size_in_page); rc = -EINVAL; } else { #define XLAT_gnttab_setup_table_HNDL_frame_list(_d_, _s_) \ set_xen_guest_handle((_d_)->frame_list, (unsigned long *)(nat.setup + 1)) XLAT_gnttab_setup_table(nat.setup, &cmp.setup); #undef XLAT_gnttab_setup_table_HNDL_frame_list rc = gnttab_setup_table(guest_handle_cast(nat.uop, gnttab_setup_table_t), 1); } } ASSERT(rc <= 0); if ( rc == 0 ) { #define XLAT_gnttab_setup_table_HNDL_frame_list(_d_, _s_) \ do \ { \ if ( (_s_)->status == GNTST_okay ) \ { \ for ( i = 0; i < (_s_)->nr_frames; ++i ) \ { \ unsigned int frame = (_s_)->frame_list.p[i]; \ (void)__copy_to_compat_offset((_d_)->frame_list, i, &frame, 1); \ } \ } \ } while (0) XLAT_gnttab_setup_table(&cmp.setup, nat.setup); #undef XLAT_gnttab_setup_table_HNDL_frame_list if ( unlikely(__copy_to_guest(cmp_uop, &cmp.setup, 1)) ) rc = -EFAULT; else i = 1; } break; case GNTTABOP_transfer: for ( n = 0; n < COMPAT_ARG_XLAT_SIZE / sizeof(*nat.xfer) && i < count && rc == 0; ++i, ++n ) { if ( unlikely(__copy_from_guest_offset(&cmp.xfer, cmp_uop, i, 1)) ) rc = -EFAULT; else { XLAT_gnttab_transfer(nat.xfer + n, &cmp.xfer); } } if ( rc == 0 ) rc = gnttab_transfer(guest_handle_cast(nat.uop, gnttab_transfer_t), n); if ( rc > 0 ) { ASSERT(rc < n); i -= n - rc; n = rc; } if ( rc >= 0 ) { XEN_GUEST_HANDLE(gnttab_transfer_compat_t) xfer; xfer = 
guest_handle_cast(cmp_uop, gnttab_transfer_compat_t); guest_handle_add_offset(xfer, i); cnt_uop = guest_handle_cast(xfer, void); while ( n-- ) { guest_handle_add_offset(xfer, -1); if ( __copy_field_t
/**
 *  The WebSocket backend is responsible for updating our knowledge of flows and events
 *  from the REST API and live updates delivered via a WebSocket connection.
 *  An alternative backend may use the REST API only to host static instances.
 */
import { fetchApi } from "../utils"

// Server-sent command telling us our cached state for a resource is stale:
// drop any in-flight fetch for it and re-fetch the full snapshot.
const CMD_RESET = 'reset'

export default class WebsocketBackend {
    /**
     * Keeps the store in sync with the server: full snapshots are fetched
     * over the REST API, incremental updates arrive via a WebSocket.
     *
     * @param {Object} store - store whose dispatch() receives all updates.
     */
    constructor(store) {
        // resource name -> queue of WebSocket messages that arrived while a
        // full fetch of that resource was still in flight (replayed later).
        this.activeFetches = {}
        this.store = store
        this.connect()
    }

    connect() {
        // http -> ws, https -> wss (replace() only touches the scheme prefix).
        this.socket = new WebSocket(location.origin.replace('http', 'ws') + '/updates')
        this.socket.addEventListener('open', () => this.onOpen())
        this.socket.addEventListener('close', () => this.onClose())
        this.socket.addEventListener('message', msg => this.onMessage(JSON.parse(msg.data)))
        this.socket.addEventListener('error', error => this.onError(error))
    }

    onOpen() {
        this.fetchData("settings")
        this.fetchData("flows")
        this.fetchData("events")
    }

    /**
     * Fetch the full state of a resource. While the fetch is in flight,
     * live updates for this resource are queued (see onMessage) and
     * replayed once the snapshot has been dispatched (see receive).
     */
    fetchData(resource) {
        let queue = []
        this.activeFetches[resource] = queue
        fetchApi(`/${resource}`)
            .then(res => {
                // Don't parse an HTTP error page as resource state.
                if (!res.ok) {
                    throw new Error(`HTTP ${res.status} fetching /${resource}`)
                }
                return res.json()
            })
            .then(json => {
                // Make sure that we are not superseded yet by the server sending a RESET.
                if (this.activeFetches[resource] === queue)
                    this.receive(resource, json)
            })
            .catch(err => {
                // Previously this rejection was unhandled: a failed fetch left
                // activeFetches[resource] set forever, so all later updates for
                // the resource were queued into a dead array and never reached
                // the store. Clear the marker (if still ours) and log.
                // NOTE(review): the store then lacks a snapshot until the next
                // RESET triggers a refetch — confirm this matches server behavior.
                if (this.activeFetches[resource] === queue)
                    delete this.activeFetches[resource]
                console.error("could not fetch " + resource, err)
            })
    }

    /**
     * Handle one WebSocket message. RESET restarts the fetch for a resource;
     * messages for a resource with a fetch in flight are queued; everything
     * else is dispatched to the store as `<RESOURCE>_<CMD>`.
     */
    onMessage(msg) {

        if (msg.cmd === CMD_RESET) {
            return this.fetchData(msg.resource)
        }
        if (msg.resource in this.activeFetches) {
            this.activeFetches[msg.resource].push(msg)
        } else {
            let type = `${msg.resource}_${msg.cmd}`.toUpperCase()
            this.store.dispatch({ type, ...msg })
        }
    }

    /**
     * Dispatch a freshly fetched snapshot, then replay any updates that
     * were queued while the fetch was in flight.
     */
    receive(resource, data) {
        let type = `${resource}_RECEIVE`.toUpperCase()
        this.store.dispatch({ type, cmd: "receive", resource, data })
        let queue = this.activeFetches[resource]
        delete this.activeFetches[resource]
        queue.forEach(msg => this.onMessage(msg))
    }

    onClose() {
        // FIXME: should attempt to reconnect / surface the disconnect to the UI.
        console.error("onClose", arguments)
    }

    onError() {
        // FIXME: should surface the error to the UI.
        console.error("onError", arguments)
    }
}