path: root/net/8021q
author    root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
committer root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
commit    849369d6c66d3054688672f97d31fceb8e8230fb (patch)
tree      6135abc790ca67dedbe07c39806591e70eda81ce /net/8021q
initial_commit
Diffstat (limited to 'net/8021q')
-rw-r--r--  net/8021q/Kconfig          29
-rw-r--r--  net/8021q/Makefile         10
-rw-r--r--  net/8021q/vlan.c          722
-rw-r--r--  net/8021q/vlan.h          134
-rw-r--r--  net/8021q/vlan_core.c     181
-rw-r--r--  net/8021q/vlan_dev.c      705
-rw-r--r--  net/8021q/vlan_gvrp.c      66
-rw-r--r--  net/8021q/vlan_netlink.c  240
-rw-r--r--  net/8021q/vlanproc.c      327
-rw-r--r--  net/8021q/vlanproc.h       20
10 files changed, 2434 insertions, 0 deletions
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
new file mode 100644
index 00000000..fa073a54
--- /dev/null
+++ b/net/8021q/Kconfig
@@ -0,0 +1,29 @@
+#
+# Configuration for 802.1Q VLAN support
+#
+
+config VLAN_8021Q
+ tristate "802.1Q VLAN Support"
+ ---help---
+ Select this and you will be able to create 802.1Q VLAN interfaces
+ on your ethernet interfaces. 802.1Q VLAN supports almost
+ everything a regular ethernet interface does, including
+ firewalling, bridging, and of course IP traffic. You will need
+ the 'vconfig' tool from the VLAN project in order to effectively
+ use VLANs. See the VLAN web page for more information:
+ <http://www.candelatech.com/~greear/vlan.html>
+
+ To compile this code as a module, choose M here: the module
+ will be called 8021q.
+
+ If unsure, say N.
+
+config VLAN_8021Q_GVRP
+ bool "GVRP (GARP VLAN Registration Protocol) support"
+ depends on VLAN_8021Q
+ select GARP
+ help
+ Select this to enable GVRP end-system support. GVRP is used for
+ automatic propagation of registered VLANs to switches.
+
+ If unsure, say N.
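
Aside (not part of the patch): the help text above points at the userspace 'vconfig' tool. Under the hood such a tool drives this module through the SIOCSIFVLAN ioctl that vlan.c, later in this commit, registers via vlan_ioctl_set(vlan_ioctl_handler). A minimal, hypothetical userspace sketch of the ADD_VLAN_CMD path; it assumes an existing lower device named eth0, CAP_NET_ADMIN, and the 8021q module being loaded:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

int main(void)
{
        struct vlan_ioctl_args args;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);  /* an AF_INET socket is a convenient ioctl handle */

        if (fd < 0)
                return 1;

        memset(&args, 0, sizeof(args));
        args.cmd = ADD_VLAN_CMD;                  /* handled by register_vlan_device() below */
        strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
        args.u.VID = 100;

        if (ioctl(fd, SIOCSIFVLAN, &args) < 0)
                perror("SIOCSIFVLAN");

        close(fd);
        return 0;
}

With the default VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD naming scheme (set in vlan_init_net) the new interface is named eth0.100.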
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
new file mode 100644
index 00000000..9f4f174e
--- /dev/null
+++ b/net/8021q/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux VLAN layer.
+#
+obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o
+obj-$(CONFIG_VLAN_8021Q) += 8021q.o
+
+8021q-y := vlan.o vlan_dev.o vlan_netlink.o
+8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
+8021q-$(CONFIG_PROC_FS) += vlanproc.o
+
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
new file mode 100644
index 00000000..917ecb93
--- /dev/null
+++ b/net/8021q/vlan.c
@@ -0,0 +1,722 @@
+/*
+ * INET 802.1Q VLAN
+ * Ethernet-type device handling.
+ *
+ * Authors: Ben Greear <greearb@candelatech.com>
+ * Please send support related email to: netdev@vger.kernel.org
+ * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *
+ * Fixes:
+ * Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
+ * Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
+ * Correct all the locking - David S. Miller <davem@redhat.com>;
+ * Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <net/p8022.h>
+#include <net/arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <asm/uaccess.h>
+
+#include <linux/if_vlan.h>
+#include "vlan.h"
+#include "vlanproc.h"
+
+#define DRV_VERSION "1.8"
+
+/* Global VLAN variables */
+
+int vlan_net_id __read_mostly;
+
+const char vlan_fullname[] = "802.1Q VLAN Support";
+const char vlan_version[] = DRV_VERSION;
+
+/* End of global variables definitions. */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+ int i;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+ kfree(grp->vlan_devices_arrays[i]);
+ kfree(grp);
+}
+
+static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
+{
+ struct vlan_group *grp;
+
+ grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
+ if (!grp)
+ return NULL;
+
+ grp->real_dev = real_dev;
+ return grp;
+}
+
+static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
+{
+ struct net_device **array;
+ unsigned int size;
+
+ ASSERT_RTNL();
+
+ array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ if (array != NULL)
+ return 0;
+
+ size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
+ array = kzalloc(size, GFP_KERNEL);
+ if (array == NULL)
+ return -ENOBUFS;
+
+ vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
+ return 0;
+}
+
+static void vlan_rcu_free(struct rcu_head *rcu)
+{
+ vlan_group_free(container_of(rcu, struct vlan_group, rcu));
+}
+
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct vlan_group *grp;
+ u16 vlan_id = vlan->vlan_id;
+
+ ASSERT_RTNL();
+
+ grp = rtnl_dereference(real_dev->vlgrp);
+ BUG_ON(!grp);
+
+ /* Take it out of our own structures, but be sure to interlock with
+ * HW accelerating devices or SW vlan input packet processing if
+ * VLAN is not 0 (leave it there for 802.1p).
+ */
+ if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
+ ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+
+ grp->nr_vlans--;
+
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_leave(dev);
+
+ vlan_group_set_device(grp, vlan_id, NULL);
+ /* Because unregister_netdevice_queue() makes sure at least one rcu
+ * grace period is respected before device freeing,
+ * we don't need to call synchronize_net() here.
+ */
+ unregister_netdevice_queue(dev, head);
+
+ /* If the group is now empty, kill off the group. */
+ if (grp->nr_vlans == 0) {
+ vlan_gvrp_uninit_applicant(real_dev);
+
+ rcu_assign_pointer(real_dev->vlgrp, NULL);
+ if (ops->ndo_vlan_rx_register)
+ ops->ndo_vlan_rx_register(real_dev, NULL);
+
+ /* Free the group, after all cpu's are done. */
+ call_rcu(&grp->rcu, vlan_rcu_free);
+ }
+
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put(real_dev);
+}
+
+int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
+{
+ const char *name = real_dev->name;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+
+ if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
+ pr_info("8021q: VLANs not supported on %s\n", name);
+ return -EOPNOTSUPP;
+ }
+
+ if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
+ pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
+ return -EOPNOTSUPP;
+ }
+
+ if (vlan_find_dev(real_dev, vlan_id) != NULL)
+ return -EEXIST;
+
+ return 0;
+}
+
+int register_vlan_dev(struct net_device *dev)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ u16 vlan_id = vlan->vlan_id;
+ struct vlan_group *grp, *ngrp = NULL;
+ int err;
+
+ grp = rtnl_dereference(real_dev->vlgrp);
+ if (!grp) {
+ ngrp = grp = vlan_group_alloc(real_dev);
+ if (!grp)
+ return -ENOBUFS;
+ err = vlan_gvrp_init_applicant(real_dev);
+ if (err < 0)
+ goto out_free_group;
+ }
+
+ err = vlan_group_prealloc_vid(grp, vlan_id);
+ if (err < 0)
+ goto out_uninit_applicant;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto out_uninit_applicant;
+
+ /* Account for reference in struct vlan_dev_info */
+ dev_hold(real_dev);
+
+ netif_stacked_transfer_operstate(real_dev, dev);
+ linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
+
+ /* So, got the sucker initialized, now let's place
+ * it into our local structure.
+ */
+ vlan_group_set_device(grp, vlan_id, dev);
+ grp->nr_vlans++;
+
+ if (ngrp) {
+ if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
+ ops->ndo_vlan_rx_register(real_dev, ngrp);
+ rcu_assign_pointer(real_dev->vlgrp, ngrp);
+ }
+ if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
+ ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
+
+ return 0;
+
+out_uninit_applicant:
+ if (ngrp)
+ vlan_gvrp_uninit_applicant(real_dev);
+out_free_group:
+ if (ngrp) {
+ /* Free the group, after all cpu's are done. */
+ call_rcu(&ngrp->rcu, vlan_rcu_free);
+ }
+ return err;
+}
+
+/* Attach a VLAN device to a mac address (i.e. an Ethernet card).
+ * Returns 0 if the device was created or a negative error code otherwise.
+ */
+static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+{
+ struct net_device *new_dev;
+ struct net *net = dev_net(real_dev);
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+ char name[IFNAMSIZ];
+ int err;
+
+ if (vlan_id >= VLAN_VID_MASK)
+ return -ERANGE;
+
+ err = vlan_check_real_dev(real_dev, vlan_id);
+ if (err < 0)
+ return err;
+
+ /* Gotta set up the fields for the device. */
+ switch (vn->name_type) {
+ case VLAN_NAME_TYPE_RAW_PLUS_VID:
+ /* name will look like: eth1.0005 */
+ snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
+ break;
+ case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
+ /* Put our vlan.VID in the name.
+ * Name will look like: vlan5
+ */
+ snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
+ break;
+ case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
+ /* Put our vlan.VID in the name.
+ * Name will look like: eth0.5
+ */
+ snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
+ break;
+ case VLAN_NAME_TYPE_PLUS_VID:
+ /* Put our vlan.VID in the name.
+ * Name will look like: vlan0005
+ */
+ default:
+ snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
+ }
+
+ new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
+
+ if (new_dev == NULL)
+ return -ENOBUFS;
+
+ dev_net_set(new_dev, net);
+ /* need 4 bytes for extra VLAN header info,
+ * hope the underlying device can handle it.
+ */
+ new_dev->mtu = real_dev->mtu;
+
+ vlan_dev_info(new_dev)->vlan_id = vlan_id;
+ vlan_dev_info(new_dev)->real_dev = real_dev;
+ vlan_dev_info(new_dev)->dent = NULL;
+ vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+
+ new_dev->rtnl_link_ops = &vlan_link_ops;
+ err = register_vlan_dev(new_dev);
+ if (err < 0)
+ goto out_free_newdev;
+
+ return 0;
+
+out_free_newdev:
+ free_netdev(new_dev);
+ return err;
+}
+
+static void vlan_sync_address(struct net_device *dev,
+ struct net_device *vlandev)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(vlandev);
+
+ /* May be called without an actual change */
+ if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
+ return;
+
+ /* vlan address was different from the old address and is equal to
+ * the new address */
+ if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
+ !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+ dev_uc_del(dev, vlandev->dev_addr);
+
+ /* vlan address was equal to the old address and is different from
+ * the new address */
+ if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
+ compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+ dev_uc_add(dev, vlandev->dev_addr);
+
+ memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
+}
+
+static void vlan_transfer_features(struct net_device *dev,
+ struct net_device *vlandev)
+{
+ vlandev->gso_max_size = dev->gso_max_size;
+
+ if (dev->features & NETIF_F_HW_VLAN_TX)
+ vlandev->hard_header_len = dev->hard_header_len;
+ else
+ vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
+
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
+#endif
+
+ netdev_update_features(vlandev);
+}
+
+static void __vlan_device_event(struct net_device *dev, unsigned long event)
+{
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ vlan_proc_rem_dev(dev);
+ if (vlan_proc_add_dev(dev) < 0)
+ pr_warning("8021q: failed to change proc name for %s\n",
+ dev->name);
+ break;
+ case NETDEV_REGISTER:
+ if (vlan_proc_add_dev(dev) < 0)
+ pr_warning("8021q: failed to add proc entry for %s\n",
+ dev->name);
+ break;
+ case NETDEV_UNREGISTER:
+ vlan_proc_rem_dev(dev);
+ break;
+ }
+}
+
+static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct vlan_group *grp;
+ int i, flgs;
+ struct net_device *vlandev;
+ struct vlan_dev_info *vlan;
+ LIST_HEAD(list);
+
+ if (is_vlan_dev(dev))
+ __vlan_device_event(dev, event);
+
+ if ((event == NETDEV_UP) &&
+ (dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ dev->netdev_ops->ndo_vlan_rx_add_vid) {
+ pr_info("8021q: adding VLAN 0 to HW filter on device %s\n",
+ dev->name);
+ dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+ }
+
+ grp = rtnl_dereference(dev->vlgrp);
+ if (!grp)
+ goto out;
+
+ /* It is OK that we do not hold the group lock right now,
+ * as we run under the RTNL lock.
+ */
+
+ switch (event) {
+ case NETDEV_CHANGE:
+ /* Propagate real device state to vlan devices */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ netif_stacked_transfer_operstate(dev, vlandev);
+ }
+ break;
+
+ case NETDEV_CHANGEADDR:
+ /* Adjust unicast filters on underlying device */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ flgs = vlandev->flags;
+ if (!(flgs & IFF_UP))
+ continue;
+
+ vlan_sync_address(dev, vlandev);
+ }
+ break;
+
+ case NETDEV_CHANGEMTU:
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ if (vlandev->mtu <= dev->mtu)
+ continue;
+
+ dev_set_mtu(vlandev, dev->mtu);
+ }
+ break;
+
+ case NETDEV_FEAT_CHANGE:
+ /* Propagate device features to underlying device */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ vlan_transfer_features(dev, vlandev);
+ }
+
+ break;
+
+ case NETDEV_DOWN:
+ /* Put all VLANs for this dev in the down state too. */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ flgs = vlandev->flags;
+ if (!(flgs & IFF_UP))
+ continue;
+
+ vlan = vlan_dev_info(vlandev);
+ if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ dev_change_flags(vlandev, flgs & ~IFF_UP);
+ netif_stacked_transfer_operstate(dev, vlandev);
+ }
+ break;
+
+ case NETDEV_UP:
+ /* Put all VLANs for this dev in the up state too. */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ flgs = vlandev->flags;
+ if (flgs & IFF_UP)
+ continue;
+
+ vlan = vlan_dev_info(vlandev);
+ if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ dev_change_flags(vlandev, flgs | IFF_UP);
+ netif_stacked_transfer_operstate(dev, vlandev);
+ }
+ break;
+
+ case NETDEV_UNREGISTER:
+ /* twiddle thumbs on netns device moves */
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ /* unregistration of last vlan destroys group, abort
+ * afterwards */
+ if (grp->nr_vlans == 1)
+ i = VLAN_N_VID;
+
+ unregister_vlan_dev(vlandev, &list);
+ }
+ unregister_netdevice_many(&list);
+ break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid the underlying device from changing its type. */
+ return NOTIFY_BAD;
+
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ /* Propagate to vlan devices */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ call_netdevice_notifiers(event, vlandev);
+ }
+ break;
+ }
+
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vlan_notifier_block __read_mostly = {
+ .notifier_call = vlan_device_event,
+};
+
+/*
+ * VLAN IOCTL handler.
+ * o execute requested action or pass command to the device driver
+ * arg is really a struct vlan_ioctl_args __user *.
+ */
+static int vlan_ioctl_handler(struct net *net, void __user *arg)
+{
+ int err;
+ struct vlan_ioctl_args args;
+ struct net_device *dev = NULL;
+
+ if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
+ return -EFAULT;
+
+ /* Null terminate this sucker, just in case. */
+ args.device1[23] = 0;
+ args.u.device2[23] = 0;
+
+ rtnl_lock();
+
+ switch (args.cmd) {
+ case SET_VLAN_INGRESS_PRIORITY_CMD:
+ case SET_VLAN_EGRESS_PRIORITY_CMD:
+ case SET_VLAN_FLAG_CMD:
+ case ADD_VLAN_CMD:
+ case DEL_VLAN_CMD:
+ case GET_VLAN_REALDEV_NAME_CMD:
+ case GET_VLAN_VID_CMD:
+ err = -ENODEV;
+ dev = __dev_get_by_name(net, args.device1);
+ if (!dev)
+ goto out;
+
+ err = -EINVAL;
+ if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
+ goto out;
+ }
+
+ switch (args.cmd) {
+ case SET_VLAN_INGRESS_PRIORITY_CMD:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ vlan_dev_set_ingress_priority(dev,
+ args.u.skb_priority,
+ args.vlan_qos);
+ err = 0;
+ break;
+
+ case SET_VLAN_EGRESS_PRIORITY_CMD:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ err = vlan_dev_set_egress_priority(dev,
+ args.u.skb_priority,
+ args.vlan_qos);
+ break;
+
+ case SET_VLAN_FLAG_CMD:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ err = vlan_dev_change_flags(dev,
+ args.vlan_qos ? args.u.flag : 0,
+ args.u.flag);
+ break;
+
+ case SET_VLAN_NAME_TYPE_CMD:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ if ((args.u.name_type >= 0) &&
+ (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
+ struct vlan_net *vn;
+
+ vn = net_generic(net, vlan_net_id);
+ vn->name_type = args.u.name_type;
+ err = 0;
+ } else {
+ err = -EINVAL;
+ }
+ break;
+
+ case ADD_VLAN_CMD:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ err = register_vlan_device(dev, args.u.VID);
+ break;
+
+ case DEL_VLAN_CMD:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ unregister_vlan_dev(dev, NULL);
+ err = 0;
+ break;
+
+ case GET_VLAN_REALDEV_NAME_CMD:
+ err = 0;
+ vlan_dev_get_realdev_name(dev, args.u.device2);
+ if (copy_to_user(arg, &args,
+ sizeof(struct vlan_ioctl_args)))
+ err = -EFAULT;
+ break;
+
+ case GET_VLAN_VID_CMD:
+ err = 0;
+ args.u.VID = vlan_dev_vlan_id(dev);
+ if (copy_to_user(arg, &args,
+ sizeof(struct vlan_ioctl_args)))
+ err = -EFAULT;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+out:
+ rtnl_unlock();
+ return err;
+}
+
+static int __net_init vlan_init_net(struct net *net)
+{
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+ int err;
+
+ vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
+
+ err = vlan_proc_init(net);
+
+ return err;
+}
+
+static void __net_exit vlan_exit_net(struct net *net)
+{
+ vlan_proc_cleanup(net);
+}
+
+static struct pernet_operations vlan_net_ops = {
+ .init = vlan_init_net,
+ .exit = vlan_exit_net,
+ .id = &vlan_net_id,
+ .size = sizeof(struct vlan_net),
+};
+
+static int __init vlan_proto_init(void)
+{
+ int err;
+
+ pr_info("%s v%s\n", vlan_fullname, vlan_version);
+
+ err = register_pernet_subsys(&vlan_net_ops);
+ if (err < 0)
+ goto err0;
+
+ err = register_netdevice_notifier(&vlan_notifier_block);
+ if (err < 0)
+ goto err2;
+
+ err = vlan_gvrp_init();
+ if (err < 0)
+ goto err3;
+
+ err = vlan_netlink_init();
+ if (err < 0)
+ goto err4;
+
+ vlan_ioctl_set(vlan_ioctl_handler);
+ return 0;
+
+err4:
+ vlan_gvrp_uninit();
+err3:
+ unregister_netdevice_notifier(&vlan_notifier_block);
+err2:
+ unregister_pernet_subsys(&vlan_net_ops);
+err0:
+ return err;
+}
+
+static void __exit vlan_cleanup_module(void)
+{
+ vlan_ioctl_set(NULL);
+ vlan_netlink_fini();
+
+ unregister_netdevice_notifier(&vlan_notifier_block);
+
+ unregister_pernet_subsys(&vlan_net_ops);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
+ vlan_gvrp_uninit();
+}
+
+module_init(vlan_proto_init);
+module_exit(vlan_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
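
Aside (not part of the patch): the GET_* branches of vlan_ioctl_handler() above are reachable from userspace in the same way; the socket layer dispatches both SIOCSIFVLAN and SIOCGIFVLAN to the hook installed by vlan_ioctl_set(). A short sketch querying an existing VLAN interface (the name eth0.100 is only an example):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

int main(void)
{
        struct vlan_ioctl_args args;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&args, 0, sizeof(args));
        args.cmd = GET_VLAN_VID_CMD;
        strncpy(args.device1, "eth0.100", sizeof(args.device1) - 1);
        if (ioctl(fd, SIOCGIFVLAN, &args) == 0)
                printf("VID: %d\n", args.u.VID);

        memset(&args, 0, sizeof(args));
        args.cmd = GET_VLAN_REALDEV_NAME_CMD;
        strncpy(args.device1, "eth0.100", sizeof(args.device1) - 1);
        if (ioctl(fd, SIOCGIFVLAN, &args) == 0)
                printf("real device: %s\n", args.u.device2);

        close(fd);
        return 0;
}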
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
new file mode 100644
index 00000000..9da07e30
--- /dev/null
+++ b/net/8021q/vlan.h
@@ -0,0 +1,134 @@
+#ifndef __BEN_VLAN_802_1Q_INC__
+#define __BEN_VLAN_802_1Q_INC__
+
+#include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
+
+
+/**
+ * struct vlan_priority_tci_mapping - vlan egress priority mappings
+ * @priority: skb priority
+ * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ * @next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+ u32 priority;
+ u16 vlan_qos;
+ struct vlan_priority_tci_mapping *next;
+};
+
+
+/**
+ * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @rx_multicast: number of received multicast packets
+ * @tx_packets: number of transmitted packets
+ * @tx_bytes: number of transmitted bytes
+ * @syncp: synchronization point for 64bit counters
+ * @rx_errors: number of rx errors
+ * @tx_dropped: number of tx drops
+ */
+struct vlan_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_errors;
+ u32 tx_dropped;
+};
+
+/**
+ * struct vlan_dev_info - VLAN private device data
+ * @nr_ingress_mappings: number of ingress priority mappings
+ * @ingress_priority_map: ingress priority mappings
+ * @nr_egress_mappings: number of egress priority mappings
+ * @egress_priority_map: hash of egress priority mappings
+ * @vlan_id: VLAN identifier
+ * @flags: device flags
+ * @real_dev: underlying netdevice
+ * @real_dev_addr: address of underlying netdevice
+ * @dent: proc dir entry
+ * @vlan_pcpu_stats: ptr to percpu rx stats
+ */
+struct vlan_dev_info {
+ unsigned int nr_ingress_mappings;
+ u32 ingress_priority_map[8];
+ unsigned int nr_egress_mappings;
+ struct vlan_priority_tci_mapping *egress_priority_map[16];
+
+ u16 vlan_id;
+ u16 flags;
+
+ struct net_device *real_dev;
+ unsigned char real_dev_addr[ETH_ALEN];
+
+ struct proc_dir_entry *dent;
+ struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
+};
+
+static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+/* found in vlan_dev.c */
+void vlan_dev_set_ingress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio);
+int vlan_dev_set_egress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio);
+int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
+
+int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
+void vlan_setup(struct net_device *dev);
+int register_vlan_dev(struct net_device *dev);
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+
+static inline u32 vlan_get_ingress_priority(struct net_device *dev,
+ u16 vlan_tci)
+{
+ struct vlan_dev_info *vip = vlan_dev_info(dev);
+
+ return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
+}
+
+#ifdef CONFIG_VLAN_8021Q_GVRP
+extern int vlan_gvrp_request_join(const struct net_device *dev);
+extern void vlan_gvrp_request_leave(const struct net_device *dev);
+extern int vlan_gvrp_init_applicant(struct net_device *dev);
+extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
+extern int vlan_gvrp_init(void);
+extern void vlan_gvrp_uninit(void);
+#else
+static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_gvrp_init(void) { return 0; }
+static inline void vlan_gvrp_uninit(void) {}
+#endif
+
+extern const char vlan_fullname[];
+extern const char vlan_version[];
+extern int vlan_netlink_init(void);
+extern void vlan_netlink_fini(void);
+
+extern struct rtnl_link_ops vlan_link_ops;
+
+extern int vlan_net_id;
+
+struct proc_dir_entry;
+
+struct vlan_net {
+ /* /proc/net/vlan */
+ struct proc_dir_entry *proc_vlan_dir;
+ /* /proc/net/vlan/config */
+ struct proc_dir_entry *proc_vlan_conf;
+ /* Determines interface naming scheme. */
+ unsigned short name_type;
+};
+
+#endif /* !(__BEN_VLAN_802_1Q_INC__) */
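
Aside (not part of the patch): a quick note on the bit layout the priority maps above key off. The 16-bit 802.1Q TCI carries the PCP in bits 15-13 (hence VLAN_PRIO_SHIFT of 13 and the '& 0x7' in vlan_get_ingress_priority()), the DEI/CFI bit in bit 12, and the 12-bit VID in bits 11-0. A tiny standalone illustration using local copies of the constants:

#include <stdio.h>

/* local stand-ins for the kernel's VLAN_PRIO_SHIFT and VLAN_VID_MASK */
#define EX_VLAN_PRIO_SHIFT  13
#define EX_VLAN_VID_MASK    0x0fff

int main(void)
{
        unsigned int tci = (5u << EX_VLAN_PRIO_SHIFT) | 100;   /* PCP 5, VID 100 */

        printf("tci=0x%04x vid=%u pcp=%u\n", tci,
               tci & EX_VLAN_VID_MASK,
               (tci >> EX_VLAN_PRIO_SHIFT) & 0x7);             /* tci=0xa064 vid=100 pcp=5 */
        return 0;
}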
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
new file mode 100644
index 00000000..27263fb1
--- /dev/null
+++ b/net/8021q/vlan_core.c
@@ -0,0 +1,181 @@
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
+#include "vlan.h"
+
+bool vlan_do_receive(struct sk_buff **skbp)
+{
+ struct sk_buff *skb = *skbp;
+ u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+ struct net_device *vlan_dev;
+ struct vlan_pcpu_stats *rx_stats;
+
+ vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+ if (!vlan_dev) {
+ if (vlan_id)
+ skb->pkt_type = PACKET_OTHERHOST;
+ return false;
+ }
+
+ skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return false;
+
+ skb->dev = vlan_dev;
+ if (skb->pkt_type == PACKET_OTHERHOST) {
+ /* Our lower layer thinks this is not local, let's make sure.
+ * This allows the VLAN to have a different MAC than the
+ * underlying device, and still route correctly. */
+ if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+ vlan_dev->dev_addr))
+ skb->pkt_type = PACKET_HOST;
+ }
+
+ if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ unsigned int offset = skb->data - skb_mac_header(skb);
+
+ /*
+ * vlan_insert_tag() expects skb->data to point to the mac header,
+ * so change skb->data before calling it and change it back to
+ * the original position later.
+ */
+ skb_push(skb, offset);
+ skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+ if (!skb)
+ return false;
+ skb_pull(skb, offset + VLAN_HLEN);
+ skb_reset_mac_len(skb);
+ }
+
+ skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
+ skb->vlan_tci = 0;
+
+ rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->rx_packets++;
+ rx_stats->rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx_stats->rx_multicast++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ return true;
+}
+
+struct net_device *vlan_dev_real_dev(const struct net_device *dev)
+{
+ return vlan_dev_info(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_real_dev);
+
+u16 vlan_dev_vlan_id(const struct net_device *dev)
+{
+ return vlan_dev_info(dev)->vlan_id;
+}
+EXPORT_SYMBOL(vlan_dev_vlan_id);
+
+/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
+int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+ u16 vlan_tci, int polling)
+{
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
+ return polling ? netif_receive_skb(skb) : netif_rx(skb);
+}
+EXPORT_SYMBOL(__vlan_hwaccel_rx);
+
+gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
+{
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
+ return napi_gro_receive(napi, skb);
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci)
+{
+ __vlan_hwaccel_put_tag(napi->skb, vlan_tci);
+ return napi_gro_frags(napi);
+}
+EXPORT_SYMBOL(vlan_gro_frags);
+
+static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+{
+ if (skb_cow(skb, skb_headroom(skb)) < 0)
+ return NULL;
+ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ skb->mac_header += VLAN_HLEN;
+ skb_reset_mac_len(skb);
+ return skb;
+}
+
+static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
+{
+ __be16 proto;
+ unsigned char *rawp;
+
+ /*
+ * Was a VLAN packet, grab the encapsulated protocol, which the layer
+ * three protocols care about.
+ */
+
+ proto = vhdr->h_vlan_encapsulated_proto;
+ if (ntohs(proto) >= 1536) {
+ skb->protocol = proto;
+ return;
+ }
+
+ rawp = skb->data;
+ if (*(unsigned short *) rawp == 0xFFFF)
+ /*
+ * This is a magic hack to spot IPX packets. Older Novell
+ * breaks the protocol design and runs IPX over 802.3 without
+ * an 802.2 LLC layer. We look for FFFF which isn't a used
+ * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+ * but does for the rest.
+ */
+ skb->protocol = htons(ETH_P_802_3);
+ else
+ /*
+ * Real 802.2 LLC
+ */
+ skb->protocol = htons(ETH_P_802_2);
+}
+
+struct sk_buff *vlan_untag(struct sk_buff *skb)
+{
+ struct vlan_hdr *vhdr;
+ u16 vlan_tci;
+
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ /* vlan_tci is already set-up so leave this for another time */
+ return skb;
+ }
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto err_free;
+
+ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+ goto err_free;
+
+ vhdr = (struct vlan_hdr *) skb->data;
+ vlan_tci = ntohs(vhdr->h_vlan_TCI);
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
+
+ skb_pull_rcsum(skb, VLAN_HLEN);
+ vlan_set_encap_proto(skb, vhdr);
+
+ skb = vlan_reorder_header(skb);
+ if (unlikely(!skb))
+ goto err_free;
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ return skb;
+
+err_free:
+ kfree_skb(skb);
+ return NULL;
+}
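
Aside (not part of the patch): the IPX comment in vlan_set_encap_proto() above condenses a classic Ethernet rule: a value of 1536 (0x0600) or more after the VLAN header is an EtherType, anything smaller is an 802.3 length field, and a payload beginning with 0xFFFF is LLC-less Novell IPX. A small standalone restatement of that decision:

#include <stdio.h>

static const char *classify(unsigned int encap, unsigned int first_payload_word)
{
        if (encap >= 1536)
                return "EtherType (DIX frame)";
        if (first_payload_word == 0xFFFF)
                return "raw 802.3 IPX, no LLC header";
        return "802.3 length field, 802.2 LLC follows";
}

int main(void)
{
        printf("%s\n", classify(0x0800, 0));            /* IPv4 EtherType */
        printf("%s\n", classify(0x05DC, 0xFFFF));       /* length 1500, raw IPX payload */
        printf("%s\n", classify(0x05DC, 0xAAAA));       /* length 1500, LLC/SNAP payload */
        return 0;
}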
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
new file mode 100644
index 00000000..d5484561
--- /dev/null
+++ b/net/8021q/vlan_dev.c
@@ -0,0 +1,705 @@
+/* -*- linux-c -*-
+ * INET 802.1Q VLAN
+ * Ethernet-type device handling.
+ *
+ * Authors: Ben Greear <greearb@candelatech.com>
+ * Please send support related email to: netdev@vger.kernel.org
+ * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *
+ * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
+ * - reset skb->pkt_type on incoming packets when MAC was changed
+ * - see that changed MAC is saddr for outgoing packets
+ * Oct 20, 2001: Ard van Breeman:
+ * - Fix MC-list, finally.
+ * - Flush MC-list on VLAN destroy.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <net/arp.h>
+
+#include "vlan.h"
+#include "vlanproc.h"
+#include <linux/if_vlan.h>
+
+/*
+ * Rebuild the Ethernet MAC header. This is called after an ARP
+ * (or in future other address resolution) has completed on this
+ * sk_buff. We now let ARP fill in the other fields.
+ *
+ * This routine CANNOT use cached dst->neigh!
+ * Really, it is used only when dst->neigh is wrong.
+ *
+ * TODO: This needs a checkup, I'm ignorant here. --BLG
+ */
+static int vlan_dev_rebuild_header(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+
+ switch (veth->h_vlan_encapsulated_proto) {
+#ifdef CONFIG_INET
+ case htons(ETH_P_IP):
+
+ /* TODO: Confirm this will work with VLAN headers... */
+ return arp_find(veth->h_dest, skb);
+#endif
+ default:
+ pr_debug("%s: unable to resolve type %X addresses.\n",
+ dev->name, ntohs(veth->h_vlan_encapsulated_proto));
+
+ memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
+ break;
+ }
+
+ return 0;
+}
+
+static inline u16
+vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vlan_priority_tci_mapping *mp;
+
+ mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+ while (mp) {
+ if (mp->priority == skb->priority) {
+ return mp->vlan_qos; /* This should already be shifted
+ * to mask correctly with the
+ * VLAN's TCI */
+ }
+ mp = mp->next;
+ }
+ return 0;
+}
+
+/*
+ * Create the VLAN header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp)
+ *
+ * This is called when the SKB is moving down the stack towards the
+ * physical devices.
+ */
+static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_hdr *vhdr;
+ unsigned int vhdrlen = 0;
+ u16 vlan_tci = 0;
+ int rc;
+
+ if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
+
+ vlan_tci = vlan_dev_info(dev)->vlan_id;
+ vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
+ vhdr->h_vlan_TCI = htons(vlan_tci);
+
+ /*
+ * Set the protocol type. For a packet of type ETH_P_802_3/2 we
+ * put the length in here instead.
+ */
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
+ vhdr->h_vlan_encapsulated_proto = htons(type);
+ else
+ vhdr->h_vlan_encapsulated_proto = htons(len);
+
+ skb->protocol = htons(ETH_P_8021Q);
+ type = ETH_P_8021Q;
+ vhdrlen = VLAN_HLEN;
+ }
+
+ /* Before delegating work to the lower layer, enter our MAC-address */
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
+ /* Now make the underlying real hard header */
+ dev = vlan_dev_info(dev)->real_dev;
+ rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
+ if (rc > 0)
+ rc += vhdrlen;
+ return rc;
+}
+
+static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+ unsigned int len;
+ int ret;
+
+ /* Handle non-VLAN frames if they are sent to us, for example by DHCP.
+ *
+ * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
+ * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
+ */
+ if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
+ vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+ u16 vlan_tci;
+ vlan_tci = vlan_dev_info(dev)->vlan_id;
+ vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
+ skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
+ }
+
+ skb->dev = vlan_dev_info(dev)->real_dev;
+ len = skb->len;
+ ret = dev_queue_xmit(skb);
+
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ struct vlan_pcpu_stats *stats;
+
+ stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+ }
+
+ return ret;
+}
+
+static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* TODO: gotta make sure the underlying layer can handle it,
+ * maybe an IFF_VLAN_CAPABLE flag for devices?
+ */
+ if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
+ return -ERANGE;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+void vlan_dev_set_ingress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+
+ if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
+ vlan->nr_ingress_mappings--;
+ else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
+ vlan->nr_ingress_mappings++;
+
+ vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
+}
+
+int vlan_dev_set_egress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_priority_tci_mapping *mp = NULL;
+ struct vlan_priority_tci_mapping *np;
+ u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+
+ /* See if a priority mapping exists.. */
+ mp = vlan->egress_priority_map[skb_prio & 0xF];
+ while (mp) {
+ if (mp->priority == skb_prio) {
+ if (mp->vlan_qos && !vlan_qos)
+ vlan->nr_egress_mappings--;
+ else if (!mp->vlan_qos && vlan_qos)
+ vlan->nr_egress_mappings++;
+ mp->vlan_qos = vlan_qos;
+ return 0;
+ }
+ mp = mp->next;
+ }
+
+ /* Create a new mapping then. */
+ mp = vlan->egress_priority_map[skb_prio & 0xF];
+ np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
+ if (!np)
+ return -ENOBUFS;
+
+ np->next = mp;
+ np->priority = skb_prio;
+ np->vlan_qos = vlan_qos;
+ vlan->egress_priority_map[skb_prio & 0xF] = np;
+ if (vlan_qos)
+ vlan->nr_egress_mappings++;
+ return 0;
+}
+
+/* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
+int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ u32 old_flags = vlan->flags;
+
+ if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+ VLAN_FLAG_LOOSE_BINDING))
+ return -EINVAL;
+
+ vlan->flags = (old_flags & ~mask) | (flags & mask);
+
+ if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_join(dev);
+ else
+ vlan_gvrp_request_leave(dev);
+ }
+ return 0;
+}
+
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
+{
+ strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
+}
+
+static int vlan_dev_open(struct net_device *dev)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ int err;
+
+ if (!(real_dev->flags & IFF_UP) &&
+ !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ return -ENETDOWN;
+
+ if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
+ err = dev_uc_add(real_dev, dev->dev_addr);
+ if (err < 0)
+ goto out;
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(real_dev, 1);
+ if (err < 0)
+ goto del_unicast;
+ }
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(real_dev, 1);
+ if (err < 0)
+ goto clear_allmulti;
+ }
+
+ memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN);
+
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_join(dev);
+
+ if (netif_carrier_ok(real_dev))
+ netif_carrier_on(dev);
+ return 0;
+
+clear_allmulti:
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+del_unicast:
+ if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+out:
+ netif_carrier_off(dev);
+ return err;
+}
+
+static int vlan_dev_stop(struct net_device *dev)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ dev_mc_unsync(real_dev, dev);
+ dev_uc_unsync(real_dev, dev);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, -1);
+
+ if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+
+ netif_carrier_off(dev);
+ return 0;
+}
+
+static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!(dev->flags & IFF_UP))
+ goto out;
+
+ if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
+ err = dev_uc_add(real_dev, addr->sa_data);
+ if (err < 0)
+ return err;
+ }
+
+ if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+
+out:
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ return 0;
+}
+
+static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct ifreq ifrr;
+ int err = -EOPNOTSUPP;
+
+ strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+ err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ break;
+ }
+
+ if (!err)
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+
+ return err;
+}
+
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int err = 0;
+
+ if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+ err = ops->ndo_neigh_setup(real_dev, pa);
+
+ return err;
+}
+
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_setup)
+ rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
+
+static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int len = 0;
+
+ if (ops->ndo_fcoe_ddp_done)
+ len = ops->ndo_fcoe_ddp_done(real_dev, xid);
+
+ return len;
+}
+
+static int vlan_dev_fcoe_enable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_enable)
+ rc = ops->ndo_fcoe_enable(real_dev);
+ return rc;
+}
+
+static int vlan_dev_fcoe_disable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_disable)
+ rc = ops->ndo_fcoe_disable(real_dev);
+ return rc;
+}
+
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_get_wwn)
+ rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+ return rc;
+}
+
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_target)
+ rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
+#endif
+
+static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+}
+
+static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+{
+ dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+ dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+}
+
+/*
+ * vlan network devices have devices nesting below them, and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;
+
+static void vlan_dev_set_lockdep_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_subclass)
+{
+ lockdep_set_class_and_subclass(&txq->_xmit_lock,
+ &vlan_netdev_xmit_lock_key,
+ *(int *)_subclass);
+}
+
+static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+{
+ lockdep_set_class_and_subclass(&dev->addr_list_lock,
+ &vlan_netdev_addr_lock_key,
+ subclass);
+ netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
+}
+
+static const struct header_ops vlan_header_ops = {
+ .create = vlan_dev_hard_header,
+ .rebuild = vlan_dev_rebuild_header,
+ .parse = eth_header_parse,
+};
+
+static const struct net_device_ops vlan_netdev_ops;
+
+static int vlan_dev_init(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ int subclass = 0;
+
+ netif_carrier_off(dev);
+
+ /* IFF_BROADCAST|IFF_MULTICAST; ??? */
+ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ IFF_MASTER | IFF_SLAVE);
+ dev->iflink = real_dev->ifindex;
+ dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
+ (1<<__LINK_STATE_DORMANT))) |
+ (1<<__LINK_STATE_PRESENT);
+
+ dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+ NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+ NETIF_F_ALL_FCOE;
+
+ dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
+ dev->gso_max_size = real_dev->gso_max_size;
+
+ /* ipv6 shared card related stuff */
+ dev->dev_id = real_dev->dev_id;
+
+ if (is_zero_ether_addr(dev->dev_addr))
+ memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
+ if (is_zero_ether_addr(dev->broadcast))
+ memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
+#endif
+
+ dev->needed_headroom = real_dev->needed_headroom;
+ if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+ dev->header_ops = real_dev->header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+ dev->header_ops = &vlan_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
+ }
+
+ dev->netdev_ops = &vlan_netdev_ops;
+
+ if (is_vlan_dev(real_dev))
+ subclass = 1;
+
+ vlan_dev_set_lockdep_class(dev, subclass);
+
+ vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ if (!vlan_dev_info(dev)->vlan_pcpu_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vlan_dev_uninit(struct net_device *dev)
+{
+ struct vlan_priority_tci_mapping *pm;
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ int i;
+
+ free_percpu(vlan->vlan_pcpu_stats);
+ vlan->vlan_pcpu_stats = NULL;
+ for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+ while ((pm = vlan->egress_priority_map[i]) != NULL) {
+ vlan->egress_priority_map[i] = pm->next;
+ kfree(pm);
+ }
+ }
+}
+
+static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ u32 old_features = features;
+
+ features &= real_dev->features;
+ features &= real_dev->vlan_features;
+
+ if (old_features & NETIF_F_SOFT_FEATURES)
+ features |= old_features & NETIF_F_SOFT_FEATURES;
+
+ if (dev_ethtool_get_rx_csum(real_dev))
+ features |= NETIF_F_RXCSUM;
+ features |= NETIF_F_LLTX;
+
+ return features;
+}
+
+static int vlan_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ return dev_ethtool_get_settings(vlan->real_dev, cmd);
+}
+
+static void vlan_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, vlan_fullname);
+ strcpy(info->version, vlan_version);
+ strcpy(info->fw_version, "N/A");
+}
+
+static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+
+ if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+ struct vlan_pcpu_stats *p;
+ u32 rx_errors = 0, tx_dropped = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+ unsigned int start;
+
+ p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ rxmulticast = p->rx_multicast;
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+
+ stats->rx_packets += rxpackets;
+ stats->rx_bytes += rxbytes;
+ stats->multicast += rxmulticast;
+ stats->tx_packets += txpackets;
+ stats->tx_bytes += txbytes;
+ /* rx_errors & tx_dropped are u32 */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_errors = rx_errors;
+ stats->tx_dropped = tx_dropped;
+ }
+ return stats;
+}
+
+static const struct ethtool_ops vlan_ethtool_ops = {
+ .get_settings = vlan_ethtool_get_settings,
+ .get_drvinfo = vlan_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops vlan_netdev_ops = {
+ .ndo_change_mtu = vlan_dev_change_mtu,
+ .ndo_init = vlan_dev_init,
+ .ndo_uninit = vlan_dev_uninit,
+ .ndo_open = vlan_dev_open,
+ .ndo_stop = vlan_dev_stop,
+ .ndo_start_xmit = vlan_dev_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = vlan_dev_set_mac_address,
+ .ndo_set_rx_mode = vlan_dev_set_rx_mode,
+ .ndo_set_multicast_list = vlan_dev_set_rx_mode,
+ .ndo_change_rx_flags = vlan_dev_change_rx_flags,
+ .ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
+ .ndo_get_stats64 = vlan_dev_get_stats64,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+ .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+ .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+ .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+ .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+ .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
+#endif
+ .ndo_fix_features = vlan_dev_fix_features,
+};
+
+void vlan_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->priv_flags |= IFF_802_1Q_VLAN;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+ dev->tx_queue_len = 0;
+
+ dev->netdev_ops = &vlan_netdev_ops;
+ dev->destructor = free_netdev;
+ dev->ethtool_ops = &vlan_ethtool_ops;
+
+ memset(dev->broadcast, 0, ETH_ALEN);
+}
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
new file mode 100644
index 00000000..061cecee
--- /dev/null
+++ b/net/8021q/vlan_gvrp.c
@@ -0,0 +1,66 @@
+/*
+ * IEEE 802.1Q GARP VLAN Registration Protocol (GVRP)
+ *
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+#include <net/garp.h>
+#include "vlan.h"
+
+#define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum gvrp_attributes {
+ GVRP_ATTR_INVALID,
+ GVRP_ATTR_VID,
+ __GVRP_ATTR_MAX
+};
+#define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1)
+
+static struct garp_application vlan_gvrp_app __read_mostly = {
+ .proto.group_address = GARP_GVRP_ADDRESS,
+ .maxattr = GVRP_ATTR_MAX,
+ .type = GARP_APPLICATION_GVRP,
+};
+
+int vlan_gvrp_request_join(const struct net_device *dev)
+{
+ const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ __be16 vlan_id = htons(vlan->vlan_id);
+
+ return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
+ &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
+}
+
+void vlan_gvrp_request_leave(const struct net_device *dev)
+{
+ const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ __be16 vlan_id = htons(vlan->vlan_id);
+
+ garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
+ &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
+}
+
+int vlan_gvrp_init_applicant(struct net_device *dev)
+{
+ return garp_init_applicant(dev, &vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit_applicant(struct net_device *dev)
+{
+ garp_uninit_applicant(dev, &vlan_gvrp_app);
+}
+
+int __init vlan_gvrp_init(void)
+{
+ return garp_register_application(&vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit(void)
+{
+ garp_unregister_application(&vlan_gvrp_app);
+}
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
new file mode 100644
index 00000000..be9a5c19
--- /dev/null
+++ b/net/8021q/vlan_netlink.c
@@ -0,0 +1,240 @@
+/*
+ * VLAN netlink control interface
+ *
+ * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "vlan.h"
+
+
+static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
+ [IFLA_VLAN_ID] = { .type = NLA_U16 },
+ [IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
+ [IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
+ [IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
+ [IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
+};
+
+
+static inline int vlan_validate_qos_map(struct nlattr *attr)
+{
+ if (!attr)
+ return 0;
+ return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy);
+}
+
+static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ifla_vlan_flags *flags;
+ u16 id;
+ int err;
+
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+
+ if (!data)
+ return -EINVAL;
+
+ if (data[IFLA_VLAN_ID]) {
+ id = nla_get_u16(data[IFLA_VLAN_ID]);
+ if (id >= VLAN_VID_MASK)
+ return -ERANGE;
+ }
+ if (data[IFLA_VLAN_FLAGS]) {
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ if ((flags->flags & flags->mask) &
+ ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+ VLAN_FLAG_LOOSE_BINDING))
+ return -EINVAL;
+ }
+
+ err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
+ if (err < 0)
+ return err;
+ err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int vlan_changelink(struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ifla_vlan_flags *flags;
+ struct ifla_vlan_qos_mapping *m;
+ struct nlattr *attr;
+ int rem;
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ }
+ if (data[IFLA_VLAN_INGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
+ m = nla_data(attr);
+ vlan_dev_set_ingress_priority(dev, m->to, m->from);
+ }
+ }
+ if (data[IFLA_VLAN_EGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
+ m = nla_data(attr);
+ vlan_dev_set_egress_priority(dev, m->from, m->to);
+ }
+ }
+ return 0;
+}
+
+static int vlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct net_device *real_dev;
+ int err;
+
+ if (!data[IFLA_VLAN_ID])
+ return -EINVAL;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+ real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev)
+ return -ENODEV;
+
+ vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
+ vlan->real_dev = real_dev;
+ vlan->flags = VLAN_FLAG_REORDER_HDR;
+
+ err = vlan_check_real_dev(real_dev, vlan->vlan_id);
+ if (err < 0)
+ return err;
+
+ if (!tb[IFLA_MTU])
+ dev->mtu = real_dev->mtu;
+ else if (dev->mtu > real_dev->mtu)
+ return -EINVAL;
+
+ err = vlan_changelink(dev, tb, data);
+ if (err < 0)
+ return err;
+
+ return register_vlan_dev(dev);
+}
+
+static inline size_t vlan_qos_map_size(unsigned int n)
+{
+ if (n == 0)
+ return 0;
+ /* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
+ return nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
+}
+
+static size_t vlan_get_size(const struct net_device *dev)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+
+ return nla_total_size(2) + /* IFLA_VLAN_ID */
+ sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+ vlan_qos_map_size(vlan->nr_ingress_mappings) +
+ vlan_qos_map_size(vlan->nr_egress_mappings);
+}
+
+static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_priority_tci_mapping *pm;
+ struct ifla_vlan_flags f;
+ struct ifla_vlan_qos_mapping m;
+ struct nlattr *nest;
+ unsigned int i;
+
+ NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id);
+ if (vlan->flags) {
+ f.flags = vlan->flags;
+ f.mask = ~0;
+ NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f);
+ }
+ if (vlan->nr_ingress_mappings) {
+ nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
+ if (!vlan->ingress_priority_map[i])
+ continue;
+
+ m.from = i;
+ m.to = vlan->ingress_priority_map[i];
+ NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
+ sizeof(m), &m);
+ }
+ nla_nest_end(skb, nest);
+ }
+
+ if (vlan->nr_egress_mappings) {
+ nest = nla_nest_start(skb, IFLA_VLAN_EGRESS_QOS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+ for (pm = vlan->egress_priority_map[i]; pm;
+ pm = pm->next) {
+ if (!pm->vlan_qos)
+ continue;
+
+ m.from = pm->priority;
+ m.to = (pm->vlan_qos >> 13) & 0x7;
+ NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
+ sizeof(m), &m);
+ }
+ }
+ nla_nest_end(skb, nest);
+ }
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+struct rtnl_link_ops vlan_link_ops __read_mostly = {
+ .kind = "vlan",
+ .maxtype = IFLA_VLAN_MAX,
+ .policy = vlan_policy,
+ .priv_size = sizeof(struct vlan_dev_info),
+ .setup = vlan_setup,
+ .validate = vlan_validate,
+ .newlink = vlan_newlink,
+ .changelink = vlan_changelink,
+ .dellink = unregister_vlan_dev,
+ .get_size = vlan_get_size,
+ .fill_info = vlan_fill_info,
+};
+
+int __init vlan_netlink_init(void)
+{
+ return rtnl_link_register(&vlan_link_ops);
+}
+
+void __exit vlan_netlink_fini(void)
+{
+ rtnl_link_unregister(&vlan_link_ops);
+}
+
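+/* Allow the module to be auto-loaded when user space asks for a "vlan"
+ * rtnl link kind (module alias "rtnl-link-vlan").
+ */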
+MODULE_ALIAS_RTNL_LINK("vlan");
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
new file mode 100644
index 00000000..d940c49d
--- /dev/null
+++ b/net/8021q/vlanproc.c
@@ -0,0 +1,327 @@
+/******************************************************************************
+ * vlanproc.c VLAN Module. /proc filesystem interface.
+ *
+ * This module is completely hardware-independent and provides
+ * access to VLAN devices via the Linux /proc filesystem.
+ *
+ * Author: Ben Greear, <greearb@candelatech.com>; copied from wanproc.c
+ * by Gene Kozin <genek@compuserve.com>
+ *
+ * Copyright: (c) 1998 Ben Greear
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ * ============================================================================
+ * Jan 20, 1998 Ben Greear Initial Version
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include "vlanproc.h"
+#include "vlan.h"
+
+/****** Function Prototypes *************************************************/
+
+/* Methods for preparing data for reading proc entries */
+static int vlan_seq_show(struct seq_file *seq, void *v);
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos);
+static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+static void vlan_seq_stop(struct seq_file *seq, void *v);
+static int vlandev_seq_show(struct seq_file *seq, void *v);
+
+/*
+ * Global Data
+ */
+
+/*
+ * Names of the proc directory entries
+ */
+
+static const char name_root[] = "vlan";
+static const char name_conf[] = "config";
+
+/*
+ * Structures for interfacing with the /proc filesystem.
+ * VLAN creates its own directory /proc/net/vlan with the following
+ * entries:
+ * config device status/configuration
+ * <device> entry for each device
+ */
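+/*
+ * For illustration only, /proc/net/vlan/config output is of the form
+ * (device names and IDs are made-up examples):
+ *
+ *     VLAN Dev name | VLAN ID
+ *     Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
+ *     eth0.100       | 100 | eth0
+ *
+ * See vlan_seq_show() below.
+ */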
+
+/*
+ * Generic /proc/net/vlan/<file> file and inode operations
+ */
+
+static const struct seq_operations vlan_seq_ops = {
+ .start = vlan_seq_start,
+ .next = vlan_seq_next,
+ .stop = vlan_seq_stop,
+ .show = vlan_seq_show,
+};
+
+static int vlan_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &vlan_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations vlan_fops = {
+ .owner = THIS_MODULE,
+ .open = vlan_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+/*
+ * /proc/net/vlan/<device> file and inode operations
+ */
+
+static int vlandev_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vlandev_seq_show, PDE(inode)->data);
+}
+
+static const struct file_operations vlandev_fops = {
+ .owner = THIS_MODULE,
+ .open = vlandev_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Proc filesystem directory entries.
+ */
+
+/* Strings */
+static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
+ [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
+ [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD",
+ [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD",
+ [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID",
+};
+/*
+ * Interface functions
+ */
+
+/*
+ * Clean up /proc/net/vlan entries
+ */
+
+void vlan_proc_cleanup(struct net *net)
+{
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+
+ if (vn->proc_vlan_conf)
+ remove_proc_entry(name_conf, vn->proc_vlan_dir);
+
+ if (vn->proc_vlan_dir)
+ proc_net_remove(net, name_root);
+
+ /* Per-device entries are removed along with their vlan_device in
+ * vlan_proc_rem_dev(), so there is nothing more to clean up here.
+ */
+}
+
+/*
+ * Create /proc/net/vlan entries
+ */
+
+int __net_init vlan_proc_init(struct net *net)
+{
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+
+ vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net);
+ if (!vn->proc_vlan_dir)
+ goto err;
+
+ vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR,
+ vn->proc_vlan_dir, &vlan_fops);
+ if (!vn->proc_vlan_conf)
+ goto err;
+ return 0;
+
+err:
+ pr_err("%s: can't create entry in proc filesystem!\n", __func__);
+ vlan_proc_cleanup(net);
+ return -ENOBUFS;
+}
+
+/*
+ * Add directory entry for VLAN device.
+ */
+
+int vlan_proc_add_dev(struct net_device *vlandev)
+{
+ struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+ struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
+
+ dev_info->dent =
+ proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
+ vn->proc_vlan_dir, &vlandev_fops, vlandev);
+ if (!dev_info->dent)
+ return -ENOBUFS;
+ return 0;
+}
+
+/*
+ * Delete directory entry for VLAN device.
+ */
+int vlan_proc_rem_dev(struct net_device *vlandev)
+{
+ struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
+
+ /* NOTE: remove_proc_entry() frees the entry dent points to, so clear the stale pointer afterwards. */
+ if (vlan_dev_info(vlandev)->dent) {
+ remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
+ vn->proc_vlan_dir);
+ vlan_dev_info(vlandev)->dent = NULL;
+ }
+ return 0;
+}
+
+/****** Proc filesystem entry points ****************************************/
+
+/*
+ * The following few functions build the content of /proc/net/vlan/config
+ */
+
+/* start read of /proc/net/vlan/config */
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ struct net_device *dev;
+ struct net *net = seq_file_net(seq);
+ loff_t i = 1;
+
+ rcu_read_lock();
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for_each_netdev_rcu(net, dev) {
+ if (!is_vlan_dev(dev))
+ continue;
+
+ if (i++ == *pos)
+ return dev;
+ }
+
+ return NULL;
+}
+
+static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct net_device *dev;
+ struct net *net = seq_file_net(seq);
+
+ ++*pos;
+
+ dev = (struct net_device *)v;
+ if (v == SEQ_START_TOKEN)
+ dev = net_device_entry(&net->dev_base_head);
+
+ for_each_netdev_continue_rcu(net, dev) {
+ if (!is_vlan_dev(dev))
+ continue;
+
+ return dev;
+ }
+
+ return NULL;
+}
+
+static void vlan_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+static int vlan_seq_show(struct seq_file *seq, void *v)
+{
+ struct net *net = seq_file_net(seq);
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+
+ if (v == SEQ_START_TOKEN) {
+ const char *nmtype = NULL;
+
+ seq_puts(seq, "VLAN Dev name | VLAN ID\n");
+
+ if (vn->name_type < ARRAY_SIZE(vlan_name_type_str))
+ nmtype = vlan_name_type_str[vn->name_type];
+
+ seq_printf(seq, "Name-Type: %s\n",
+ nmtype ? nmtype : "UNKNOWN");
+ } else {
+ const struct net_device *vlandev = v;
+ const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+
+ seq_printf(seq, "%-15s| %d | %s\n", vlandev->name,
+ dev_info->vlan_id, dev_info->real_dev->name);
+ }
+ return 0;
+}
+
+static int vlandev_seq_show(struct seq_file *seq, void *offset)
+{
+ struct net_device *vlandev = (struct net_device *) seq->private;
+ const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *stats;
+ static const char fmt64[] = "%30s %12llu\n";
+ int i;
+
+ if (!is_vlan_dev(vlandev))
+ return 0;
+
+ stats = dev_get_stats(vlandev, &temp);
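+ /* flags bit 0 is VLAN_FLAG_REORDER_HDR, printed as REORDER_HDR below */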
+ seq_printf(seq,
+ "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
+ vlandev->name, dev_info->vlan_id,
+ (int)(dev_info->flags & 1), vlandev->priv_flags);
+
+ seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
+ seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
+ seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
+ seq_puts(seq, "\n");
+ seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
+ seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
+ seq_printf(seq, "Device: %s", dev_info->real_dev->name);
+ /* now show all PRIORITY mappings relating to this VLAN */
+ seq_printf(seq, "\nINGRESS priority mappings: "
+ "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n",
+ dev_info->ingress_priority_map[0],
+ dev_info->ingress_priority_map[1],
+ dev_info->ingress_priority_map[2],
+ dev_info->ingress_priority_map[3],
+ dev_info->ingress_priority_map[4],
+ dev_info->ingress_priority_map[5],
+ dev_info->ingress_priority_map[6],
+ dev_info->ingress_priority_map[7]);
+
+ seq_printf(seq, " EGRESS priority mappings: ");
+ for (i = 0; i < 16; i++) {
+ const struct vlan_priority_tci_mapping *mp
+ = dev_info->egress_priority_map[i];
+ while (mp) {
+ seq_printf(seq, "%u:%hu ",
+ mp->priority, ((mp->vlan_qos >> 13) & 0x7));
+ mp = mp->next;
+ }
+ }
+ seq_puts(seq, "\n");
+
+ return 0;
+}
diff --git a/net/8021q/vlanproc.h b/net/8021q/vlanproc.h
new file mode 100644
index 00000000..063f60a3
--- /dev/null
+++ b/net/8021q/vlanproc.h
@@ -0,0 +1,20 @@
+#ifndef __BEN_VLAN_PROC_INC__
+#define __BEN_VLAN_PROC_INC__
+
+#ifdef CONFIG_PROC_FS
+struct net;
+
+int vlan_proc_init(struct net *net);
+int vlan_proc_rem_dev(struct net_device *vlandev);
+int vlan_proc_add_dev(struct net_device *vlandev);
+void vlan_proc_cleanup(struct net *net);
+
+#else /* No CONFIG_PROC_FS */
+
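+/* Stub the proc hooks out so callers need no #ifdef CONFIG_PROC_FS. */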
+#define vlan_proc_init(net) (0)
+#define vlan_proc_cleanup(net) do {} while (0)
+#define vlan_proc_add_dev(dev) ({(void)(dev), 0; })
+#define vlan_proc_rem_dev(dev) ({(void)(dev), 0; })
+#endif
+
+#endif /* !(__BEN_VLAN_PROC_INC__) */