Diffstat (limited to 'target/linux/generic/patches-3.8')
147 files changed, 25390 insertions, 0 deletions
diff --git a/target/linux/generic/patches-3.8/001-sctp_fix_default_choice.patch b/target/linux/generic/patches-3.8/001-sctp_fix_default_choice.patch new file mode 100644 index 0000000000..cea064cc51 --- /dev/null +++ b/target/linux/generic/patches-3.8/001-sctp_fix_default_choice.patch @@ -0,0 +1,33 @@ +From 7a70ff39328cd24b9c7db11eb4ae1a18c698a538 Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <florian@openwrt.org> +Date: Mon, 7 Jan 2013 14:26:15 +0100 +Subject: [PATCH] sctp: fix typo in default SCTP cookie choice + +Commit 0d0863b0 (sctp: Change defaults on cookie hmac selection) +introduced a choice configuration option to select the default SCTP +cookie hashing algorithm, a typo was introduced for the default choice. +This is an issue when running make oldconfig because an explicit choice +number must be entered since no default is available. This patch fixes +the typo, thus providing a valid default choice. + +Signed-off-by: Florian Fainelli <florian@openwrt.org> +--- + net/sctp/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig +index c262106..7521d94 100644 +--- a/net/sctp/Kconfig ++++ b/net/sctp/Kconfig +@@ -68,7 +68,7 @@ config SCTP_DBG_OBJCNT + If unsure, say N + choice + prompt "Default SCTP cookie HMAC encoding" +- default SCTP_COOKIE_HMAC_MD5 ++ default SCTP_DEFAULT_COOKIE_HMAC_MD5 + help + This option sets the default sctp cookie hmac algorithm + when in doubt select 'md5' +-- +1.7.10.4 + diff --git a/target/linux/generic/patches-3.8/060-hso_devices.patch b/target/linux/generic/patches-3.8/060-hso_devices.patch new file mode 100644 index 0000000000..0080bd58b7 --- /dev/null +++ b/target/linux/generic/patches-3.8/060-hso_devices.patch @@ -0,0 +1,36 @@ +--- a/drivers/net/usb/hso.c ++++ b/drivers/net/usb/hso.c +@@ -468,8 +468,10 @@ static const struct usb_device_id hso_id + {USB_DEVICE(0x0af0, 0x8400)}, + {USB_DEVICE(0x0af0, 0x8600)}, + {USB_DEVICE(0x0af0, 0x8800)}, +- {USB_DEVICE(0x0af0, 0x8900)}, +- {USB_DEVICE(0x0af0, 0x9000)}, ++ {USB_DEVICE(0x0af0, 0x8900)}, /* GTM 67xx */ ++ {USB_DEVICE(0x0af0, 0x9000)}, /* GTM 66xx */ ++ {USB_DEVICE(0x0af0, 0x9200)}, /* GTM 67xxWFS */ ++ {USB_DEVICE(0x0af0, 0x9300)}, /* GTM 66xxWFS */ + {USB_DEVICE(0x0af0, 0xd035)}, + {USB_DEVICE(0x0af0, 0xd055)}, + {USB_DEVICE(0x0af0, 0xd155)}, +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -1237,6 +1237,18 @@ UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + 0 ), + ++UNUSUAL_DEV( 0x0af0, 0x9200, 0x0000, 0x0000, ++ "Option", ++ "Globetrotter 67xxWFS SD-Card", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ 0 ), ++ ++UNUSUAL_DEV( 0x0af0, 0x9300, 0x0000, 0x0000, ++ "Option", ++ "Globetrotter 66xxWFS SD-Card", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ 0 ), ++ + UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000, + "Option", + "GI 070x SD-Card", diff --git a/target/linux/generic/patches-3.8/061-arm_xz_decompressor_build_fix.patch b/target/linux/generic/patches-3.8/061-arm_xz_decompressor_build_fix.patch new file mode 100644 index 0000000000..e39a429472 --- /dev/null +++ b/target/linux/generic/patches-3.8/061-arm_xz_decompressor_build_fix.patch @@ -0,0 +1,10 @@ +--- a/arch/arm/boot/compressed/decompress.c ++++ b/arch/arm/boot/compressed/decompress.c +@@ -50,6 +50,7 @@ extern char * strstr(const char * s1, co + #ifdef CONFIG_KERNEL_XZ + #define memmove memmove + #define memcpy memcpy ++extern char * strstr(const char *, const char *); + #include "../../../../lib/decompress_unxz.c" + 
#endif + diff --git a/target/linux/generic/patches-3.8/062-mips_decompressor_build_fix.patch b/target/linux/generic/patches-3.8/062-mips_decompressor_build_fix.patch new file mode 100644 index 0000000000..52c45c38e0 --- /dev/null +++ b/target/linux/generic/patches-3.8/062-mips_decompressor_build_fix.patch @@ -0,0 +1,66 @@ +From 0db3db45f5bd6df4bdc03bbd5dec672e16164c4e Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <florian@openwrt.org> +Date: Mon, 12 Nov 2012 12:31:55 +0100 +Subject: [PATCH] MIPS: decompressor: fix build failure on memcpy() in + decompress.c + +The decompress.c file includes linux/kernel.h which causes the following +inclusion chain to be pulled: +linux/kernel.h -> + linux/dynamic_debug.h -> + linux/string.h -> + asm/string.h + +We end up having a the GCC builtin + architecture specific memcpy() expanding +into this: + +void *({ size_t __len = (size_t n); void *__ret; if +(__builtin_constant_p(size_t n) && __len >= 64) __ret = memcpy((void *dest), +(const void *src), __len); else __ret = __builtin_memcpy((void *dest), (const +void *src), __len); __ret; }) +{ + [memcpy implementation in decompress.c starts here] + int i; + const char *s = src; + char *d = dest; + + for (i = 0; i < n; i++) + d[i] = s[i]; + return dest; +} + +raising the following compilation error: +arch/mips/boot/compressed/decompress.c:46:8: error: expected identifier or '(' +before '{' token + +There are at least three possibilities to fix this issue: + +1) define _LINUX_STRING_H_ at the beginning of decompress.c to prevent + further linux/string.h definitions and declarations from being used, and add + an explicit strstr() declaration for linux/dynamic_debug.h + +2) remove the inclusion of linux/kernel.h because we actually use no definition + or declaration from this header file + +3) undefine memcpy or re-define memcpy to memcpy thus resulting in picking up + the local memcpy() implementation to this compilation unit + +This patch uses the second option which is the less intrusive one. + +Signed-off-by: Florian Fainelli <florian@openwrt.org> +--- + arch/mips/boot/compressed/decompress.c | 2 -- + 1 file changed, 2 deletions(-) + +--- a/arch/mips/boot/compressed/decompress.c ++++ b/arch/mips/boot/compressed/decompress.c +@@ -10,9 +10,7 @@ + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +- + #include <linux/types.h> +-#include <linux/kernel.h> + + #include <asm/addrspace.h> + diff --git a/target/linux/generic/patches-3.8/084-x86_fix_perf_uclibc_build.patch b/target/linux/generic/patches-3.8/084-x86_fix_perf_uclibc_build.patch new file mode 100644 index 0000000000..95163a43ae --- /dev/null +++ b/target/linux/generic/patches-3.8/084-x86_fix_perf_uclibc_build.patch @@ -0,0 +1,27 @@ +From 031d8ad2c3cee85c515e551fc8c0054bdedb7b8b Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <florian@openwrt.org> +Date: Thu, 13 Dec 2012 18:02:11 +0100 +Subject: [PATCH] x86: fix perf build with uclibc toolchains + +libio.h is not provided by uClibc, in order to be able to test the +definition of __UCLIBC__ we need to include stdlib.h, which also +includes stddef.h, providing the definition of 'NULL' + +Signed-off-by: Florian Fainelli <florian@openwrt.org> +--- + tools/perf/arch/x86/util/dwarf-regs.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/tools/perf/arch/x86/util/dwarf-regs.c ++++ b/tools/perf/arch/x86/util/dwarf-regs.c +@@ -20,7 +20,10 @@ + * + */ + ++#include <stdlib.h> ++#ifndef __UCLIBC__ + #include <libio.h> ++#endif + #include <dwarf-regs.h> + + /* diff --git a/target/linux/generic/patches-3.8/100-overlayfs.patch b/target/linux/generic/patches-3.8/100-overlayfs.patch new file mode 100644 index 0000000000..3e065137b8 --- /dev/null +++ b/target/linux/generic/patches-3.8/100-overlayfs.patch @@ -0,0 +1,3191 @@ +--- a/Documentation/filesystems/Locking ++++ b/Documentation/filesystems/Locking +@@ -64,6 +64,7 @@ prototypes: + int (*atomic_open)(struct inode *, struct dentry *, + struct file *, unsigned open_flag, + umode_t create_mode, int *opened); ++ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); + + locking rules: + all may block +@@ -91,6 +92,7 @@ removexattr: yes + fiemap: no + update_time: no + atomic_open: yes ++dentry_open: no + + Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on + victim. +--- /dev/null ++++ b/Documentation/filesystems/overlayfs.txt +@@ -0,0 +1,199 @@ ++Written by: Neil Brown <neilb@suse.de> ++ ++Overlay Filesystem ++================== ++ ++This document describes a prototype for a new approach to providing ++overlay-filesystem functionality in Linux (sometimes referred to as ++union-filesystems). An overlay-filesystem tries to present a ++filesystem which is the result over overlaying one filesystem on top ++of the other. ++ ++The result will inevitably fail to look exactly like a normal ++filesystem for various technical reasons. The expectation is that ++many use cases will be able to ignore these differences. ++ ++This approach is 'hybrid' because the objects that appear in the ++filesystem do not all appear to belong to that filesystem. In many ++cases an object accessed in the union will be indistinguishable ++from accessing the corresponding object from the original filesystem. ++This is most obvious from the 'st_dev' field returned by stat(2). ++ ++While directories will report an st_dev from the overlay-filesystem, ++all non-directory objects will report an st_dev from the lower or ++upper filesystem that is providing the object. Similarly st_ino will ++only be unique when combined with st_dev, and both of these can change ++over the lifetime of a non-directory object. Many applications and ++tools ignore these values and will not be affected. ++ ++Upper and Lower ++--------------- ++ ++An overlay filesystem combines two filesystems - an 'upper' filesystem ++and a 'lower' filesystem. 
When a name exists in both filesystems, the ++object in the 'upper' filesystem is visible while the object in the ++'lower' filesystem is either hidden or, in the case of directories, ++merged with the 'upper' object. ++ ++It would be more correct to refer to an upper and lower 'directory ++tree' rather than 'filesystem' as it is quite possible for both ++directory trees to be in the same filesystem and there is no ++requirement that the root of a filesystem be given for either upper or ++lower. ++ ++The lower filesystem can be any filesystem supported by Linux and does ++not need to be writable. The lower filesystem can even be another ++overlayfs. The upper filesystem will normally be writable and if it ++is it must support the creation of trusted.* extended attributes, and ++must provide valid d_type in readdir responses, at least for symbolic ++links - so NFS is not suitable. ++ ++A read-only overlay of two read-only filesystems may use any ++filesystem type. ++ ++Directories ++----------- ++ ++Overlaying mainly involves directories. If a given name appears in both ++upper and lower filesystems and refers to a non-directory in either, ++then the lower object is hidden - the name refers only to the upper ++object. ++ ++Where both upper and lower objects are directories, a merged directory ++is formed. ++ ++At mount time, the two directories given as mount options are combined ++into a merged directory: ++ ++ mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /overlay ++ ++Then whenever a lookup is requested in such a merged directory, the ++lookup is performed in each actual directory and the combined result ++is cached in the dentry belonging to the overlay filesystem. If both ++actual lookups find directories, both are stored and a merged ++directory is created, otherwise only one is stored: the upper if it ++exists, else the lower. ++ ++Only the lists of names from directories are merged. Other content ++such as metadata and extended attributes are reported for the upper ++directory only. These attributes of the lower directory are hidden. ++ ++whiteouts and opaque directories ++-------------------------------- ++ ++In order to support rm and rmdir without changing the lower ++filesystem, an overlay filesystem needs to record in the upper filesystem ++that files have been removed. This is done using whiteouts and opaque ++directories (non-directories are always opaque). ++ ++The overlay filesystem uses extended attributes with a ++"trusted.overlay." prefix to record these details. ++ ++A whiteout is created as a symbolic link with target ++"(overlay-whiteout)" and with xattr "trusted.overlay.whiteout" set to "y". ++When a whiteout is found in the upper level of a merged directory, any ++matching name in the lower level is ignored, and the whiteout itself ++is also hidden. ++ ++A directory is made opaque by setting the xattr "trusted.overlay.opaque" ++to "y". Where the upper filesystem contains an opaque directory, any ++directory in the lower filesystem with the same name is ignored. ++ ++readdir ++------- ++ ++When a 'readdir' request is made on a merged directory, the upper and ++lower directories are each read and the name lists merged in the ++obvious way (upper is read first, then lower - entries that already ++exist are not re-added). This merged name list is cached in the ++'struct file' and so remains as long as the file is kept open. If the ++directory is opened and read by two processes at the same time, they ++will each have separate caches. 
A seekdir to the start of the ++directory (offset 0) followed by a readdir will cause the cache to be ++discarded and rebuilt. ++ ++This means that changes to the merged directory do not appear while a ++directory is being read. This is unlikely to be noticed by many ++programs. ++ ++seek offsets are assigned sequentially when the directories are read. ++Thus if ++ - read part of a directory ++ - remember an offset, and close the directory ++ - re-open the directory some time later ++ - seek to the remembered offset ++ ++there may be little correlation between the old and new locations in ++the list of filenames, particularly if anything has changed in the ++directory. ++ ++Readdir on directories that are not merged is simply handled by the ++underlying directory (upper or lower). ++ ++ ++Non-directories ++--------------- ++ ++Objects that are not directories (files, symlinks, device-special ++files etc.) are presented either from the upper or lower filesystem as ++appropriate. When a file in the lower filesystem is accessed in a way ++the requires write-access, such as opening for write access, changing ++some metadata etc., the file is first copied from the lower filesystem ++to the upper filesystem (copy_up). Note that creating a hard-link ++also requires copy_up, though of course creation of a symlink does ++not. ++ ++The copy_up may turn out to be unnecessary, for example if the file is ++opened for read-write but the data is not modified. ++ ++The copy_up process first makes sure that the containing directory ++exists in the upper filesystem - creating it and any parents as ++necessary. It then creates the object with the same metadata (owner, ++mode, mtime, symlink-target etc.) and then if the object is a file, the ++data is copied from the lower to the upper filesystem. Finally any ++extended attributes are copied up. ++ ++Once the copy_up is complete, the overlay filesystem simply ++provides direct access to the newly created file in the upper ++filesystem - future operations on the file are barely noticed by the ++overlay filesystem (though an operation on the name of the file such as ++rename or unlink will of course be noticed and handled). ++ ++ ++Non-standard behavior ++--------------------- ++ ++The copy_up operation essentially creates a new, identical file and ++moves it over to the old name. The new file may be on a different ++filesystem, so both st_dev and st_ino of the file may change. ++ ++Any open files referring to this inode will access the old data and ++metadata. Similarly any file locks obtained before copy_up will not ++apply to the copied up file. ++ ++On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and ++fsetxattr(2) will fail with EROFS. ++ ++If a file with multiple hard links is copied up, then this will ++"break" the link. Changes will not be propagated to other names ++referring to the same inode. ++ ++Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory ++object in overlayfs will not contain valid absolute paths, only ++relative paths leading up to the filesystem's root. This will be ++fixed in the future. ++ ++Some operations are not atomic, for example a crash during copy_up or ++rename will leave the filesystem in an inconsistent state. This will ++be addressed in the future. ++ ++Changes to underlying filesystems ++--------------------------------- ++ ++Offline changes, when the overlay is not mounted, are allowed to either ++the upper or the lower trees. 
++ ++Changes to the underlying filesystems while part of a mounted overlay ++filesystem are not allowed. If the underlying filesystem is changed, ++the behavior of the overlay is undefined, though it will not result in ++a crash or deadlock. +--- a/Documentation/filesystems/vfs.txt ++++ b/Documentation/filesystems/vfs.txt +@@ -362,6 +362,7 @@ struct inode_operations { + int (*atomic_open)(struct inode *, struct dentry *, + struct file *, unsigned open_flag, + umode_t create_mode, int *opened); ++ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); + }; + + Again, all methods are called without any locks being held, unless +@@ -681,6 +682,12 @@ struct address_space_operations { + but instead uses bmap to find out where the blocks in the file + are and uses those addresses directly. + ++ dentry_open: this is an alternative to f_op->open(), the difference is that ++ this method may open a file not necessarily originating from the same ++ filesystem as the one i_op->open() was called on. It may be ++ useful for stacking filesystems which want to allow native I/O directly ++ on underlying files. ++ + + invalidatepage: If a page has PagePrivate set, then invalidatepage + will be called when part or all of the page is to be removed +--- a/fs/ecryptfs/main.c ++++ b/fs/ecryptfs/main.c +@@ -567,6 +567,13 @@ static struct dentry *ecryptfs_mount(str + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; + s->s_blocksize = path.dentry->d_sb->s_blocksize; + s->s_magic = ECRYPTFS_SUPER_MAGIC; ++ s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1; ++ ++ rc = -EINVAL; ++ if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { ++ printk(KERN_ERR "eCryptfs: maximum fs stacking depth exceeded\n"); ++ goto out_free; ++ } + + inode = ecryptfs_get_inode(path.dentry->d_inode, s); + rc = PTR_ERR(inode); +--- a/fs/internal.h ++++ b/fs/internal.h +@@ -42,11 +42,6 @@ static inline int __sync_blockdev(struct + extern void __init chrdev_init(void); + + /* +- * namei.c +- */ +-extern int __inode_permission(struct inode *, int); +- +-/* + * namespace.c + */ + extern int copy_mount_options(const void __user *, unsigned long *); +--- a/fs/Kconfig ++++ b/fs/Kconfig +@@ -67,6 +67,7 @@ source "fs/quota/Kconfig" + + source "fs/autofs4/Kconfig" + source "fs/fuse/Kconfig" ++source "fs/overlayfs/Kconfig" + + config CUSE + tristate "Character device in Userspace support" +--- a/fs/Makefile ++++ b/fs/Makefile +@@ -107,6 +107,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/ + obj-$(CONFIG_AUTOFS4_FS) += autofs4/ + obj-$(CONFIG_ADFS_FS) += adfs/ + obj-$(CONFIG_FUSE_FS) += fuse/ ++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/ + obj-$(CONFIG_UDF_FS) += udf/ + obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ + obj-$(CONFIG_OMFS_FS) += omfs/ +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -402,6 +402,7 @@ int __inode_permission(struct inode *ino + + return security_inode_permission(inode, mask); + } ++EXPORT_SYMBOL(__inode_permission); + + /** + * sb_permission - Check superblock-level permissions +@@ -2878,9 +2879,12 @@ finish_open_created: + error = may_open(&nd->path, acc_mode, open_flag); + if (error) + goto out; +- file->f_path.mnt = nd->path.mnt; +- error = finish_open(file, nd->path.dentry, NULL, opened); +- if (error) { ++ ++ BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ ++ error = vfs_open(&nd->path, file, current_cred()); ++ if (!error) { ++ *opened |= FILE_OPENED; ++ } else { + if (error == -EOPENSTALE) + goto stale_open; + goto out; +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -1410,6 +1410,24 @@ void 
drop_collected_mounts(struct vfsmou + release_mounts(&umount_list); + } + ++struct vfsmount *clone_private_mount(struct path *path) ++{ ++ struct mount *old_mnt = real_mount(path->mnt); ++ struct mount *new_mnt; ++ ++ if (IS_MNT_UNBINDABLE(old_mnt)) ++ return ERR_PTR(-EINVAL); ++ ++ down_read(&namespace_sem); ++ new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); ++ up_read(&namespace_sem); ++ if (!new_mnt) ++ return ERR_PTR(-ENOMEM); ++ ++ return &new_mnt->mnt; ++} ++EXPORT_SYMBOL_GPL(clone_private_mount); ++ + int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, + struct vfsmount *root) + { +--- a/fs/open.c ++++ b/fs/open.c +@@ -816,8 +816,7 @@ struct file *dentry_open(const struct pa + return ERR_PTR(error); + + f->f_flags = flags; +- f->f_path = *path; +- error = do_dentry_open(f, NULL, cred); ++ error = vfs_open(path, f, cred); + if (!error) { + error = open_check_o_direct(f); + if (error) { +@@ -832,6 +831,26 @@ struct file *dentry_open(const struct pa + } + EXPORT_SYMBOL(dentry_open); + ++/** ++ * vfs_open - open the file at the given path ++ * @path: path to open ++ * @filp: newly allocated file with f_flag initialized ++ * @cred: credentials to use ++ */ ++int vfs_open(const struct path *path, struct file *filp, ++ const struct cred *cred) ++{ ++ struct inode *inode = path->dentry->d_inode; ++ ++ if (inode->i_op->dentry_open) ++ return inode->i_op->dentry_open(path->dentry, filp, cred); ++ else { ++ filp->f_path = *path; ++ return do_dentry_open(filp, NULL, cred); ++ } ++} ++EXPORT_SYMBOL(vfs_open); ++ + static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) + { + int lookup_flags = 0; +--- /dev/null ++++ b/fs/overlayfs/copy_up.c +@@ -0,0 +1,385 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. 
++ */ ++ ++#include <linux/fs.h> ++#include <linux/slab.h> ++#include <linux/file.h> ++#include <linux/splice.h> ++#include <linux/xattr.h> ++#include <linux/security.h> ++#include <linux/uaccess.h> ++#include <linux/sched.h> ++#include "overlayfs.h" ++ ++#define OVL_COPY_UP_CHUNK_SIZE (1 << 20) ++ ++static int ovl_copy_up_xattr(struct dentry *old, struct dentry *new) ++{ ++ ssize_t list_size, size; ++ char *buf, *name, *value; ++ int error; ++ ++ if (!old->d_inode->i_op->getxattr || ++ !new->d_inode->i_op->getxattr) ++ return 0; ++ ++ list_size = vfs_listxattr(old, NULL, 0); ++ if (list_size <= 0) { ++ if (list_size == -EOPNOTSUPP) ++ return 0; ++ return list_size; ++ } ++ ++ buf = kzalloc(list_size, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ error = -ENOMEM; ++ value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL); ++ if (!value) ++ goto out; ++ ++ list_size = vfs_listxattr(old, buf, list_size); ++ if (list_size <= 0) { ++ error = list_size; ++ goto out_free_value; ++ } ++ ++ for (name = buf; name < (buf + list_size); name += strlen(name) + 1) { ++ size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX); ++ if (size <= 0) { ++ error = size; ++ goto out_free_value; ++ } ++ error = vfs_setxattr(new, name, value, size, 0); ++ if (error) ++ goto out_free_value; ++ } ++ ++out_free_value: ++ kfree(value); ++out: ++ kfree(buf); ++ return error; ++} ++ ++static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) ++{ ++ struct file *old_file; ++ struct file *new_file; ++ int error = 0; ++ ++ if (len == 0) ++ return 0; ++ ++ old_file = ovl_path_open(old, O_RDONLY); ++ if (IS_ERR(old_file)) ++ return PTR_ERR(old_file); ++ ++ new_file = ovl_path_open(new, O_WRONLY); ++ if (IS_ERR(new_file)) { ++ error = PTR_ERR(new_file); ++ goto out_fput; ++ } ++ ++ /* FIXME: copy up sparse files efficiently */ ++ while (len) { ++ loff_t offset = new_file->f_pos; ++ size_t this_len = OVL_COPY_UP_CHUNK_SIZE; ++ long bytes; ++ ++ if (len < this_len) ++ this_len = len; ++ ++ if (signal_pending_state(TASK_KILLABLE, current)) { ++ error = -EINTR; ++ break; ++ } ++ ++ bytes = do_splice_direct(old_file, &offset, new_file, this_len, ++ SPLICE_F_MOVE); ++ if (bytes <= 0) { ++ error = bytes; ++ break; ++ } ++ ++ len -= bytes; ++ } ++ ++ fput(new_file); ++out_fput: ++ fput(old_file); ++ return error; ++} ++ ++static char *ovl_read_symlink(struct dentry *realdentry) ++{ ++ int res; ++ char *buf; ++ struct inode *inode = realdentry->d_inode; ++ mm_segment_t old_fs; ++ ++ res = -EINVAL; ++ if (!inode->i_op->readlink) ++ goto err; ++ ++ res = -ENOMEM; ++ buf = (char *) __get_free_page(GFP_KERNEL); ++ if (!buf) ++ goto err; ++ ++ old_fs = get_fs(); ++ set_fs(get_ds()); ++ /* The cast to a user pointer is valid due to the set_fs() */ ++ res = inode->i_op->readlink(realdentry, ++ (char __user *)buf, PAGE_SIZE - 1); ++ set_fs(old_fs); ++ if (res < 0) { ++ free_page((unsigned long) buf); ++ goto err; ++ } ++ buf[res] = '\0'; ++ ++ return buf; ++ ++err: ++ return ERR_PTR(res); ++} ++ ++static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat) ++{ ++ struct iattr attr = { ++ .ia_valid = ++ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET, ++ .ia_atime = stat->atime, ++ .ia_mtime = stat->mtime, ++ }; ++ ++ return notify_change(upperdentry, &attr); ++} ++ ++static int ovl_set_mode(struct dentry *upperdentry, umode_t mode) ++{ ++ struct iattr attr = { ++ .ia_valid = ATTR_MODE, ++ .ia_mode = mode, ++ }; ++ ++ return notify_change(upperdentry, &attr); ++} ++ ++static int ovl_copy_up_locked(struct 
dentry *upperdir, struct dentry *dentry, ++ struct path *lowerpath, struct kstat *stat, ++ const char *link) ++{ ++ int err; ++ struct path newpath; ++ umode_t mode = stat->mode; ++ ++ /* Can't properly set mode on creation because of the umask */ ++ stat->mode &= S_IFMT; ++ ++ ovl_path_upper(dentry, &newpath); ++ WARN_ON(newpath.dentry); ++ newpath.dentry = ovl_upper_create(upperdir, dentry, stat, link); ++ if (IS_ERR(newpath.dentry)) ++ return PTR_ERR(newpath.dentry); ++ ++ if (S_ISREG(stat->mode)) { ++ err = ovl_copy_up_data(lowerpath, &newpath, stat->size); ++ if (err) ++ goto err_remove; ++ } ++ ++ err = ovl_copy_up_xattr(lowerpath->dentry, newpath.dentry); ++ if (err) ++ goto err_remove; ++ ++ mutex_lock(&newpath.dentry->d_inode->i_mutex); ++ if (!S_ISLNK(stat->mode)) ++ err = ovl_set_mode(newpath.dentry, mode); ++ if (!err) ++ err = ovl_set_timestamps(newpath.dentry, stat); ++ mutex_unlock(&newpath.dentry->d_inode->i_mutex); ++ if (err) ++ goto err_remove; ++ ++ ovl_dentry_update(dentry, newpath.dentry); ++ ++ /* ++ * Easiest way to get rid of the lower dentry reference is to ++ * drop this dentry. This is neither needed nor possible for ++ * directories. ++ */ ++ if (!S_ISDIR(stat->mode)) ++ d_drop(dentry); ++ ++ return 0; ++ ++err_remove: ++ if (S_ISDIR(stat->mode)) ++ vfs_rmdir(upperdir->d_inode, newpath.dentry); ++ else ++ vfs_unlink(upperdir->d_inode, newpath.dentry); ++ ++ dput(newpath.dentry); ++ ++ return err; ++} ++ ++/* ++ * Copy up a single dentry ++ * ++ * Directory renames only allowed on "pure upper" (already created on ++ * upper filesystem, never copied up). Directories which are on lower or ++ * are merged may not be renamed. For these -EXDEV is returned and ++ * userspace has to deal with it. This means, when copying up a ++ * directory we can rely on it and ancestors being stable. ++ * ++ * Non-directory renames start with copy up of source if necessary. The ++ * actual rename will only proceed once the copy up was successful. Copy ++ * up uses upper parent i_mutex for exclusion. Since rename can change ++ * d_parent it is possible that the copy up will lock the old parent. At ++ * that point the file will have already been copied up anyway. 
++ */ ++static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, ++ struct path *lowerpath, struct kstat *stat) ++{ ++ int err; ++ struct kstat pstat; ++ struct path parentpath; ++ struct dentry *upperdir; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ char *link = NULL; ++ ++ ovl_path_upper(parent, &parentpath); ++ upperdir = parentpath.dentry; ++ ++ err = vfs_getattr(parentpath.mnt, parentpath.dentry, &pstat); ++ if (err) ++ return err; ++ ++ if (S_ISLNK(stat->mode)) { ++ link = ovl_read_symlink(lowerpath->dentry); ++ if (IS_ERR(link)) ++ return PTR_ERR(link); ++ } ++ ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out_free_link; ++ ++ override_cred->fsuid = stat->uid; ++ override_cred->fsgid = stat->gid; ++ /* ++ * CAP_SYS_ADMIN for copying up extended attributes ++ * CAP_DAC_OVERRIDE for create ++ * CAP_FOWNER for chmod, timestamp update ++ * CAP_FSETID for chmod ++ * CAP_MKNOD for mknod ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ cap_raise(override_cred->cap_effective, CAP_FSETID); ++ cap_raise(override_cred->cap_effective, CAP_MKNOD); ++ old_cred = override_creds(override_cred); ++ ++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT); ++ if (ovl_path_type(dentry) != OVL_PATH_LOWER) { ++ err = 0; ++ } else { ++ err = ovl_copy_up_locked(upperdir, dentry, lowerpath, ++ stat, link); ++ if (!err) { ++ /* Restore timestamps on parent (best effort) */ ++ ovl_set_timestamps(upperdir, &pstat); ++ } ++ } ++ ++ mutex_unlock(&upperdir->d_inode->i_mutex); ++ ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ ++out_free_link: ++ if (link) ++ free_page((unsigned long) link); ++ ++ return err; ++} ++ ++int ovl_copy_up(struct dentry *dentry) ++{ ++ int err; ++ ++ err = 0; ++ while (!err) { ++ struct dentry *next; ++ struct dentry *parent; ++ struct path lowerpath; ++ struct kstat stat; ++ enum ovl_path_type type = ovl_path_type(dentry); ++ ++ if (type != OVL_PATH_LOWER) ++ break; ++ ++ next = dget(dentry); ++ /* find the topmost dentry not yet copied up */ ++ for (;;) { ++ parent = dget_parent(next); ++ ++ type = ovl_path_type(parent); ++ if (type != OVL_PATH_LOWER) ++ break; ++ ++ dput(next); ++ next = parent; ++ } ++ ++ ovl_path_lower(next, &lowerpath); ++ err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat); ++ if (!err) ++ err = ovl_copy_up_one(parent, next, &lowerpath, &stat); ++ ++ dput(parent); ++ dput(next); ++ } ++ ++ return err; ++} ++ ++/* Optimize by not copying up the file first and truncating later */ ++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size) ++{ ++ int err; ++ struct kstat stat; ++ struct path lowerpath; ++ struct dentry *parent = dget_parent(dentry); ++ ++ err = ovl_copy_up(parent); ++ if (err) ++ goto out_dput_parent; ++ ++ ovl_path_lower(dentry, &lowerpath); ++ err = vfs_getattr(lowerpath.mnt, lowerpath.dentry, &stat); ++ if (err) ++ goto out_dput_parent; ++ ++ if (size < stat.size) ++ stat.size = size; ++ ++ err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat); ++ ++out_dput_parent: ++ dput(parent); ++ return err; ++} +--- /dev/null ++++ b/fs/overlayfs/dir.c +@@ -0,0 +1,604 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. 
++ */ ++ ++#include <linux/fs.h> ++#include <linux/namei.h> ++#include <linux/xattr.h> ++#include <linux/security.h> ++#include <linux/cred.h> ++#include "overlayfs.h" ++ ++static const char *ovl_whiteout_symlink = "(overlay-whiteout)"; ++ ++static int ovl_whiteout(struct dentry *upperdir, struct dentry *dentry) ++{ ++ int err; ++ struct dentry *newdentry; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ /* FIXME: recheck lower dentry to see if whiteout is really needed */ ++ ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out; ++ ++ /* ++ * CAP_SYS_ADMIN for setxattr ++ * CAP_DAC_OVERRIDE for symlink creation ++ * CAP_FOWNER for unlink in sticky directory ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ override_cred->fsuid = GLOBAL_ROOT_UID; ++ override_cred->fsgid = GLOBAL_ROOT_GID; ++ old_cred = override_creds(override_cred); ++ ++ newdentry = lookup_one_len(dentry->d_name.name, upperdir, ++ dentry->d_name.len); ++ err = PTR_ERR(newdentry); ++ if (IS_ERR(newdentry)) ++ goto out_put_cred; ++ ++ /* Just been removed within the same locked region */ ++ WARN_ON(newdentry->d_inode); ++ ++ err = vfs_symlink(upperdir->d_inode, newdentry, ovl_whiteout_symlink); ++ if (err) ++ goto out_dput; ++ ++ ovl_dentry_version_inc(dentry->d_parent); ++ ++ err = vfs_setxattr(newdentry, ovl_whiteout_xattr, "y", 1, 0); ++ if (err) ++ vfs_unlink(upperdir->d_inode, newdentry); ++ ++out_dput: ++ dput(newdentry); ++out_put_cred: ++ revert_creds(old_cred); ++ put_cred(override_cred); ++out: ++ if (err) { ++ /* ++ * There's no way to recover from failure to whiteout. ++ * What should we do? Log a big fat error and... ? ++ */ ++ printk(KERN_ERR "overlayfs: ERROR - failed to whiteout '%s'\n", ++ dentry->d_name.name); ++ } ++ ++ return err; ++} ++ ++static struct dentry *ovl_lookup_create(struct dentry *upperdir, ++ struct dentry *template) ++{ ++ int err; ++ struct dentry *newdentry; ++ struct qstr *name = &template->d_name; ++ ++ newdentry = lookup_one_len(name->name, upperdir, name->len); ++ if (IS_ERR(newdentry)) ++ return newdentry; ++ ++ if (newdentry->d_inode) { ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ /* No need to check whiteout if lower parent is non-existent */ ++ err = -EEXIST; ++ if (!ovl_dentry_lower(template->d_parent)) ++ goto out_dput; ++ ++ if (!S_ISLNK(newdentry->d_inode->i_mode)) ++ goto out_dput; ++ ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out_dput; ++ ++ /* ++ * CAP_SYS_ADMIN for getxattr ++ * CAP_FOWNER for unlink in sticky directory ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ old_cred = override_creds(override_cred); ++ ++ err = -EEXIST; ++ if (ovl_is_whiteout(newdentry)) ++ err = vfs_unlink(upperdir->d_inode, newdentry); ++ ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ if (err) ++ goto out_dput; ++ ++ dput(newdentry); ++ newdentry = lookup_one_len(name->name, upperdir, name->len); ++ if (IS_ERR(newdentry)) { ++ ovl_whiteout(upperdir, template); ++ return newdentry; ++ } ++ ++ /* ++ * Whiteout just been successfully removed, parent ++ * i_mutex is still held, there's no way the lookup ++ * could return positive. 
++ */ ++ WARN_ON(newdentry->d_inode); ++ } ++ ++ return newdentry; ++ ++out_dput: ++ dput(newdentry); ++ return ERR_PTR(err); ++} ++ ++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry, ++ struct kstat *stat, const char *link) ++{ ++ int err; ++ struct dentry *newdentry; ++ struct inode *dir = upperdir->d_inode; ++ ++ newdentry = ovl_lookup_create(upperdir, dentry); ++ if (IS_ERR(newdentry)) ++ goto out; ++ ++ switch (stat->mode & S_IFMT) { ++ case S_IFREG: ++ err = vfs_create(dir, newdentry, stat->mode, NULL); ++ break; ++ ++ case S_IFDIR: ++ err = vfs_mkdir(dir, newdentry, stat->mode); ++ break; ++ ++ case S_IFCHR: ++ case S_IFBLK: ++ case S_IFIFO: ++ case S_IFSOCK: ++ err = vfs_mknod(dir, newdentry, stat->mode, stat->rdev); ++ break; ++ ++ case S_IFLNK: ++ err = vfs_symlink(dir, newdentry, link); ++ break; ++ ++ default: ++ err = -EPERM; ++ } ++ if (err) { ++ if (ovl_dentry_is_opaque(dentry)) ++ ovl_whiteout(upperdir, dentry); ++ dput(newdentry); ++ newdentry = ERR_PTR(err); ++ } else if (WARN_ON(!newdentry->d_inode)) { ++ /* ++ * Not quite sure if non-instantiated dentry is legal or not. ++ * VFS doesn't seem to care so check and warn here. ++ */ ++ dput(newdentry); ++ newdentry = ERR_PTR(-ENOENT); ++ } ++ ++out: ++ return newdentry; ++ ++} ++ ++static int ovl_set_opaque(struct dentry *upperdentry) ++{ ++ int err; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ return -ENOMEM; ++ ++ /* CAP_SYS_ADMIN for setxattr of "trusted" namespace */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ old_cred = override_creds(override_cred); ++ err = vfs_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0); ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ ++ return err; ++} ++ ++static int ovl_remove_opaque(struct dentry *upperdentry) ++{ ++ int err; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ return -ENOMEM; ++ ++ /* CAP_SYS_ADMIN for removexattr of "trusted" namespace */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ old_cred = override_creds(override_cred); ++ err = vfs_removexattr(upperdentry, ovl_opaque_xattr); ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ ++ return err; ++} ++ ++static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, ++ struct kstat *stat) ++{ ++ int err; ++ enum ovl_path_type type; ++ struct path realpath; ++ ++ type = ovl_path_real(dentry, &realpath); ++ err = vfs_getattr(realpath.mnt, realpath.dentry, stat); ++ if (err) ++ return err; ++ ++ stat->dev = dentry->d_sb->s_dev; ++ stat->ino = dentry->d_inode->i_ino; ++ ++ /* ++ * It's probably not worth it to count subdirs to get the ++ * correct link count. nlink=1 seems to pacify 'find' and ++ * other utilities. 
++ */ ++ if (type == OVL_PATH_MERGE) ++ stat->nlink = 1; ++ ++ return 0; ++} ++ ++static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev, ++ const char *link) ++{ ++ int err; ++ struct dentry *newdentry; ++ struct dentry *upperdir; ++ struct inode *inode; ++ struct kstat stat = { ++ .mode = mode, ++ .rdev = rdev, ++ }; ++ ++ err = -ENOMEM; ++ inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata); ++ if (!inode) ++ goto out; ++ ++ err = ovl_copy_up(dentry->d_parent); ++ if (err) ++ goto out_iput; ++ ++ upperdir = ovl_dentry_upper(dentry->d_parent); ++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT); ++ ++ newdentry = ovl_upper_create(upperdir, dentry, &stat, link); ++ err = PTR_ERR(newdentry); ++ if (IS_ERR(newdentry)) ++ goto out_unlock; ++ ++ ovl_dentry_version_inc(dentry->d_parent); ++ if (ovl_dentry_is_opaque(dentry) && S_ISDIR(mode)) { ++ err = ovl_set_opaque(newdentry); ++ if (err) { ++ vfs_rmdir(upperdir->d_inode, newdentry); ++ ovl_whiteout(upperdir, dentry); ++ goto out_dput; ++ } ++ } ++ ovl_dentry_update(dentry, newdentry); ++ ovl_copyattr(newdentry->d_inode, inode); ++ d_instantiate(dentry, inode); ++ inode = NULL; ++ newdentry = NULL; ++ err = 0; ++ ++out_dput: ++ dput(newdentry); ++out_unlock: ++ mutex_unlock(&upperdir->d_inode->i_mutex); ++out_iput: ++ iput(inode); ++out: ++ return err; ++} ++ ++static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ bool excl) ++{ ++ return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL); ++} ++ ++static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) ++{ ++ return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL); ++} ++ ++static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, ++ dev_t rdev) ++{ ++ return ovl_create_object(dentry, mode, rdev, NULL); ++} ++ ++static int ovl_symlink(struct inode *dir, struct dentry *dentry, ++ const char *link) ++{ ++ return ovl_create_object(dentry, S_IFLNK, 0, link); ++} ++ ++static int ovl_do_remove(struct dentry *dentry, bool is_dir) ++{ ++ int err; ++ enum ovl_path_type type; ++ struct path realpath; ++ struct dentry *upperdir; ++ ++ err = ovl_copy_up(dentry->d_parent); ++ if (err) ++ return err; ++ ++ upperdir = ovl_dentry_upper(dentry->d_parent); ++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT); ++ type = ovl_path_real(dentry, &realpath); ++ if (type != OVL_PATH_LOWER) { ++ err = -ESTALE; ++ if (realpath.dentry->d_parent != upperdir) ++ goto out_d_drop; ++ ++ /* FIXME: create whiteout up front and rename to target */ ++ ++ if (is_dir) ++ err = vfs_rmdir(upperdir->d_inode, realpath.dentry); ++ else ++ err = vfs_unlink(upperdir->d_inode, realpath.dentry); ++ if (err) ++ goto out_d_drop; ++ ++ ovl_dentry_version_inc(dentry->d_parent); ++ } ++ ++ if (type != OVL_PATH_UPPER || ovl_dentry_is_opaque(dentry)) ++ err = ovl_whiteout(upperdir, dentry); ++ ++ /* ++ * Keeping this dentry hashed would mean having to release ++ * upperpath/lowerpath, which could only be done if we are the ++ * sole user of this dentry. Too tricky... Just unhash for ++ * now. 
++ */ ++out_d_drop: ++ d_drop(dentry); ++ mutex_unlock(&upperdir->d_inode->i_mutex); ++ ++ return err; ++} ++ ++static int ovl_unlink(struct inode *dir, struct dentry *dentry) ++{ ++ return ovl_do_remove(dentry, false); ++} ++ ++ ++static int ovl_rmdir(struct inode *dir, struct dentry *dentry) ++{ ++ int err; ++ enum ovl_path_type type; ++ ++ type = ovl_path_type(dentry); ++ if (type != OVL_PATH_UPPER) { ++ err = ovl_check_empty_and_clear(dentry, type); ++ if (err) ++ return err; ++ } ++ ++ return ovl_do_remove(dentry, true); ++} ++ ++static int ovl_link(struct dentry *old, struct inode *newdir, ++ struct dentry *new) ++{ ++ int err; ++ struct dentry *olddentry; ++ struct dentry *newdentry; ++ struct dentry *upperdir; ++ struct inode *newinode; ++ ++ err = ovl_copy_up(old); ++ if (err) ++ goto out; ++ ++ err = ovl_copy_up(new->d_parent); ++ if (err) ++ goto out; ++ ++ upperdir = ovl_dentry_upper(new->d_parent); ++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT); ++ newdentry = ovl_lookup_create(upperdir, new); ++ err = PTR_ERR(newdentry); ++ if (IS_ERR(newdentry)) ++ goto out_unlock; ++ ++ olddentry = ovl_dentry_upper(old); ++ err = vfs_link(olddentry, upperdir->d_inode, newdentry); ++ if (!err) { ++ if (WARN_ON(!newdentry->d_inode)) { ++ dput(newdentry); ++ err = -ENOENT; ++ goto out_unlock; ++ } ++ newinode = ovl_new_inode(old->d_sb, newdentry->d_inode->i_mode, ++ new->d_fsdata); ++ if (!newinode) ++ goto link_fail; ++ ovl_copyattr(upperdir->d_inode, newinode); ++ ++ ovl_dentry_version_inc(new->d_parent); ++ ovl_dentry_update(new, newdentry); ++ ++ d_instantiate(new, newinode); ++ } else { ++link_fail: ++ if (ovl_dentry_is_opaque(new)) ++ ovl_whiteout(upperdir, new); ++ dput(newdentry); ++ } ++out_unlock: ++ mutex_unlock(&upperdir->d_inode->i_mutex); ++out: ++ return err; ++ ++} ++ ++static int ovl_rename(struct inode *olddir, struct dentry *old, ++ struct inode *newdir, struct dentry *new) ++{ ++ int err; ++ enum ovl_path_type old_type; ++ enum ovl_path_type new_type; ++ struct dentry *old_upperdir; ++ struct dentry *new_upperdir; ++ struct dentry *olddentry; ++ struct dentry *newdentry; ++ struct dentry *trap; ++ bool old_opaque; ++ bool new_opaque; ++ bool new_create = false; ++ bool is_dir = S_ISDIR(old->d_inode->i_mode); ++ ++ /* Don't copy up directory trees */ ++ old_type = ovl_path_type(old); ++ if (old_type != OVL_PATH_UPPER && is_dir) ++ return -EXDEV; ++ ++ if (new->d_inode) { ++ new_type = ovl_path_type(new); ++ ++ if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) { ++ if (ovl_dentry_lower(old)->d_inode == ++ ovl_dentry_lower(new)->d_inode) ++ return 0; ++ } ++ if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) { ++ if (ovl_dentry_upper(old)->d_inode == ++ ovl_dentry_upper(new)->d_inode) ++ return 0; ++ } ++ ++ if (new_type != OVL_PATH_UPPER && ++ S_ISDIR(new->d_inode->i_mode)) { ++ err = ovl_check_empty_and_clear(new, new_type); ++ if (err) ++ return err; ++ } ++ } else { ++ new_type = OVL_PATH_UPPER; ++ } ++ ++ err = ovl_copy_up(old); ++ if (err) ++ return err; ++ ++ err = ovl_copy_up(new->d_parent); ++ if (err) ++ return err; ++ ++ old_upperdir = ovl_dentry_upper(old->d_parent); ++ new_upperdir = ovl_dentry_upper(new->d_parent); ++ ++ trap = lock_rename(new_upperdir, old_upperdir); ++ ++ olddentry = ovl_dentry_upper(old); ++ newdentry = ovl_dentry_upper(new); ++ if (newdentry) { ++ dget(newdentry); ++ } else { ++ new_create = true; ++ newdentry = ovl_lookup_create(new_upperdir, new); ++ err = PTR_ERR(newdentry); ++ if 
(IS_ERR(newdentry)) ++ goto out_unlock; ++ } ++ ++ err = -ESTALE; ++ if (olddentry->d_parent != old_upperdir) ++ goto out_dput; ++ if (newdentry->d_parent != new_upperdir) ++ goto out_dput; ++ if (olddentry == trap) ++ goto out_dput; ++ if (newdentry == trap) ++ goto out_dput; ++ ++ old_opaque = ovl_dentry_is_opaque(old); ++ new_opaque = ovl_dentry_is_opaque(new) || new_type != OVL_PATH_UPPER; ++ ++ if (is_dir && !old_opaque && new_opaque) { ++ err = ovl_set_opaque(olddentry); ++ if (err) ++ goto out_dput; ++ } ++ ++ err = vfs_rename(old_upperdir->d_inode, olddentry, ++ new_upperdir->d_inode, newdentry); ++ ++ if (err) { ++ if (new_create && ovl_dentry_is_opaque(new)) ++ ovl_whiteout(new_upperdir, new); ++ if (is_dir && !old_opaque && new_opaque) ++ ovl_remove_opaque(olddentry); ++ goto out_dput; ++ } ++ ++ if (old_type != OVL_PATH_UPPER || old_opaque) ++ err = ovl_whiteout(old_upperdir, old); ++ if (is_dir && old_opaque && !new_opaque) ++ ovl_remove_opaque(olddentry); ++ ++ if (old_opaque != new_opaque) ++ ovl_dentry_set_opaque(old, new_opaque); ++ ++ ovl_dentry_version_inc(old->d_parent); ++ ovl_dentry_version_inc(new->d_parent); ++ ++out_dput: ++ dput(newdentry); ++out_unlock: ++ unlock_rename(new_upperdir, old_upperdir); ++ return err; ++} ++ ++const struct inode_operations ovl_dir_inode_operations = { ++ .lookup = ovl_lookup, ++ .mkdir = ovl_mkdir, ++ .symlink = ovl_symlink, ++ .unlink = ovl_unlink, ++ .rmdir = ovl_rmdir, ++ .rename = ovl_rename, ++ .link = ovl_link, ++ .setattr = ovl_setattr, ++ .create = ovl_create, ++ .mknod = ovl_mknod, ++ .permission = ovl_permission, ++ .getattr = ovl_dir_getattr, ++ .setxattr = ovl_setxattr, ++ .getxattr = ovl_getxattr, ++ .listxattr = ovl_listxattr, ++ .removexattr = ovl_removexattr, ++}; +--- /dev/null ++++ b/fs/overlayfs/inode.c +@@ -0,0 +1,372 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ */ ++ ++#include <linux/fs.h> ++#include <linux/slab.h> ++#include <linux/xattr.h> ++#include "overlayfs.h" ++ ++int ovl_setattr(struct dentry *dentry, struct iattr *attr) ++{ ++ struct dentry *upperdentry; ++ int err; ++ ++ if ((attr->ia_valid & ATTR_SIZE) && !ovl_dentry_upper(dentry)) ++ err = ovl_copy_up_truncate(dentry, attr->ia_size); ++ else ++ err = ovl_copy_up(dentry); ++ if (err) ++ return err; ++ ++ upperdentry = ovl_dentry_upper(dentry); ++ ++ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) ++ attr->ia_valid &= ~ATTR_MODE; ++ ++ mutex_lock(&upperdentry->d_inode->i_mutex); ++ err = notify_change(upperdentry, attr); ++ if (!err) ++ ovl_copyattr(upperdentry->d_inode, dentry->d_inode); ++ mutex_unlock(&upperdentry->d_inode->i_mutex); ++ ++ return err; ++} ++ ++static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry, ++ struct kstat *stat) ++{ ++ struct path realpath; ++ ++ ovl_path_real(dentry, &realpath); ++ return vfs_getattr(realpath.mnt, realpath.dentry, stat); ++} ++ ++int ovl_permission(struct inode *inode, int mask) ++{ ++ struct ovl_entry *oe; ++ struct dentry *alias = NULL; ++ struct inode *realinode; ++ struct dentry *realdentry; ++ bool is_upper; ++ int err; ++ ++ if (S_ISDIR(inode->i_mode)) { ++ oe = inode->i_private; ++ } else if (mask & MAY_NOT_BLOCK) { ++ return -ECHILD; ++ } else { ++ /* ++ * For non-directories find an alias and get the info ++ * from there. 
++ */ ++ alias = d_find_any_alias(inode); ++ if (WARN_ON(!alias)) ++ return -ENOENT; ++ ++ oe = alias->d_fsdata; ++ } ++ ++ realdentry = ovl_entry_real(oe, &is_upper); ++ ++ /* Careful in RCU walk mode */ ++ realinode = ACCESS_ONCE(realdentry->d_inode); ++ if (!realinode) { ++ WARN_ON(!(mask & MAY_NOT_BLOCK)); ++ err = -ENOENT; ++ goto out_dput; ++ } ++ ++ if (mask & MAY_WRITE) { ++ umode_t mode = realinode->i_mode; ++ ++ /* ++ * Writes will always be redirected to upper layer, so ++ * ignore lower layer being read-only. ++ * ++ * If the overlay itself is read-only then proceed ++ * with the permission check, don't return EROFS. ++ * This will only happen if this is the lower layer of ++ * another overlayfs. ++ * ++ * If upper fs becomes read-only after the overlay was ++ * constructed return EROFS to prevent modification of ++ * upper layer. ++ */ ++ err = -EROFS; ++ if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) && ++ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) ++ goto out_dput; ++ } ++ ++ err = __inode_permission(realinode, mask); ++out_dput: ++ dput(alias); ++ return err; ++} ++ ++ ++struct ovl_link_data { ++ struct dentry *realdentry; ++ void *cookie; ++}; ++ ++static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd) ++{ ++ void *ret; ++ struct dentry *realdentry; ++ struct inode *realinode; ++ ++ realdentry = ovl_dentry_real(dentry); ++ realinode = realdentry->d_inode; ++ ++ if (WARN_ON(!realinode->i_op->follow_link)) ++ return ERR_PTR(-EPERM); ++ ++ ret = realinode->i_op->follow_link(realdentry, nd); ++ if (IS_ERR(ret)) ++ return ret; ++ ++ if (realinode->i_op->put_link) { ++ struct ovl_link_data *data; ++ ++ data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL); ++ if (!data) { ++ realinode->i_op->put_link(realdentry, nd, ret); ++ return ERR_PTR(-ENOMEM); ++ } ++ data->realdentry = realdentry; ++ data->cookie = ret; ++ ++ return data; ++ } else { ++ return NULL; ++ } ++} ++ ++static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c) ++{ ++ struct inode *realinode; ++ struct ovl_link_data *data = c; ++ ++ if (!data) ++ return; ++ ++ realinode = data->realdentry->d_inode; ++ realinode->i_op->put_link(data->realdentry, nd, data->cookie); ++ kfree(data); ++} ++ ++static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz) ++{ ++ struct path realpath; ++ struct inode *realinode; ++ ++ ovl_path_real(dentry, &realpath); ++ realinode = realpath.dentry->d_inode; ++ ++ if (!realinode->i_op->readlink) ++ return -EINVAL; ++ ++ touch_atime(&realpath); ++ ++ return realinode->i_op->readlink(realpath.dentry, buf, bufsiz); ++} ++ ++ ++static bool ovl_is_private_xattr(const char *name) ++{ ++ return strncmp(name, "trusted.overlay.", 14) == 0; ++} ++ ++int ovl_setxattr(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags) ++{ ++ int err; ++ struct dentry *upperdentry; ++ ++ if (ovl_is_private_xattr(name)) ++ return -EPERM; ++ ++ err = ovl_copy_up(dentry); ++ if (err) ++ return err; ++ ++ upperdentry = ovl_dentry_upper(dentry); ++ return vfs_setxattr(upperdentry, name, value, size, flags); ++} ++ ++ssize_t ovl_getxattr(struct dentry *dentry, const char *name, ++ void *value, size_t size) ++{ ++ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && ++ ovl_is_private_xattr(name)) ++ return -ENODATA; ++ ++ return vfs_getxattr(ovl_dentry_real(dentry), name, value, size); ++} ++ ++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) ++{ ++ ssize_t res; ++ int off; ++ ++ res = 
vfs_listxattr(ovl_dentry_real(dentry), list, size); ++ if (res <= 0 || size == 0) ++ return res; ++ ++ if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE) ++ return res; ++ ++ /* filter out private xattrs */ ++ for (off = 0; off < res;) { ++ char *s = list + off; ++ size_t slen = strlen(s) + 1; ++ ++ BUG_ON(off + slen > res); ++ ++ if (ovl_is_private_xattr(s)) { ++ res -= slen; ++ memmove(s, s + slen, res - off); ++ } else { ++ off += slen; ++ } ++ } ++ ++ return res; ++} ++ ++int ovl_removexattr(struct dentry *dentry, const char *name) ++{ ++ int err; ++ struct path realpath; ++ enum ovl_path_type type; ++ ++ if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && ++ ovl_is_private_xattr(name)) ++ return -ENODATA; ++ ++ type = ovl_path_real(dentry, &realpath); ++ if (type == OVL_PATH_LOWER) { ++ err = vfs_getxattr(realpath.dentry, name, NULL, 0); ++ if (err < 0) ++ return err; ++ ++ err = ovl_copy_up(dentry); ++ if (err) ++ return err; ++ ++ ovl_path_upper(dentry, &realpath); ++ } ++ ++ return vfs_removexattr(realpath.dentry, name); ++} ++ ++static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, ++ struct dentry *realdentry) ++{ ++ if (type != OVL_PATH_LOWER) ++ return false; ++ ++ if (special_file(realdentry->d_inode->i_mode)) ++ return false; ++ ++ if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC)) ++ return false; ++ ++ return true; ++} ++ ++static int ovl_dentry_open(struct dentry *dentry, struct file *file, ++ const struct cred *cred) ++{ ++ int err; ++ struct path realpath; ++ enum ovl_path_type type; ++ ++ type = ovl_path_real(dentry, &realpath); ++ if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) { ++ if (file->f_flags & O_TRUNC) ++ err = ovl_copy_up_truncate(dentry, 0); ++ else ++ err = ovl_copy_up(dentry); ++ if (err) ++ return err; ++ ++ ovl_path_upper(dentry, &realpath); ++ } ++ ++ return vfs_open(&realpath, file, cred); ++} ++ ++static const struct inode_operations ovl_file_inode_operations = { ++ .setattr = ovl_setattr, ++ .permission = ovl_permission, ++ .getattr = ovl_getattr, ++ .setxattr = ovl_setxattr, ++ .getxattr = ovl_getxattr, ++ .listxattr = ovl_listxattr, ++ .removexattr = ovl_removexattr, ++ .dentry_open = ovl_dentry_open, ++}; ++ ++static const struct inode_operations ovl_symlink_inode_operations = { ++ .setattr = ovl_setattr, ++ .follow_link = ovl_follow_link, ++ .put_link = ovl_put_link, ++ .readlink = ovl_readlink, ++ .getattr = ovl_getattr, ++ .setxattr = ovl_setxattr, ++ .getxattr = ovl_getxattr, ++ .listxattr = ovl_listxattr, ++ .removexattr = ovl_removexattr, ++}; ++ ++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, ++ struct ovl_entry *oe) ++{ ++ struct inode *inode; ++ ++ inode = new_inode(sb); ++ if (!inode) ++ return NULL; ++ ++ mode &= S_IFMT; ++ ++ inode->i_ino = get_next_ino(); ++ inode->i_mode = mode; ++ inode->i_flags |= S_NOATIME | S_NOCMTIME; ++ ++ switch (mode) { ++ case S_IFDIR: ++ inode->i_private = oe; ++ inode->i_op = &ovl_dir_inode_operations; ++ inode->i_fop = &ovl_dir_operations; ++ break; ++ ++ case S_IFLNK: ++ inode->i_op = &ovl_symlink_inode_operations; ++ break; ++ ++ case S_IFREG: ++ case S_IFSOCK: ++ case S_IFBLK: ++ case S_IFCHR: ++ case S_IFIFO: ++ inode->i_op = &ovl_file_inode_operations; ++ break; ++ ++ default: ++ WARN(1, "illegal file type: %i\n", mode); ++ iput(inode); ++ inode = NULL; ++ } ++ ++ return inode; ++ ++} +--- /dev/null ++++ b/fs/overlayfs/Kconfig +@@ -0,0 +1,4 @@ ++config OVERLAYFS_FS ++ tristate "Overlay filesystem support" ++ help ++ Add support 
for overlay filesystem. +--- /dev/null ++++ b/fs/overlayfs/Makefile +@@ -0,0 +1,7 @@ ++# ++# Makefile for the overlay filesystem. ++# ++ ++obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o ++ ++overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o +--- /dev/null ++++ b/fs/overlayfs/overlayfs.h +@@ -0,0 +1,70 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ */ ++ ++struct ovl_entry; ++ ++enum ovl_path_type { ++ OVL_PATH_UPPER, ++ OVL_PATH_MERGE, ++ OVL_PATH_LOWER, ++}; ++ ++extern const char *ovl_opaque_xattr; ++extern const char *ovl_whiteout_xattr; ++extern const struct dentry_operations ovl_dentry_operations; ++ ++enum ovl_path_type ovl_path_type(struct dentry *dentry); ++u64 ovl_dentry_version_get(struct dentry *dentry); ++void ovl_dentry_version_inc(struct dentry *dentry); ++void ovl_path_upper(struct dentry *dentry, struct path *path); ++void ovl_path_lower(struct dentry *dentry, struct path *path); ++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); ++struct dentry *ovl_dentry_upper(struct dentry *dentry); ++struct dentry *ovl_dentry_lower(struct dentry *dentry); ++struct dentry *ovl_dentry_real(struct dentry *dentry); ++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper); ++bool ovl_dentry_is_opaque(struct dentry *dentry); ++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque); ++bool ovl_is_whiteout(struct dentry *dentry); ++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); ++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, ++ unsigned int flags); ++struct file *ovl_path_open(struct path *path, int flags); ++ ++struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry, ++ struct kstat *stat, const char *link); ++ ++/* readdir.c */ ++extern const struct file_operations ovl_dir_operations; ++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type); ++ ++/* inode.c */ ++int ovl_setattr(struct dentry *dentry, struct iattr *attr); ++int ovl_permission(struct inode *inode, int mask); ++int ovl_setxattr(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags); ++ssize_t ovl_getxattr(struct dentry *dentry, const char *name, ++ void *value, size_t size); ++ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); ++int ovl_removexattr(struct dentry *dentry, const char *name); ++ ++struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, ++ struct ovl_entry *oe); ++static inline void ovl_copyattr(struct inode *from, struct inode *to) ++{ ++ to->i_uid = from->i_uid; ++ to->i_gid = from->i_gid; ++} ++ ++/* dir.c */ ++extern const struct inode_operations ovl_dir_inode_operations; ++ ++/* copy_up.c */ ++int ovl_copy_up(struct dentry *dentry); ++int ovl_copy_up_truncate(struct dentry *dentry, loff_t size); +--- /dev/null ++++ b/fs/overlayfs/readdir.c +@@ -0,0 +1,566 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. 
++ */ ++ ++#include <linux/fs.h> ++#include <linux/slab.h> ++#include <linux/namei.h> ++#include <linux/file.h> ++#include <linux/xattr.h> ++#include <linux/rbtree.h> ++#include <linux/security.h> ++#include <linux/cred.h> ++#include "overlayfs.h" ++ ++struct ovl_cache_entry { ++ const char *name; ++ unsigned int len; ++ unsigned int type; ++ u64 ino; ++ bool is_whiteout; ++ struct list_head l_node; ++ struct rb_node node; ++}; ++ ++struct ovl_readdir_data { ++ struct rb_root *root; ++ struct list_head *list; ++ struct list_head *middle; ++ struct dentry *dir; ++ int count; ++ int err; ++}; ++ ++struct ovl_dir_file { ++ bool is_real; ++ bool is_cached; ++ struct list_head cursor; ++ u64 cache_version; ++ struct list_head cache; ++ struct file *realfile; ++}; ++ ++static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n) ++{ ++ return container_of(n, struct ovl_cache_entry, node); ++} ++ ++static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, ++ const char *name, int len) ++{ ++ struct rb_node *node = root->rb_node; ++ int cmp; ++ ++ while (node) { ++ struct ovl_cache_entry *p = ovl_cache_entry_from_node(node); ++ ++ cmp = strncmp(name, p->name, len); ++ if (cmp > 0) ++ node = p->node.rb_right; ++ else if (cmp < 0 || len < p->len) ++ node = p->node.rb_left; ++ else ++ return p; ++ } ++ ++ return NULL; ++} ++ ++static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len, ++ u64 ino, unsigned int d_type) ++{ ++ struct ovl_cache_entry *p; ++ ++ p = kmalloc(sizeof(*p) + len + 1, GFP_KERNEL); ++ if (p) { ++ char *name_copy = (char *) (p + 1); ++ memcpy(name_copy, name, len); ++ name_copy[len] = '\0'; ++ p->name = name_copy; ++ p->len = len; ++ p->type = d_type; ++ p->ino = ino; ++ p->is_whiteout = false; ++ } ++ ++ return p; ++} ++ ++static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, ++ const char *name, int len, u64 ino, ++ unsigned int d_type) ++{ ++ struct rb_node **newp = &rdd->root->rb_node; ++ struct rb_node *parent = NULL; ++ struct ovl_cache_entry *p; ++ ++ while (*newp) { ++ int cmp; ++ struct ovl_cache_entry *tmp; ++ ++ parent = *newp; ++ tmp = ovl_cache_entry_from_node(*newp); ++ cmp = strncmp(name, tmp->name, len); ++ if (cmp > 0) ++ newp = &tmp->node.rb_right; ++ else if (cmp < 0 || len < tmp->len) ++ newp = &tmp->node.rb_left; ++ else ++ return 0; ++ } ++ ++ p = ovl_cache_entry_new(name, len, ino, d_type); ++ if (p == NULL) ++ return -ENOMEM; ++ ++ list_add_tail(&p->l_node, rdd->list); ++ rb_link_node(&p->node, parent, newp); ++ rb_insert_color(&p->node, rdd->root); ++ ++ return 0; ++} ++ ++static int ovl_fill_lower(void *buf, const char *name, int namelen, ++ loff_t offset, u64 ino, unsigned int d_type) ++{ ++ struct ovl_readdir_data *rdd = buf; ++ struct ovl_cache_entry *p; ++ ++ rdd->count++; ++ p = ovl_cache_entry_find(rdd->root, name, namelen); ++ if (p) { ++ list_move_tail(&p->l_node, rdd->middle); ++ } else { ++ p = ovl_cache_entry_new(name, namelen, ino, d_type); ++ if (p == NULL) ++ rdd->err = -ENOMEM; ++ else ++ list_add_tail(&p->l_node, rdd->middle); ++ } ++ ++ return rdd->err; ++} ++ ++static void ovl_cache_free(struct list_head *list) ++{ ++ struct ovl_cache_entry *p; ++ struct ovl_cache_entry *n; ++ ++ list_for_each_entry_safe(p, n, list, l_node) ++ kfree(p); ++ ++ INIT_LIST_HEAD(list); ++} ++ ++static int ovl_fill_upper(void *buf, const char *name, int namelen, ++ loff_t offset, u64 ino, unsigned int d_type) ++{ ++ struct ovl_readdir_data *rdd = buf; ++ ++ rdd->count++; ++ return 
ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type); ++} ++ ++static inline int ovl_dir_read(struct path *realpath, ++ struct ovl_readdir_data *rdd, filldir_t filler) ++{ ++ struct file *realfile; ++ int err; ++ ++ realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY); ++ if (IS_ERR(realfile)) ++ return PTR_ERR(realfile); ++ ++ do { ++ rdd->count = 0; ++ rdd->err = 0; ++ err = vfs_readdir(realfile, filler, rdd); ++ if (err >= 0) ++ err = rdd->err; ++ } while (!err && rdd->count); ++ fput(realfile); ++ ++ return 0; ++} ++ ++static void ovl_dir_reset(struct file *file) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ enum ovl_path_type type = ovl_path_type(file->f_path.dentry); ++ ++ if (ovl_dentry_version_get(file->f_path.dentry) != od->cache_version) { ++ list_del_init(&od->cursor); ++ ovl_cache_free(&od->cache); ++ od->is_cached = false; ++ } ++ WARN_ON(!od->is_real && type != OVL_PATH_MERGE); ++ if (od->is_real && type == OVL_PATH_MERGE) { ++ fput(od->realfile); ++ od->realfile = NULL; ++ od->is_real = false; ++ } ++} ++ ++static int ovl_dir_mark_whiteouts(struct ovl_readdir_data *rdd) ++{ ++ struct ovl_cache_entry *p; ++ struct dentry *dentry; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ override_cred = prepare_creds(); ++ if (!override_cred) { ++ ovl_cache_free(rdd->list); ++ return -ENOMEM; ++ } ++ ++ /* ++ * CAP_SYS_ADMIN for getxattr ++ * CAP_DAC_OVERRIDE for lookup ++ */ ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ old_cred = override_creds(override_cred); ++ ++ mutex_lock(&rdd->dir->d_inode->i_mutex); ++ list_for_each_entry(p, rdd->list, l_node) { ++ if (p->type != DT_LNK) ++ continue; ++ ++ dentry = lookup_one_len(p->name, rdd->dir, p->len); ++ if (IS_ERR(dentry)) ++ continue; ++ ++ p->is_whiteout = ovl_is_whiteout(dentry); ++ dput(dentry); ++ } ++ mutex_unlock(&rdd->dir->d_inode->i_mutex); ++ ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ ++ return 0; ++} ++ ++static inline int ovl_dir_read_merged(struct path *upperpath, ++ struct path *lowerpath, ++ struct ovl_readdir_data *rdd) ++{ ++ int err; ++ struct rb_root root = RB_ROOT; ++ struct list_head middle; ++ ++ rdd->root = &root; ++ if (upperpath->dentry) { ++ rdd->dir = upperpath->dentry; ++ err = ovl_dir_read(upperpath, rdd, ovl_fill_upper); ++ if (err) ++ goto out; ++ ++ err = ovl_dir_mark_whiteouts(rdd); ++ if (err) ++ goto out; ++ } ++ /* ++ * Insert lowerpath entries before upperpath ones, this allows ++ * offsets to be reasonably constant ++ */ ++ list_add(&middle, rdd->list); ++ rdd->middle = &middle; ++ err = ovl_dir_read(lowerpath, rdd, ovl_fill_lower); ++ list_del(&middle); ++out: ++ rdd->root = NULL; ++ ++ return err; ++} ++ ++static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) ++{ ++ struct list_head *l; ++ loff_t off; ++ ++ l = od->cache.next; ++ for (off = 0; off < pos; off++) { ++ if (l == &od->cache) ++ break; ++ l = l->next; ++ } ++ list_move_tail(&od->cursor, l); ++} ++ ++static int ovl_readdir(struct file *file, void *buf, filldir_t filler) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ int res; ++ ++ if (!file->f_pos) ++ ovl_dir_reset(file); ++ ++ if (od->is_real) { ++ res = vfs_readdir(od->realfile, filler, buf); ++ file->f_pos = od->realfile->f_pos; ++ ++ return res; ++ } ++ ++ if (!od->is_cached) { ++ struct path lowerpath; ++ struct path upperpath; ++ struct ovl_readdir_data rdd = { .list = &od->cache }; ++ ++ ovl_path_lower(file->f_path.dentry, 
&lowerpath); ++ ovl_path_upper(file->f_path.dentry, &upperpath); ++ ++ res = ovl_dir_read_merged(&upperpath, &lowerpath, &rdd); ++ if (res) { ++ ovl_cache_free(rdd.list); ++ return res; ++ } ++ ++ od->cache_version = ovl_dentry_version_get(file->f_path.dentry); ++ od->is_cached = true; ++ ++ ovl_seek_cursor(od, file->f_pos); ++ } ++ ++ while (od->cursor.next != &od->cache) { ++ int over; ++ loff_t off; ++ struct ovl_cache_entry *p; ++ ++ p = list_entry(od->cursor.next, struct ovl_cache_entry, l_node); ++ off = file->f_pos; ++ if (!p->is_whiteout) { ++ over = filler(buf, p->name, p->len, off, p->ino, ++ p->type); ++ if (over) ++ break; ++ } ++ file->f_pos++; ++ list_move(&od->cursor, &p->l_node); ++ } ++ ++ return 0; ++} ++ ++static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin) ++{ ++ loff_t res; ++ struct ovl_dir_file *od = file->private_data; ++ ++ mutex_lock(&file->f_dentry->d_inode->i_mutex); ++ if (!file->f_pos) ++ ovl_dir_reset(file); ++ ++ if (od->is_real) { ++ res = vfs_llseek(od->realfile, offset, origin); ++ file->f_pos = od->realfile->f_pos; ++ } else { ++ res = -EINVAL; ++ ++ switch (origin) { ++ case SEEK_CUR: ++ offset += file->f_pos; ++ break; ++ case SEEK_SET: ++ break; ++ default: ++ goto out_unlock; ++ } ++ if (offset < 0) ++ goto out_unlock; ++ ++ if (offset != file->f_pos) { ++ file->f_pos = offset; ++ if (od->is_cached) ++ ovl_seek_cursor(od, offset); ++ } ++ res = offset; ++ } ++out_unlock: ++ mutex_unlock(&file->f_dentry->d_inode->i_mutex); ++ ++ return res; ++} ++ ++static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, ++ int datasync) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ ++ /* May need to reopen directory if it got copied up */ ++ if (!od->realfile) { ++ struct path upperpath; ++ ++ ovl_path_upper(file->f_path.dentry, &upperpath); ++ od->realfile = ovl_path_open(&upperpath, O_RDONLY); ++ if (IS_ERR(od->realfile)) ++ return PTR_ERR(od->realfile); ++ } ++ ++ return vfs_fsync_range(od->realfile, start, end, datasync); ++} ++ ++static int ovl_dir_release(struct inode *inode, struct file *file) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ ++ list_del(&od->cursor); ++ ovl_cache_free(&od->cache); ++ if (od->realfile) ++ fput(od->realfile); ++ kfree(od); ++ ++ return 0; ++} ++ ++static int ovl_dir_open(struct inode *inode, struct file *file) ++{ ++ struct path realpath; ++ struct file *realfile; ++ struct ovl_dir_file *od; ++ enum ovl_path_type type; ++ ++ od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL); ++ if (!od) ++ return -ENOMEM; ++ ++ type = ovl_path_real(file->f_path.dentry, &realpath); ++ realfile = ovl_path_open(&realpath, file->f_flags); ++ if (IS_ERR(realfile)) { ++ kfree(od); ++ return PTR_ERR(realfile); ++ } ++ INIT_LIST_HEAD(&od->cache); ++ INIT_LIST_HEAD(&od->cursor); ++ od->is_cached = false; ++ od->realfile = realfile; ++ od->is_real = (type != OVL_PATH_MERGE); ++ file->private_data = od; ++ ++ return 0; ++} ++ ++const struct file_operations ovl_dir_operations = { ++ .read = generic_read_dir, ++ .open = ovl_dir_open, ++ .readdir = ovl_readdir, ++ .llseek = ovl_dir_llseek, ++ .fsync = ovl_dir_fsync, ++ .release = ovl_dir_release, ++}; ++ ++static int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list) ++{ ++ int err; ++ struct path lowerpath; ++ struct path upperpath; ++ struct ovl_cache_entry *p; ++ struct ovl_readdir_data rdd = { .list = list }; ++ ++ ovl_path_upper(dentry, &upperpath); ++ ovl_path_lower(dentry, &lowerpath); ++ ++ err = ovl_dir_read_merged(&upperpath, 
&lowerpath, &rdd); ++ if (err) ++ return err; ++ ++ err = 0; ++ ++ list_for_each_entry(p, list, l_node) { ++ if (p->is_whiteout) ++ continue; ++ ++ if (p->name[0] == '.') { ++ if (p->len == 1) ++ continue; ++ if (p->len == 2 && p->name[1] == '.') ++ continue; ++ } ++ err = -ENOTEMPTY; ++ break; ++ } ++ ++ return err; ++} ++ ++static int ovl_remove_whiteouts(struct dentry *dir, struct list_head *list) ++{ ++ struct path upperpath; ++ struct dentry *upperdir; ++ struct ovl_cache_entry *p; ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ int err; ++ ++ ovl_path_upper(dir, &upperpath); ++ upperdir = upperpath.dentry; ++ ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ return -ENOMEM; ++ ++ /* ++ * CAP_DAC_OVERRIDE for lookup and unlink ++ * CAP_SYS_ADMIN for setxattr of "trusted" namespace ++ * CAP_FOWNER for unlink in sticky directory ++ */ ++ cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); ++ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ cap_raise(override_cred->cap_effective, CAP_FOWNER); ++ old_cred = override_creds(override_cred); ++ ++ err = vfs_setxattr(upperdir, ovl_opaque_xattr, "y", 1, 0); ++ if (err) ++ goto out_revert_creds; ++ ++ mutex_lock_nested(&upperdir->d_inode->i_mutex, I_MUTEX_PARENT); ++ list_for_each_entry(p, list, l_node) { ++ struct dentry *dentry; ++ int ret; ++ ++ if (!p->is_whiteout) ++ continue; ++ ++ dentry = lookup_one_len(p->name, upperdir, p->len); ++ if (IS_ERR(dentry)) { ++ printk(KERN_WARNING ++ "overlayfs: failed to lookup whiteout %.*s: %li\n", ++ p->len, p->name, PTR_ERR(dentry)); ++ continue; ++ } ++ ret = vfs_unlink(upperdir->d_inode, dentry); ++ dput(dentry); ++ if (ret) ++ printk(KERN_WARNING ++ "overlayfs: failed to unlink whiteout %.*s: %i\n", ++ p->len, p->name, ret); ++ } ++ mutex_unlock(&upperdir->d_inode->i_mutex); ++ ++out_revert_creds: ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ ++ return err; ++} ++ ++int ovl_check_empty_and_clear(struct dentry *dentry, enum ovl_path_type type) ++{ ++ int err; ++ LIST_HEAD(list); ++ ++ err = ovl_check_empty_dir(dentry, &list); ++ if (!err && type == OVL_PATH_MERGE) ++ err = ovl_remove_whiteouts(dentry, &list); ++ ++ ovl_cache_free(&list); ++ ++ return err; ++} +--- /dev/null ++++ b/fs/overlayfs/super.c +@@ -0,0 +1,685 @@ ++/* ++ * ++ * Copyright (C) 2011 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. 
++ */ ++ ++#include <linux/fs.h> ++#include <linux/namei.h> ++#include <linux/xattr.h> ++#include <linux/security.h> ++#include <linux/mount.h> ++#include <linux/slab.h> ++#include <linux/parser.h> ++#include <linux/module.h> ++#include <linux/cred.h> ++#include <linux/sched.h> ++#include <linux/statfs.h> ++#include <linux/seq_file.h> ++#include "overlayfs.h" ++ ++MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); ++MODULE_DESCRIPTION("Overlay filesystem"); ++MODULE_LICENSE("GPL"); ++ ++#define OVERLAYFS_SUPER_MAGIC 0x794c764f ++ ++struct ovl_config { ++ char *lowerdir; ++ char *upperdir; ++}; ++ ++/* private information held for overlayfs's superblock */ ++struct ovl_fs { ++ struct vfsmount *upper_mnt; ++ struct vfsmount *lower_mnt; ++ long lower_namelen; ++ /* pathnames of lower and upper dirs, for show_options */ ++ struct ovl_config config; ++}; ++ ++/* private information held for every overlayfs dentry */ ++struct ovl_entry { ++ /* ++ * Keep "double reference" on upper dentries, so that ++ * d_delete() doesn't think it's OK to reset d_inode to NULL. ++ */ ++ struct dentry *__upperdentry; ++ struct dentry *lowerdentry; ++ union { ++ struct { ++ u64 version; ++ bool opaque; ++ }; ++ struct rcu_head rcu; ++ }; ++}; ++ ++const char *ovl_whiteout_xattr = "trusted.overlay.whiteout"; ++const char *ovl_opaque_xattr = "trusted.overlay.opaque"; ++ ++ ++enum ovl_path_type ovl_path_type(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ if (oe->__upperdentry) { ++ if (oe->lowerdentry && S_ISDIR(dentry->d_inode->i_mode)) ++ return OVL_PATH_MERGE; ++ else ++ return OVL_PATH_UPPER; ++ } else { ++ return OVL_PATH_LOWER; ++ } ++} ++ ++static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) ++{ ++ struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry); ++ smp_read_barrier_depends(); ++ return upperdentry; ++} ++ ++void ovl_path_upper(struct dentry *dentry, struct path *path) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ path->mnt = ofs->upper_mnt; ++ path->dentry = ovl_upperdentry_dereference(oe); ++} ++ ++void ovl_path_lower(struct dentry *dentry, struct path *path) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ path->mnt = ofs->lower_mnt; ++ path->dentry = oe->lowerdentry; ++} ++ ++enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) ++{ ++ ++ enum ovl_path_type type = ovl_path_type(dentry); ++ ++ if (type == OVL_PATH_LOWER) ++ ovl_path_lower(dentry, path); ++ else ++ ovl_path_upper(dentry, path); ++ ++ return type; ++} ++ ++struct dentry *ovl_dentry_upper(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ return ovl_upperdentry_dereference(oe); ++} ++ ++struct dentry *ovl_dentry_lower(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ return oe->lowerdentry; ++} ++ ++struct dentry *ovl_dentry_real(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ struct dentry *realdentry; ++ ++ realdentry = ovl_upperdentry_dereference(oe); ++ if (!realdentry) ++ realdentry = oe->lowerdentry; ++ ++ return realdentry; ++} ++ ++struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper) ++{ ++ struct dentry *realdentry; ++ ++ realdentry = ovl_upperdentry_dereference(oe); ++ if (realdentry) { ++ *is_upper = true; ++ } else { ++ realdentry = oe->lowerdentry; ++ *is_upper = false; ++ } ++ return realdentry; ++} ++ ++bool ovl_dentry_is_opaque(struct dentry 
*dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ return oe->opaque; ++} ++ ++void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ oe->opaque = opaque; ++} ++ ++void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex)); ++ WARN_ON(oe->__upperdentry); ++ BUG_ON(!upperdentry->d_inode); ++ smp_wmb(); ++ oe->__upperdentry = dget(upperdentry); ++} ++ ++void ovl_dentry_version_inc(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); ++ oe->version++; ++} ++ ++u64 ovl_dentry_version_get(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); ++ return oe->version; ++} ++ ++bool ovl_is_whiteout(struct dentry *dentry) ++{ ++ int res; ++ char val; ++ ++ if (!dentry) ++ return false; ++ if (!dentry->d_inode) ++ return false; ++ if (!S_ISLNK(dentry->d_inode->i_mode)) ++ return false; ++ ++ res = vfs_getxattr(dentry, ovl_whiteout_xattr, &val, 1); ++ if (res == 1 && val == 'y') ++ return true; ++ ++ return false; ++} ++ ++static bool ovl_is_opaquedir(struct dentry *dentry) ++{ ++ int res; ++ char val; ++ ++ if (!S_ISDIR(dentry->d_inode->i_mode)) ++ return false; ++ ++ res = vfs_getxattr(dentry, ovl_opaque_xattr, &val, 1); ++ if (res == 1 && val == 'y') ++ return true; ++ ++ return false; ++} ++ ++static void ovl_entry_free(struct rcu_head *head) ++{ ++ struct ovl_entry *oe = container_of(head, struct ovl_entry, rcu); ++ kfree(oe); ++} ++ ++static void ovl_dentry_release(struct dentry *dentry) ++{ ++ struct ovl_entry *oe = dentry->d_fsdata; ++ ++ if (oe) { ++ dput(oe->__upperdentry); ++ dput(oe->__upperdentry); ++ dput(oe->lowerdentry); ++ call_rcu(&oe->rcu, ovl_entry_free); ++ } ++} ++ ++const struct dentry_operations ovl_dentry_operations = { ++ .d_release = ovl_dentry_release, ++}; ++ ++static struct ovl_entry *ovl_alloc_entry(void) ++{ ++ return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); ++} ++ ++static inline struct dentry *ovl_lookup_real(struct dentry *dir, ++ struct qstr *name) ++{ ++ struct dentry *dentry; ++ ++ mutex_lock(&dir->d_inode->i_mutex); ++ dentry = lookup_one_len(name->name, dir, name->len); ++ mutex_unlock(&dir->d_inode->i_mutex); ++ ++ if (IS_ERR(dentry)) { ++ if (PTR_ERR(dentry) == -ENOENT) ++ dentry = NULL; ++ } else if (!dentry->d_inode) { ++ dput(dentry); ++ dentry = NULL; ++ } ++ return dentry; ++} ++ ++static int ovl_do_lookup(struct dentry *dentry) ++{ ++ struct ovl_entry *oe; ++ struct dentry *upperdir; ++ struct dentry *lowerdir; ++ struct dentry *upperdentry = NULL; ++ struct dentry *lowerdentry = NULL; ++ struct inode *inode = NULL; ++ int err; ++ ++ err = -ENOMEM; ++ oe = ovl_alloc_entry(); ++ if (!oe) ++ goto out; ++ ++ upperdir = ovl_dentry_upper(dentry->d_parent); ++ lowerdir = ovl_dentry_lower(dentry->d_parent); ++ ++ if (upperdir) { ++ upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); ++ err = PTR_ERR(upperdentry); ++ if (IS_ERR(upperdentry)) ++ goto out_put_dir; ++ ++ if (lowerdir && upperdentry && ++ (S_ISLNK(upperdentry->d_inode->i_mode) || ++ S_ISDIR(upperdentry->d_inode->i_mode))) { ++ const struct cred *old_cred; ++ struct cred *override_cred; ++ ++ err = -ENOMEM; ++ override_cred = prepare_creds(); ++ if (!override_cred) ++ goto out_dput_upper; ++ ++ /* CAP_SYS_ADMIN needed for getxattr */ ++ 
cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); ++ old_cred = override_creds(override_cred); ++ ++ if (ovl_is_opaquedir(upperdentry)) { ++ oe->opaque = true; ++ } else if (ovl_is_whiteout(upperdentry)) { ++ dput(upperdentry); ++ upperdentry = NULL; ++ oe->opaque = true; ++ } ++ revert_creds(old_cred); ++ put_cred(override_cred); ++ } ++ } ++ if (lowerdir && !oe->opaque) { ++ lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); ++ err = PTR_ERR(lowerdentry); ++ if (IS_ERR(lowerdentry)) ++ goto out_dput_upper; ++ } ++ ++ if (lowerdentry && upperdentry && ++ (!S_ISDIR(upperdentry->d_inode->i_mode) || ++ !S_ISDIR(lowerdentry->d_inode->i_mode))) { ++ dput(lowerdentry); ++ lowerdentry = NULL; ++ oe->opaque = true; ++ } ++ ++ if (lowerdentry || upperdentry) { ++ struct dentry *realdentry; ++ ++ realdentry = upperdentry ? upperdentry : lowerdentry; ++ err = -ENOMEM; ++ inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, ++ oe); ++ if (!inode) ++ goto out_dput; ++ ovl_copyattr(realdentry->d_inode, inode); ++ } ++ ++ if (upperdentry) ++ oe->__upperdentry = dget(upperdentry); ++ ++ if (lowerdentry) ++ oe->lowerdentry = lowerdentry; ++ ++ dentry->d_fsdata = oe; ++ dentry->d_op = &ovl_dentry_operations; ++ d_add(dentry, inode); ++ ++ return 0; ++ ++out_dput: ++ dput(lowerdentry); ++out_dput_upper: ++ dput(upperdentry); ++out_put_dir: ++ kfree(oe); ++out: ++ return err; ++} ++ ++struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, ++ unsigned int flags) ++{ ++ int err = ovl_do_lookup(dentry); ++ ++ if (err) ++ return ERR_PTR(err); ++ ++ return NULL; ++} ++ ++struct file *ovl_path_open(struct path *path, int flags) ++{ ++ path_get(path); ++ return dentry_open(path, flags, current_cred()); ++} ++ ++static void ovl_put_super(struct super_block *sb) ++{ ++ struct ovl_fs *ufs = sb->s_fs_info; ++ ++ if (!(sb->s_flags & MS_RDONLY)) ++ mnt_drop_write(ufs->upper_mnt); ++ ++ mntput(ufs->upper_mnt); ++ mntput(ufs->lower_mnt); ++ ++ kfree(ufs->config.lowerdir); ++ kfree(ufs->config.upperdir); ++ kfree(ufs); ++} ++ ++static int ovl_remount_fs(struct super_block *sb, int *flagsp, char *data) ++{ ++ int flags = *flagsp; ++ struct ovl_fs *ufs = sb->s_fs_info; ++ ++ /* When remounting rw or ro, we need to adjust the write access to the ++ * upper fs. ++ */ ++ if (((flags ^ sb->s_flags) & MS_RDONLY) == 0) ++ /* No change to readonly status */ ++ return 0; ++ ++ if (flags & MS_RDONLY) { ++ mnt_drop_write(ufs->upper_mnt); ++ return 0; ++ } else ++ return mnt_want_write(ufs->upper_mnt); ++} ++ ++/** ++ * ovl_statfs ++ * @sb: The overlayfs super block ++ * @buf: The struct kstatfs to fill in with stats ++ * ++ * Get the filesystem statistics. As writes always target the upper layer ++ * filesystem pass the statfs to the same filesystem. ++ */ ++static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) ++{ ++ struct ovl_fs *ofs = dentry->d_sb->s_fs_info; ++ struct dentry *root_dentry = dentry->d_sb->s_root; ++ struct path path; ++ int err; ++ ++ ovl_path_upper(root_dentry, &path); ++ ++ err = vfs_statfs(&path, buf); ++ if (!err) { ++ buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen); ++ buf->f_type = OVERLAYFS_SUPER_MAGIC; ++ } ++ ++ return err; ++} ++ ++/** ++ * ovl_show_options ++ * ++ * Prints the mount options for a given superblock. ++ * Returns zero; does not fail. 
++ */ ++static int ovl_show_options(struct seq_file *m, struct dentry *dentry) ++{ ++ struct super_block *sb = dentry->d_sb; ++ struct ovl_fs *ufs = sb->s_fs_info; ++ ++ seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); ++ seq_printf(m, ",upperdir=%s", ufs->config.upperdir); ++ return 0; ++} ++ ++static const struct super_operations ovl_super_operations = { ++ .put_super = ovl_put_super, ++ .remount_fs = ovl_remount_fs, ++ .statfs = ovl_statfs, ++ .show_options = ovl_show_options, ++}; ++ ++enum { ++ Opt_lowerdir, ++ Opt_upperdir, ++ Opt_err, ++}; ++ ++static const match_table_t ovl_tokens = { ++ {Opt_lowerdir, "lowerdir=%s"}, ++ {Opt_upperdir, "upperdir=%s"}, ++ {Opt_err, NULL} ++}; ++ ++static int ovl_parse_opt(char *opt, struct ovl_config *config) ++{ ++ char *p; ++ ++ config->upperdir = NULL; ++ config->lowerdir = NULL; ++ ++ while ((p = strsep(&opt, ",")) != NULL) { ++ int token; ++ substring_t args[MAX_OPT_ARGS]; ++ ++ if (!*p) ++ continue; ++ ++ token = match_token(p, ovl_tokens, args); ++ switch (token) { ++ case Opt_upperdir: ++ kfree(config->upperdir); ++ config->upperdir = match_strdup(&args[0]); ++ if (!config->upperdir) ++ return -ENOMEM; ++ break; ++ ++ case Opt_lowerdir: ++ kfree(config->lowerdir); ++ config->lowerdir = match_strdup(&args[0]); ++ if (!config->lowerdir) ++ return -ENOMEM; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ } ++ return 0; ++} ++ ++static int ovl_fill_super(struct super_block *sb, void *data, int silent) ++{ ++ struct path lowerpath; ++ struct path upperpath; ++ struct inode *root_inode; ++ struct dentry *root_dentry; ++ struct ovl_entry *oe; ++ struct ovl_fs *ufs; ++ struct kstatfs statfs; ++ int err; ++ ++ err = -ENOMEM; ++ ufs = kmalloc(sizeof(struct ovl_fs), GFP_KERNEL); ++ if (!ufs) ++ goto out; ++ ++ err = ovl_parse_opt((char *) data, &ufs->config); ++ if (err) ++ goto out_free_ufs; ++ ++ err = -EINVAL; ++ if (!ufs->config.upperdir || !ufs->config.lowerdir) { ++ printk(KERN_ERR "overlayfs: missing upperdir or lowerdir\n"); ++ goto out_free_config; ++ } ++ ++ oe = ovl_alloc_entry(); ++ if (oe == NULL) ++ goto out_free_config; ++ ++ err = kern_path(ufs->config.upperdir, LOOKUP_FOLLOW, &upperpath); ++ if (err) ++ goto out_free_oe; ++ ++ err = kern_path(ufs->config.lowerdir, LOOKUP_FOLLOW, &lowerpath); ++ if (err) ++ goto out_put_upperpath; ++ ++ err = -ENOTDIR; ++ if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || ++ !S_ISDIR(lowerpath.dentry->d_inode->i_mode)) ++ goto out_put_lowerpath; ++ ++ err = vfs_statfs(&lowerpath, &statfs); ++ if (err) { ++ printk(KERN_ERR "overlayfs: statfs failed on lowerpath\n"); ++ goto out_put_lowerpath; ++ } ++ ufs->lower_namelen = statfs.f_namelen; ++ ++ sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, ++ lowerpath.mnt->mnt_sb->s_stack_depth) + 1; ++ ++ err = -EINVAL; ++ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { ++ printk(KERN_ERR "overlayfs: maximum fs stacking depth exceeded\n"); ++ goto out_put_lowerpath; ++ } ++ ++ ++ ufs->upper_mnt = clone_private_mount(&upperpath); ++ err = PTR_ERR(ufs->upper_mnt); ++ if (IS_ERR(ufs->upper_mnt)) { ++ printk(KERN_ERR "overlayfs: failed to clone upperpath\n"); ++ goto out_put_lowerpath; ++ } ++ ++ ufs->lower_mnt = clone_private_mount(&lowerpath); ++ err = PTR_ERR(ufs->lower_mnt); ++ if (IS_ERR(ufs->lower_mnt)) { ++ printk(KERN_ERR "overlayfs: failed to clone lowerpath\n"); ++ goto out_put_upper_mnt; ++ } ++ ++ /* ++ * Make lower_mnt R/O. That way fchmod/fchown on lower file ++ * will fail instead of modifying lower fs. 
++ */ ++ ufs->lower_mnt->mnt_flags |= MNT_READONLY; ++ ++ /* If the upper fs is r/o, we mark overlayfs r/o too */ ++ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) ++ sb->s_flags |= MS_RDONLY; ++ ++ if (!(sb->s_flags & MS_RDONLY)) { ++ err = mnt_want_write(ufs->upper_mnt); ++ if (err) ++ goto out_put_lower_mnt; ++ } ++ ++ err = -ENOMEM; ++ root_inode = ovl_new_inode(sb, S_IFDIR, oe); ++ if (!root_inode) ++ goto out_drop_write; ++ ++ root_dentry = d_make_root(root_inode); ++ if (!root_dentry) ++ goto out_drop_write; ++ ++ mntput(upperpath.mnt); ++ mntput(lowerpath.mnt); ++ ++ oe->__upperdentry = dget(upperpath.dentry); ++ oe->lowerdentry = lowerpath.dentry; ++ ++ root_dentry->d_fsdata = oe; ++ root_dentry->d_op = &ovl_dentry_operations; ++ ++ sb->s_magic = OVERLAYFS_SUPER_MAGIC; ++ sb->s_op = &ovl_super_operations; ++ sb->s_root = root_dentry; ++ sb->s_fs_info = ufs; ++ ++ return 0; ++ ++out_drop_write: ++ if (!(sb->s_flags & MS_RDONLY)) ++ mnt_drop_write(ufs->upper_mnt); ++out_put_lower_mnt: ++ mntput(ufs->lower_mnt); ++out_put_upper_mnt: ++ mntput(ufs->upper_mnt); ++out_put_lowerpath: ++ path_put(&lowerpath); ++out_put_upperpath: ++ path_put(&upperpath); ++out_free_oe: ++ kfree(oe); ++out_free_config: ++ kfree(ufs->config.lowerdir); ++ kfree(ufs->config.upperdir); ++out_free_ufs: ++ kfree(ufs); ++out: ++ return err; ++} ++ ++static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, ++ const char *dev_name, void *raw_data) ++{ ++ return mount_nodev(fs_type, flags, raw_data, ovl_fill_super); ++} ++ ++static struct file_system_type ovl_fs_type = { ++ .owner = THIS_MODULE, ++ .name = "overlayfs", ++ .mount = ovl_mount, ++ .kill_sb = kill_anon_super, ++}; ++ ++static int __init ovl_init(void) ++{ ++ return register_filesystem(&ovl_fs_type); ++} ++ ++static void __exit ovl_exit(void) ++{ ++ unregister_filesystem(&ovl_fs_type); ++} ++ ++module_init(ovl_init); ++module_exit(ovl_exit); +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -1305,6 +1305,7 @@ long do_splice_direct(struct file *in, l + + return ret; + } ++EXPORT_SYMBOL(do_splice_direct); + + static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, + struct pipe_inode_info *opipe, +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -244,6 +244,12 @@ struct iattr { + */ + #include <linux/quota.h> + ++/* ++ * Maximum number of layers of fs stack. 
Needs to be limited to ++ * prevent kernel stack overflow ++ */ ++#define FILESYSTEM_MAX_STACK_DEPTH 2 ++ + /** + * enum positive_aop_returns - aop return codes with specific semantics + * +@@ -1320,6 +1326,11 @@ struct super_block { + + /* Being remounted read-only */ + int s_readonly_remount; ++ ++ /* ++ * Indicates how deep in a filesystem stack this SB is ++ */ ++ int s_stack_depth; + }; + + /* superblock cache pruning functions */ +@@ -1573,6 +1584,7 @@ struct inode_operations { + int (*atomic_open)(struct inode *, struct dentry *, + struct file *, unsigned open_flag, + umode_t create_mode, int *opened); ++ int (*dentry_open)(struct dentry *, struct file *, const struct cred *); + } ____cacheline_aligned; + + ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, +@@ -2005,6 +2017,7 @@ extern struct file *file_open_name(struc + extern struct file *filp_open(const char *, int, umode_t); + extern struct file *file_open_root(struct dentry *, struct vfsmount *, + const char *, int); ++extern int vfs_open(const struct path *, struct file *, const struct cred *); + extern struct file * dentry_open(const struct path *, int, const struct cred *); + extern int filp_close(struct file *, fl_owner_t id); + +@@ -2210,6 +2223,7 @@ extern sector_t bmap(struct inode *, sec + #endif + extern int notify_change(struct dentry *, struct iattr *); + extern int inode_permission(struct inode *, int); ++extern int __inode_permission(struct inode *, int); + extern int generic_permission(struct inode *, int); + + static inline bool execute_ok(struct inode *inode) +--- a/include/linux/mount.h ++++ b/include/linux/mount.h +@@ -66,6 +66,9 @@ extern void mnt_pin(struct vfsmount *mnt + extern void mnt_unpin(struct vfsmount *mnt); + extern int __mnt_is_readonly(struct vfsmount *mnt); + ++struct path; ++extern struct vfsmount *clone_private_mount(struct path *path); ++ + struct file_system_type; + extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, + int flags, const char *name, +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -5623,6 +5623,13 @@ F: drivers/scsi/osd/ + F: include/scsi/osd_* + F: fs/exofs/ + ++OVERLAYFS FILESYSTEM ++M: Miklos Szeredi <miklos@szeredi.hu> ++L: linux-fsdevel@vger.kernel.org ++S: Supported ++F: fs/overlayfs/* ++F: Documentation/filesystems/overlayfs.txt ++ + P54 WIRELESS DRIVER + M: Christian Lamparter <chunkeey@googlemail.com> + L: linux-wireless@vger.kernel.org diff --git a/target/linux/generic/patches-3.8/102-ehci_hcd_ignore_oc.patch b/target/linux/generic/patches-3.8/102-ehci_hcd_ignore_oc.patch new file mode 100644 index 0000000000..1117223eb4 --- /dev/null +++ b/target/linux/generic/patches-3.8/102-ehci_hcd_ignore_oc.patch @@ -0,0 +1,41 @@ +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -634,7 +634,7 @@ static int ehci_run (struct usb_hcd *hcd + "USB %x.%x started, EHCI %x.%02x%s\n", + ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), + temp >> 8, temp & 0xff, +- ignore_oc ? ", overcurrent ignored" : ""); ++ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : ""); + + ehci_writel(ehci, INTR_MASK, + &ehci->regs->intr_enable); /* Turn On Interrupts */ +--- a/drivers/usb/host/ehci-hub.c ++++ b/drivers/usb/host/ehci-hub.c +@@ -611,7 +611,7 @@ ehci_hub_status_data (struct usb_hcd *hc + * always set, seem to clear PORT_OCC and PORT_CSC when writing to + * PORT_POWER; that's surprising, but maybe within-spec. 
+ */ +- if (!ignore_oc) ++ if (!ignore_oc && !ehci->ignore_oc) + mask = PORT_CSC | PORT_PEC | PORT_OCC; + else + mask = PORT_CSC | PORT_PEC; +@@ -825,7 +825,7 @@ static int ehci_hub_control ( + if (temp & PORT_PEC) + status |= USB_PORT_STAT_C_ENABLE << 16; + +- if ((temp & PORT_OCC) && !ignore_oc){ ++ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){ + status |= USB_PORT_STAT_C_OVERCURRENT << 16; + + /* +--- a/drivers/usb/host/ehci.h ++++ b/drivers/usb/host/ehci.h +@@ -196,6 +196,7 @@ struct ehci_hcd { /* one per controlle + unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ + unsigned has_synopsys_hc_bug:1; /* Synopsys HC */ + unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */ ++ unsigned ignore_oc:1; + + /* required for usb32 quirk */ + #define OHCI_CTRL_HCFS (3 << 6) diff --git a/target/linux/generic/patches-3.8/110-fix_mtd_include.patch b/target/linux/generic/patches-3.8/110-fix_mtd_include.patch new file mode 100644 index 0000000000..c63dbc0596 --- /dev/null +++ b/target/linux/generic/patches-3.8/110-fix_mtd_include.patch @@ -0,0 +1,10 @@ +--- a/include/linux/mtd/physmap.h ++++ b/include/linux/mtd/physmap.h +@@ -17,6 +17,7 @@ + + #include <linux/mtd/mtd.h> + #include <linux/mtd/partitions.h> ++#include <linux/platform_device.h> + + struct map_info; + struct platform_device; diff --git a/target/linux/generic/patches-3.8/200-fix_localversion.patch b/target/linux/generic/patches-3.8/200-fix_localversion.patch new file mode 100644 index 0000000000..6c16dd1b4b --- /dev/null +++ b/target/linux/generic/patches-3.8/200-fix_localversion.patch @@ -0,0 +1,11 @@ +--- a/scripts/setlocalversion ++++ b/scripts/setlocalversion +@@ -167,7 +167,7 @@ else + # annotated or signed tagged state (as git describe only + # looks at signed or annotated tags - git tag -a/-s) and + # LOCALVERSION= is not specified +- if test "${LOCALVERSION+set}" != "set"; then ++ if test "${CONFIG_LOCALVERSION+set}" != "set"; then + scm=$(scm_version --short) + res="$res${scm:++}" + fi diff --git a/target/linux/generic/patches-3.8/201-extra_optimization.patch b/target/linux/generic/patches-3.8/201-extra_optimization.patch new file mode 100644 index 0000000000..d94d2cf3a8 --- /dev/null +++ b/target/linux/generic/patches-3.8/201-extra_optimization.patch @@ -0,0 +1,24 @@ +--- a/Makefile ++++ b/Makefile +@@ -570,9 +570,9 @@ endif # $(dot-config) + all: vmlinux + + ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +-KBUILD_CFLAGS += -Os ++KBUILD_CFLAGS += -Os -fno-caller-saves + else +-KBUILD_CFLAGS += -O2 ++KBUILD_CFLAGS += -O2 -fno-reorder-blocks -fno-tree-ch -fno-caller-saves + endif + + include $(srctree)/arch/$(SRCARCH)/Makefile +@@ -645,6 +645,9 @@ endif + NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) + CHECKFLAGS += $(NOSTDINC_FLAGS) + ++# improve gcc optimization ++CFLAGS += $(call cc-option,-funit-at-a-time,) ++ + # warn about C99 declaration after statement + KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) + diff --git a/target/linux/generic/patches-3.8/202-reduce_module_size.patch b/target/linux/generic/patches-3.8/202-reduce_module_size.patch new file mode 100644 index 0000000000..dee601989b --- /dev/null +++ b/target/linux/generic/patches-3.8/202-reduce_module_size.patch @@ -0,0 +1,11 @@ +--- a/Makefile ++++ b/Makefile +@@ -379,7 +379,7 @@ KBUILD_CFLAGS_KERNEL := + KBUILD_AFLAGS := -D__ASSEMBLY__ + KBUILD_AFLAGS_MODULE := -DMODULE + KBUILD_CFLAGS_MODULE := -DMODULE +-KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds ++KBUILD_LDFLAGS_MODULE = -T 
$(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s) + + # Read KERNELRELEASE from include/config/kernel.release (if it exists) + KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) diff --git a/target/linux/generic/patches-3.8/210-darwin_scripts_include.patch b/target/linux/generic/patches-3.8/210-darwin_scripts_include.patch new file mode 100644 index 0000000000..a43d3454d9 --- /dev/null +++ b/target/linux/generic/patches-3.8/210-darwin_scripts_include.patch @@ -0,0 +1,3088 @@ +--- a/scripts/kallsyms.c ++++ b/scripts/kallsyms.c +@@ -22,6 +22,35 @@ + #include <stdlib.h> + #include <string.h> + #include <ctype.h> ++#ifdef __APPLE__ ++/* Darwin has no memmem implementation, this one is ripped of the uClibc-0.9.28 source */ ++void *memmem (const void *haystack, size_t haystack_len, ++ const void *needle, size_t needle_len) ++{ ++ const char *begin; ++ const char *const last_possible ++ = (const char *) haystack + haystack_len - needle_len; ++ ++ if (needle_len == 0) ++ /* The first occurrence of the empty string is deemed to occur at ++ the beginning of the string. */ ++ return (void *) haystack; ++ ++ /* Sanity check, otherwise the loop might search through the whole ++ memory. */ ++ if (__builtin_expect (haystack_len < needle_len, 0)) ++ return NULL; ++ ++ for (begin = (const char *) haystack; begin <= last_possible; ++begin) ++ if (begin[0] == ((const char *) needle)[0] && ++ !memcmp ((const void *) &begin[1], ++ (const void *) ((const char *) needle + 1), ++ needle_len - 1)) ++ return (void *) begin; ++ ++ return NULL; ++} ++#endif + + #ifndef ARRAY_SIZE + #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) +--- a/scripts/kconfig/Makefile ++++ b/scripts/kconfig/Makefile +@@ -129,6 +129,9 @@ check-lxdialog := $(srctree)/$(src)/lxd + # we really need to do so. (Do not call gcc as part of make mrproper) + HOST_EXTRACFLAGS += $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags) \ + -DLOCALE ++ifeq ($(shell uname -s),Darwin) ++HOST_LOADLIBES += -lncurses ++endif + + # =========================================================================== + # Shared Makefile for the various kconfig executables: +--- a/scripts/mod/mk_elfconfig.c ++++ b/scripts/mod/mk_elfconfig.c +@@ -1,7 +1,11 @@ + #include <stdio.h> + #include <stdlib.h> + #include <string.h> ++#ifndef __APPLE__ + #include <elf.h> ++#else ++#include "elf.h" ++#endif + + int + main(int argc, char **argv) +--- a/scripts/mod/modpost.h ++++ b/scripts/mod/modpost.h +@@ -7,7 +7,11 @@ + #include <sys/mman.h> + #include <fcntl.h> + #include <unistd.h> ++#if !(defined(__APPLE__) || defined(__CYGWIN__)) + #include <elf.h> ++#else ++#include "elf.h" ++#endif + + #include "elfconfig.h" + +--- /dev/null ++++ b/scripts/mod/elf.h +@@ -0,0 +1,3007 @@ ++/* This file defines standard ELF types, structures, and macros. ++ Copyright (C) 1995-2012 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. 
++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#ifndef _ELF_H ++#define _ELF_H 1 ++ ++/* Standard ELF types. */ ++ ++#include <stdint.h> ++ ++/* Type for a 16-bit quantity. */ ++typedef uint16_t Elf32_Half; ++typedef uint16_t Elf64_Half; ++ ++/* Types for signed and unsigned 32-bit quantities. */ ++typedef uint32_t Elf32_Word; ++typedef int32_t Elf32_Sword; ++typedef uint32_t Elf64_Word; ++typedef int32_t Elf64_Sword; ++ ++/* Types for signed and unsigned 64-bit quantities. */ ++typedef uint64_t Elf32_Xword; ++typedef int64_t Elf32_Sxword; ++typedef uint64_t Elf64_Xword; ++typedef int64_t Elf64_Sxword; ++ ++/* Type of addresses. */ ++typedef uint32_t Elf32_Addr; ++typedef uint64_t Elf64_Addr; ++ ++/* Type of file offsets. */ ++typedef uint32_t Elf32_Off; ++typedef uint64_t Elf64_Off; ++ ++/* Type for section indices, which are 16-bit quantities. */ ++typedef uint16_t Elf32_Section; ++typedef uint16_t Elf64_Section; ++ ++/* Type for version symbol information. */ ++typedef Elf32_Half Elf32_Versym; ++typedef Elf64_Half Elf64_Versym; ++ ++ ++/* The ELF file header. This appears at the start of every ELF file. */ ++ ++#define EI_NIDENT (16) ++ ++typedef struct ++{ ++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */ ++ Elf32_Half e_type; /* Object file type */ ++ Elf32_Half e_machine; /* Architecture */ ++ Elf32_Word e_version; /* Object file version */ ++ Elf32_Addr e_entry; /* Entry point virtual address */ ++ Elf32_Off e_phoff; /* Program header table file offset */ ++ Elf32_Off e_shoff; /* Section header table file offset */ ++ Elf32_Word e_flags; /* Processor-specific flags */ ++ Elf32_Half e_ehsize; /* ELF header size in bytes */ ++ Elf32_Half e_phentsize; /* Program header table entry size */ ++ Elf32_Half e_phnum; /* Program header table entry count */ ++ Elf32_Half e_shentsize; /* Section header table entry size */ ++ Elf32_Half e_shnum; /* Section header table entry count */ ++ Elf32_Half e_shstrndx; /* Section header string table index */ ++} Elf32_Ehdr; ++ ++typedef struct ++{ ++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */ ++ Elf64_Half e_type; /* Object file type */ ++ Elf64_Half e_machine; /* Architecture */ ++ Elf64_Word e_version; /* Object file version */ ++ Elf64_Addr e_entry; /* Entry point virtual address */ ++ Elf64_Off e_phoff; /* Program header table file offset */ ++ Elf64_Off e_shoff; /* Section header table file offset */ ++ Elf64_Word e_flags; /* Processor-specific flags */ ++ Elf64_Half e_ehsize; /* ELF header size in bytes */ ++ Elf64_Half e_phentsize; /* Program header table entry size */ ++ Elf64_Half e_phnum; /* Program header table entry count */ ++ Elf64_Half e_shentsize; /* Section header table entry size */ ++ Elf64_Half e_shnum; /* Section header table entry count */ ++ Elf64_Half e_shstrndx; /* Section header string table index */ ++} Elf64_Ehdr; ++ ++/* Fields in the e_ident array. The EI_* macros are indices into the ++ array. The macros under each EI_* macro are the values the byte ++ may have. 
*/ ++ ++#define EI_MAG0 0 /* File identification byte 0 index */ ++#define ELFMAG0 0x7f /* Magic number byte 0 */ ++ ++#define EI_MAG1 1 /* File identification byte 1 index */ ++#define ELFMAG1 'E' /* Magic number byte 1 */ ++ ++#define EI_MAG2 2 /* File identification byte 2 index */ ++#define ELFMAG2 'L' /* Magic number byte 2 */ ++ ++#define EI_MAG3 3 /* File identification byte 3 index */ ++#define ELFMAG3 'F' /* Magic number byte 3 */ ++ ++/* Conglomeration of the identification bytes, for easy testing as a word. */ ++#define ELFMAG "\177ELF" ++#define SELFMAG 4 ++ ++#define EI_CLASS 4 /* File class byte index */ ++#define ELFCLASSNONE 0 /* Invalid class */ ++#define ELFCLASS32 1 /* 32-bit objects */ ++#define ELFCLASS64 2 /* 64-bit objects */ ++#define ELFCLASSNUM 3 ++ ++#define EI_DATA 5 /* Data encoding byte index */ ++#define ELFDATANONE 0 /* Invalid data encoding */ ++#define ELFDATA2LSB 1 /* 2's complement, little endian */ ++#define ELFDATA2MSB 2 /* 2's complement, big endian */ ++#define ELFDATANUM 3 ++ ++#define EI_VERSION 6 /* File version byte index */ ++ /* Value must be EV_CURRENT */ ++ ++#define EI_OSABI 7 /* OS ABI identification */ ++#define ELFOSABI_NONE 0 /* UNIX System V ABI */ ++#define ELFOSABI_SYSV 0 /* Alias. */ ++#define ELFOSABI_HPUX 1 /* HP-UX */ ++#define ELFOSABI_NETBSD 2 /* NetBSD. */ ++#define ELFOSABI_GNU 3 /* Object uses GNU ELF extensions. */ ++#define ELFOSABI_LINUX ELFOSABI_GNU /* Compatibility alias. */ ++#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */ ++#define ELFOSABI_AIX 7 /* IBM AIX. */ ++#define ELFOSABI_IRIX 8 /* SGI Irix. */ ++#define ELFOSABI_FREEBSD 9 /* FreeBSD. */ ++#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */ ++#define ELFOSABI_MODESTO 11 /* Novell Modesto. */ ++#define ELFOSABI_OPENBSD 12 /* OpenBSD. */ ++#define ELFOSABI_ARM_AEABI 64 /* ARM EABI */ ++#define ELFOSABI_ARM 97 /* ARM */ ++#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ ++ ++#define EI_ABIVERSION 8 /* ABI version */ ++ ++#define EI_PAD 9 /* Byte index of padding bytes */ ++ ++/* Legal values for e_type (object file type). */ ++ ++#define ET_NONE 0 /* No file type */ ++#define ET_REL 1 /* Relocatable file */ ++#define ET_EXEC 2 /* Executable file */ ++#define ET_DYN 3 /* Shared object file */ ++#define ET_CORE 4 /* Core file */ ++#define ET_NUM 5 /* Number of defined types */ ++#define ET_LOOS 0xfe00 /* OS-specific range start */ ++#define ET_HIOS 0xfeff /* OS-specific range end */ ++#define ET_LOPROC 0xff00 /* Processor-specific range start */ ++#define ET_HIPROC 0xffff /* Processor-specific range end */ ++ ++/* Legal values for e_machine (architecture). 
*/ ++ ++#define EM_NONE 0 /* No machine */ ++#define EM_M32 1 /* AT&T WE 32100 */ ++#define EM_SPARC 2 /* SUN SPARC */ ++#define EM_386 3 /* Intel 80386 */ ++#define EM_68K 4 /* Motorola m68k family */ ++#define EM_88K 5 /* Motorola m88k family */ ++#define EM_860 7 /* Intel 80860 */ ++#define EM_MIPS 8 /* MIPS R3000 big-endian */ ++#define EM_S370 9 /* IBM System/370 */ ++#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */ ++ ++#define EM_PARISC 15 /* HPPA */ ++#define EM_VPP500 17 /* Fujitsu VPP500 */ ++#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ ++#define EM_960 19 /* Intel 80960 */ ++#define EM_PPC 20 /* PowerPC */ ++#define EM_PPC64 21 /* PowerPC 64-bit */ ++#define EM_S390 22 /* IBM S390 */ ++ ++#define EM_V800 36 /* NEC V800 series */ ++#define EM_FR20 37 /* Fujitsu FR20 */ ++#define EM_RH32 38 /* TRW RH-32 */ ++#define EM_RCE 39 /* Motorola RCE */ ++#define EM_ARM 40 /* ARM */ ++#define EM_FAKE_ALPHA 41 /* Digital Alpha */ ++#define EM_SH 42 /* Hitachi SH */ ++#define EM_SPARCV9 43 /* SPARC v9 64-bit */ ++#define EM_TRICORE 44 /* Siemens Tricore */ ++#define EM_ARC 45 /* Argonaut RISC Core */ ++#define EM_H8_300 46 /* Hitachi H8/300 */ ++#define EM_H8_300H 47 /* Hitachi H8/300H */ ++#define EM_H8S 48 /* Hitachi H8S */ ++#define EM_H8_500 49 /* Hitachi H8/500 */ ++#define EM_IA_64 50 /* Intel Merced */ ++#define EM_MIPS_X 51 /* Stanford MIPS-X */ ++#define EM_COLDFIRE 52 /* Motorola Coldfire */ ++#define EM_68HC12 53 /* Motorola M68HC12 */ ++#define EM_MMA 54 /* Fujitsu MMA Multimedia Accelerator*/ ++#define EM_PCP 55 /* Siemens PCP */ ++#define EM_NCPU 56 /* Sony nCPU embeeded RISC */ ++#define EM_NDR1 57 /* Denso NDR1 microprocessor */ ++#define EM_STARCORE 58 /* Motorola Start*Core processor */ ++#define EM_ME16 59 /* Toyota ME16 processor */ ++#define EM_ST100 60 /* STMicroelectronic ST100 processor */ ++#define EM_TINYJ 61 /* Advanced Logic Corp. 
Tinyj emb.fam*/ ++#define EM_X86_64 62 /* AMD x86-64 architecture */ ++#define EM_PDSP 63 /* Sony DSP Processor */ ++ ++#define EM_FX66 66 /* Siemens FX66 microcontroller */ ++#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16 mc */ ++#define EM_ST7 68 /* STmicroelectronics ST7 8 bit mc */ ++#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller */ ++#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller */ ++#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller */ ++#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller */ ++#define EM_SVX 73 /* Silicon Graphics SVx */ ++#define EM_ST19 74 /* STMicroelectronics ST19 8 bit mc */ ++#define EM_VAX 75 /* Digital VAX */ ++#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ ++#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded processor */ ++#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor */ ++#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor */ ++#define EM_MMIX 80 /* Donald Knuth's educational 64-bit processor */ ++#define EM_HUANY 81 /* Harvard University machine-independent object files */ ++#define EM_PRISM 82 /* SiTera Prism */ ++#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller */ ++#define EM_FR30 84 /* Fujitsu FR30 */ ++#define EM_D10V 85 /* Mitsubishi D10V */ ++#define EM_D30V 86 /* Mitsubishi D30V */ ++#define EM_V850 87 /* NEC v850 */ ++#define EM_M32R 88 /* Mitsubishi M32R */ ++#define EM_MN10300 89 /* Matsushita MN10300 */ ++#define EM_MN10200 90 /* Matsushita MN10200 */ ++#define EM_PJ 91 /* picoJava */ ++#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ ++#define EM_ARC_A5 93 /* ARC Cores Tangent-A5 */ ++#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */ ++#define EM_TILEPRO 188 /* Tilera TILEPro */ ++#define EM_TILEGX 191 /* Tilera TILE-Gx */ ++#define EM_NUM 192 ++ ++/* If it is necessary to assign new unofficial EM_* values, please ++ pick large random numbers (0x8523, 0xa7f2, etc.) to minimize the ++ chances of collision with official or non-GNU unofficial values. */ ++ ++#define EM_ALPHA 0x9026 ++ ++/* Legal values for e_version (version). */ ++ ++#define EV_NONE 0 /* Invalid ELF version */ ++#define EV_CURRENT 1 /* Current version */ ++#define EV_NUM 2 ++ ++/* Section header. */ ++ ++typedef struct ++{ ++ Elf32_Word sh_name; /* Section name (string tbl index) */ ++ Elf32_Word sh_type; /* Section type */ ++ Elf32_Word sh_flags; /* Section flags */ ++ Elf32_Addr sh_addr; /* Section virtual addr at execution */ ++ Elf32_Off sh_offset; /* Section file offset */ ++ Elf32_Word sh_size; /* Section size in bytes */ ++ Elf32_Word sh_link; /* Link to another section */ ++ Elf32_Word sh_info; /* Additional section information */ ++ Elf32_Word sh_addralign; /* Section alignment */ ++ Elf32_Word sh_entsize; /* Entry size if section holds table */ ++} Elf32_Shdr; ++ ++typedef struct ++{ ++ Elf64_Word sh_name; /* Section name (string tbl index) */ ++ Elf64_Word sh_type; /* Section type */ ++ Elf64_Xword sh_flags; /* Section flags */ ++ Elf64_Addr sh_addr; /* Section virtual addr at execution */ ++ Elf64_Off sh_offset; /* Section file offset */ ++ Elf64_Xword sh_size; /* Section size in bytes */ ++ Elf64_Word sh_link; /* Link to another section */ ++ Elf64_Word sh_info; /* Additional section information */ ++ Elf64_Xword sh_addralign; /* Section alignment */ ++ Elf64_Xword sh_entsize; /* Entry size if section holds table */ ++} Elf64_Shdr; ++ ++/* Special section indices. 
*/ ++ ++#define SHN_UNDEF 0 /* Undefined section */ ++#define SHN_LORESERVE 0xff00 /* Start of reserved indices */ ++#define SHN_LOPROC 0xff00 /* Start of processor-specific */ ++#define SHN_BEFORE 0xff00 /* Order section before all others ++ (Solaris). */ ++#define SHN_AFTER 0xff01 /* Order section after all others ++ (Solaris). */ ++#define SHN_HIPROC 0xff1f /* End of processor-specific */ ++#define SHN_LOOS 0xff20 /* Start of OS-specific */ ++#define SHN_HIOS 0xff3f /* End of OS-specific */ ++#define SHN_ABS 0xfff1 /* Associated symbol is absolute */ ++#define SHN_COMMON 0xfff2 /* Associated symbol is common */ ++#define SHN_XINDEX 0xffff /* Index is in extra table. */ ++#define SHN_HIRESERVE 0xffff /* End of reserved indices */ ++ ++/* Legal values for sh_type (section type). */ ++ ++#define SHT_NULL 0 /* Section header table entry unused */ ++#define SHT_PROGBITS 1 /* Program data */ ++#define SHT_SYMTAB 2 /* Symbol table */ ++#define SHT_STRTAB 3 /* String table */ ++#define SHT_RELA 4 /* Relocation entries with addends */ ++#define SHT_HASH 5 /* Symbol hash table */ ++#define SHT_DYNAMIC 6 /* Dynamic linking information */ ++#define SHT_NOTE 7 /* Notes */ ++#define SHT_NOBITS 8 /* Program space with no data (bss) */ ++#define SHT_REL 9 /* Relocation entries, no addends */ ++#define SHT_SHLIB 10 /* Reserved */ ++#define SHT_DYNSYM 11 /* Dynamic linker symbol table */ ++#define SHT_INIT_ARRAY 14 /* Array of constructors */ ++#define SHT_FINI_ARRAY 15 /* Array of destructors */ ++#define SHT_PREINIT_ARRAY 16 /* Array of pre-constructors */ ++#define SHT_GROUP 17 /* Section group */ ++#define SHT_SYMTAB_SHNDX 18 /* Extended section indeces */ ++#define SHT_NUM 19 /* Number of defined types. */ ++#define SHT_LOOS 0x60000000 /* Start OS-specific. */ ++#define SHT_GNU_ATTRIBUTES 0x6ffffff5 /* Object attributes. */ ++#define SHT_GNU_HASH 0x6ffffff6 /* GNU-style hash table. */ ++#define SHT_GNU_LIBLIST 0x6ffffff7 /* Prelink library list */ ++#define SHT_CHECKSUM 0x6ffffff8 /* Checksum for DSO content. */ ++#define SHT_LOSUNW 0x6ffffffa /* Sun-specific low bound. */ ++#define SHT_SUNW_move 0x6ffffffa ++#define SHT_SUNW_COMDAT 0x6ffffffb ++#define SHT_SUNW_syminfo 0x6ffffffc ++#define SHT_GNU_verdef 0x6ffffffd /* Version definition section. */ ++#define SHT_GNU_verneed 0x6ffffffe /* Version needs section. */ ++#define SHT_GNU_versym 0x6fffffff /* Version symbol table. */ ++#define SHT_HISUNW 0x6fffffff /* Sun-specific high bound. */ ++#define SHT_HIOS 0x6fffffff /* End OS-specific type */ ++#define SHT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define SHT_HIPROC 0x7fffffff /* End of processor-specific */ ++#define SHT_LOUSER 0x80000000 /* Start of application-specific */ ++#define SHT_HIUSER 0x8fffffff /* End of application-specific */ ++ ++/* Legal values for sh_flags (section flags). */ ++ ++#define SHF_WRITE (1 << 0) /* Writable */ ++#define SHF_ALLOC (1 << 1) /* Occupies memory during execution */ ++#define SHF_EXECINSTR (1 << 2) /* Executable */ ++#define SHF_MERGE (1 << 4) /* Might be merged */ ++#define SHF_STRINGS (1 << 5) /* Contains nul-terminated strings */ ++#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */ ++#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */ ++#define SHF_OS_NONCONFORMING (1 << 8) /* Non-standard OS specific handling ++ required */ ++#define SHF_GROUP (1 << 9) /* Section is member of a group. */ ++#define SHF_TLS (1 << 10) /* Section hold thread-local data. */ ++#define SHF_MASKOS 0x0ff00000 /* OS-specific. 
*/ ++#define SHF_MASKPROC 0xf0000000 /* Processor-specific */ ++#define SHF_ORDERED (1 << 30) /* Special ordering requirement ++ (Solaris). */ ++#define SHF_EXCLUDE (1 << 31) /* Section is excluded unless ++ referenced or allocated (Solaris).*/ ++ ++/* Section group handling. */ ++#define GRP_COMDAT 0x1 /* Mark group as COMDAT. */ ++ ++/* Symbol table entry. */ ++ ++typedef struct ++{ ++ Elf32_Word st_name; /* Symbol name (string tbl index) */ ++ Elf32_Addr st_value; /* Symbol value */ ++ Elf32_Word st_size; /* Symbol size */ ++ unsigned char st_info; /* Symbol type and binding */ ++ unsigned char st_other; /* Symbol visibility */ ++ Elf32_Section st_shndx; /* Section index */ ++} Elf32_Sym; ++ ++typedef struct ++{ ++ Elf64_Word st_name; /* Symbol name (string tbl index) */ ++ unsigned char st_info; /* Symbol type and binding */ ++ unsigned char st_other; /* Symbol visibility */ ++ Elf64_Section st_shndx; /* Section index */ ++ Elf64_Addr st_value; /* Symbol value */ ++ Elf64_Xword st_size; /* Symbol size */ ++} Elf64_Sym; ++ ++/* The syminfo section if available contains additional information about ++ every dynamic symbol. */ ++ ++typedef struct ++{ ++ Elf32_Half si_boundto; /* Direct bindings, symbol bound to */ ++ Elf32_Half si_flags; /* Per symbol flags */ ++} Elf32_Syminfo; ++ ++typedef struct ++{ ++ Elf64_Half si_boundto; /* Direct bindings, symbol bound to */ ++ Elf64_Half si_flags; /* Per symbol flags */ ++} Elf64_Syminfo; ++ ++/* Possible values for si_boundto. */ ++#define SYMINFO_BT_SELF 0xffff /* Symbol bound to self */ ++#define SYMINFO_BT_PARENT 0xfffe /* Symbol bound to parent */ ++#define SYMINFO_BT_LOWRESERVE 0xff00 /* Beginning of reserved entries */ ++ ++/* Possible bitmasks for si_flags. */ ++#define SYMINFO_FLG_DIRECT 0x0001 /* Direct bound symbol */ ++#define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-thru symbol for translator */ ++#define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */ ++#define SYMINFO_FLG_LAZYLOAD 0x0008 /* Symbol bound to object to be lazy ++ loaded */ ++/* Syminfo version values. */ ++#define SYMINFO_NONE 0 ++#define SYMINFO_CURRENT 1 ++#define SYMINFO_NUM 2 ++ ++ ++/* How to extract and insert information held in the st_info field. */ ++ ++#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4) ++#define ELF32_ST_TYPE(val) ((val) & 0xf) ++#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf)) ++ ++/* Both Elf32_Sym and Elf64_Sym use the same one-byte st_info field. */ ++#define ELF64_ST_BIND(val) ELF32_ST_BIND (val) ++#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val) ++#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type)) ++ ++/* Legal values for ST_BIND subfield of st_info (symbol binding). */ ++ ++#define STB_LOCAL 0 /* Local symbol */ ++#define STB_GLOBAL 1 /* Global symbol */ ++#define STB_WEAK 2 /* Weak symbol */ ++#define STB_NUM 3 /* Number of defined types. */ ++#define STB_LOOS 10 /* Start of OS-specific */ ++#define STB_GNU_UNIQUE 10 /* Unique symbol. */ ++#define STB_HIOS 12 /* End of OS-specific */ ++#define STB_LOPROC 13 /* Start of processor-specific */ ++#define STB_HIPROC 15 /* End of processor-specific */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). 
*/ ++ ++#define STT_NOTYPE 0 /* Symbol type is unspecified */ ++#define STT_OBJECT 1 /* Symbol is a data object */ ++#define STT_FUNC 2 /* Symbol is a code object */ ++#define STT_SECTION 3 /* Symbol associated with a section */ ++#define STT_FILE 4 /* Symbol's name is file name */ ++#define STT_COMMON 5 /* Symbol is a common data object */ ++#define STT_TLS 6 /* Symbol is thread-local data object*/ ++#define STT_NUM 7 /* Number of defined types. */ ++#define STT_LOOS 10 /* Start of OS-specific */ ++#define STT_GNU_IFUNC 10 /* Symbol is indirect code object */ ++#define STT_HIOS 12 /* End of OS-specific */ ++#define STT_LOPROC 13 /* Start of processor-specific */ ++#define STT_HIPROC 15 /* End of processor-specific */ ++ ++ ++/* Symbol table indices are found in the hash buckets and chain table ++ of a symbol hash table section. This special index value indicates ++ the end of a chain, meaning no further symbols are found in that bucket. */ ++ ++#define STN_UNDEF 0 /* End of a chain. */ ++ ++ ++/* How to extract and insert information held in the st_other field. */ ++ ++#define ELF32_ST_VISIBILITY(o) ((o) & 0x03) ++ ++/* For ELF64 the definitions are the same. */ ++#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) ++ ++/* Symbol visibility specification encoded in the st_other field. */ ++#define STV_DEFAULT 0 /* Default symbol visibility rules */ ++#define STV_INTERNAL 1 /* Processor specific hidden class */ ++#define STV_HIDDEN 2 /* Sym unavailable in other modules */ ++#define STV_PROTECTED 3 /* Not preemptible, not exported */ ++ ++ ++/* Relocation table entry without addend (in section of type SHT_REL). */ ++ ++typedef struct ++{ ++ Elf32_Addr r_offset; /* Address */ ++ Elf32_Word r_info; /* Relocation type and symbol index */ ++} Elf32_Rel; ++ ++/* I have seen two different definitions of the Elf64_Rel and ++ Elf64_Rela structures, so we'll leave them out until Novell (or ++ whoever) gets their act together. */ ++/* The following, at least, is used on Sparc v9, MIPS, and Alpha. */ ++ ++typedef struct ++{ ++ Elf64_Addr r_offset; /* Address */ ++ Elf64_Xword r_info; /* Relocation type and symbol index */ ++} Elf64_Rel; ++ ++/* Relocation table entry with addend (in section of type SHT_RELA). */ ++ ++typedef struct ++{ ++ Elf32_Addr r_offset; /* Address */ ++ Elf32_Word r_info; /* Relocation type and symbol index */ ++ Elf32_Sword r_addend; /* Addend */ ++} Elf32_Rela; ++ ++typedef struct ++{ ++ Elf64_Addr r_offset; /* Address */ ++ Elf64_Xword r_info; /* Relocation type and symbol index */ ++ Elf64_Sxword r_addend; /* Addend */ ++} Elf64_Rela; ++ ++/* How to extract and insert information held in the r_info field. */ ++ ++#define ELF32_R_SYM(val) ((val) >> 8) ++#define ELF32_R_TYPE(val) ((val) & 0xff) ++#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff)) ++ ++#define ELF64_R_SYM(i) ((i) >> 32) ++#define ELF64_R_TYPE(i) ((i) & 0xffffffff) ++#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type)) ++ ++/* Program segment header. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word p_type; /* Segment type */ ++ Elf32_Off p_offset; /* Segment file offset */ ++ Elf32_Addr p_vaddr; /* Segment virtual address */ ++ Elf32_Addr p_paddr; /* Segment physical address */ ++ Elf32_Word p_filesz; /* Segment size in file */ ++ Elf32_Word p_memsz; /* Segment size in memory */ ++ Elf32_Word p_flags; /* Segment flags */ ++ Elf32_Word p_align; /* Segment alignment */ ++} Elf32_Phdr; ++ ++typedef struct ++{ ++ Elf64_Word p_type; /* Segment type */ ++ Elf64_Word p_flags; /* Segment flags */ ++ Elf64_Off p_offset; /* Segment file offset */ ++ Elf64_Addr p_vaddr; /* Segment virtual address */ ++ Elf64_Addr p_paddr; /* Segment physical address */ ++ Elf64_Xword p_filesz; /* Segment size in file */ ++ Elf64_Xword p_memsz; /* Segment size in memory */ ++ Elf64_Xword p_align; /* Segment alignment */ ++} Elf64_Phdr; ++ ++/* Special value for e_phnum. This indicates that the real number of ++ program headers is too large to fit into e_phnum. Instead the real ++ value is in the field sh_info of section 0. */ ++ ++#define PN_XNUM 0xffff ++ ++/* Legal values for p_type (segment type). */ ++ ++#define PT_NULL 0 /* Program header table entry unused */ ++#define PT_LOAD 1 /* Loadable program segment */ ++#define PT_DYNAMIC 2 /* Dynamic linking information */ ++#define PT_INTERP 3 /* Program interpreter */ ++#define PT_NOTE 4 /* Auxiliary information */ ++#define PT_SHLIB 5 /* Reserved */ ++#define PT_PHDR 6 /* Entry for header table itself */ ++#define PT_TLS 7 /* Thread-local storage segment */ ++#define PT_NUM 8 /* Number of defined types */ ++#define PT_LOOS 0x60000000 /* Start of OS-specific */ ++#define PT_GNU_EH_FRAME 0x6474e550 /* GCC .eh_frame_hdr segment */ ++#define PT_GNU_STACK 0x6474e551 /* Indicates stack executability */ ++#define PT_GNU_RELRO 0x6474e552 /* Read-only after relocation */ ++#define PT_LOSUNW 0x6ffffffa ++#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */ ++#define PT_SUNWSTACK 0x6ffffffb /* Stack segment */ ++#define PT_HISUNW 0x6fffffff ++#define PT_HIOS 0x6fffffff /* End of OS-specific */ ++#define PT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define PT_HIPROC 0x7fffffff /* End of processor-specific */ ++ ++/* Legal values for p_flags (segment flags). */ ++ ++#define PF_X (1 << 0) /* Segment is executable */ ++#define PF_W (1 << 1) /* Segment is writable */ ++#define PF_R (1 << 2) /* Segment is readable */ ++#define PF_MASKOS 0x0ff00000 /* OS-specific */ ++#define PF_MASKPROC 0xf0000000 /* Processor-specific */ ++ ++/* Legal values for note segment descriptor types for core files. 
*/ ++ ++#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */ ++#define NT_FPREGSET 2 /* Contains copy of fpregset struct */ ++#define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */ ++#define NT_PRXREG 4 /* Contains copy of prxregset struct */ ++#define NT_TASKSTRUCT 4 /* Contains copy of task structure */ ++#define NT_PLATFORM 5 /* String from sysinfo(SI_PLATFORM) */ ++#define NT_AUXV 6 /* Contains copy of auxv array */ ++#define NT_GWINDOWS 7 /* Contains copy of gwindows struct */ ++#define NT_ASRS 8 /* Contains copy of asrset struct */ ++#define NT_PSTATUS 10 /* Contains copy of pstatus struct */ ++#define NT_PSINFO 13 /* Contains copy of psinfo struct */ ++#define NT_PRCRED 14 /* Contains copy of prcred struct */ ++#define NT_UTSNAME 15 /* Contains copy of utsname struct */ ++#define NT_LWPSTATUS 16 /* Contains copy of lwpstatus struct */ ++#define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */ ++#define NT_PRFPXREG 20 /* Contains copy of fprxregset struct */ ++#define NT_PRXFPREG 0x46e62b7f /* Contains copy of user_fxsr_struct */ ++#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ ++#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ ++#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ ++#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ ++#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ ++#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */ ++ ++/* Legal values for the note segment descriptor types for object files. */ ++ ++#define NT_VERSION 1 /* Contains a version string. */ ++ ++ ++/* Dynamic section entry. */ ++ ++typedef struct ++{ ++ Elf32_Sword d_tag; /* Dynamic entry type */ ++ union ++ { ++ Elf32_Word d_val; /* Integer value */ ++ Elf32_Addr d_ptr; /* Address value */ ++ } d_un; ++} Elf32_Dyn; ++ ++typedef struct ++{ ++ Elf64_Sxword d_tag; /* Dynamic entry type */ ++ union ++ { ++ Elf64_Xword d_val; /* Integer value */ ++ Elf64_Addr d_ptr; /* Address value */ ++ } d_un; ++} Elf64_Dyn; ++ ++/* Legal values for d_tag (dynamic entry type). 
*/ ++ ++#define DT_NULL 0 /* Marks end of dynamic section */ ++#define DT_NEEDED 1 /* Name of needed library */ ++#define DT_PLTRELSZ 2 /* Size in bytes of PLT relocs */ ++#define DT_PLTGOT 3 /* Processor defined value */ ++#define DT_HASH 4 /* Address of symbol hash table */ ++#define DT_STRTAB 5 /* Address of string table */ ++#define DT_SYMTAB 6 /* Address of symbol table */ ++#define DT_RELA 7 /* Address of Rela relocs */ ++#define DT_RELASZ 8 /* Total size of Rela relocs */ ++#define DT_RELAENT 9 /* Size of one Rela reloc */ ++#define DT_STRSZ 10 /* Size of string table */ ++#define DT_SYMENT 11 /* Size of one symbol table entry */ ++#define DT_INIT 12 /* Address of init function */ ++#define DT_FINI 13 /* Address of termination function */ ++#define DT_SONAME 14 /* Name of shared object */ ++#define DT_RPATH 15 /* Library search path (deprecated) */ ++#define DT_SYMBOLIC 16 /* Start symbol search here */ ++#define DT_REL 17 /* Address of Rel relocs */ ++#define DT_RELSZ 18 /* Total size of Rel relocs */ ++#define DT_RELENT 19 /* Size of one Rel reloc */ ++#define DT_PLTREL 20 /* Type of reloc in PLT */ ++#define DT_DEBUG 21 /* For debugging; unspecified */ ++#define DT_TEXTREL 22 /* Reloc might modify .text */ ++#define DT_JMPREL 23 /* Address of PLT relocs */ ++#define DT_BIND_NOW 24 /* Process relocations of object */ ++#define DT_INIT_ARRAY 25 /* Array with addresses of init fct */ ++#define DT_FINI_ARRAY 26 /* Array with addresses of fini fct */ ++#define DT_INIT_ARRAYSZ 27 /* Size in bytes of DT_INIT_ARRAY */ ++#define DT_FINI_ARRAYSZ 28 /* Size in bytes of DT_FINI_ARRAY */ ++#define DT_RUNPATH 29 /* Library search path */ ++#define DT_FLAGS 30 /* Flags for the object being loaded */ ++#define DT_ENCODING 32 /* Start of encoded range */ ++#define DT_PREINIT_ARRAY 32 /* Array with addresses of preinit fct*/ ++#define DT_PREINIT_ARRAYSZ 33 /* size in bytes of DT_PREINIT_ARRAY */ ++#define DT_NUM 34 /* Number used */ ++#define DT_LOOS 0x6000000d /* Start of OS-specific */ ++#define DT_HIOS 0x6ffff000 /* End of OS-specific */ ++#define DT_LOPROC 0x70000000 /* Start of processor-specific */ ++#define DT_HIPROC 0x7fffffff /* End of processor-specific */ ++#define DT_PROCNUM DT_MIPS_NUM /* Most used by any processor */ ++ ++/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the ++ Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's ++ approach. */ ++#define DT_VALRNGLO 0x6ffffd00 ++#define DT_GNU_PRELINKED 0x6ffffdf5 /* Prelinking timestamp */ ++#define DT_GNU_CONFLICTSZ 0x6ffffdf6 /* Size of conflict section */ ++#define DT_GNU_LIBLISTSZ 0x6ffffdf7 /* Size of library list */ ++#define DT_CHECKSUM 0x6ffffdf8 ++#define DT_PLTPADSZ 0x6ffffdf9 ++#define DT_MOVEENT 0x6ffffdfa ++#define DT_MOVESZ 0x6ffffdfb ++#define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */ ++#define DT_POSFLAG_1 0x6ffffdfd /* Flags for DT_* entries, effecting ++ the following DT_* entry. */ ++#define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */ ++#define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */ ++#define DT_VALRNGHI 0x6ffffdff ++#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag)) /* Reverse order! */ ++#define DT_VALNUM 12 ++ ++/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the ++ Dyn.d_un.d_ptr field of the Elf*_Dyn structure. ++ ++ If any adjustment is made to the ELF object after it has been ++ built these entries will need to be adjusted. */ ++#define DT_ADDRRNGLO 0x6ffffe00 ++#define DT_GNU_HASH 0x6ffffef5 /* GNU-style hash table. 
*/ ++#define DT_TLSDESC_PLT 0x6ffffef6 ++#define DT_TLSDESC_GOT 0x6ffffef7 ++#define DT_GNU_CONFLICT 0x6ffffef8 /* Start of conflict section */ ++#define DT_GNU_LIBLIST 0x6ffffef9 /* Library list */ ++#define DT_CONFIG 0x6ffffefa /* Configuration information. */ ++#define DT_DEPAUDIT 0x6ffffefb /* Dependency auditing. */ ++#define DT_AUDIT 0x6ffffefc /* Object auditing. */ ++#define DT_PLTPAD 0x6ffffefd /* PLT padding. */ ++#define DT_MOVETAB 0x6ffffefe /* Move table. */ ++#define DT_SYMINFO 0x6ffffeff /* Syminfo table. */ ++#define DT_ADDRRNGHI 0x6ffffeff ++#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag)) /* Reverse order! */ ++#define DT_ADDRNUM 11 ++ ++/* The versioning entry types. The next are defined as part of the ++ GNU extension. */ ++#define DT_VERSYM 0x6ffffff0 ++ ++#define DT_RELACOUNT 0x6ffffff9 ++#define DT_RELCOUNT 0x6ffffffa ++ ++/* These were chosen by Sun. */ ++#define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */ ++#define DT_VERDEF 0x6ffffffc /* Address of version definition ++ table */ ++#define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */ ++#define DT_VERNEED 0x6ffffffe /* Address of table with needed ++ versions */ ++#define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */ ++#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */ ++#define DT_VERSIONTAGNUM 16 ++ ++/* Sun added these machine-independent extensions in the "processor-specific" ++ range. Be compatible. */ ++#define DT_AUXILIARY 0x7ffffffd /* Shared object to load before self */ ++#define DT_FILTER 0x7fffffff /* Shared object to get values from */ ++#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1) ++#define DT_EXTRANUM 3 ++ ++/* Values of `d_un.d_val' in the DT_FLAGS entry. */ ++#define DF_ORIGIN 0x00000001 /* Object may use DF_ORIGIN */ ++#define DF_SYMBOLIC 0x00000002 /* Symbol resolutions starts here */ ++#define DF_TEXTREL 0x00000004 /* Object contains text relocations */ ++#define DF_BIND_NOW 0x00000008 /* No lazy binding for this object */ ++#define DF_STATIC_TLS 0x00000010 /* Module uses the static TLS model */ ++ ++/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 ++ entry in the dynamic section. */ ++#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */ ++#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */ ++#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */ ++#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object.*/ ++#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime.*/ ++#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object*/ ++#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */ ++#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */ ++#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */ ++#define DF_1_TRANS 0x00000200 ++#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. */ ++#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */ ++#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */ ++#define DF_1_CONFALT 0x00002000 /* Configuration alternative created.*/ ++#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */ ++#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */ ++#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */ ++ ++/* Flags for the feature selection in DT_FEATURE_1. 
*/ ++#define DTF_1_PARINIT 0x00000001 ++#define DTF_1_CONFEXP 0x00000002 ++ ++/* Flags in the DT_POSFLAG_1 entry effecting only the next DT_* entry. */ ++#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */ ++#define DF_P1_GROUPPERM 0x00000002 /* Symbols from next object are not ++ generally available. */ ++ ++/* Version definition sections. */ ++ ++typedef struct ++{ ++ Elf32_Half vd_version; /* Version revision */ ++ Elf32_Half vd_flags; /* Version information */ ++ Elf32_Half vd_ndx; /* Version Index */ ++ Elf32_Half vd_cnt; /* Number of associated aux entries */ ++ Elf32_Word vd_hash; /* Version name hash value */ ++ Elf32_Word vd_aux; /* Offset in bytes to verdaux array */ ++ Elf32_Word vd_next; /* Offset in bytes to next verdef ++ entry */ ++} Elf32_Verdef; ++ ++typedef struct ++{ ++ Elf64_Half vd_version; /* Version revision */ ++ Elf64_Half vd_flags; /* Version information */ ++ Elf64_Half vd_ndx; /* Version Index */ ++ Elf64_Half vd_cnt; /* Number of associated aux entries */ ++ Elf64_Word vd_hash; /* Version name hash value */ ++ Elf64_Word vd_aux; /* Offset in bytes to verdaux array */ ++ Elf64_Word vd_next; /* Offset in bytes to next verdef ++ entry */ ++} Elf64_Verdef; ++ ++ ++/* Legal values for vd_version (version revision). */ ++#define VER_DEF_NONE 0 /* No version */ ++#define VER_DEF_CURRENT 1 /* Current version */ ++#define VER_DEF_NUM 2 /* Given version number */ ++ ++/* Legal values for vd_flags (version information flags). */ ++#define VER_FLG_BASE 0x1 /* Version definition of file itself */ ++#define VER_FLG_WEAK 0x2 /* Weak version identifier */ ++ ++/* Versym symbol index values. */ ++#define VER_NDX_LOCAL 0 /* Symbol is local. */ ++#define VER_NDX_GLOBAL 1 /* Symbol is global. */ ++#define VER_NDX_LORESERVE 0xff00 /* Beginning of reserved entries. */ ++#define VER_NDX_ELIMINATE 0xff01 /* Symbol is to be eliminated. */ ++ ++/* Auxialiary version information. */ ++ ++typedef struct ++{ ++ Elf32_Word vda_name; /* Version or dependency names */ ++ Elf32_Word vda_next; /* Offset in bytes to next verdaux ++ entry */ ++} Elf32_Verdaux; ++ ++typedef struct ++{ ++ Elf64_Word vda_name; /* Version or dependency names */ ++ Elf64_Word vda_next; /* Offset in bytes to next verdaux ++ entry */ ++} Elf64_Verdaux; ++ ++ ++/* Version dependency section. */ ++ ++typedef struct ++{ ++ Elf32_Half vn_version; /* Version of structure */ ++ Elf32_Half vn_cnt; /* Number of associated aux entries */ ++ Elf32_Word vn_file; /* Offset of filename for this ++ dependency */ ++ Elf32_Word vn_aux; /* Offset in bytes to vernaux array */ ++ Elf32_Word vn_next; /* Offset in bytes to next verneed ++ entry */ ++} Elf32_Verneed; ++ ++typedef struct ++{ ++ Elf64_Half vn_version; /* Version of structure */ ++ Elf64_Half vn_cnt; /* Number of associated aux entries */ ++ Elf64_Word vn_file; /* Offset of filename for this ++ dependency */ ++ Elf64_Word vn_aux; /* Offset in bytes to vernaux array */ ++ Elf64_Word vn_next; /* Offset in bytes to next verneed ++ entry */ ++} Elf64_Verneed; ++ ++ ++/* Legal values for vn_version (version revision). */ ++#define VER_NEED_NONE 0 /* No version */ ++#define VER_NEED_CURRENT 1 /* Current version */ ++#define VER_NEED_NUM 2 /* Given version number */ ++ ++/* Auxiliary needed version information. 
*/ ++ ++typedef struct ++{ ++ Elf32_Word vna_hash; /* Hash value of dependency name */ ++ Elf32_Half vna_flags; /* Dependency specific information */ ++ Elf32_Half vna_other; /* Unused */ ++ Elf32_Word vna_name; /* Dependency name string offset */ ++ Elf32_Word vna_next; /* Offset in bytes to next vernaux ++ entry */ ++} Elf32_Vernaux; ++ ++typedef struct ++{ ++ Elf64_Word vna_hash; /* Hash value of dependency name */ ++ Elf64_Half vna_flags; /* Dependency specific information */ ++ Elf64_Half vna_other; /* Unused */ ++ Elf64_Word vna_name; /* Dependency name string offset */ ++ Elf64_Word vna_next; /* Offset in bytes to next vernaux ++ entry */ ++} Elf64_Vernaux; ++ ++ ++/* Legal values for vna_flags. */ ++#define VER_FLG_WEAK 0x2 /* Weak version identifier */ ++ ++ ++/* Auxiliary vector. */ ++ ++/* This vector is normally only used by the program interpreter. The ++ usual definition in an ABI supplement uses the name auxv_t. The ++ vector is not usually defined in a standard <elf.h> file, but it ++ can't hurt. We rename it to avoid conflicts. The sizes of these ++ types are an arrangement between the exec server and the program ++ interpreter, so we don't fully specify them here. */ ++ ++typedef struct ++{ ++ uint32_t a_type; /* Entry type */ ++ union ++ { ++ uint32_t a_val; /* Integer value */ ++ /* We use to have pointer elements added here. We cannot do that, ++ though, since it does not work when using 32-bit definitions ++ on 64-bit platforms and vice versa. */ ++ } a_un; ++} Elf32_auxv_t; ++ ++typedef struct ++{ ++ uint64_t a_type; /* Entry type */ ++ union ++ { ++ uint64_t a_val; /* Integer value */ ++ /* We use to have pointer elements added here. We cannot do that, ++ though, since it does not work when using 32-bit definitions ++ on 64-bit platforms and vice versa. */ ++ } a_un; ++} Elf64_auxv_t; ++ ++/* Legal values for a_type (entry type). */ ++ ++#define AT_NULL 0 /* End of vector */ ++#define AT_IGNORE 1 /* Entry should be ignored */ ++#define AT_EXECFD 2 /* File descriptor of program */ ++#define AT_PHDR 3 /* Program headers for program */ ++#define AT_PHENT 4 /* Size of program header entry */ ++#define AT_PHNUM 5 /* Number of program headers */ ++#define AT_PAGESZ 6 /* System page size */ ++#define AT_BASE 7 /* Base address of interpreter */ ++#define AT_FLAGS 8 /* Flags */ ++#define AT_ENTRY 9 /* Entry point of program */ ++#define AT_NOTELF 10 /* Program is not ELF */ ++#define AT_UID 11 /* Real uid */ ++#define AT_EUID 12 /* Effective uid */ ++#define AT_GID 13 /* Real gid */ ++#define AT_EGID 14 /* Effective gid */ ++#define AT_CLKTCK 17 /* Frequency of times() */ ++ ++/* Some more special a_type values describing the hardware. */ ++#define AT_PLATFORM 15 /* String identifying platform. */ ++#define AT_HWCAP 16 /* Machine dependent hints about ++ processor capabilities. */ ++ ++/* This entry gives some information about the FPU initialization ++ performed by the kernel. */ ++#define AT_FPUCW 18 /* Used FPU control word. */ ++ ++/* Cache block sizes. */ ++#define AT_DCACHEBSIZE 19 /* Data cache block size. */ ++#define AT_ICACHEBSIZE 20 /* Instruction cache block size. */ ++#define AT_UCACHEBSIZE 21 /* Unified cache block size. */ ++ ++/* A special ignored value for PPC, used by the kernel to control the ++ interpretation of the AUXV. Must be > 16. */ ++#define AT_IGNOREPPC 22 /* Entry should be ignored. */ ++ ++#define AT_SECURE 23 /* Boolean, was exec setuid-like? 
*/ ++ ++#define AT_BASE_PLATFORM 24 /* String identifying real platforms.*/ ++ ++#define AT_RANDOM 25 /* Address of 16 random bytes. */ ++ ++#define AT_EXECFN 31 /* Filename of executable. */ ++ ++/* Pointer to the global system page used for system calls and other ++ nice things. */ ++#define AT_SYSINFO 32 ++#define AT_SYSINFO_EHDR 33 ++ ++/* Shapes of the caches. Bits 0-3 contains associativity; bits 4-7 contains ++ log2 of line size; mask those to get cache size. */ ++#define AT_L1I_CACHESHAPE 34 ++#define AT_L1D_CACHESHAPE 35 ++#define AT_L2_CACHESHAPE 36 ++#define AT_L3_CACHESHAPE 37 ++ ++/* Note section contents. Each entry in the note section begins with ++ a header of a fixed form. */ ++ ++typedef struct ++{ ++ Elf32_Word n_namesz; /* Length of the note's name. */ ++ Elf32_Word n_descsz; /* Length of the note's descriptor. */ ++ Elf32_Word n_type; /* Type of the note. */ ++} Elf32_Nhdr; ++ ++typedef struct ++{ ++ Elf64_Word n_namesz; /* Length of the note's name. */ ++ Elf64_Word n_descsz; /* Length of the note's descriptor. */ ++ Elf64_Word n_type; /* Type of the note. */ ++} Elf64_Nhdr; ++ ++/* Known names of notes. */ ++ ++/* Solaris entries in the note section have this name. */ ++#define ELF_NOTE_SOLARIS "SUNW Solaris" ++ ++/* Note entries for GNU systems have this name. */ ++#define ELF_NOTE_GNU "GNU" ++ ++ ++/* Defined types of notes for Solaris. */ ++ ++/* Value of descriptor (one word) is desired pagesize for the binary. */ ++#define ELF_NOTE_PAGESIZE_HINT 1 ++ ++ ++/* Defined note types for GNU systems. */ ++ ++/* ABI information. The descriptor consists of words: ++ word 0: OS descriptor ++ word 1: major version of the ABI ++ word 2: minor version of the ABI ++ word 3: subminor version of the ABI ++*/ ++#define NT_GNU_ABI_TAG 1 ++#define ELF_NOTE_ABI NT_GNU_ABI_TAG /* Old name. */ ++ ++/* Known OSes. These values can appear in word 0 of an ++ NT_GNU_ABI_TAG note section entry. */ ++#define ELF_NOTE_OS_LINUX 0 ++#define ELF_NOTE_OS_GNU 1 ++#define ELF_NOTE_OS_SOLARIS2 2 ++#define ELF_NOTE_OS_FREEBSD 3 ++ ++/* Synthetic hwcap information. The descriptor begins with two words: ++ word 0: number of entries ++ word 1: bitmask of enabled entries ++ Then follow variable-length entries, one byte followed by a ++ '\0'-terminated hwcap name string. The byte gives the bit ++ number to test if enabled, (1U << bit) & bitmask. */ ++#define NT_GNU_HWCAP 2 ++ ++/* Build ID bits as generated by ld --build-id. ++ The descriptor consists of any nonzero number of bytes. */ ++#define NT_GNU_BUILD_ID 3 ++ ++/* Version note generated by GNU gold containing a version string. */ ++#define NT_GNU_GOLD_VERSION 4 ++ ++ ++/* Move records. */ ++typedef struct ++{ ++ Elf32_Xword m_value; /* Symbol value. */ ++ Elf32_Word m_info; /* Size and index. */ ++ Elf32_Word m_poffset; /* Symbol offset. */ ++ Elf32_Half m_repeat; /* Repeat count. */ ++ Elf32_Half m_stride; /* Stride info. */ ++} Elf32_Move; ++ ++typedef struct ++{ ++ Elf64_Xword m_value; /* Symbol value. */ ++ Elf64_Xword m_info; /* Size and index. */ ++ Elf64_Xword m_poffset; /* Symbol offset. */ ++ Elf64_Half m_repeat; /* Repeat count. */ ++ Elf64_Half m_stride; /* Stride info. */ ++} Elf64_Move; ++ ++/* Macro to construct move records. 
*/ ++#define ELF32_M_SYM(info) ((info) >> 8) ++#define ELF32_M_SIZE(info) ((unsigned char) (info)) ++#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size)) ++ ++#define ELF64_M_SYM(info) ELF32_M_SYM (info) ++#define ELF64_M_SIZE(info) ELF32_M_SIZE (info) ++#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size) ++ ++ ++/* Motorola 68k specific definitions. */ ++ ++/* Values for Elf32_Ehdr.e_flags. */ ++#define EF_CPU32 0x00810000 ++ ++/* m68k relocs. */ ++ ++#define R_68K_NONE 0 /* No reloc */ ++#define R_68K_32 1 /* Direct 32 bit */ ++#define R_68K_16 2 /* Direct 16 bit */ ++#define R_68K_8 3 /* Direct 8 bit */ ++#define R_68K_PC32 4 /* PC relative 32 bit */ ++#define R_68K_PC16 5 /* PC relative 16 bit */ ++#define R_68K_PC8 6 /* PC relative 8 bit */ ++#define R_68K_GOT32 7 /* 32 bit PC relative GOT entry */ ++#define R_68K_GOT16 8 /* 16 bit PC relative GOT entry */ ++#define R_68K_GOT8 9 /* 8 bit PC relative GOT entry */ ++#define R_68K_GOT32O 10 /* 32 bit GOT offset */ ++#define R_68K_GOT16O 11 /* 16 bit GOT offset */ ++#define R_68K_GOT8O 12 /* 8 bit GOT offset */ ++#define R_68K_PLT32 13 /* 32 bit PC relative PLT address */ ++#define R_68K_PLT16 14 /* 16 bit PC relative PLT address */ ++#define R_68K_PLT8 15 /* 8 bit PC relative PLT address */ ++#define R_68K_PLT32O 16 /* 32 bit PLT offset */ ++#define R_68K_PLT16O 17 /* 16 bit PLT offset */ ++#define R_68K_PLT8O 18 /* 8 bit PLT offset */ ++#define R_68K_COPY 19 /* Copy symbol at runtime */ ++#define R_68K_GLOB_DAT 20 /* Create GOT entry */ ++#define R_68K_JMP_SLOT 21 /* Create PLT entry */ ++#define R_68K_RELATIVE 22 /* Adjust by program base */ ++#define R_68K_TLS_GD32 25 /* 32 bit GOT offset for GD */ ++#define R_68K_TLS_GD16 26 /* 16 bit GOT offset for GD */ ++#define R_68K_TLS_GD8 27 /* 8 bit GOT offset for GD */ ++#define R_68K_TLS_LDM32 28 /* 32 bit GOT offset for LDM */ ++#define R_68K_TLS_LDM16 29 /* 16 bit GOT offset for LDM */ ++#define R_68K_TLS_LDM8 30 /* 8 bit GOT offset for LDM */ ++#define R_68K_TLS_LDO32 31 /* 32 bit module-relative offset */ ++#define R_68K_TLS_LDO16 32 /* 16 bit module-relative offset */ ++#define R_68K_TLS_LDO8 33 /* 8 bit module-relative offset */ ++#define R_68K_TLS_IE32 34 /* 32 bit GOT offset for IE */ ++#define R_68K_TLS_IE16 35 /* 16 bit GOT offset for IE */ ++#define R_68K_TLS_IE8 36 /* 8 bit GOT offset for IE */ ++#define R_68K_TLS_LE32 37 /* 32 bit offset relative to ++ static TLS block */ ++#define R_68K_TLS_LE16 38 /* 16 bit offset relative to ++ static TLS block */ ++#define R_68K_TLS_LE8 39 /* 8 bit offset relative to ++ static TLS block */ ++#define R_68K_TLS_DTPMOD32 40 /* 32 bit module number */ ++#define R_68K_TLS_DTPREL32 41 /* 32 bit module-relative offset */ ++#define R_68K_TLS_TPREL32 42 /* 32 bit TP-relative offset */ ++/* Keep this the last entry. */ ++#define R_68K_NUM 43 ++ ++/* Intel 80386 specific definitions. */ ++ ++/* i386 relocs. 
*/ ++ ++#define R_386_NONE 0 /* No reloc */ ++#define R_386_32 1 /* Direct 32 bit */ ++#define R_386_PC32 2 /* PC relative 32 bit */ ++#define R_386_GOT32 3 /* 32 bit GOT entry */ ++#define R_386_PLT32 4 /* 32 bit PLT address */ ++#define R_386_COPY 5 /* Copy symbol at runtime */ ++#define R_386_GLOB_DAT 6 /* Create GOT entry */ ++#define R_386_JMP_SLOT 7 /* Create PLT entry */ ++#define R_386_RELATIVE 8 /* Adjust by program base */ ++#define R_386_GOTOFF 9 /* 32 bit offset to GOT */ ++#define R_386_GOTPC 10 /* 32 bit PC relative offset to GOT */ ++#define R_386_32PLT 11 ++#define R_386_TLS_TPOFF 14 /* Offset in static TLS block */ ++#define R_386_TLS_IE 15 /* Address of GOT entry for static TLS ++ block offset */ ++#define R_386_TLS_GOTIE 16 /* GOT entry for static TLS block ++ offset */ ++#define R_386_TLS_LE 17 /* Offset relative to static TLS ++ block */ ++#define R_386_TLS_GD 18 /* Direct 32 bit for GNU version of ++ general dynamic thread local data */ ++#define R_386_TLS_LDM 19 /* Direct 32 bit for GNU version of ++ local dynamic thread local data ++ in LE code */ ++#define R_386_16 20 ++#define R_386_PC16 21 ++#define R_386_8 22 ++#define R_386_PC8 23 ++#define R_386_TLS_GD_32 24 /* Direct 32 bit for general dynamic ++ thread local data */ ++#define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */ ++#define R_386_TLS_GD_CALL 26 /* Relocation for call to ++ __tls_get_addr() */ ++#define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */ ++#define R_386_TLS_LDM_32 28 /* Direct 32 bit for local dynamic ++ thread local data in LE code */ ++#define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */ ++#define R_386_TLS_LDM_CALL 30 /* Relocation for call to ++ __tls_get_addr() in LDM code */ ++#define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */ ++#define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */ ++#define R_386_TLS_IE_32 33 /* GOT entry for negated static TLS ++ block offset */ ++#define R_386_TLS_LE_32 34 /* Negated offset relative to static ++ TLS block */ ++#define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */ ++#define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */ ++#define R_386_TLS_TPOFF32 37 /* Negated offset in static TLS block */ ++/* 38? */ ++#define R_386_TLS_GOTDESC 39 /* GOT offset for TLS descriptor. */ ++#define R_386_TLS_DESC_CALL 40 /* Marker of call through TLS ++ descriptor for ++ relaxation. */ ++#define R_386_TLS_DESC 41 /* TLS descriptor containing ++ pointer to code and to ++ argument, returning the TLS ++ offset for the symbol. */ ++#define R_386_IRELATIVE 42 /* Adjust indirectly by program base */ ++/* Keep this the last entry. */ ++#define R_386_NUM 43 ++ ++/* SUN SPARC specific definitions. */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_SPARC_REGISTER 13 /* Global register reserved to app. */ ++ ++/* Values for Elf64_Ehdr.e_flags. */ ++ ++#define EF_SPARCV9_MM 3 ++#define EF_SPARCV9_TSO 0 ++#define EF_SPARCV9_PSO 1 ++#define EF_SPARCV9_RMO 2 ++#define EF_SPARC_LEDATA 0x800000 /* little endian data */ ++#define EF_SPARC_EXT_MASK 0xFFFF00 ++#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */ ++#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */ ++#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */ ++#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */ ++ ++/* SPARC relocs. 
*/ ++ ++#define R_SPARC_NONE 0 /* No reloc */ ++#define R_SPARC_8 1 /* Direct 8 bit */ ++#define R_SPARC_16 2 /* Direct 16 bit */ ++#define R_SPARC_32 3 /* Direct 32 bit */ ++#define R_SPARC_DISP8 4 /* PC relative 8 bit */ ++#define R_SPARC_DISP16 5 /* PC relative 16 bit */ ++#define R_SPARC_DISP32 6 /* PC relative 32 bit */ ++#define R_SPARC_WDISP30 7 /* PC relative 30 bit shifted */ ++#define R_SPARC_WDISP22 8 /* PC relative 22 bit shifted */ ++#define R_SPARC_HI22 9 /* High 22 bit */ ++#define R_SPARC_22 10 /* Direct 22 bit */ ++#define R_SPARC_13 11 /* Direct 13 bit */ ++#define R_SPARC_LO10 12 /* Truncated 10 bit */ ++#define R_SPARC_GOT10 13 /* Truncated 10 bit GOT entry */ ++#define R_SPARC_GOT13 14 /* 13 bit GOT entry */ ++#define R_SPARC_GOT22 15 /* 22 bit GOT entry shifted */ ++#define R_SPARC_PC10 16 /* PC relative 10 bit truncated */ ++#define R_SPARC_PC22 17 /* PC relative 22 bit shifted */ ++#define R_SPARC_WPLT30 18 /* 30 bit PC relative PLT address */ ++#define R_SPARC_COPY 19 /* Copy symbol at runtime */ ++#define R_SPARC_GLOB_DAT 20 /* Create GOT entry */ ++#define R_SPARC_JMP_SLOT 21 /* Create PLT entry */ ++#define R_SPARC_RELATIVE 22 /* Adjust by program base */ ++#define R_SPARC_UA32 23 /* Direct 32 bit unaligned */ ++ ++/* Additional Sparc64 relocs. */ ++ ++#define R_SPARC_PLT32 24 /* Direct 32 bit ref to PLT entry */ ++#define R_SPARC_HIPLT22 25 /* High 22 bit PLT entry */ ++#define R_SPARC_LOPLT10 26 /* Truncated 10 bit PLT entry */ ++#define R_SPARC_PCPLT32 27 /* PC rel 32 bit ref to PLT entry */ ++#define R_SPARC_PCPLT22 28 /* PC rel high 22 bit PLT entry */ ++#define R_SPARC_PCPLT10 29 /* PC rel trunc 10 bit PLT entry */ ++#define R_SPARC_10 30 /* Direct 10 bit */ ++#define R_SPARC_11 31 /* Direct 11 bit */ ++#define R_SPARC_64 32 /* Direct 64 bit */ ++#define R_SPARC_OLO10 33 /* 10bit with secondary 13bit addend */ ++#define R_SPARC_HH22 34 /* Top 22 bits of direct 64 bit */ ++#define R_SPARC_HM10 35 /* High middle 10 bits of ... */ ++#define R_SPARC_LM22 36 /* Low middle 22 bits of ... */ ++#define R_SPARC_PC_HH22 37 /* Top 22 bits of pc rel 64 bit */ ++#define R_SPARC_PC_HM10 38 /* High middle 10 bit of ... */ ++#define R_SPARC_PC_LM22 39 /* Low miggle 22 bits of ... 
*/ ++#define R_SPARC_WDISP16 40 /* PC relative 16 bit shifted */ ++#define R_SPARC_WDISP19 41 /* PC relative 19 bit shifted */ ++#define R_SPARC_GLOB_JMP 42 /* was part of v9 ABI but was removed */ ++#define R_SPARC_7 43 /* Direct 7 bit */ ++#define R_SPARC_5 44 /* Direct 5 bit */ ++#define R_SPARC_6 45 /* Direct 6 bit */ ++#define R_SPARC_DISP64 46 /* PC relative 64 bit */ ++#define R_SPARC_PLT64 47 /* Direct 64 bit ref to PLT entry */ ++#define R_SPARC_HIX22 48 /* High 22 bit complemented */ ++#define R_SPARC_LOX10 49 /* Truncated 11 bit complemented */ ++#define R_SPARC_H44 50 /* Direct high 12 of 44 bit */ ++#define R_SPARC_M44 51 /* Direct mid 22 of 44 bit */ ++#define R_SPARC_L44 52 /* Direct low 10 of 44 bit */ ++#define R_SPARC_REGISTER 53 /* Global register usage */ ++#define R_SPARC_UA64 54 /* Direct 64 bit unaligned */ ++#define R_SPARC_UA16 55 /* Direct 16 bit unaligned */ ++#define R_SPARC_TLS_GD_HI22 56 ++#define R_SPARC_TLS_GD_LO10 57 ++#define R_SPARC_TLS_GD_ADD 58 ++#define R_SPARC_TLS_GD_CALL 59 ++#define R_SPARC_TLS_LDM_HI22 60 ++#define R_SPARC_TLS_LDM_LO10 61 ++#define R_SPARC_TLS_LDM_ADD 62 ++#define R_SPARC_TLS_LDM_CALL 63 ++#define R_SPARC_TLS_LDO_HIX22 64 ++#define R_SPARC_TLS_LDO_LOX10 65 ++#define R_SPARC_TLS_LDO_ADD 66 ++#define R_SPARC_TLS_IE_HI22 67 ++#define R_SPARC_TLS_IE_LO10 68 ++#define R_SPARC_TLS_IE_LD 69 ++#define R_SPARC_TLS_IE_LDX 70 ++#define R_SPARC_TLS_IE_ADD 71 ++#define R_SPARC_TLS_LE_HIX22 72 ++#define R_SPARC_TLS_LE_LOX10 73 ++#define R_SPARC_TLS_DTPMOD32 74 ++#define R_SPARC_TLS_DTPMOD64 75 ++#define R_SPARC_TLS_DTPOFF32 76 ++#define R_SPARC_TLS_DTPOFF64 77 ++#define R_SPARC_TLS_TPOFF32 78 ++#define R_SPARC_TLS_TPOFF64 79 ++#define R_SPARC_GOTDATA_HIX22 80 ++#define R_SPARC_GOTDATA_LOX10 81 ++#define R_SPARC_GOTDATA_OP_HIX22 82 ++#define R_SPARC_GOTDATA_OP_LOX10 83 ++#define R_SPARC_GOTDATA_OP 84 ++#define R_SPARC_H34 85 ++#define R_SPARC_SIZE32 86 ++#define R_SPARC_SIZE64 87 ++#define R_SPARC_WDISP10 88 ++#define R_SPARC_JMP_IREL 248 ++#define R_SPARC_IRELATIVE 249 ++#define R_SPARC_GNU_VTINHERIT 250 ++#define R_SPARC_GNU_VTENTRY 251 ++#define R_SPARC_REV32 252 ++/* Keep this the last entry. */ ++#define R_SPARC_NUM 253 ++ ++/* For Sparc64, legal values for d_tag of Elf64_Dyn. */ ++ ++#define DT_SPARC_REGISTER 0x70000001 ++#define DT_SPARC_NUM 2 ++ ++/* MIPS R3000 specific definitions. */ ++ ++/* Legal values for e_flags field of Elf32_Ehdr. */ ++ ++#define EF_MIPS_NOREORDER 1 /* A .noreorder directive was used */ ++#define EF_MIPS_PIC 2 /* Contains PIC code */ ++#define EF_MIPS_CPIC 4 /* Uses PIC calling sequence */ ++#define EF_MIPS_XGOT 8 ++#define EF_MIPS_64BIT_WHIRL 16 ++#define EF_MIPS_ABI2 32 ++#define EF_MIPS_ABI_ON32 64 ++#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level */ ++ ++/* Legal values for MIPS architecture level. */ ++ ++#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ ++#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ ++#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ ++#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ ++#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ ++#define EF_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */ ++#define EF_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */ ++ ++/* The following are non-official names and should not be used. */ ++ ++#define E_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ ++#define E_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ ++#define E_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ ++#define E_MIPS_ARCH_4 0x30000000 /* -mips4 code. 
*/ ++#define E_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ ++#define E_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */ ++#define E_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */ ++ ++/* Special section indices. */ ++ ++#define SHN_MIPS_ACOMMON 0xff00 /* Allocated common symbols */ ++#define SHN_MIPS_TEXT 0xff01 /* Allocated test symbols. */ ++#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */ ++#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */ ++#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */ ++ ++/* Legal values for sh_type field of Elf32_Shdr. */ ++ ++#define SHT_MIPS_LIBLIST 0x70000000 /* Shared objects used in link */ ++#define SHT_MIPS_MSYM 0x70000001 ++#define SHT_MIPS_CONFLICT 0x70000002 /* Conflicting symbols */ ++#define SHT_MIPS_GPTAB 0x70000003 /* Global data area sizes */ ++#define SHT_MIPS_UCODE 0x70000004 /* Reserved for SGI/MIPS compilers */ ++#define SHT_MIPS_DEBUG 0x70000005 /* MIPS ECOFF debugging information*/ ++#define SHT_MIPS_REGINFO 0x70000006 /* Register usage information */ ++#define SHT_MIPS_PACKAGE 0x70000007 ++#define SHT_MIPS_PACKSYM 0x70000008 ++#define SHT_MIPS_RELD 0x70000009 ++#define SHT_MIPS_IFACE 0x7000000b ++#define SHT_MIPS_CONTENT 0x7000000c ++#define SHT_MIPS_OPTIONS 0x7000000d /* Miscellaneous options. */ ++#define SHT_MIPS_SHDR 0x70000010 ++#define SHT_MIPS_FDESC 0x70000011 ++#define SHT_MIPS_EXTSYM 0x70000012 ++#define SHT_MIPS_DENSE 0x70000013 ++#define SHT_MIPS_PDESC 0x70000014 ++#define SHT_MIPS_LOCSYM 0x70000015 ++#define SHT_MIPS_AUXSYM 0x70000016 ++#define SHT_MIPS_OPTSYM 0x70000017 ++#define SHT_MIPS_LOCSTR 0x70000018 ++#define SHT_MIPS_LINE 0x70000019 ++#define SHT_MIPS_RFDESC 0x7000001a ++#define SHT_MIPS_DELTASYM 0x7000001b ++#define SHT_MIPS_DELTAINST 0x7000001c ++#define SHT_MIPS_DELTACLASS 0x7000001d ++#define SHT_MIPS_DWARF 0x7000001e /* DWARF debugging information. */ ++#define SHT_MIPS_DELTADECL 0x7000001f ++#define SHT_MIPS_SYMBOL_LIB 0x70000020 ++#define SHT_MIPS_EVENTS 0x70000021 /* Event section. */ ++#define SHT_MIPS_TRANSLATE 0x70000022 ++#define SHT_MIPS_PIXIE 0x70000023 ++#define SHT_MIPS_XLATE 0x70000024 ++#define SHT_MIPS_XLATE_DEBUG 0x70000025 ++#define SHT_MIPS_WHIRL 0x70000026 ++#define SHT_MIPS_EH_REGION 0x70000027 ++#define SHT_MIPS_XLATE_OLD 0x70000028 ++#define SHT_MIPS_PDR_EXCEPTION 0x70000029 ++ ++/* Legal values for sh_flags field of Elf32_Shdr. */ ++ ++#define SHF_MIPS_GPREL 0x10000000 /* Must be part of global data area */ ++#define SHF_MIPS_MERGE 0x20000000 ++#define SHF_MIPS_ADDR 0x40000000 ++#define SHF_MIPS_STRINGS 0x80000000 ++#define SHF_MIPS_NOSTRIP 0x08000000 ++#define SHF_MIPS_LOCAL 0x04000000 ++#define SHF_MIPS_NAMES 0x02000000 ++#define SHF_MIPS_NODUPE 0x01000000 ++ ++ ++/* Symbol tables. */ ++ ++/* MIPS specific values for `st_other'. */ ++#define STO_MIPS_DEFAULT 0x0 ++#define STO_MIPS_INTERNAL 0x1 ++#define STO_MIPS_HIDDEN 0x2 ++#define STO_MIPS_PROTECTED 0x3 ++#define STO_MIPS_PLT 0x8 ++#define STO_MIPS_SC_ALIGN_UNUSED 0xff ++ ++/* MIPS specific values for `st_info'. */ ++#define STB_MIPS_SPLIT_COMMON 13 ++ ++/* Entries found in sections of type SHT_MIPS_GPTAB. 
*/ ++ ++typedef union ++{ ++ struct ++ { ++ Elf32_Word gt_current_g_value; /* -G value used for compilation */ ++ Elf32_Word gt_unused; /* Not used */ ++ } gt_header; /* First entry in section */ ++ struct ++ { ++ Elf32_Word gt_g_value; /* If this value were used for -G */ ++ Elf32_Word gt_bytes; /* This many bytes would be used */ ++ } gt_entry; /* Subsequent entries in section */ ++} Elf32_gptab; ++ ++/* Entry found in sections of type SHT_MIPS_REGINFO. */ ++ ++typedef struct ++{ ++ Elf32_Word ri_gprmask; /* General registers used */ ++ Elf32_Word ri_cprmask[4]; /* Coprocessor registers used */ ++ Elf32_Sword ri_gp_value; /* $gp register value */ ++} Elf32_RegInfo; ++ ++/* Entries found in sections of type SHT_MIPS_OPTIONS. */ ++ ++typedef struct ++{ ++ unsigned char kind; /* Determines interpretation of the ++ variable part of descriptor. */ ++ unsigned char size; /* Size of descriptor, including header. */ ++ Elf32_Section section; /* Section header index of section affected, ++ 0 for global options. */ ++ Elf32_Word info; /* Kind-specific information. */ ++} Elf_Options; ++ ++/* Values for `kind' field in Elf_Options. */ ++ ++#define ODK_NULL 0 /* Undefined. */ ++#define ODK_REGINFO 1 /* Register usage information. */ ++#define ODK_EXCEPTIONS 2 /* Exception processing options. */ ++#define ODK_PAD 3 /* Section padding options. */ ++#define ODK_HWPATCH 4 /* Hardware workarounds performed */ ++#define ODK_FILL 5 /* record the fill value used by the linker. */ ++#define ODK_TAGS 6 /* reserve space for desktop tools to write. */ ++#define ODK_HWAND 7 /* HW workarounds. 'AND' bits when merging. */ ++#define ODK_HWOR 8 /* HW workarounds. 'OR' bits when merging. */ ++ ++/* Values for `info' in Elf_Options for ODK_EXCEPTIONS entries. */ ++ ++#define OEX_FPU_MIN 0x1f /* FPE's which MUST be enabled. */ ++#define OEX_FPU_MAX 0x1f00 /* FPE's which MAY be enabled. */ ++#define OEX_PAGE0 0x10000 /* page zero must be mapped. */ ++#define OEX_SMM 0x20000 /* Force sequential memory mode? */ ++#define OEX_FPDBUG 0x40000 /* Force floating point debug mode? */ ++#define OEX_PRECISEFP OEX_FPDBUG ++#define OEX_DISMISS 0x80000 /* Dismiss invalid address faults? */ ++ ++#define OEX_FPU_INVAL 0x10 ++#define OEX_FPU_DIV0 0x08 ++#define OEX_FPU_OFLO 0x04 ++#define OEX_FPU_UFLO 0x02 ++#define OEX_FPU_INEX 0x01 ++ ++/* Masks for `info' in Elf_Options for an ODK_HWPATCH entry. */ ++ ++#define OHW_R4KEOP 0x1 /* R4000 end-of-page patch. */ ++#define OHW_R8KPFETCH 0x2 /* may need R8000 prefetch patch. */ ++#define OHW_R5KEOP 0x4 /* R5000 end-of-page patch. */ ++#define OHW_R5KCVTL 0x8 /* R5000 cvt.[ds].l bug. clean=1. */ ++ ++#define OPAD_PREFIX 0x1 ++#define OPAD_POSTFIX 0x2 ++#define OPAD_SYMBOL 0x4 ++ ++/* Entry found in `.options' section. */ ++ ++typedef struct ++{ ++ Elf32_Word hwp_flags1; /* Extra flags. */ ++ Elf32_Word hwp_flags2; /* Extra flags. */ ++} Elf_Options_Hw; ++ ++/* Masks for `info' in ElfOptions for ODK_HWAND and ODK_HWOR entries. */ ++ ++#define OHWA0_R4KEOP_CHECKED 0x00000001 ++#define OHWA1_R4KEOP_CLEAN 0x00000002 ++ ++/* MIPS relocs. 
*/ ++ ++#define R_MIPS_NONE 0 /* No reloc */ ++#define R_MIPS_16 1 /* Direct 16 bit */ ++#define R_MIPS_32 2 /* Direct 32 bit */ ++#define R_MIPS_REL32 3 /* PC relative 32 bit */ ++#define R_MIPS_26 4 /* Direct 26 bit shifted */ ++#define R_MIPS_HI16 5 /* High 16 bit */ ++#define R_MIPS_LO16 6 /* Low 16 bit */ ++#define R_MIPS_GPREL16 7 /* GP relative 16 bit */ ++#define R_MIPS_LITERAL 8 /* 16 bit literal entry */ ++#define R_MIPS_GOT16 9 /* 16 bit GOT entry */ ++#define R_MIPS_PC16 10 /* PC relative 16 bit */ ++#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */ ++#define R_MIPS_GPREL32 12 /* GP relative 32 bit */ ++ ++#define R_MIPS_SHIFT5 16 ++#define R_MIPS_SHIFT6 17 ++#define R_MIPS_64 18 ++#define R_MIPS_GOT_DISP 19 ++#define R_MIPS_GOT_PAGE 20 ++#define R_MIPS_GOT_OFST 21 ++#define R_MIPS_GOT_HI16 22 ++#define R_MIPS_GOT_LO16 23 ++#define R_MIPS_SUB 24 ++#define R_MIPS_INSERT_A 25 ++#define R_MIPS_INSERT_B 26 ++#define R_MIPS_DELETE 27 ++#define R_MIPS_HIGHER 28 ++#define R_MIPS_HIGHEST 29 ++#define R_MIPS_CALL_HI16 30 ++#define R_MIPS_CALL_LO16 31 ++#define R_MIPS_SCN_DISP 32 ++#define R_MIPS_REL16 33 ++#define R_MIPS_ADD_IMMEDIATE 34 ++#define R_MIPS_PJUMP 35 ++#define R_MIPS_RELGOT 36 ++#define R_MIPS_JALR 37 ++#define R_MIPS_TLS_DTPMOD32 38 /* Module number 32 bit */ ++#define R_MIPS_TLS_DTPREL32 39 /* Module-relative offset 32 bit */ ++#define R_MIPS_TLS_DTPMOD64 40 /* Module number 64 bit */ ++#define R_MIPS_TLS_DTPREL64 41 /* Module-relative offset 64 bit */ ++#define R_MIPS_TLS_GD 42 /* 16 bit GOT offset for GD */ ++#define R_MIPS_TLS_LDM 43 /* 16 bit GOT offset for LDM */ ++#define R_MIPS_TLS_DTPREL_HI16 44 /* Module-relative offset, high 16 bits */ ++#define R_MIPS_TLS_DTPREL_LO16 45 /* Module-relative offset, low 16 bits */ ++#define R_MIPS_TLS_GOTTPREL 46 /* 16 bit GOT offset for IE */ ++#define R_MIPS_TLS_TPREL32 47 /* TP-relative offset, 32 bit */ ++#define R_MIPS_TLS_TPREL64 48 /* TP-relative offset, 64 bit */ ++#define R_MIPS_TLS_TPREL_HI16 49 /* TP-relative offset, high 16 bits */ ++#define R_MIPS_TLS_TPREL_LO16 50 /* TP-relative offset, low 16 bits */ ++#define R_MIPS_GLOB_DAT 51 ++#define R_MIPS_COPY 126 ++#define R_MIPS_JUMP_SLOT 127 ++/* Keep this the last entry. */ ++#define R_MIPS_NUM 128 ++ ++/* Legal values for p_type field of Elf32_Phdr. */ ++ ++#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ ++#define PT_MIPS_RTPROC 0x70000001 /* Runtime procedure table. */ ++#define PT_MIPS_OPTIONS 0x70000002 ++ ++/* Special program header types. */ ++ ++#define PF_MIPS_LOCAL 0x10000000 ++ ++/* Legal values for d_tag field of Elf32_Dyn. 
*/ ++ ++#define DT_MIPS_RLD_VERSION 0x70000001 /* Runtime linker interface version */ ++#define DT_MIPS_TIME_STAMP 0x70000002 /* Timestamp */ ++#define DT_MIPS_ICHECKSUM 0x70000003 /* Checksum */ ++#define DT_MIPS_IVERSION 0x70000004 /* Version string (string tbl index) */ ++#define DT_MIPS_FLAGS 0x70000005 /* Flags */ ++#define DT_MIPS_BASE_ADDRESS 0x70000006 /* Base address */ ++#define DT_MIPS_MSYM 0x70000007 ++#define DT_MIPS_CONFLICT 0x70000008 /* Address of CONFLICT section */ ++#define DT_MIPS_LIBLIST 0x70000009 /* Address of LIBLIST section */ ++#define DT_MIPS_LOCAL_GOTNO 0x7000000a /* Number of local GOT entries */ ++#define DT_MIPS_CONFLICTNO 0x7000000b /* Number of CONFLICT entries */ ++#define DT_MIPS_LIBLISTNO 0x70000010 /* Number of LIBLIST entries */ ++#define DT_MIPS_SYMTABNO 0x70000011 /* Number of DYNSYM entries */ ++#define DT_MIPS_UNREFEXTNO 0x70000012 /* First external DYNSYM */ ++#define DT_MIPS_GOTSYM 0x70000013 /* First GOT entry in DYNSYM */ ++#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */ ++#define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */ ++#define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */ ++#define DT_MIPS_DELTA_CLASS_NO 0x70000018 /* Number of entries in ++ DT_MIPS_DELTA_CLASS. */ ++#define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */ ++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a /* Number of entries in ++ DT_MIPS_DELTA_INSTANCE. */ ++#define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */ ++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c /* Number of entries in ++ DT_MIPS_DELTA_RELOC. */ ++#define DT_MIPS_DELTA_SYM 0x7000001d /* Delta symbols that Delta ++ relocations refer to. */ ++#define DT_MIPS_DELTA_SYM_NO 0x7000001e /* Number of entries in ++ DT_MIPS_DELTA_SYM. */ ++#define DT_MIPS_DELTA_CLASSSYM 0x70000020 /* Delta symbols that hold the ++ class declaration. */ ++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021 /* Number of entries in ++ DT_MIPS_DELTA_CLASSSYM. */ ++#define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */ ++#define DT_MIPS_PIXIE_INIT 0x70000023 ++#define DT_MIPS_SYMBOL_LIB 0x70000024 ++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025 ++#define DT_MIPS_LOCAL_GOTIDX 0x70000026 ++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027 ++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028 ++#define DT_MIPS_OPTIONS 0x70000029 /* Address of .options. */ ++#define DT_MIPS_INTERFACE 0x7000002a /* Address of .interface. */ ++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b ++#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */ ++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d /* Address of rld_text_rsolve ++ function stored in GOT. */ ++#define DT_MIPS_PERF_SUFFIX 0x7000002e /* Default suffix of dso to be added ++ by rld on dlopen() calls. */ ++#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */ ++#define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */ ++#define DT_MIPS_AUX_DYNAMIC 0x70000031 /* Address of aux .dynamic. */ ++/* The address of .got.plt in an executable using the new non-PIC ABI. */ ++#define DT_MIPS_PLTGOT 0x70000032 ++/* The base of the PLT in an executable using the new non-PIC ABI if that ++ PLT is writable. For a non-writable PLT, this is omitted or has a zero ++ value. */ ++#define DT_MIPS_RWPLT 0x70000034 ++#define DT_MIPS_NUM 0x35 ++ ++/* Legal values for DT_MIPS_FLAGS Elf32_Dyn entry. 
*/ ++ ++#define RHF_NONE 0 /* No flags */ ++#define RHF_QUICKSTART (1 << 0) /* Use quickstart */ ++#define RHF_NOTPOT (1 << 1) /* Hash size not power of 2 */ ++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2) /* Ignore LD_LIBRARY_PATH */ ++#define RHF_NO_MOVE (1 << 3) ++#define RHF_SGI_ONLY (1 << 4) ++#define RHF_GUARANTEE_INIT (1 << 5) ++#define RHF_DELTA_C_PLUS_PLUS (1 << 6) ++#define RHF_GUARANTEE_START_INIT (1 << 7) ++#define RHF_PIXIE (1 << 8) ++#define RHF_DEFAULT_DELAY_LOAD (1 << 9) ++#define RHF_REQUICKSTART (1 << 10) ++#define RHF_REQUICKSTARTED (1 << 11) ++#define RHF_CORD (1 << 12) ++#define RHF_NO_UNRES_UNDEF (1 << 13) ++#define RHF_RLD_ORDER_SAFE (1 << 14) ++ ++/* Entries found in sections of type SHT_MIPS_LIBLIST. */ ++ ++typedef struct ++{ ++ Elf32_Word l_name; /* Name (string table index) */ ++ Elf32_Word l_time_stamp; /* Timestamp */ ++ Elf32_Word l_checksum; /* Checksum */ ++ Elf32_Word l_version; /* Interface version */ ++ Elf32_Word l_flags; /* Flags */ ++} Elf32_Lib; ++ ++typedef struct ++{ ++ Elf64_Word l_name; /* Name (string table index) */ ++ Elf64_Word l_time_stamp; /* Timestamp */ ++ Elf64_Word l_checksum; /* Checksum */ ++ Elf64_Word l_version; /* Interface version */ ++ Elf64_Word l_flags; /* Flags */ ++} Elf64_Lib; ++ ++ ++/* Legal values for l_flags. */ ++ ++#define LL_NONE 0 ++#define LL_EXACT_MATCH (1 << 0) /* Require exact match */ ++#define LL_IGNORE_INT_VER (1 << 1) /* Ignore interface version */ ++#define LL_REQUIRE_MINOR (1 << 2) ++#define LL_EXPORTS (1 << 3) ++#define LL_DELAY_LOAD (1 << 4) ++#define LL_DELTA (1 << 5) ++ ++/* Entries found in sections of type SHT_MIPS_CONFLICT. */ ++ ++typedef Elf32_Addr Elf32_Conflict; ++ ++ ++/* HPPA specific definitions. */ ++ ++/* Legal values for e_flags field of Elf32_Ehdr. */ ++ ++#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */ ++#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */ ++#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ ++#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ ++#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch ++ prediction. */ ++#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */ ++#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ ++ ++/* Defined values for `e_flags & EF_PARISC_ARCH' are: */ ++ ++#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */ ++#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */ ++#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */ ++ ++/* Additional section indeces. */ ++ ++#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared ++ symbols in ANSI C. */ ++#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ ++ ++/* Legal values for sh_type field of Elf32_Shdr. */ ++ ++#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */ ++#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */ ++#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */ ++ ++/* Legal values for sh_flags field of Elf32_Shdr. */ ++ ++#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */ ++#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */ ++#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */ ++ ++/* Legal values for ST_TYPE subfield of st_info (symbol type). */ ++ ++#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */ ++ ++#define STT_HP_OPAQUE (STT_LOOS + 0x1) ++#define STT_HP_STUB (STT_LOOS + 0x2) ++ ++/* HPPA relocs. 
*/ ++ ++#define R_PARISC_NONE 0 /* No reloc. */ ++#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */ ++#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */ ++#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */ ++#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */ ++#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */ ++#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */ ++#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */ ++#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */ ++#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ ++#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ ++#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ ++#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ ++#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ ++#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ ++#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ ++#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ ++#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ ++#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ ++#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ ++#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ ++#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ ++#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ ++#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ ++#define R_PARISC_FPTR64 64 /* 64 bits function address. */ ++#define R_PARISC_PLABEL32 65 /* 32 bits function address. */ ++#define R_PARISC_PLABEL21L 66 /* Left 21 bits of fdesc address. */ ++#define R_PARISC_PLABEL14R 70 /* Right 14 bits of fdesc address. */ ++#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ ++#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ ++#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ ++#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ ++#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ ++#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ ++#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */ ++#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ ++#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ ++#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ ++#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ ++#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ ++#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ ++#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ ++#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */ ++#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ ++#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ ++#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ ++#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ ++#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ ++#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ ++#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ ++#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. 
*/ ++#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ ++#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ ++#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ ++#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ ++#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */ ++#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ ++#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ ++#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ ++#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ ++#define R_PARISC_LORESERVE 128 ++#define R_PARISC_COPY 128 /* Copy relocation. */ ++#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ ++#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ ++#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ ++#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ ++#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ ++#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ ++#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ ++#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ ++#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ ++#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ ++#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ ++#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/ ++#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */ ++#define R_PARISC_GNU_VTENTRY 232 ++#define R_PARISC_GNU_VTINHERIT 233 ++#define R_PARISC_TLS_GD21L 234 /* GD 21-bit left. */ ++#define R_PARISC_TLS_GD14R 235 /* GD 14-bit right. */ ++#define R_PARISC_TLS_GDCALL 236 /* GD call to __t_g_a. */ ++#define R_PARISC_TLS_LDM21L 237 /* LD module 21-bit left. */ ++#define R_PARISC_TLS_LDM14R 238 /* LD module 14-bit right. */ ++#define R_PARISC_TLS_LDMCALL 239 /* LD module call to __t_g_a. */ ++#define R_PARISC_TLS_LDO21L 240 /* LD offset 21-bit left. */ ++#define R_PARISC_TLS_LDO14R 241 /* LD offset 14-bit right. */ ++#define R_PARISC_TLS_DTPMOD32 242 /* DTP module 32-bit. */ ++#define R_PARISC_TLS_DTPMOD64 243 /* DTP module 64-bit. */ ++#define R_PARISC_TLS_DTPOFF32 244 /* DTP offset 32-bit. */ ++#define R_PARISC_TLS_DTPOFF64 245 /* DTP offset 32-bit. 
*/ ++#define R_PARISC_TLS_LE21L R_PARISC_TPREL21L ++#define R_PARISC_TLS_LE14R R_PARISC_TPREL14R ++#define R_PARISC_TLS_IE21L R_PARISC_LTOFF_TP21L ++#define R_PARISC_TLS_IE14R R_PARISC_LTOFF_TP14R ++#define R_PARISC_TLS_TPREL32 R_PARISC_TPREL32 ++#define R_PARISC_TLS_TPREL64 R_PARISC_TPREL64 ++#define R_PARISC_HIRESERVE 255 ++ ++/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */ ++ ++#define PT_HP_TLS (PT_LOOS + 0x0) ++#define PT_HP_CORE_NONE (PT_LOOS + 0x1) ++#define PT_HP_CORE_VERSION (PT_LOOS + 0x2) ++#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) ++#define PT_HP_CORE_COMM (PT_LOOS + 0x4) ++#define PT_HP_CORE_PROC (PT_LOOS + 0x5) ++#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) ++#define PT_HP_CORE_STACK (PT_LOOS + 0x7) ++#define PT_HP_CORE_SHM (PT_LOOS + 0x8) ++#define PT_HP_CORE_MMF (PT_LOOS + 0x9) ++#define PT_HP_PARALLEL (PT_LOOS + 0x10) ++#define PT_HP_FASTBIND (PT_LOOS + 0x11) ++#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) ++#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) ++#define PT_HP_STACK (PT_LOOS + 0x14) ++ ++#define PT_PARISC_ARCHEXT 0x70000000 ++#define PT_PARISC_UNWIND 0x70000001 ++ ++/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */ ++ ++#define PF_PARISC_SBP 0x08000000 ++ ++#define PF_HP_PAGE_SIZE 0x00100000 ++#define PF_HP_FAR_SHARED 0x00200000 ++#define PF_HP_NEAR_SHARED 0x00400000 ++#define PF_HP_CODE 0x01000000 ++#define PF_HP_MODIFY 0x02000000 ++#define PF_HP_LAZYSWAP 0x04000000 ++#define PF_HP_SBP 0x08000000 ++ ++ ++/* Alpha specific definitions. */ ++ ++/* Legal values for e_flags field of Elf64_Ehdr. */ ++ ++#define EF_ALPHA_32BIT 1 /* All addresses must be < 2GB. */ ++#define EF_ALPHA_CANRELAX 2 /* Relocations for relaxing exist. */ ++ ++/* Legal values for sh_type field of Elf64_Shdr. */ ++ ++/* These two are primerily concerned with ECOFF debugging info. */ ++#define SHT_ALPHA_DEBUG 0x70000001 ++#define SHT_ALPHA_REGINFO 0x70000002 ++ ++/* Legal values for sh_flags field of Elf64_Shdr. */ ++ ++#define SHF_ALPHA_GPREL 0x10000000 ++ ++/* Legal values for st_other field of Elf64_Sym. */ ++#define STO_ALPHA_NOPV 0x80 /* No PV required. */ ++#define STO_ALPHA_STD_GPLOAD 0x88 /* PV only used for initial ldgp. */ ++ ++/* Alpha relocs. 
*/ ++ ++#define R_ALPHA_NONE 0 /* No reloc */ ++#define R_ALPHA_REFLONG 1 /* Direct 32 bit */ ++#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ ++#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ ++#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ ++#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ ++#define R_ALPHA_GPDISP 6 /* Add displacement to GP */ ++#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ ++#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ ++#define R_ALPHA_SREL16 9 /* PC relative 16 bit */ ++#define R_ALPHA_SREL32 10 /* PC relative 32 bit */ ++#define R_ALPHA_SREL64 11 /* PC relative 64 bit */ ++#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ ++#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ ++#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ ++#define R_ALPHA_COPY 24 /* Copy symbol at runtime */ ++#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ ++#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ ++#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ ++#define R_ALPHA_TLS_GD_HI 28 ++#define R_ALPHA_TLSGD 29 ++#define R_ALPHA_TLS_LDM 30 ++#define R_ALPHA_DTPMOD64 31 ++#define R_ALPHA_GOTDTPREL 32 ++#define R_ALPHA_DTPREL64 33 ++#define R_ALPHA_DTPRELHI 34 ++#define R_ALPHA_DTPRELLO 35 ++#define R_ALPHA_DTPREL16 36 ++#define R_ALPHA_GOTTPREL 37 ++#define R_ALPHA_TPREL64 38 ++#define R_ALPHA_TPRELHI 39 ++#define R_ALPHA_TPRELLO 40 ++#define R_ALPHA_TPREL16 41 ++/* Keep this the last entry. */ ++#define R_ALPHA_NUM 46 ++ ++/* Magic values of the LITUSE relocation addend. */ ++#define LITUSE_ALPHA_ADDR 0 ++#define LITUSE_ALPHA_BASE 1 ++#define LITUSE_ALPHA_BYTOFF 2 ++#define LITUSE_ALPHA_JSR 3 ++#define LITUSE_ALPHA_TLS_GD 4 ++#define LITUSE_ALPHA_TLS_LDM 5 ++ ++/* Legal values for d_tag of Elf64_Dyn. */ ++#define DT_ALPHA_PLTRO (DT_LOPROC + 0) ++#define DT_ALPHA_NUM 1 ++ ++/* PowerPC specific declarations */ ++ ++/* Values for Elf32/64_Ehdr.e_flags. */ ++#define EF_PPC_EMB 0x80000000 /* PowerPC embedded flag */ ++ ++/* Cygnus local bits below */ ++#define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/ ++#define EF_PPC_RELOCATABLE_LIB 0x00008000 /* PowerPC -mrelocatable-lib ++ flag */ ++ ++/* PowerPC relocations defined by the ABIs */ ++#define R_PPC_NONE 0 ++#define R_PPC_ADDR32 1 /* 32bit absolute address */ ++#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. 
*/ ++#define R_PPC_ADDR16 3 /* 16bit absolute address */ ++#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ ++#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ ++#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ ++#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ ++#define R_PPC_ADDR14_BRTAKEN 8 ++#define R_PPC_ADDR14_BRNTAKEN 9 ++#define R_PPC_REL24 10 /* PC relative 26 bit */ ++#define R_PPC_REL14 11 /* PC relative 16 bit */ ++#define R_PPC_REL14_BRTAKEN 12 ++#define R_PPC_REL14_BRNTAKEN 13 ++#define R_PPC_GOT16 14 ++#define R_PPC_GOT16_LO 15 ++#define R_PPC_GOT16_HI 16 ++#define R_PPC_GOT16_HA 17 ++#define R_PPC_PLTREL24 18 ++#define R_PPC_COPY 19 ++#define R_PPC_GLOB_DAT 20 ++#define R_PPC_JMP_SLOT 21 ++#define R_PPC_RELATIVE 22 ++#define R_PPC_LOCAL24PC 23 ++#define R_PPC_UADDR32 24 ++#define R_PPC_UADDR16 25 ++#define R_PPC_REL32 26 ++#define R_PPC_PLT32 27 ++#define R_PPC_PLTREL32 28 ++#define R_PPC_PLT16_LO 29 ++#define R_PPC_PLT16_HI 30 ++#define R_PPC_PLT16_HA 31 ++#define R_PPC_SDAREL16 32 ++#define R_PPC_SECTOFF 33 ++#define R_PPC_SECTOFF_LO 34 ++#define R_PPC_SECTOFF_HI 35 ++#define R_PPC_SECTOFF_HA 36 ++ ++/* PowerPC relocations defined for the TLS access ABI. */ ++#define R_PPC_TLS 67 /* none (sym+add)@tls */ ++#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */ ++#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */ ++#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ ++#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ ++#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ ++#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */ ++#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */ ++#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ ++#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ ++#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ ++#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */ ++#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ ++#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ ++#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ ++#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ ++#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ ++#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ ++#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ ++#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ ++#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */ ++#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */ ++#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ ++#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ ++#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */ ++#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */ ++#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */ ++#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */ ++ ++/* The remaining relocs are from the Embedded ELF ABI, and are not ++ in the SVR4 ELF ABI. 
*/ ++#define R_PPC_EMB_NADDR32 101 ++#define R_PPC_EMB_NADDR16 102 ++#define R_PPC_EMB_NADDR16_LO 103 ++#define R_PPC_EMB_NADDR16_HI 104 ++#define R_PPC_EMB_NADDR16_HA 105 ++#define R_PPC_EMB_SDAI16 106 ++#define R_PPC_EMB_SDA2I16 107 ++#define R_PPC_EMB_SDA2REL 108 ++#define R_PPC_EMB_SDA21 109 /* 16 bit offset in SDA */ ++#define R_PPC_EMB_MRKREF 110 ++#define R_PPC_EMB_RELSEC16 111 ++#define R_PPC_EMB_RELST_LO 112 ++#define R_PPC_EMB_RELST_HI 113 ++#define R_PPC_EMB_RELST_HA 114 ++#define R_PPC_EMB_BIT_FLD 115 ++#define R_PPC_EMB_RELSDA 116 /* 16 bit relative offset in SDA */ ++ ++/* Diab tool relocations. */ ++#define R_PPC_DIAB_SDA21_LO 180 /* like EMB_SDA21, but lower 16 bit */ ++#define R_PPC_DIAB_SDA21_HI 181 /* like EMB_SDA21, but high 16 bit */ ++#define R_PPC_DIAB_SDA21_HA 182 /* like EMB_SDA21, adjusted high 16 */ ++#define R_PPC_DIAB_RELSDA_LO 183 /* like EMB_RELSDA, but lower 16 bit */ ++#define R_PPC_DIAB_RELSDA_HI 184 /* like EMB_RELSDA, but high 16 bit */ ++#define R_PPC_DIAB_RELSDA_HA 185 /* like EMB_RELSDA, adjusted high 16 */ ++ ++/* GNU extension to support local ifunc. */ ++#define R_PPC_IRELATIVE 248 ++ ++/* GNU relocs used in PIC code sequences. */ ++#define R_PPC_REL16 249 /* half16 (sym+add-.) */ ++#define R_PPC_REL16_LO 250 /* half16 (sym+add-.)@l */ ++#define R_PPC_REL16_HI 251 /* half16 (sym+add-.)@h */ ++#define R_PPC_REL16_HA 252 /* half16 (sym+add-.)@ha */ ++ ++/* This is a phony reloc to handle any old fashioned TOC16 references ++ that may still be in object files. */ ++#define R_PPC_TOC16 255 ++ ++/* PowerPC specific values for the Dyn d_tag field. */ ++#define DT_PPC_GOT (DT_LOPROC + 0) ++#define DT_PPC_NUM 1 ++ ++/* PowerPC64 relocations defined by the ABIs */ ++#define R_PPC64_NONE R_PPC_NONE ++#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address */ ++#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned */ ++#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address */ ++#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of address */ ++#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of address. */ ++#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */ ++#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned */ ++#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN ++#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN ++#define R_PPC64_REL24 R_PPC_REL24 /* PC-rel. 
26 bit, word aligned */ ++#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit */ ++#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN ++#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN ++#define R_PPC64_GOT16 R_PPC_GOT16 ++#define R_PPC64_GOT16_LO R_PPC_GOT16_LO ++#define R_PPC64_GOT16_HI R_PPC_GOT16_HI ++#define R_PPC64_GOT16_HA R_PPC_GOT16_HA ++ ++#define R_PPC64_COPY R_PPC_COPY ++#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT ++#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT ++#define R_PPC64_RELATIVE R_PPC_RELATIVE ++ ++#define R_PPC64_UADDR32 R_PPC_UADDR32 ++#define R_PPC64_UADDR16 R_PPC_UADDR16 ++#define R_PPC64_REL32 R_PPC_REL32 ++#define R_PPC64_PLT32 R_PPC_PLT32 ++#define R_PPC64_PLTREL32 R_PPC_PLTREL32 ++#define R_PPC64_PLT16_LO R_PPC_PLT16_LO ++#define R_PPC64_PLT16_HI R_PPC_PLT16_HI ++#define R_PPC64_PLT16_HA R_PPC_PLT16_HA ++ ++#define R_PPC64_SECTOFF R_PPC_SECTOFF ++#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO ++#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI ++#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA ++#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2 */ ++#define R_PPC64_ADDR64 38 /* doubleword64 S + A */ ++#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A) */ ++#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A) */ ++#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A) */ ++#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A) */ ++#define R_PPC64_UADDR64 43 /* doubleword64 S + A */ ++#define R_PPC64_REL64 44 /* doubleword64 S + A - P */ ++#define R_PPC64_PLT64 45 /* doubleword64 L + A */ ++#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P */ ++#define R_PPC64_TOC16 47 /* half16* S + A - .TOC */ ++#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.) */ ++#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.) */ ++#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.) */ ++#define R_PPC64_TOC 51 /* doubleword64 .TOC */ ++#define R_PPC64_PLTGOT16 52 /* half16* M + A */ ++#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A) */ ++#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A) */ ++#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A) */ ++ ++#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2 */ ++#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2 */ ++#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2 */ ++#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2 */ ++#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2 */ ++#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2 */ ++#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2 */ ++#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2 */ ++#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2 */ ++#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2 */ ++#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2 */ ++ ++/* PowerPC64 relocations defined for the TLS access ABI. 
*/ ++#define R_PPC64_TLS 67 /* none (sym+add)@tls */ ++#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */ ++#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */ ++#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */ ++#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */ ++#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */ ++#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */ ++#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */ ++#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */ ++#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */ ++#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */ ++#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */ ++#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */ ++#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */ ++#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */ ++#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */ ++#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */ ++#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */ ++#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */ ++#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */ ++#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */ ++#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */ ++#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */ ++#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */ ++#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */ ++#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */ ++#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */ ++#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */ ++#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */ ++#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */ ++#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */ ++#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */ ++#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */ ++#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */ ++#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */ ++#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */ ++#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */ ++#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */ ++#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */ ++#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */ ++ ++/* GNU extension to support local ifunc. */ ++#define R_PPC64_JMP_IREL 247 ++#define R_PPC64_IRELATIVE 248 ++#define R_PPC64_REL16 249 /* half16 (sym+add-.) */ ++#define R_PPC64_REL16_LO 250 /* half16 (sym+add-.)@l */ ++#define R_PPC64_REL16_HI 251 /* half16 (sym+add-.)@h */ ++#define R_PPC64_REL16_HA 252 /* half16 (sym+add-.)@ha */ ++ ++/* PowerPC64 specific values for the Dyn d_tag field. */ ++#define DT_PPC64_GLINK (DT_LOPROC + 0) ++#define DT_PPC64_OPD (DT_LOPROC + 1) ++#define DT_PPC64_OPDSZ (DT_LOPROC + 2) ++#define DT_PPC64_NUM 3 ++ ++ ++/* ARM specific declarations */ ++ ++/* Processor specific flags for the ELF header e_flags field. 
*/ ++#define EF_ARM_RELEXEC 0x01 ++#define EF_ARM_HASENTRY 0x02 ++#define EF_ARM_INTERWORK 0x04 ++#define EF_ARM_APCS_26 0x08 ++#define EF_ARM_APCS_FLOAT 0x10 ++#define EF_ARM_PIC 0x20 ++#define EF_ARM_ALIGN8 0x40 /* 8-bit structure alignment is in use */ ++#define EF_ARM_NEW_ABI 0x80 ++#define EF_ARM_OLD_ABI 0x100 ++#define EF_ARM_SOFT_FLOAT 0x200 ++#define EF_ARM_VFP_FLOAT 0x400 ++#define EF_ARM_MAVERICK_FLOAT 0x800 ++ ++ ++/* Other constants defined in the ARM ELF spec. version B-01. */ ++/* NB. These conflict with values defined above. */ ++#define EF_ARM_SYMSARESORTED 0x04 ++#define EF_ARM_DYNSYMSUSESEGIDX 0x08 ++#define EF_ARM_MAPSYMSFIRST 0x10 ++#define EF_ARM_EABIMASK 0XFF000000 ++ ++/* Constants defined in AAELF. */ ++#define EF_ARM_BE8 0x00800000 ++#define EF_ARM_LE8 0x00400000 ++ ++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK) ++#define EF_ARM_EABI_UNKNOWN 0x00000000 ++#define EF_ARM_EABI_VER1 0x01000000 ++#define EF_ARM_EABI_VER2 0x02000000 ++#define EF_ARM_EABI_VER3 0x03000000 ++#define EF_ARM_EABI_VER4 0x04000000 ++#define EF_ARM_EABI_VER5 0x05000000 ++ ++/* Additional symbol types for Thumb. */ ++#define STT_ARM_TFUNC STT_LOPROC /* A Thumb function. */ ++#define STT_ARM_16BIT STT_HIPROC /* A Thumb label. */ ++ ++/* ARM-specific values for sh_flags */ ++#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */ ++#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined ++ in the input to a link step. */ ++ ++/* ARM-specific program header flags */ ++#define PF_ARM_SB 0x10000000 /* Segment contains the location ++ addressed by the static base. */ ++#define PF_ARM_PI 0x20000000 /* Position-independent segment. */ ++#define PF_ARM_ABS 0x40000000 /* Absolute segment. */ ++ ++/* Processor specific values for the Phdr p_type field. */ ++#define PT_ARM_EXIDX (PT_LOPROC + 1) /* ARM unwind segment. */ ++ ++/* Processor specific values for the Shdr sh_type field. */ ++#define SHT_ARM_EXIDX (SHT_LOPROC + 1) /* ARM unwind section. */ ++#define SHT_ARM_PREEMPTMAP (SHT_LOPROC + 2) /* Preemption details. */ ++#define SHT_ARM_ATTRIBUTES (SHT_LOPROC + 3) /* ARM attributes section. */ ++ ++ ++/* ARM relocs. */ ++ ++#define R_ARM_NONE 0 /* No reloc */ ++#define R_ARM_PC24 1 /* PC relative 26 bit branch */ ++#define R_ARM_ABS32 2 /* Direct 32 bit */ ++#define R_ARM_REL32 3 /* PC relative 32 bit */ ++#define R_ARM_PC13 4 ++#define R_ARM_ABS16 5 /* Direct 16 bit */ ++#define R_ARM_ABS12 6 /* Direct 12 bit */ ++#define R_ARM_THM_ABS5 7 ++#define R_ARM_ABS8 8 /* Direct 8 bit */ ++#define R_ARM_SBREL32 9 ++#define R_ARM_THM_PC22 10 ++#define R_ARM_THM_PC8 11 ++#define R_ARM_AMP_VCALL9 12 ++#define R_ARM_SWI24 13 /* Obsolete static relocation. */ ++#define R_ARM_TLS_DESC 13 /* Dynamic relocation. 
*/ ++#define R_ARM_THM_SWI8 14 ++#define R_ARM_XPC25 15 ++#define R_ARM_THM_XPC22 16 ++#define R_ARM_TLS_DTPMOD32 17 /* ID of module containing symbol */ ++#define R_ARM_TLS_DTPOFF32 18 /* Offset in TLS block */ ++#define R_ARM_TLS_TPOFF32 19 /* Offset in static TLS block */ ++#define R_ARM_COPY 20 /* Copy symbol at runtime */ ++#define R_ARM_GLOB_DAT 21 /* Create GOT entry */ ++#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */ ++#define R_ARM_RELATIVE 23 /* Adjust by program base */ ++#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */ ++#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */ ++#define R_ARM_GOT32 26 /* 32 bit GOT entry */ ++#define R_ARM_PLT32 27 /* 32 bit PLT address */ ++#define R_ARM_ALU_PCREL_7_0 32 ++#define R_ARM_ALU_PCREL_15_8 33 ++#define R_ARM_ALU_PCREL_23_15 34 ++#define R_ARM_LDR_SBREL_11_0 35 ++#define R_ARM_ALU_SBREL_19_12 36 ++#define R_ARM_ALU_SBREL_27_20 37 ++#define R_ARM_TLS_GOTDESC 90 ++#define R_ARM_TLS_CALL 91 ++#define R_ARM_TLS_DESCSEQ 92 ++#define R_ARM_THM_TLS_CALL 93 ++#define R_ARM_GNU_VTENTRY 100 ++#define R_ARM_GNU_VTINHERIT 101 ++#define R_ARM_THM_PC11 102 /* thumb unconditional branch */ ++#define R_ARM_THM_PC9 103 /* thumb conditional branch */ ++#define R_ARM_TLS_GD32 104 /* PC-rel 32 bit for global dynamic ++ thread local data */ ++#define R_ARM_TLS_LDM32 105 /* PC-rel 32 bit for local dynamic ++ thread local data */ ++#define R_ARM_TLS_LDO32 106 /* 32 bit offset relative to TLS ++ block */ ++#define R_ARM_TLS_IE32 107 /* PC-rel 32 bit for GOT entry of ++ static TLS block offset */ ++#define R_ARM_TLS_LE32 108 /* 32 bit offset relative to static ++ TLS block */ ++#define R_ARM_THM_TLS_DESCSEQ 129 ++#define R_ARM_IRELATIVE 160 ++#define R_ARM_RXPC25 249 ++#define R_ARM_RSBREL32 250 ++#define R_ARM_THM_RPC22 251 ++#define R_ARM_RREL32 252 ++#define R_ARM_RABS22 253 ++#define R_ARM_RPC24 254 ++#define R_ARM_RBASE 255 ++/* Keep this the last entry. */ ++#define R_ARM_NUM 256 ++ ++/* IA-64 specific declarations. */ ++ ++/* Processor specific flags for the Ehdr e_flags field. */ ++#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */ ++#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */ ++#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */ ++ ++/* Processor specific values for the Phdr p_type field. */ ++#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */ ++#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */ ++#define PT_IA_64_HP_OPT_ANOT (PT_LOOS + 0x12) ++#define PT_IA_64_HP_HSL_ANOT (PT_LOOS + 0x13) ++#define PT_IA_64_HP_STACK (PT_LOOS + 0x14) ++ ++/* Processor specific flags for the Phdr p_flags field. */ ++#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */ ++ ++/* Processor specific values for the Shdr sh_type field. */ ++#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */ ++#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */ ++ ++/* Processor specific flags for the Shdr sh_flags field. */ ++#define SHF_IA_64_SHORT 0x10000000 /* section near gp */ ++#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */ ++ ++/* Processor specific values for the Dyn d_tag field. */ ++#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0) ++#define DT_IA_64_NUM 1 ++ ++/* IA-64 relocations. 
*/ ++#define R_IA64_NONE 0x00 /* none */ ++#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ ++#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ ++#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ ++#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ ++#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ ++#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ ++#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ ++#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */ ++#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */ ++#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */ ++#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */ ++#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */ ++#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */ ++#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */ ++#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */ ++#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */ ++#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */ ++#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */ ++#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */ ++#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */ ++#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */ ++#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */ ++#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */ ++#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */ ++#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */ ++#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */ ++#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */ ++#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */ ++#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */ ++#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */ ++#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ ++#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */ ++#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */ ++#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */ ++#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */ ++#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */ ++#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */ ++#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */ ++#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */ ++#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */ ++#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */ ++#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */ ++#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */ ++#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */ ++#define R_IA64_REL32MSB 0x6c /* data 4 + REL */ ++#define R_IA64_REL32LSB 0x6d /* data 4 + REL */ ++#define R_IA64_REL64MSB 0x6e /* data 8 + REL */ ++#define R_IA64_REL64LSB 0x6f /* data 8 + REL */ ++#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ ++#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ ++#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ ++#define R_IA64_LTV64LSB 0x77 
/* symbol + addend, data8 LSB */ ++#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */ ++#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */ ++#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */ ++#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ ++#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ ++#define R_IA64_COPY 0x84 /* copy relocation */ ++#define R_IA64_SUB 0x85 /* Addend and symbol difference */ ++#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */ ++#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */ ++#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */ ++#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */ ++#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */ ++#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */ ++#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */ ++#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */ ++#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */ ++#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */ ++#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */ ++#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */ ++#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */ ++#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */ ++#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */ ++#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ ++#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ ++ ++/* SH specific declarations */ ++ ++/* Processor specific flags for the ELF header e_flags field. */ ++#define EF_SH_MACH_MASK 0x1f ++#define EF_SH_UNKNOWN 0x0 ++#define EF_SH1 0x1 ++#define EF_SH2 0x2 ++#define EF_SH3 0x3 ++#define EF_SH_DSP 0x4 ++#define EF_SH3_DSP 0x5 ++#define EF_SH4AL_DSP 0x6 ++#define EF_SH3E 0x8 ++#define EF_SH4 0x9 ++#define EF_SH2E 0xb ++#define EF_SH4A 0xc ++#define EF_SH2A 0xd ++#define EF_SH4_NOFPU 0x10 ++#define EF_SH4A_NOFPU 0x11 ++#define EF_SH4_NOMMU_NOFPU 0x12 ++#define EF_SH2A_NOFPU 0x13 ++#define EF_SH3_NOMMU 0x14 ++#define EF_SH2A_SH4_NOFPU 0x15 ++#define EF_SH2A_SH3_NOFPU 0x16 ++#define EF_SH2A_SH4 0x17 ++#define EF_SH2A_SH3E 0x18 ++ ++/* SH relocs. */ ++#define R_SH_NONE 0 ++#define R_SH_DIR32 1 ++#define R_SH_REL32 2 ++#define R_SH_DIR8WPN 3 ++#define R_SH_IND12W 4 ++#define R_SH_DIR8WPL 5 ++#define R_SH_DIR8WPZ 6 ++#define R_SH_DIR8BP 7 ++#define R_SH_DIR8W 8 ++#define R_SH_DIR8L 9 ++#define R_SH_SWITCH16 25 ++#define R_SH_SWITCH32 26 ++#define R_SH_USES 27 ++#define R_SH_COUNT 28 ++#define R_SH_ALIGN 29 ++#define R_SH_CODE 30 ++#define R_SH_DATA 31 ++#define R_SH_LABEL 32 ++#define R_SH_SWITCH8 33 ++#define R_SH_GNU_VTINHERIT 34 ++#define R_SH_GNU_VTENTRY 35 ++#define R_SH_TLS_GD_32 144 ++#define R_SH_TLS_LD_32 145 ++#define R_SH_TLS_LDO_32 146 ++#define R_SH_TLS_IE_32 147 ++#define R_SH_TLS_LE_32 148 ++#define R_SH_TLS_DTPMOD32 149 ++#define R_SH_TLS_DTPOFF32 150 ++#define R_SH_TLS_TPOFF32 151 ++#define R_SH_GOT32 160 ++#define R_SH_PLT32 161 ++#define R_SH_COPY 162 ++#define R_SH_GLOB_DAT 163 ++#define R_SH_JMP_SLOT 164 ++#define R_SH_RELATIVE 165 ++#define R_SH_GOTOFF 166 ++#define R_SH_GOTPC 167 ++/* Keep this the last entry. */ ++#define R_SH_NUM 256 ++ ++/* S/390 specific definitions. */ ++ ++/* Valid values for the e_flags field. 
*/ ++ ++#define EF_S390_HIGH_GPRS 0x00000001 /* High GPRs kernel facility needed. */ ++ ++/* Additional s390 relocs */ ++ ++#define R_390_NONE 0 /* No reloc. */ ++#define R_390_8 1 /* Direct 8 bit. */ ++#define R_390_12 2 /* Direct 12 bit. */ ++#define R_390_16 3 /* Direct 16 bit. */ ++#define R_390_32 4 /* Direct 32 bit. */ ++#define R_390_PC32 5 /* PC relative 32 bit. */ ++#define R_390_GOT12 6 /* 12 bit GOT offset. */ ++#define R_390_GOT32 7 /* 32 bit GOT offset. */ ++#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ ++#define R_390_COPY 9 /* Copy symbol at runtime. */ ++#define R_390_GLOB_DAT 10 /* Create GOT entry. */ ++#define R_390_JMP_SLOT 11 /* Create PLT entry. */ ++#define R_390_RELATIVE 12 /* Adjust by program base. */ ++#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */ ++#define R_390_GOTPC 14 /* 32 bit PC relative offset to GOT. */ ++#define R_390_GOT16 15 /* 16 bit GOT offset. */ ++#define R_390_PC16 16 /* PC relative 16 bit. */ ++#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */ ++#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */ ++#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */ ++#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */ ++#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */ ++#define R_390_64 22 /* Direct 64 bit. */ ++#define R_390_PC64 23 /* PC relative 64 bit. */ ++#define R_390_GOT64 24 /* 64 bit GOT offset. */ ++#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */ ++#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */ ++#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */ ++#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */ ++#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */ ++#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */ ++#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */ ++#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */ ++#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */ ++#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */ ++#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */ ++#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */ ++#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */ ++#define R_390_TLS_GDCALL 38 /* Tag for function call in general ++ dynamic TLS code. */ ++#define R_390_TLS_LDCALL 39 /* Tag for function call in local ++ dynamic TLS code. */ ++#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic ++ thread local data. */ ++#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic ++ thread local data. */ ++#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic ++ thread local data in LE code. */ ++#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic ++ thread local data in LE code. */ ++#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for ++ negated static TLS block offset. */ ++#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to ++ static TLS block. 
*/ ++#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to ++ static TLS block. */ ++#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS ++ block. */ ++#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS ++ block. */ ++#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */ ++#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ ++#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS ++ block. */ ++#define R_390_20 57 /* Direct 20 bit. */ ++#define R_390_GOT20 58 /* 20 bit GOT offset. */ ++#define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */ ++#define R_390_TLS_GOTIE20 60 /* 20 bit GOT offset for static TLS ++ block offset. */ ++#define R_390_IRELATIVE 61 /* STT_GNU_IFUNC relocation. */ ++/* Keep this the last entry. */ ++#define R_390_NUM 62 ++ ++ ++/* CRIS relocations. */ ++#define R_CRIS_NONE 0 ++#define R_CRIS_8 1 ++#define R_CRIS_16 2 ++#define R_CRIS_32 3 ++#define R_CRIS_8_PCREL 4 ++#define R_CRIS_16_PCREL 5 ++#define R_CRIS_32_PCREL 6 ++#define R_CRIS_GNU_VTINHERIT 7 ++#define R_CRIS_GNU_VTENTRY 8 ++#define R_CRIS_COPY 9 ++#define R_CRIS_GLOB_DAT 10 ++#define R_CRIS_JUMP_SLOT 11 ++#define R_CRIS_RELATIVE 12 ++#define R_CRIS_16_GOT 13 ++#define R_CRIS_32_GOT 14 ++#define R_CRIS_16_GOTPLT 15 ++#define R_CRIS_32_GOTPLT 16 ++#define R_CRIS_32_GOTREL 17 ++#define R_CRIS_32_PLT_GOTREL 18 ++#define R_CRIS_32_PLT_PCREL 19 ++ ++#define R_CRIS_NUM 20 ++ ++ ++/* AMD x86-64 relocations. */ ++#define R_X86_64_NONE 0 /* No reloc */ ++#define R_X86_64_64 1 /* Direct 64 bit */ ++#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ ++#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ ++#define R_X86_64_PLT32 4 /* 32 bit PLT address */ ++#define R_X86_64_COPY 5 /* Copy symbol at runtime */ ++#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ ++#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ ++#define R_X86_64_RELATIVE 8 /* Adjust by program base */ ++#define R_X86_64_GOTPCREL 9 /* 32 bit signed PC relative ++ offset to GOT */ ++#define R_X86_64_32 10 /* Direct 32 bit zero extended */ ++#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ ++#define R_X86_64_16 12 /* Direct 16 bit zero extended */ ++#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ ++#define R_X86_64_8 14 /* Direct 8 bit sign extended */ ++#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ ++#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */ ++#define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */ ++#define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */ ++#define R_X86_64_TLSGD 19 /* 32 bit signed PC relative offset ++ to two GOT entries for GD symbol */ ++#define R_X86_64_TLSLD 20 /* 32 bit signed PC relative offset ++ to two GOT entries for LD symbol */ ++#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */ ++#define R_X86_64_GOTTPOFF 22 /* 32 bit signed PC relative offset ++ to GOT entry for IE symbol */ ++#define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */ ++#define R_X86_64_PC64 24 /* PC relative 64 bit */ ++#define R_X86_64_GOTOFF64 25 /* 64 bit offset to GOT */ ++#define R_X86_64_GOTPC32 26 /* 32 bit signed pc relative ++ offset to GOT */ ++#define R_X86_64_GOT64 27 /* 64-bit GOT entry offset */ ++#define R_X86_64_GOTPCREL64 28 /* 64-bit PC relative offset ++ to GOT entry */ ++#define R_X86_64_GOTPC64 29 /* 64-bit PC relative offset to GOT */ ++#define R_X86_64_GOTPLT64 30 /* like GOT64, says PLT entry needed */ ++#define R_X86_64_PLTOFF64 31 /* 64-bit GOT relative offset ++ to PLT entry */ ++#define 
R_X86_64_SIZE32 32 /* Size of symbol plus 32-bit addend */ ++#define R_X86_64_SIZE64 33 /* Size of symbol plus 64-bit addend */ ++#define R_X86_64_GOTPC32_TLSDESC 34 /* GOT offset for TLS descriptor. */ ++#define R_X86_64_TLSDESC_CALL 35 /* Marker for call through TLS ++ descriptor. */ ++#define R_X86_64_TLSDESC 36 /* TLS descriptor. */ ++#define R_X86_64_IRELATIVE 37 /* Adjust indirectly by program base */ ++#define R_X86_64_RELATIVE64 38 /* 64-bit adjust by program base */ ++ ++#define R_X86_64_NUM 39 ++ ++ ++/* AM33 relocations. */ ++#define R_MN10300_NONE 0 /* No reloc. */ ++#define R_MN10300_32 1 /* Direct 32 bit. */ ++#define R_MN10300_16 2 /* Direct 16 bit. */ ++#define R_MN10300_8 3 /* Direct 8 bit. */ ++#define R_MN10300_PCREL32 4 /* PC-relative 32-bit. */ ++#define R_MN10300_PCREL16 5 /* PC-relative 16-bit signed. */ ++#define R_MN10300_PCREL8 6 /* PC-relative 8-bit signed. */ ++#define R_MN10300_GNU_VTINHERIT 7 /* Ancient C++ vtable garbage... */ ++#define R_MN10300_GNU_VTENTRY 8 /* ... collection annotation. */ ++#define R_MN10300_24 9 /* Direct 24 bit. */ ++#define R_MN10300_GOTPC32 10 /* 32-bit PCrel offset to GOT. */ ++#define R_MN10300_GOTPC16 11 /* 16-bit PCrel offset to GOT. */ ++#define R_MN10300_GOTOFF32 12 /* 32-bit offset from GOT. */ ++#define R_MN10300_GOTOFF24 13 /* 24-bit offset from GOT. */ ++#define R_MN10300_GOTOFF16 14 /* 16-bit offset from GOT. */ ++#define R_MN10300_PLT32 15 /* 32-bit PCrel to PLT entry. */ ++#define R_MN10300_PLT16 16 /* 16-bit PCrel to PLT entry. */ ++#define R_MN10300_GOT32 17 /* 32-bit offset to GOT entry. */ ++#define R_MN10300_GOT24 18 /* 24-bit offset to GOT entry. */ ++#define R_MN10300_GOT16 19 /* 16-bit offset to GOT entry. */ ++#define R_MN10300_COPY 20 /* Copy symbol at runtime. */ ++#define R_MN10300_GLOB_DAT 21 /* Create GOT entry. */ ++#define R_MN10300_JMP_SLOT 22 /* Create PLT entry. */ ++#define R_MN10300_RELATIVE 23 /* Adjust by program base. */ ++ ++#define R_MN10300_NUM 24 ++ ++ ++/* M32R relocs. */ ++#define R_M32R_NONE 0 /* No reloc. */ ++#define R_M32R_16 1 /* Direct 16 bit. */ ++#define R_M32R_32 2 /* Direct 32 bit. */ ++#define R_M32R_24 3 /* Direct 24 bit. */ ++#define R_M32R_10_PCREL 4 /* PC relative 10 bit shifted. */ ++#define R_M32R_18_PCREL 5 /* PC relative 18 bit shifted. */ ++#define R_M32R_26_PCREL 6 /* PC relative 26 bit shifted. */ ++#define R_M32R_HI16_ULO 7 /* High 16 bit with unsigned low. */ ++#define R_M32R_HI16_SLO 8 /* High 16 bit with signed low. */ ++#define R_M32R_LO16 9 /* Low 16 bit. */ ++#define R_M32R_SDA16 10 /* 16 bit offset in SDA. */ ++#define R_M32R_GNU_VTINHERIT 11 ++#define R_M32R_GNU_VTENTRY 12 ++/* M32R relocs use SHT_RELA. */ ++#define R_M32R_16_RELA 33 /* Direct 16 bit. */ ++#define R_M32R_32_RELA 34 /* Direct 32 bit. */ ++#define R_M32R_24_RELA 35 /* Direct 24 bit. */ ++#define R_M32R_10_PCREL_RELA 36 /* PC relative 10 bit shifted. */ ++#define R_M32R_18_PCREL_RELA 37 /* PC relative 18 bit shifted. */ ++#define R_M32R_26_PCREL_RELA 38 /* PC relative 26 bit shifted. */ ++#define R_M32R_HI16_ULO_RELA 39 /* High 16 bit with unsigned low */ ++#define R_M32R_HI16_SLO_RELA 40 /* High 16 bit with signed low */ ++#define R_M32R_LO16_RELA 41 /* Low 16 bit */ ++#define R_M32R_SDA16_RELA 42 /* 16 bit offset in SDA */ ++#define R_M32R_RELA_GNU_VTINHERIT 43 ++#define R_M32R_RELA_GNU_VTENTRY 44 ++#define R_M32R_REL32 45 /* PC relative 32 bit. 
*/ ++ ++#define R_M32R_GOT24 48 /* 24 bit GOT entry */ ++#define R_M32R_26_PLTREL 49 /* 26 bit PC relative to PLT shifted */ ++#define R_M32R_COPY 50 /* Copy symbol at runtime */ ++#define R_M32R_GLOB_DAT 51 /* Create GOT entry */ ++#define R_M32R_JMP_SLOT 52 /* Create PLT entry */ ++#define R_M32R_RELATIVE 53 /* Adjust by program base */ ++#define R_M32R_GOTOFF 54 /* 24 bit offset to GOT */ ++#define R_M32R_GOTPC24 55 /* 24 bit PC relative offset to GOT */ ++#define R_M32R_GOT16_HI_ULO 56 /* High 16 bit GOT entry with unsigned ++ low */ ++#define R_M32R_GOT16_HI_SLO 57 /* High 16 bit GOT entry with signed ++ low */ ++#define R_M32R_GOT16_LO 58 /* Low 16 bit GOT entry */ ++#define R_M32R_GOTPC_HI_ULO 59 /* High 16 bit PC relative offset to ++ GOT with unsigned low */ ++#define R_M32R_GOTPC_HI_SLO 60 /* High 16 bit PC relative offset to ++ GOT with signed low */ ++#define R_M32R_GOTPC_LO 61 /* Low 16 bit PC relative offset to ++ GOT */ ++#define R_M32R_GOTOFF_HI_ULO 62 /* High 16 bit offset to GOT ++ with unsigned low */ ++#define R_M32R_GOTOFF_HI_SLO 63 /* High 16 bit offset to GOT ++ with signed low */ ++#define R_M32R_GOTOFF_LO 64 /* Low 16 bit offset to GOT */ ++#define R_M32R_NUM 256 /* Keep this the last entry. */ ++ ++ ++/* TILEPro relocations. */ ++#define R_TILEPRO_NONE 0 /* No reloc */ ++#define R_TILEPRO_32 1 /* Direct 32 bit */ ++#define R_TILEPRO_16 2 /* Direct 16 bit */ ++#define R_TILEPRO_8 3 /* Direct 8 bit */ ++#define R_TILEPRO_32_PCREL 4 /* PC relative 32 bit */ ++#define R_TILEPRO_16_PCREL 5 /* PC relative 16 bit */ ++#define R_TILEPRO_8_PCREL 6 /* PC relative 8 bit */ ++#define R_TILEPRO_LO16 7 /* Low 16 bit */ ++#define R_TILEPRO_HI16 8 /* High 16 bit */ ++#define R_TILEPRO_HA16 9 /* High 16 bit, adjusted */ ++#define R_TILEPRO_COPY 10 /* Copy relocation */ ++#define R_TILEPRO_GLOB_DAT 11 /* Create GOT entry */ ++#define R_TILEPRO_JMP_SLOT 12 /* Create PLT entry */ ++#define R_TILEPRO_RELATIVE 13 /* Adjust by program base */ ++#define R_TILEPRO_BROFF_X1 14 /* X1 pipe branch offset */ ++#define R_TILEPRO_JOFFLONG_X1 15 /* X1 pipe jump offset */ ++#define R_TILEPRO_JOFFLONG_X1_PLT 16 /* X1 pipe jump offset to PLT */ ++#define R_TILEPRO_IMM8_X0 17 /* X0 pipe 8-bit */ ++#define R_TILEPRO_IMM8_Y0 18 /* Y0 pipe 8-bit */ ++#define R_TILEPRO_IMM8_X1 19 /* X1 pipe 8-bit */ ++#define R_TILEPRO_IMM8_Y1 20 /* Y1 pipe 8-bit */ ++#define R_TILEPRO_MT_IMM15_X1 21 /* X1 pipe mtspr */ ++#define R_TILEPRO_MF_IMM15_X1 22 /* X1 pipe mfspr */ ++#define R_TILEPRO_IMM16_X0 23 /* X0 pipe 16-bit */ ++#define R_TILEPRO_IMM16_X1 24 /* X1 pipe 16-bit */ ++#define R_TILEPRO_IMM16_X0_LO 25 /* X0 pipe low 16-bit */ ++#define R_TILEPRO_IMM16_X1_LO 26 /* X1 pipe low 16-bit */ ++#define R_TILEPRO_IMM16_X0_HI 27 /* X0 pipe high 16-bit */ ++#define R_TILEPRO_IMM16_X1_HI 28 /* X1 pipe high 16-bit */ ++#define R_TILEPRO_IMM16_X0_HA 29 /* X0 pipe high 16-bit, adjusted */ ++#define R_TILEPRO_IMM16_X1_HA 30 /* X1 pipe high 16-bit, adjusted */ ++#define R_TILEPRO_IMM16_X0_PCREL 31 /* X0 pipe PC relative 16 bit */ ++#define R_TILEPRO_IMM16_X1_PCREL 32 /* X1 pipe PC relative 16 bit */ ++#define R_TILEPRO_IMM16_X0_LO_PCREL 33 /* X0 pipe PC relative low 16 bit */ ++#define R_TILEPRO_IMM16_X1_LO_PCREL 34 /* X1 pipe PC relative low 16 bit */ ++#define R_TILEPRO_IMM16_X0_HI_PCREL 35 /* X0 pipe PC relative high 16 bit */ ++#define R_TILEPRO_IMM16_X1_HI_PCREL 36 /* X1 pipe PC relative high 16 bit */ ++#define R_TILEPRO_IMM16_X0_HA_PCREL 37 /* X0 pipe PC relative ha() 16 bit */ ++#define R_TILEPRO_IMM16_X1_HA_PCREL 38 
/* X1 pipe PC relative ha() 16 bit */ ++#define R_TILEPRO_IMM16_X0_GOT 39 /* X0 pipe 16-bit GOT offset */ ++#define R_TILEPRO_IMM16_X1_GOT 40 /* X1 pipe 16-bit GOT offset */ ++#define R_TILEPRO_IMM16_X0_GOT_LO 41 /* X0 pipe low 16-bit GOT offset */ ++#define R_TILEPRO_IMM16_X1_GOT_LO 42 /* X1 pipe low 16-bit GOT offset */ ++#define R_TILEPRO_IMM16_X0_GOT_HI 43 /* X0 pipe high 16-bit GOT offset */ ++#define R_TILEPRO_IMM16_X1_GOT_HI 44 /* X1 pipe high 16-bit GOT offset */ ++#define R_TILEPRO_IMM16_X0_GOT_HA 45 /* X0 pipe ha() 16-bit GOT offset */ ++#define R_TILEPRO_IMM16_X1_GOT_HA 46 /* X1 pipe ha() 16-bit GOT offset */ ++#define R_TILEPRO_MMSTART_X0 47 /* X0 pipe mm "start" */ ++#define R_TILEPRO_MMEND_X0 48 /* X0 pipe mm "end" */ ++#define R_TILEPRO_MMSTART_X1 49 /* X1 pipe mm "start" */ ++#define R_TILEPRO_MMEND_X1 50 /* X1 pipe mm "end" */ ++#define R_TILEPRO_SHAMT_X0 51 /* X0 pipe shift amount */ ++#define R_TILEPRO_SHAMT_X1 52 /* X1 pipe shift amount */ ++#define R_TILEPRO_SHAMT_Y0 53 /* Y0 pipe shift amount */ ++#define R_TILEPRO_SHAMT_Y1 54 /* Y1 pipe shift amount */ ++#define R_TILEPRO_DEST_IMM8_X1 55 /* X1 pipe destination 8-bit */ ++/* Relocs 56-59 are currently not defined. */ ++#define R_TILEPRO_TLS_GD_CALL 60 /* "jal" for TLS GD */ ++#define R_TILEPRO_IMM8_X0_TLS_GD_ADD 61 /* X0 pipe "addi" for TLS GD */ ++#define R_TILEPRO_IMM8_X1_TLS_GD_ADD 62 /* X1 pipe "addi" for TLS GD */ ++#define R_TILEPRO_IMM8_Y0_TLS_GD_ADD 63 /* Y0 pipe "addi" for TLS GD */ ++#define R_TILEPRO_IMM8_Y1_TLS_GD_ADD 64 /* Y1 pipe "addi" for TLS GD */ ++#define R_TILEPRO_TLS_IE_LOAD 65 /* "lw_tls" for TLS IE */ ++#define R_TILEPRO_IMM16_X0_TLS_GD 66 /* X0 pipe 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X1_TLS_GD 67 /* X1 pipe 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X0_TLS_GD_LO 68 /* X0 pipe low 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X1_TLS_GD_LO 69 /* X1 pipe low 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X0_TLS_GD_HI 70 /* X0 pipe high 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X1_TLS_GD_HI 71 /* X1 pipe high 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X0_TLS_GD_HA 72 /* X0 pipe ha() 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X1_TLS_GD_HA 73 /* X1 pipe ha() 16-bit TLS GD offset */ ++#define R_TILEPRO_IMM16_X0_TLS_IE 74 /* X0 pipe 16-bit TLS IE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_IE 75 /* X1 pipe 16-bit TLS IE offset */ ++#define R_TILEPRO_IMM16_X0_TLS_IE_LO 76 /* X0 pipe low 16-bit TLS IE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_IE_LO 77 /* X1 pipe low 16-bit TLS IE offset */ ++#define R_TILEPRO_IMM16_X0_TLS_IE_HI 78 /* X0 pipe high 16-bit TLS IE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_IE_HI 79 /* X1 pipe high 16-bit TLS IE offset */ ++#define R_TILEPRO_IMM16_X0_TLS_IE_HA 80 /* X0 pipe ha() 16-bit TLS IE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_IE_HA 81 /* X1 pipe ha() 16-bit TLS IE offset */ ++#define R_TILEPRO_TLS_DTPMOD32 82 /* ID of module containing symbol */ ++#define R_TILEPRO_TLS_DTPOFF32 83 /* Offset in TLS block */ ++#define R_TILEPRO_TLS_TPOFF32 84 /* Offset in static TLS block */ ++#define R_TILEPRO_IMM16_X0_TLS_LE 85 /* X0 pipe 16-bit TLS LE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_LE 86 /* X1 pipe 16-bit TLS LE offset */ ++#define R_TILEPRO_IMM16_X0_TLS_LE_LO 87 /* X0 pipe low 16-bit TLS LE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_LE_LO 88 /* X1 pipe low 16-bit TLS LE offset */ ++#define R_TILEPRO_IMM16_X0_TLS_LE_HI 89 /* X0 pipe high 16-bit TLS LE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_LE_HI 90 /* X1 pipe high 
16-bit TLS LE offset */ ++#define R_TILEPRO_IMM16_X0_TLS_LE_HA 91 /* X0 pipe ha() 16-bit TLS LE offset */ ++#define R_TILEPRO_IMM16_X1_TLS_LE_HA 92 /* X1 pipe ha() 16-bit TLS LE offset */ ++ ++#define R_TILEPRO_GNU_VTINHERIT 128 /* GNU C++ vtable hierarchy */ ++#define R_TILEPRO_GNU_VTENTRY 129 /* GNU C++ vtable member usage */ ++ ++#define R_TILEPRO_NUM 130 ++ ++ ++/* TILE-Gx relocations. */ ++#define R_TILEGX_NONE 0 /* No reloc */ ++#define R_TILEGX_64 1 /* Direct 64 bit */ ++#define R_TILEGX_32 2 /* Direct 32 bit */ ++#define R_TILEGX_16 3 /* Direct 16 bit */ ++#define R_TILEGX_8 4 /* Direct 8 bit */ ++#define R_TILEGX_64_PCREL 5 /* PC relative 64 bit */ ++#define R_TILEGX_32_PCREL 6 /* PC relative 32 bit */ ++#define R_TILEGX_16_PCREL 7 /* PC relative 16 bit */ ++#define R_TILEGX_8_PCREL 8 /* PC relative 8 bit */ ++#define R_TILEGX_HW0 9 /* hword 0 16-bit */ ++#define R_TILEGX_HW1 10 /* hword 1 16-bit */ ++#define R_TILEGX_HW2 11 /* hword 2 16-bit */ ++#define R_TILEGX_HW3 12 /* hword 3 16-bit */ ++#define R_TILEGX_HW0_LAST 13 /* last hword 0 16-bit */ ++#define R_TILEGX_HW1_LAST 14 /* last hword 1 16-bit */ ++#define R_TILEGX_HW2_LAST 15 /* last hword 2 16-bit */ ++#define R_TILEGX_COPY 16 /* Copy relocation */ ++#define R_TILEGX_GLOB_DAT 17 /* Create GOT entry */ ++#define R_TILEGX_JMP_SLOT 18 /* Create PLT entry */ ++#define R_TILEGX_RELATIVE 19 /* Adjust by program base */ ++#define R_TILEGX_BROFF_X1 20 /* X1 pipe branch offset */ ++#define R_TILEGX_JUMPOFF_X1 21 /* X1 pipe jump offset */ ++#define R_TILEGX_JUMPOFF_X1_PLT 22 /* X1 pipe jump offset to PLT */ ++#define R_TILEGX_IMM8_X0 23 /* X0 pipe 8-bit */ ++#define R_TILEGX_IMM8_Y0 24 /* Y0 pipe 8-bit */ ++#define R_TILEGX_IMM8_X1 25 /* X1 pipe 8-bit */ ++#define R_TILEGX_IMM8_Y1 26 /* Y1 pipe 8-bit */ ++#define R_TILEGX_DEST_IMM8_X1 27 /* X1 pipe destination 8-bit */ ++#define R_TILEGX_MT_IMM14_X1 28 /* X1 pipe mtspr */ ++#define R_TILEGX_MF_IMM14_X1 29 /* X1 pipe mfspr */ ++#define R_TILEGX_MMSTART_X0 30 /* X0 pipe mm "start" */ ++#define R_TILEGX_MMEND_X0 31 /* X0 pipe mm "end" */ ++#define R_TILEGX_SHAMT_X0 32 /* X0 pipe shift amount */ ++#define R_TILEGX_SHAMT_X1 33 /* X1 pipe shift amount */ ++#define R_TILEGX_SHAMT_Y0 34 /* Y0 pipe shift amount */ ++#define R_TILEGX_SHAMT_Y1 35 /* Y1 pipe shift amount */ ++#define R_TILEGX_IMM16_X0_HW0 36 /* X0 pipe hword 0 */ ++#define R_TILEGX_IMM16_X1_HW0 37 /* X1 pipe hword 0 */ ++#define R_TILEGX_IMM16_X0_HW1 38 /* X0 pipe hword 1 */ ++#define R_TILEGX_IMM16_X1_HW1 39 /* X1 pipe hword 1 */ ++#define R_TILEGX_IMM16_X0_HW2 40 /* X0 pipe hword 2 */ ++#define R_TILEGX_IMM16_X1_HW2 41 /* X1 pipe hword 2 */ ++#define R_TILEGX_IMM16_X0_HW3 42 /* X0 pipe hword 3 */ ++#define R_TILEGX_IMM16_X1_HW3 43 /* X1 pipe hword 3 */ ++#define R_TILEGX_IMM16_X0_HW0_LAST 44 /* X0 pipe last hword 0 */ ++#define R_TILEGX_IMM16_X1_HW0_LAST 45 /* X1 pipe last hword 0 */ ++#define R_TILEGX_IMM16_X0_HW1_LAST 46 /* X0 pipe last hword 1 */ ++#define R_TILEGX_IMM16_X1_HW1_LAST 47 /* X1 pipe last hword 1 */ ++#define R_TILEGX_IMM16_X0_HW2_LAST 48 /* X0 pipe last hword 2 */ ++#define R_TILEGX_IMM16_X1_HW2_LAST 49 /* X1 pipe last hword 2 */ ++#define R_TILEGX_IMM16_X0_HW0_PCREL 50 /* X0 pipe PC relative hword 0 */ ++#define R_TILEGX_IMM16_X1_HW0_PCREL 51 /* X1 pipe PC relative hword 0 */ ++#define R_TILEGX_IMM16_X0_HW1_PCREL 52 /* X0 pipe PC relative hword 1 */ ++#define R_TILEGX_IMM16_X1_HW1_PCREL 53 /* X1 pipe PC relative hword 1 */ ++#define R_TILEGX_IMM16_X0_HW2_PCREL 54 /* X0 pipe PC relative hword 2 */ ++#define 
R_TILEGX_IMM16_X1_HW2_PCREL 55 /* X1 pipe PC relative hword 2 */ ++#define R_TILEGX_IMM16_X0_HW3_PCREL 56 /* X0 pipe PC relative hword 3 */ ++#define R_TILEGX_IMM16_X1_HW3_PCREL 57 /* X1 pipe PC relative hword 3 */ ++#define R_TILEGX_IMM16_X0_HW0_LAST_PCREL 58 /* X0 pipe PC-rel last hword 0 */ ++#define R_TILEGX_IMM16_X1_HW0_LAST_PCREL 59 /* X1 pipe PC-rel last hword 0 */ ++#define R_TILEGX_IMM16_X0_HW1_LAST_PCREL 60 /* X0 pipe PC-rel last hword 1 */ ++#define R_TILEGX_IMM16_X1_HW1_LAST_PCREL 61 /* X1 pipe PC-rel last hword 1 */ ++#define R_TILEGX_IMM16_X0_HW2_LAST_PCREL 62 /* X0 pipe PC-rel last hword 2 */ ++#define R_TILEGX_IMM16_X1_HW2_LAST_PCREL 63 /* X1 pipe PC-rel last hword 2 */ ++#define R_TILEGX_IMM16_X0_HW0_GOT 64 /* X0 pipe hword 0 GOT offset */ ++#define R_TILEGX_IMM16_X1_HW0_GOT 65 /* X1 pipe hword 0 GOT offset */ ++/* Relocs 66-71 are currently not defined. */ ++#define R_TILEGX_IMM16_X0_HW0_LAST_GOT 72 /* X0 pipe last hword 0 GOT offset */ ++#define R_TILEGX_IMM16_X1_HW0_LAST_GOT 73 /* X1 pipe last hword 0 GOT offset */ ++#define R_TILEGX_IMM16_X0_HW1_LAST_GOT 74 /* X0 pipe last hword 1 GOT offset */ ++#define R_TILEGX_IMM16_X1_HW1_LAST_GOT 75 /* X1 pipe last hword 1 GOT offset */ ++/* Relocs 76-77 are currently not defined. */ ++#define R_TILEGX_IMM16_X0_HW0_TLS_GD 78 /* X0 pipe hword 0 TLS GD offset */ ++#define R_TILEGX_IMM16_X1_HW0_TLS_GD 79 /* X1 pipe hword 0 TLS GD offset */ ++#define R_TILEGX_IMM16_X0_HW0_TLS_LE 80 /* X0 pipe hword 0 TLS LE offset */ ++#define R_TILEGX_IMM16_X1_HW0_TLS_LE 81 /* X1 pipe hword 0 TLS LE offset */ ++#define R_TILEGX_IMM16_X0_HW0_LAST_TLS_LE 82 /* X0 pipe last hword 0 LE off */ ++#define R_TILEGX_IMM16_X1_HW0_LAST_TLS_LE 83 /* X1 pipe last hword 0 LE off */ ++#define R_TILEGX_IMM16_X0_HW1_LAST_TLS_LE 84 /* X0 pipe last hword 1 LE off */ ++#define R_TILEGX_IMM16_X1_HW1_LAST_TLS_LE 85 /* X1 pipe last hword 1 LE off */ ++#define R_TILEGX_IMM16_X0_HW0_LAST_TLS_GD 86 /* X0 pipe last hword 0 GD off */ ++#define R_TILEGX_IMM16_X1_HW0_LAST_TLS_GD 87 /* X1 pipe last hword 0 GD off */ ++#define R_TILEGX_IMM16_X0_HW1_LAST_TLS_GD 88 /* X0 pipe last hword 1 GD off */ ++#define R_TILEGX_IMM16_X1_HW1_LAST_TLS_GD 89 /* X1 pipe last hword 1 GD off */ ++/* Relocs 90-91 are currently not defined. */ ++#define R_TILEGX_IMM16_X0_HW0_TLS_IE 92 /* X0 pipe hword 0 TLS IE offset */ ++#define R_TILEGX_IMM16_X1_HW0_TLS_IE 93 /* X1 pipe hword 0 TLS IE offset */ ++/* Relocs 94-99 are currently not defined. */ ++#define R_TILEGX_IMM16_X0_HW0_LAST_TLS_IE 100 /* X0 pipe last hword 0 IE off */ ++#define R_TILEGX_IMM16_X1_HW0_LAST_TLS_IE 101 /* X1 pipe last hword 0 IE off */ ++#define R_TILEGX_IMM16_X0_HW1_LAST_TLS_IE 102 /* X0 pipe last hword 1 IE off */ ++#define R_TILEGX_IMM16_X1_HW1_LAST_TLS_IE 103 /* X1 pipe last hword 1 IE off */ ++/* Relocs 104-105 are currently not defined. 
*/ ++#define R_TILEGX_TLS_DTPMOD64 106 /* 64-bit ID of symbol's module */ ++#define R_TILEGX_TLS_DTPOFF64 107 /* 64-bit offset in TLS block */ ++#define R_TILEGX_TLS_TPOFF64 108 /* 64-bit offset in static TLS block */ ++#define R_TILEGX_TLS_DTPMOD32 109 /* 32-bit ID of symbol's module */ ++#define R_TILEGX_TLS_DTPOFF32 110 /* 32-bit offset in TLS block */ ++#define R_TILEGX_TLS_TPOFF32 111 /* 32-bit offset in static TLS block */ ++#define R_TILEGX_TLS_GD_CALL 112 /* "jal" for TLS GD */ ++#define R_TILEGX_IMM8_X0_TLS_GD_ADD 113 /* X0 pipe "addi" for TLS GD */ ++#define R_TILEGX_IMM8_X1_TLS_GD_ADD 114 /* X1 pipe "addi" for TLS GD */ ++#define R_TILEGX_IMM8_Y0_TLS_GD_ADD 115 /* Y0 pipe "addi" for TLS GD */ ++#define R_TILEGX_IMM8_Y1_TLS_GD_ADD 116 /* Y1 pipe "addi" for TLS GD */ ++#define R_TILEGX_TLS_IE_LOAD 117 /* "ld_tls" for TLS IE */ ++#define R_TILEGX_IMM8_X0_TLS_ADD 118 /* X0 pipe "addi" for TLS GD/IE */ ++#define R_TILEGX_IMM8_X1_TLS_ADD 119 /* X1 pipe "addi" for TLS GD/IE */ ++#define R_TILEGX_IMM8_Y0_TLS_ADD 120 /* Y0 pipe "addi" for TLS GD/IE */ ++#define R_TILEGX_IMM8_Y1_TLS_ADD 121 /* Y1 pipe "addi" for TLS GD/IE */ ++ ++#define R_TILEGX_GNU_VTINHERIT 128 /* GNU C++ vtable hierarchy */ ++#define R_TILEGX_GNU_VTENTRY 129 /* GNU C++ vtable member usage */ ++ ++#define R_TILEGX_NUM 130 ++ ++#endif /* elf.h */ diff --git a/target/linux/generic/patches-3.8/212-byteshift_portability.patch b/target/linux/generic/patches-3.8/212-byteshift_portability.patch new file mode 100644 index 0000000000..bba55f5358 --- /dev/null +++ b/target/linux/generic/patches-3.8/212-byteshift_portability.patch @@ -0,0 +1,53 @@ +--- a/tools/include/tools/be_byteshift.h ++++ b/tools/include/tools/be_byteshift.h +@@ -1,7 +1,11 @@ + #ifndef _TOOLS_BE_BYTESHIFT_H + #define _TOOLS_BE_BYTESHIFT_H + ++#ifdef __linux__ + #include <linux/types.h> ++#else ++#include "linux_types.h" ++#endif + + static inline __u16 __get_unaligned_be16(const __u8 *p) + { +--- a/tools/include/tools/le_byteshift.h ++++ b/tools/include/tools/le_byteshift.h +@@ -1,7 +1,11 @@ + #ifndef _TOOLS_LE_BYTESHIFT_H + #define _TOOLS_LE_BYTESHIFT_H + ++#ifdef __linux__ + #include <linux/types.h> ++#else ++#include "linux_types.h" ++#endif + + static inline __u16 __get_unaligned_le16(const __u8 *p) + { +--- /dev/null ++++ b/tools/include/tools/linux_types.h +@@ -0,0 +1,22 @@ ++#ifndef __LINUX_TYPES_H ++#define __LINUX_TYPES_H ++ ++#include <stdint.h> ++ ++typedef uint8_t __u8; ++typedef uint8_t __be8; ++typedef uint8_t __le8; ++ ++typedef uint16_t __u16; ++typedef uint16_t __be16; ++typedef uint16_t __le16; ++ ++typedef uint32_t __u32; ++typedef uint32_t __be32; ++typedef uint32_t __le32; ++ ++typedef uint64_t __u64; ++typedef uint64_t __be64; ++typedef uint64_t __le64; ++ ++#endif diff --git a/target/linux/generic/patches-3.8/220-module_exports.patch b/target/linux/generic/patches-3.8/220-module_exports.patch new file mode 100644 index 0000000000..fc382c16f9 --- /dev/null +++ b/target/linux/generic/patches-3.8/220-module_exports.patch @@ -0,0 +1,91 @@ +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -52,6 +52,18 @@ + #define LOAD_OFFSET 0 + #endif + ++#ifndef SYMTAB_KEEP_STR ++#define SYMTAB_KEEP *(SORT(___ksymtab+*)) ++#define SYMTAB_KEEP_GPL *(SORT(___ksymtab_gpl+*)) ++#define SYMTAB_KEEP_STR *(__ksymtab_strings+*) ++#endif ++ ++#ifndef SYMTAB_DISCARD ++#define SYMTAB_DISCARD ++#define SYMTAB_DISCARD_GPL ++#define SYMTAB_DISCARD_STR ++#endif ++ + #ifndef SYMBOL_PREFIX + #define VMLINUX_SYMBOL(sym) sym + #else +@@ 
-276,14 +288,14 @@ + /* Kernel symbol table: Normal symbols */ \ + __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab) = .; \ +- *(SORT(___ksymtab+*)) \ ++ SYMTAB_KEEP \ + VMLINUX_SYMBOL(__stop___ksymtab) = .; \ + } \ + \ + /* Kernel symbol table: GPL-only symbols */ \ + __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ +- *(SORT(___ksymtab_gpl+*)) \ ++ SYMTAB_KEEP_GPL \ + VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ + } \ + \ +@@ -345,7 +357,7 @@ + \ + /* Kernel symbol table: strings */ \ + __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ +- *(__ksymtab_strings) \ ++ SYMTAB_KEEP_STR \ + } \ + \ + /* __*init sections */ \ +@@ -679,6 +691,9 @@ + EXIT_TEXT \ + EXIT_DATA \ + EXIT_CALL \ ++ SYMTAB_DISCARD \ ++ SYMTAB_DISCARD_GPL \ ++ SYMTAB_DISCARD_STR \ + *(.discard) \ + *(.discard.*) \ + } +--- a/include/linux/export.h ++++ b/include/linux/export.h +@@ -45,12 +45,19 @@ extern struct module __this_module; + #define __CRC_SYMBOL(sym, sec) + #endif + ++#ifdef MODULE ++#define __EXPORT_SUFFIX(sym) ++#else ++#define __EXPORT_SUFFIX(sym) "+" #sym ++#endif ++ + /* For every exported symbol, place a struct in the __ksymtab section */ + #define __EXPORT_SYMBOL(sym, sec) \ + extern typeof(sym) sym; \ + __CRC_SYMBOL(sym, sec) \ + static const char __kstrtab_##sym[] \ +- __attribute__((section("__ksymtab_strings"), aligned(1))) \ ++ __attribute__((section("__ksymtab_strings" \ ++ __EXPORT_SUFFIX(sym)), aligned(1))) \ + = MODULE_SYMBOL_PREFIX #sym; \ + static const struct kernel_symbol __ksymtab_##sym \ + __used \ +--- a/scripts/Makefile.build ++++ b/scripts/Makefile.build +@@ -348,7 +348,7 @@ targets += $(extra-y) $(MAKECMDGOALS) $( + # Linker scripts preprocessor (.lds.S -> .lds) + # --------------------------------------------------------------------------- + quiet_cmd_cpp_lds_S = LDS $@ +- cmd_cpp_lds_S = $(CPP) $(cpp_flags) -P -C -U$(ARCH) \ ++ cmd_cpp_lds_S = $(CPP) $(EXTRA_LDSFLAGS) $(cpp_flags) -P -C -U$(ARCH) \ + -D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $< + + $(obj)/%.lds: $(src)/%.lds.S FORCE diff --git a/target/linux/generic/patches-3.8/230-openwrt_lzma_options.patch b/target/linux/generic/patches-3.8/230-openwrt_lzma_options.patch new file mode 100644 index 0000000000..55434c13ab --- /dev/null +++ b/target/linux/generic/patches-3.8/230-openwrt_lzma_options.patch @@ -0,0 +1,54 @@ +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -299,7 +299,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^) + + quiet_cmd_lzma = LZMA $@ + cmd_lzma = (cat $(filter-out FORCE,$^) | \ +- lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ ++ lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + (rm -f $@ ; false) + + quiet_cmd_lzo = LZO $@ +--- a/scripts/gen_initramfs_list.sh ++++ b/scripts/gen_initramfs_list.sh +@@ -226,7 +226,7 @@ cpio_list= + output="/dev/stdout" + output_file="" + is_cpio_compressed= +-compr="gzip -n -9 -f" ++compr="gzip -n -9 -f -" + + arg="$1" + case "$arg" in +@@ -240,9 +240,9 @@ case "$arg" in + output_file="$1" + cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)" + output=${cpio_list} +- echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f" +- echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f" +- echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f" ++ echo "$output_file" | grep -q "\.gz$" && compr="gzip -n -9 -f -" ++ echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f -" ++ echo 
"$output_file" | grep -q "\.lzma$" && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so" + echo "$output_file" | grep -q "\.xz$" && \ + compr="xz --check=crc32 --lzma2=dict=1MiB" + echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f" +@@ -303,7 +303,7 @@ if [ ! -z ${output_file} ]; then + if [ "${is_cpio_compressed}" = "compressed" ]; then + cat ${cpio_tfile} > ${output_file} + else +- (cat ${cpio_tfile} | ${compr} - > ${output_file}) \ ++ (cat ${cpio_tfile} | ${compr} > ${output_file}) \ + || (rm -f ${output_file} ; false) + fi + [ -z ${cpio_file} ] && rm ${cpio_tfile} +--- a/lib/decompress.c ++++ b/lib/decompress.c +@@ -43,6 +43,7 @@ static const struct compress_format comp + { {037, 0236}, "gzip", gunzip }, + { {0x42, 0x5a}, "bzip2", bunzip2 }, + { {0x5d, 0x00}, "lzma", unlzma }, ++ { {0x6d, 0x00}, "lzma-openwrt", unlzma }, + { {0xfd, 0x37}, "xz", unxz }, + { {0x89, 0x4c}, "lzo", unlzo }, + { {0, 0}, NULL, NULL } diff --git a/target/linux/generic/patches-3.8/250-netfilter_depends.patch b/target/linux/generic/patches-3.8/250-netfilter_depends.patch new file mode 100644 index 0000000000..3c9013400a --- /dev/null +++ b/target/linux/generic/patches-3.8/250-netfilter_depends.patch @@ -0,0 +1,18 @@ +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -191,7 +191,6 @@ config NF_CONNTRACK_FTP + + config NF_CONNTRACK_H323 + tristate "H.323 protocol support" +- depends on (IPV6 || IPV6=n) + depends on NETFILTER_ADVANCED + help + H.323 is a VoIP signalling protocol from ITU-T. As one of the most +@@ -750,7 +749,6 @@ config NETFILTER_XT_TARGET_SECMARK + + config NETFILTER_XT_TARGET_TCPMSS + tristate '"TCPMSS" target support' +- depends on (IPV6 || IPV6=n) + default m if NETFILTER_ADVANCED=n + ---help--- + This option adds a `TCPMSS' target, which allows you to alter the diff --git a/target/linux/generic/patches-3.8/251-sound_kconfig.patch b/target/linux/generic/patches-3.8/251-sound_kconfig.patch new file mode 100644 index 0000000000..f374009a65 --- /dev/null +++ b/target/linux/generic/patches-3.8/251-sound_kconfig.patch @@ -0,0 +1,11 @@ +--- a/sound/core/Kconfig ++++ b/sound/core/Kconfig +@@ -7,7 +7,7 @@ config SND_PCM + select SND_TIMER + + config SND_HWDEP +- tristate ++ tristate "Sound hardware support" + + config SND_RAWMIDI + tristate diff --git a/target/linux/generic/patches-3.8/252-mv_cesa_depends.patch b/target/linux/generic/patches-3.8/252-mv_cesa_depends.patch new file mode 100644 index 0000000000..fee28db120 --- /dev/null +++ b/target/linux/generic/patches-3.8/252-mv_cesa_depends.patch @@ -0,0 +1,10 @@ +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA + depends on PLAT_ORION + select CRYPTO_ALGAPI + select CRYPTO_AES ++ select CRYPTO_HASH2 + select CRYPTO_BLKCIPHER2 + select CRYPTO_HASH + help diff --git a/target/linux/generic/patches-3.8/253-ssb_b43_default_on.patch b/target/linux/generic/patches-3.8/253-ssb_b43_default_on.patch new file mode 100644 index 0000000000..29d2a41a3b --- /dev/null +++ b/target/linux/generic/patches-3.8/253-ssb_b43_default_on.patch @@ -0,0 +1,29 @@ +--- a/drivers/ssb/Kconfig ++++ b/drivers/ssb/Kconfig +@@ -29,6 +29,7 @@ config SSB_SPROM + config SSB_BLOCKIO + bool + depends on SSB ++ default y + + config SSB_PCIHOST_POSSIBLE + bool +@@ -49,7 +50,7 @@ config SSB_PCIHOST + config SSB_B43_PCI_BRIDGE + bool + depends on SSB_PCIHOST +- default n ++ default y + + config SSB_PCMCIAHOST_POSSIBLE + bool +--- a/drivers/bcma/Kconfig ++++ b/drivers/bcma/Kconfig +@@ -17,6 +17,7 @@ config BCMA + config 
BCMA_BLOCKIO + bool + depends on BCMA ++ default y + + config BCMA_HOST_PCI_POSSIBLE + bool diff --git a/target/linux/generic/patches-3.8/254-textsearch_kconfig_hacks.patch b/target/linux/generic/patches-3.8/254-textsearch_kconfig_hacks.patch new file mode 100644 index 0000000000..9b0ed648f9 --- /dev/null +++ b/target/linux/generic/patches-3.8/254-textsearch_kconfig_hacks.patch @@ -0,0 +1,23 @@ +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -280,16 +280,16 @@ config BCH_CONST_T + # Textsearch support is select'ed if needed + # + config TEXTSEARCH +- boolean ++ boolean "Textsearch support" + + config TEXTSEARCH_KMP +- tristate ++ tristate "Textsearch KMP" + + config TEXTSEARCH_BM +- tristate ++ tristate "Textsearch BM" + + config TEXTSEARCH_FSM +- tristate ++ tristate "Textsearch FSM" + + config BTREE + boolean diff --git a/target/linux/generic/patches-3.8/255-lib80211_kconfig_hacks.patch b/target/linux/generic/patches-3.8/255-lib80211_kconfig_hacks.patch new file mode 100644 index 0000000000..8cc6ba3513 --- /dev/null +++ b/target/linux/generic/patches-3.8/255-lib80211_kconfig_hacks.patch @@ -0,0 +1,19 @@ +--- a/net/wireless/Kconfig ++++ b/net/wireless/Kconfig +@@ -149,13 +149,13 @@ config LIB80211 + Drivers should select this themselves if needed. + + config LIB80211_CRYPT_WEP +- tristate ++ tristate "LIB80211_CRYPT_WEP" + + config LIB80211_CRYPT_CCMP +- tristate ++ tristate "LIB80211_CRYPT_CCMP" + + config LIB80211_CRYPT_TKIP +- tristate ++ tristate "LIB80211_CRYPT_TKIP" + + config LIB80211_DEBUG + bool "lib80211 debugging messages" diff --git a/target/linux/generic/patches-3.8/256-crypto_add_kconfig_prompts.patch b/target/linux/generic/patches-3.8/256-crypto_add_kconfig_prompts.patch new file mode 100644 index 0000000000..8462c711f0 --- /dev/null +++ b/target/linux/generic/patches-3.8/256-crypto_add_kconfig_prompts.patch @@ -0,0 +1,47 @@ +--- a/crypto/Kconfig ++++ b/crypto/Kconfig +@@ -31,7 +31,7 @@ config CRYPTO_FIPS + this is. + + config CRYPTO_ALGAPI +- tristate ++ tristate "ALGAPI" + select CRYPTO_ALGAPI2 + help + This option provides the API for cryptographic algorithms. 
+@@ -40,7 +40,7 @@ config CRYPTO_ALGAPI2 + tristate + + config CRYPTO_AEAD +- tristate ++ tristate "AEAD" + select CRYPTO_AEAD2 + select CRYPTO_ALGAPI + +@@ -49,7 +49,7 @@ config CRYPTO_AEAD2 + select CRYPTO_ALGAPI2 + + config CRYPTO_BLKCIPHER +- tristate ++ tristate "BLKCIPHER" + select CRYPTO_BLKCIPHER2 + select CRYPTO_ALGAPI + +@@ -60,7 +60,7 @@ config CRYPTO_BLKCIPHER2 + select CRYPTO_WORKQUEUE + + config CRYPTO_HASH +- tristate ++ tristate "HASH" + select CRYPTO_HASH2 + select CRYPTO_ALGAPI + +@@ -69,7 +69,7 @@ config CRYPTO_HASH2 + select CRYPTO_ALGAPI2 + + config CRYPTO_RNG +- tristate ++ tristate "RNG" + select CRYPTO_RNG2 + select CRYPTO_ALGAPI + diff --git a/target/linux/generic/patches-3.8/257-wireless_ext_kconfig_hack.patch b/target/linux/generic/patches-3.8/257-wireless_ext_kconfig_hack.patch new file mode 100644 index 0000000000..daac5898ae --- /dev/null +++ b/target/linux/generic/patches-3.8/257-wireless_ext_kconfig_hack.patch @@ -0,0 +1,22 @@ +--- a/net/wireless/Kconfig ++++ b/net/wireless/Kconfig +@@ -1,5 +1,5 @@ + config WIRELESS_EXT +- bool ++ bool "Wireless extensions" + + config WEXT_CORE + def_bool y +@@ -11,10 +11,10 @@ config WEXT_PROC + depends on WEXT_CORE + + config WEXT_SPY +- bool ++ bool "WEXT_SPY" + + config WEXT_PRIV +- bool ++ bool "WEXT_PRIV" + + config CFG80211 + tristate "cfg80211 - wireless configuration API" diff --git a/target/linux/generic/patches-3.8/258-netfilter_netlink_kconfig_hack.patch b/target/linux/generic/patches-3.8/258-netfilter_netlink_kconfig_hack.patch new file mode 100644 index 0000000000..9d827c253b --- /dev/null +++ b/target/linux/generic/patches-3.8/258-netfilter_netlink_kconfig_hack.patch @@ -0,0 +1,11 @@ +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -2,7 +2,7 @@ menu "Core Netfilter Configuration" + depends on NET && INET && NETFILTER + + config NETFILTER_NETLINK +- tristate ++ tristate "Netfilter NFNETLINK interface" + + config NETFILTER_NETLINK_ACCT + tristate "Netfilter NFACCT over NFNETLINK interface" diff --git a/target/linux/generic/patches-3.8/259-regmap_dynamic.patch b/target/linux/generic/patches-3.8/259-regmap_dynamic.patch new file mode 100644 index 0000000000..a6b4df4fb2 --- /dev/null +++ b/target/linux/generic/patches-3.8/259-regmap_dynamic.patch @@ -0,0 +1,55 @@ +--- a/drivers/base/regmap/Kconfig ++++ b/drivers/base/regmap/Kconfig +@@ -3,20 +3,23 @@ + # subsystems should select the appropriate symbols. 
+ + config REGMAP +- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ) + select LZO_COMPRESS + select LZO_DECOMPRESS + select IRQ_DOMAIN if REGMAP_IRQ +- bool ++ tristate "Regmap" + + config REGMAP_I2C +- tristate ++ select REGMAP ++ tristate "Regmap I2C" + + config REGMAP_SPI +- tristate ++ select REGMAP ++ tristate "Regmap SPI" + + config REGMAP_MMIO ++ select REGMAP + tristate + + config REGMAP_IRQ ++ select REGMAP + bool +--- a/include/linux/regmap.h ++++ b/include/linux/regmap.h +@@ -44,7 +44,7 @@ struct reg_default { + unsigned int def; + }; + +-#ifdef CONFIG_REGMAP ++#if IS_ENABLED(CONFIG_REGMAP) + + enum regmap_endian { + /* Unspecified -> 0 -> Backwards compatible default */ +--- a/drivers/base/regmap/Makefile ++++ b/drivers/base/regmap/Makefile +@@ -1,6 +1,8 @@ +-obj-$(CONFIG_REGMAP) += regmap.o regcache.o +-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o +-obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o ++regmap-core-objs = regmap.o regcache.o regcache-rbtree.o regcache-lzo.o ++ifdef CONFIG_DEBUG_FS ++regmap-core-objs += regmap-debugfs.o ++endif ++obj-$(CONFIG_REGMAP) += regmap-core.o + obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o + obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o + obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o diff --git a/target/linux/generic/patches-3.8/300-mips_expose_boot_raw.patch b/target/linux/generic/patches-3.8/300-mips_expose_boot_raw.patch new file mode 100644 index 0000000000..59b500d120 --- /dev/null +++ b/target/linux/generic/patches-3.8/300-mips_expose_boot_raw.patch @@ -0,0 +1,39 @@ +From: Mark Miller <mark@mirell.org> + +This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on +certain Broadcom chipsets running CFE in order to load the kernel. + +Signed-off-by: Mark Miller <mark@mirell.org> +Acked-by: Rob Landley <rob@landley.net> +--- +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -893,9 +893,6 @@ config FW_ARC + config ARCH_MAY_HAVE_PC_FDC + bool + +-config BOOT_RAW +- bool +- + config CEVT_BCM1480 + bool + +@@ -2374,6 +2371,18 @@ config USE_OF + select OF_EARLY_FLATTREE + select IRQ_DOMAIN + ++config BOOT_RAW ++ bool "Enable the kernel to be executed from the load address" ++ default n ++ help ++ Allow the kernel to be executed from the load address for ++ bootloaders which cannot read the ELF format. This places ++ a jump to start_kernel at the load address. ++ ++ If unsure, say N. 
++ ++ ++ + endmenu + + config LOCKDEP_SUPPORT diff --git a/target/linux/generic/patches-3.8/301-mips_image_cmdline_hack.patch b/target/linux/generic/patches-3.8/301-mips_image_cmdline_hack.patch new file mode 100644 index 0000000000..3c23825b06 --- /dev/null +++ b/target/linux/generic/patches-3.8/301-mips_image_cmdline_hack.patch @@ -0,0 +1,28 @@ +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -984,6 +984,10 @@ config SYNC_R4K + config MIPS_MACHINE + def_bool n + ++config IMAGE_CMDLINE_HACK ++ bool "OpenWrt specific image command line hack" ++ default n ++ + config NO_IOPORT + def_bool n + +--- a/arch/mips/kernel/head.S ++++ b/arch/mips/kernel/head.S +@@ -141,6 +141,12 @@ FEXPORT(__kernel_entry) + j kernel_entry + #endif + ++#ifdef CONFIG_IMAGE_CMDLINE_HACK ++ .ascii "CMDLINE:" ++EXPORT(__image_cmdline) ++ .fill 0x400 ++#endif /* CONFIG_IMAGE_CMDLINE_HACK */ ++ + __REF + + NESTED(kernel_entry, 16, sp) # kernel entry point diff --git a/target/linux/generic/patches-3.8/302-mips_no_branch_likely.patch b/target/linux/generic/patches-3.8/302-mips_no_branch_likely.patch new file mode 100644 index 0000000000..1da9540050 --- /dev/null +++ b/target/linux/generic/patches-3.8/302-mips_no_branch_likely.patch @@ -0,0 +1,11 @@ +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -87,7 +87,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin + # machines may also. Since BFD is incredibly buggy with respect to + # crossformat linking we rely on the elf2ecoff tool for format conversion. + # +-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe ++cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely + cflags-y += -msoft-float + LDFLAGS_vmlinux += -G 0 -static -n -nostdlib + KBUILD_AFLAGS_MODULE += -mlong-calls diff --git a/target/linux/generic/patches-3.8/304-mips_disable_fpu.patch b/target/linux/generic/patches-3.8/304-mips_disable_fpu.patch new file mode 100644 index 0000000000..89beeec587 --- /dev/null +++ b/target/linux/generic/patches-3.8/304-mips_disable_fpu.patch @@ -0,0 +1,160 @@ +MIPS: allow disabling the kernel FPU emulator + +This patch allows turning off the in-kernel Algorithmics +FPU emulator support, which allows one to save a couple of +precious blocks on an embedded system. + +Signed-off-by: Florian Fainelli <florian@openwrt.org> +-- +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -969,6 +969,17 @@ config I8259 + config MIPS_BONITO64 + bool + ++config MIPS_FPU_EMU ++ bool "Enable FPU emulation" ++ default y ++ help ++ This option allows building a kernel with or without the Algorithmics ++ FPU emulator enabled. Turning off this option results in a kernel which ++ does not catch floating operations exceptions. Make sure that your toolchain ++ is configured to enable software floating point emulation in that case. ++ ++ If unsure say Y here. ++ + config MIPS_MSC + bool + +--- a/arch/mips/math-emu/Makefile ++++ b/arch/mips/math-emu/Makefile +@@ -2,11 +2,13 @@ + # Makefile for the Linux/MIPS kernel FPU emulation. 
+ # + +-obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \ ++obj-y := kernel_linkage.o dsemul.o cp1emu.o ++ ++obj-$(CONFIG_MIPS_FPU_EMU) += ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \ + ieee754xcpt.o dp_frexp.o dp_modf.o dp_div.o dp_mul.o dp_sub.o \ + dp_add.o dp_fsp.o dp_cmp.o dp_logb.o dp_scalb.o dp_simple.o \ + dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \ + sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \ + sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \ +- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o ++ dp_sqrt.o sp_sqrt.o + +--- a/arch/mips/math-emu/cp1emu.c ++++ b/arch/mips/math-emu/cp1emu.c +@@ -58,7 +58,11 @@ + #define __mips 4 + + /* Function which emulates a floating point instruction. */ ++#ifdef CONFIG_DEBUG_FS ++DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); ++#endif + ++#ifdef CONFIG_MIPS_FPU_EMU + static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *, + mips_instruction); + +@@ -69,10 +73,6 @@ static int fpux_emu(struct pt_regs *, + + /* Further private data for which no space exists in mips_fpu_struct */ + +-#ifdef CONFIG_DEBUG_FS +-DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); +-#endif +- + /* Control registers */ + + #define FPCREG_RID 0 /* $0 = revision id */ +@@ -1361,7 +1361,6 @@ int fpu_emulator_cop1Handler(struct pt_r + + return sig; + } +- + #ifdef CONFIG_DEBUG_FS + + static int fpuemu_stat_get(void *data, u64 *val) +@@ -1410,4 +1409,11 @@ static int __init debugfs_fpuemu(void) + return 0; + } + __initcall(debugfs_fpuemu); +-#endif ++#endif /* CONFIG_DEBUGFS */ ++#else ++int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, ++ int has_fpu) ++{ ++ return 0; ++} ++#endif /* CONFIG_MIPS_FPU_EMU */ +--- a/arch/mips/math-emu/dsemul.c ++++ b/arch/mips/math-emu/dsemul.c +@@ -108,6 +108,7 @@ int mips_dsemul(struct pt_regs *regs, mi + return SIGILL; /* force out of emulation loop */ + } + ++#ifdef CONFIG_MIPS_FPU_EMU + int do_dsemulret(struct pt_regs *xcp) + { + struct emuframe __user *fr; +@@ -164,3 +165,9 @@ int do_dsemulret(struct pt_regs *xcp) + + return 1; + } ++#else ++int do_dsemulret(struct pt_regs *xcp) ++{ ++ return 0; ++} ++#endif /* CONFIG_MIPS_FPU_EMU */ +--- a/arch/mips/math-emu/kernel_linkage.c ++++ b/arch/mips/math-emu/kernel_linkage.c +@@ -29,6 +29,7 @@ + + #define SIGNALLING_NAN 0x7ff800007ff80000LL + ++#ifdef CONFIG_MIPS_FPU_EMU + void fpu_emulator_init_fpu(void) + { + static int first = 1; +@@ -112,4 +113,36 @@ int fpu_emulator_restore_context32(struc + + return err; + } +-#endif ++#endif /* CONFIG_64BIT */ ++#else ++ ++void fpu_emulator_init_fpu(void) ++{ ++ printk(KERN_INFO "FPU emulator disabled, make sure your toolchain" ++ "was compiled with software floating point support (soft-float)\n"); ++ return; ++} ++ ++int fpu_emulator_save_context(struct sigcontext __user *sc) ++{ ++ return 0; ++} ++ ++int fpu_emulator_restore_context(struct sigcontext __user *sc) ++{ ++ return 0; ++} ++ ++int fpu_emulator_save_context32(struct sigcontext32 __user *sc) ++{ ++ return 0; ++} ++ ++int fpu_emulator_restore_context32(struct sigcontext32 __user *sc) ++{ ++ return 0; ++} ++ ++#ifdef CONFIG_64BIT ++#endif /* CONFIG_64BIT */ ++#endif /* CONFIG_MIPS_FPU_EMU */ diff --git a/target/linux/generic/patches-3.8/306-mips_mem_functions_performance.patch b/target/linux/generic/patches-3.8/306-mips_mem_functions_performance.patch new file mode 100644 index 0000000000..9818677425 --- /dev/null +++ 
b/target/linux/generic/patches-3.8/306-mips_mem_functions_performance.patch @@ -0,0 +1,83 @@ +--- a/arch/mips/include/asm/string.h ++++ b/arch/mips/include/asm/string.h +@@ -133,11 +133,44 @@ strncmp(__const__ char *__cs, __const__ + + #define __HAVE_ARCH_MEMSET + extern void *memset(void *__s, int __c, size_t __count); ++#define memset(__s, __c, len) \ ++({ \ ++ size_t __len = (len); \ ++ void *__ret; \ ++ if (__builtin_constant_p(len) && __len >= 64) \ ++ __ret = memset((__s), (__c), __len); \ ++ else \ ++ __ret = __builtin_memset((__s), (__c), __len); \ ++ __ret; \ ++}) + + #define __HAVE_ARCH_MEMCPY + extern void *memcpy(void *__to, __const__ void *__from, size_t __n); ++#define memcpy(dst, src, len) \ ++({ \ ++ size_t __len = (len); \ ++ void *__ret; \ ++ if (__builtin_constant_p(len) && __len >= 64) \ ++ __ret = memcpy((dst), (src), __len); \ ++ else \ ++ __ret = __builtin_memcpy((dst), (src), __len); \ ++ __ret; \ ++}) + + #define __HAVE_ARCH_MEMMOVE + extern void *memmove(void *__dest, __const__ void *__src, size_t __n); ++#define memmove(dst, src, len) \ ++({ \ ++ size_t __len = (len); \ ++ void *__ret; \ ++ if (__builtin_constant_p(len) && __len >= 64) \ ++ __ret = memmove((dst), (src), __len); \ ++ else \ ++ __ret = __builtin_memmove((dst), (src), __len); \ ++ __ret; \ ++}) ++ ++#define __HAVE_ARCH_MEMCMP ++#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len)) + + #endif /* _ASM_STRING_H */ +--- a/arch/mips/lib/Makefile ++++ b/arch/mips/lib/Makefile +@@ -4,7 +4,7 @@ + + lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ + mips-atomic.o strlen_user.o strncpy_user.o \ +- strnlen_user.o uncached.o ++ strnlen_user.o uncached.o memcmp.o + + obj-y += iomap.o + obj-$(CONFIG_PCI) += iomap-pci.o +--- /dev/null ++++ b/arch/mips/lib/memcmp.c +@@ -0,0 +1,22 @@ ++/* ++ * copied from linux/lib/string.c ++ * ++ * Copyright (C) 1991, 1992 Linus Torvalds ++ */ ++ ++#include <linux/module.h> ++#include <linux/string.h> ++ ++#undef memcmp ++int memcmp(const void *cs, const void *ct, size_t count) ++{ ++ const unsigned char *su1, *su2; ++ int res = 0; ++ ++ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) ++ if ((res = *su1 - *su2) != 0) ++ break; ++ return res; ++} ++EXPORT_SYMBOL(memcmp); ++ diff --git a/target/linux/generic/patches-3.8/308-mips-show-correct-cpu-name-for-24KEc.patch b/target/linux/generic/patches-3.8/308-mips-show-correct-cpu-name-for-24KEc.patch new file mode 100644 index 0000000000..923992e816 --- /dev/null +++ b/target/linux/generic/patches-3.8/308-mips-show-correct-cpu-name-for-24KEc.patch @@ -0,0 +1,17 @@ +--- a/arch/mips/kernel/cpu-probe.c ++++ b/arch/mips/kernel/cpu-probe.c +@@ -838,10 +838,13 @@ static inline void cpu_probe_mips(struct + __cpu_name[cpu] = "MIPS 20Kc"; + break; + case PRID_IMP_24K: +- case PRID_IMP_24KE: + c->cputype = CPU_24K; + __cpu_name[cpu] = "MIPS 24Kc"; + break; ++ case PRID_IMP_24KE: ++ c->cputype = CPU_24K; ++ __cpu_name[cpu] = "MIPS 24KEc"; ++ break; + case PRID_IMP_25KF: + c->cputype = CPU_25KF; + __cpu_name[cpu] = "MIPS 25Kc"; diff --git a/target/linux/generic/patches-3.8/309-mips_fuse_workaround.patch b/target/linux/generic/patches-3.8/309-mips_fuse_workaround.patch new file mode 100644 index 0000000000..5cc27a7085 --- /dev/null +++ b/target/linux/generic/patches-3.8/309-mips_fuse_workaround.patch @@ -0,0 +1,32 @@ +--- a/arch/mips/mm/cache.c ++++ b/arch/mips/mm/cache.c +@@ -39,6 +39,7 @@ void (*__flush_kernel_vmap_range)(unsign + void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size); + + 
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); ++EXPORT_SYMBOL(__flush_cache_all); + + /* MIPS specific cache operations */ + void (*flush_cache_sigtramp)(unsigned long addr); +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -19,6 +19,9 @@ + #include <linux/pipe_fs_i.h> + #include <linux/swap.h> + #include <linux/splice.h> ++#ifdef CONFIG_MIPS ++#include <asm/cacheflush.h> ++#endif + + MODULE_ALIAS_MISCDEV(FUSE_MINOR); + MODULE_ALIAS("devname:fuse"); +@@ -654,6 +657,9 @@ static int fuse_copy_fill(struct fuse_co + static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) + { + unsigned ncpy = min(*size, cs->len); ++#ifdef CONFIG_MIPS ++ __flush_cache_all(); ++#endif + if (val) { + if (cs->write) + memcpy(cs->buf, *val, ncpy); diff --git a/target/linux/generic/patches-3.8/310-arm_module_unresolved_weak_sym.patch b/target/linux/generic/patches-3.8/310-arm_module_unresolved_weak_sym.patch new file mode 100644 index 0000000000..d1eba55038 --- /dev/null +++ b/target/linux/generic/patches-3.8/310-arm_module_unresolved_weak_sym.patch @@ -0,0 +1,13 @@ +--- a/arch/arm/kernel/module.c ++++ b/arch/arm/kernel/module.c +@@ -81,6 +81,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons + return -ENOEXEC; + } + ++ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) && ++ ELF_ST_BIND(sym->st_info) == STB_WEAK) ++ continue; ++ + loc = dstsec->sh_addr + rel->r_offset; + + switch (ELF32_R_TYPE(rel->r_info)) { diff --git a/target/linux/generic/patches-3.8/320-ppc4xx_optimization.patch b/target/linux/generic/patches-3.8/320-ppc4xx_optimization.patch new file mode 100644 index 0000000000..885f347d95 --- /dev/null +++ b/target/linux/generic/patches-3.8/320-ppc4xx_optimization.patch @@ -0,0 +1,31 @@ +Upstream doesn't optimize the kernel and bootwrappers for ppc44x because +they still want to support gcc 3.3 -- well, we don't. 
+ +--- a/arch/powerpc/Makefile ++++ b/arch/powerpc/Makefile +@@ -119,7 +119,8 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y) + KBUILD_CFLAGS += -mno-sched-epilog + endif + +-cpu-as-$(CONFIG_4xx) += -Wa,-m405 ++cpu-as-$(CONFIG_40x) += -Wa,-m405 ++cpu-as-$(CONFIG_44x) += -Wa,-m440 + cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec + cpu-as-$(CONFIG_E500) += -Wa,-me500 + cpu-as-$(CONFIG_E200) += -Wa,-me200 +--- a/arch/powerpc/boot/Makefile ++++ b/arch/powerpc/boot/Makefile +@@ -38,10 +38,10 @@ BOOTCFLAGS += -I$(obj) -I$(srctree)/$(ob + DTC_FLAGS ?= -p 1024 + + $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405 +-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405 ++$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440 + $(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405 +-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405 +-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405 ++$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440 ++$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440 + $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405 + $(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405 + $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405 diff --git a/target/linux/generic/patches-3.8/321-powerpc_crtsavres_prereq.patch b/target/linux/generic/patches-3.8/321-powerpc_crtsavres_prereq.patch new file mode 100644 index 0000000000..fe5d7c576e --- /dev/null +++ b/target/linux/generic/patches-3.8/321-powerpc_crtsavres_prereq.patch @@ -0,0 +1,10 @@ +--- a/arch/powerpc/Makefile ++++ b/arch/powerpc/Makefile +@@ -86,7 +86,6 @@ CPP = $(CC) -E $(KBUILD_CFLAGS) + + CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__ + +-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o + + # No AltiVec or VSX instructions when building kernel + KBUILD_CFLAGS += $(call cc-option,-mno-altivec) diff --git a/target/linux/generic/patches-3.8/322-ppc4xx-crypto-compile-fix.patch b/target/linux/generic/patches-3.8/322-ppc4xx-crypto-compile-fix.patch new file mode 100644 index 0000000000..226e41154c --- /dev/null +++ b/target/linux/generic/patches-3.8/322-ppc4xx-crypto-compile-fix.patch @@ -0,0 +1,10 @@ +--- a/drivers/crypto/amcc/crypto4xx_core.c ++++ b/drivers/crypto/amcc/crypto4xx_core.c +@@ -19,6 +19,7 @@ + */ + + #include <linux/kernel.h> ++#include <linux/module.h> + #include <linux/interrupt.h> + #include <linux/spinlock_types.h> + #include <linux/random.h> diff --git a/target/linux/generic/patches-3.8/340-module_alloc_size_check.patch b/target/linux/generic/patches-3.8/340-module_alloc_size_check.patch new file mode 100644 index 0000000000..2ed631855f --- /dev/null +++ b/target/linux/generic/patches-3.8/340-module_alloc_size_check.patch @@ -0,0 +1,20 @@ +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -2374,12 +2374,15 @@ static void dynamic_debug_remove(struct + + void * __weak module_alloc(unsigned long size) + { +- return vmalloc_exec(size); ++ return size == 0 ? NULL : vmalloc_exec(size); + } + + static void *module_alloc_update_bounds(unsigned long size) + { +- void *ret = module_alloc(size); ++ void *ret = NULL; ++ ++ if (size) ++ ret = module_alloc(size); + + if (ret) { + mutex_lock(&module_mutex); diff --git a/target/linux/generic/patches-3.8/400-rootfs_split.patch b/target/linux/generic/patches-3.8/400-rootfs_split.patch new file mode 100644 index 0000000000..b43ac9c5a6 --- /dev/null +++ b/target/linux/generic/patches-3.8/400-rootfs_split.patch @@ -0,0 +1,327 @@ +--- a/drivers/mtd/Kconfig ++++ b/drivers/mtd/Kconfig +@@ -23,6 +23,14 @@ config MTD_TESTS + WARNING: some of the tests will ERASE entire MTD device which they + test. 
Do not use these tests unless you really know what you do. + ++config MTD_ROOTFS_ROOT_DEV ++ bool "Automatically set 'rootfs' partition to be root filesystem" ++ default y ++ ++config MTD_ROOTFS_SPLIT ++ bool "Automatically split 'rootfs' partition for squashfs" ++ default y ++ + config MTD_REDBOOT_PARTS + tristate "RedBoot partition table parsing" + ---help--- +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -29,6 +29,8 @@ + #include <linux/kmod.h> + #include <linux/mtd/mtd.h> + #include <linux/mtd/partitions.h> ++#include <linux/root_dev.h> ++#include <linux/magic.h> + #include <linux/err.h> + + #include "mtdcore.h" +@@ -50,7 +52,7 @@ struct mtd_part { + * the pointer to that structure with this macro. + */ + #define PART(x) ((struct mtd_part *)(x)) +- ++#define IS_PART(mtd) (mtd->_read == part_read) + + /* + * MTD methods which simply translate the effective address and pass through +@@ -613,6 +615,155 @@ int mtd_del_partition(struct mtd_info *m + } + EXPORT_SYMBOL_GPL(mtd_del_partition); + ++#ifdef CONFIG_MTD_ROOTFS_SPLIT ++#define ROOTFS_SPLIT_NAME "rootfs_data" ++#define ROOTFS_REMOVED_NAME "<removed>" ++ ++struct squashfs_super_block { ++ __le32 s_magic; ++ __le32 pad0[9]; ++ __le64 bytes_used; ++}; ++ ++ ++static int split_squashfs(struct mtd_info *master, int offset, int *split_offset) ++{ ++ struct squashfs_super_block sb; ++ int len, ret; ++ ++ ret = mtd_read(master, offset, sizeof(sb), &len, (void *) &sb); ++ if (ret || (len != sizeof(sb))) { ++ printk(KERN_ALERT "split_squashfs: error occured while reading " ++ "from \"%s\"\n", master->name); ++ return -EINVAL; ++ } ++ ++ if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) { ++ printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n", ++ master->name); ++ *split_offset = 0; ++ return 0; ++ } ++ ++ if (le64_to_cpu((sb.bytes_used)) <= 0) { ++ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n", ++ master->name); ++ *split_offset = 0; ++ return 0; ++ } ++ ++ len = (u32) le64_to_cpu(sb.bytes_used); ++ len += (offset & 0x000fffff); ++ len += (master->erasesize - 1); ++ len &= ~(master->erasesize - 1); ++ len -= (offset & 0x000fffff); ++ *split_offset = offset + len; ++ ++ return 0; ++} ++ ++static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, const struct mtd_partition *part) ++{ ++ struct mtd_partition *dpart; ++ struct mtd_part *slave = NULL; ++ struct mtd_part *spart; ++ int ret, split_offset = 0; ++ ++ spart = PART(rpart); ++ ret = split_squashfs(master, spart->offset, &split_offset); ++ if (ret) ++ return ret; ++ ++ if (split_offset <= 0) ++ return 0; ++ ++ dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL); ++ if (dpart == NULL) { ++ printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n", ++ ROOTFS_SPLIT_NAME); ++ return -ENOMEM; ++ } ++ ++ memcpy(dpart, part, sizeof(*part)); ++ dpart->name = (unsigned char *)&dpart[1]; ++ strcpy(dpart->name, ROOTFS_SPLIT_NAME); ++ ++ dpart->size = rpart->size - (split_offset - spart->offset); ++ dpart->offset = split_offset; ++ ++ if (dpart == NULL) ++ return 1; ++ ++ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%llX, len=%llX \n", ++ ROOTFS_SPLIT_NAME, dpart->offset, dpart->size); ++ ++ slave = allocate_partition(master, dpart, 0, split_offset); ++ if (IS_ERR(slave)) ++ return PTR_ERR(slave); ++ mutex_lock(&mtd_partitions_mutex); ++ list_add(&slave->list, &mtd_partitions); ++ mutex_unlock(&mtd_partitions_mutex); ++ ++ add_mtd_device(&slave->mtd); ++ ++ rpart->split = &slave->mtd; 
++ ++ return 0; ++} ++ ++static int refresh_rootfs_split(struct mtd_info *mtd) ++{ ++ struct mtd_partition tpart; ++ struct mtd_part *part; ++ char *name; ++ //int index = 0; ++ int offset, size; ++ int ret; ++ ++ part = PART(mtd); ++ ++ /* check for the new squashfs offset first */ ++ ret = split_squashfs(part->master, part->offset, &offset); ++ if (ret) ++ return ret; ++ ++ if ((offset > 0) && !mtd->split) { ++ printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name); ++ /* if we don't have a rootfs split partition, create a new one */ ++ tpart.name = (char *) mtd->name; ++ tpart.size = mtd->size; ++ tpart.offset = part->offset; ++ ++ return split_rootfs_data(part->master, &part->mtd, &tpart); ++ } else if ((offset > 0) && mtd->split) { ++ /* update the offsets of the existing partition */ ++ size = mtd->size + part->offset - offset; ++ ++ part = PART(mtd->split); ++ part->offset = offset; ++ part->mtd.size = size; ++ printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n", ++ __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"), ++ (u32) part->offset, (u32) part->mtd.size); ++ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL); ++ strcpy(name, ROOTFS_SPLIT_NAME); ++ part->mtd.name = name; ++ } else if ((offset <= 0) && mtd->split) { ++ printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name); ++ ++ /* mark existing partition as removed */ ++ part = PART(mtd->split); ++ name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL); ++ strcpy(name, ROOTFS_REMOVED_NAME); ++ part->mtd.name = name; ++ part->offset = 0; ++ part->mtd.size = 0; ++ } ++ ++ return 0; ++} ++#endif /* CONFIG_MTD_ROOTFS_SPLIT */ ++ + /* + * This function, given a master MTD object and a partition table, creates + * and registers slave MTD objects which are bound to the master according to +@@ -629,6 +780,9 @@ int add_mtd_partitions(struct mtd_info * + struct mtd_part *slave; + uint64_t cur_offset = 0; + int i; ++#ifdef CONFIG_MTD_ROOTFS_SPLIT ++ int ret; ++#endif + + printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); + +@@ -643,12 +797,53 @@ int add_mtd_partitions(struct mtd_info * + + add_mtd_device(&slave->mtd); + ++ if (!strcmp(parts[i].name, "rootfs")) { ++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV ++ if (ROOT_DEV == 0) { ++ printk(KERN_NOTICE "mtd: partition \"rootfs\" " ++ "set to be root filesystem\n"); ++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index); ++ } ++#endif ++#ifdef CONFIG_MTD_ROOTFS_SPLIT ++ ret = split_rootfs_data(master, &slave->mtd, &parts[i]); ++ /* if (ret == 0) ++ * j++; */ ++#endif ++ } ++ + cur_offset = slave->offset + slave->mtd.size; + } + + return 0; + } + ++int mtd_device_refresh(struct mtd_info *mtd) ++{ ++ int ret = 0; ++ ++ if (IS_PART(mtd)) { ++ struct mtd_part *part; ++ struct mtd_info *master; ++ ++ part = PART(mtd); ++ master = part->master; ++ if (master->refresh_device) ++ ret = master->refresh_device(master); ++ } ++ ++ if (!ret && mtd->refresh_device) ++ ret = mtd->refresh_device(mtd); ++ ++#ifdef CONFIG_MTD_ROOTFS_SPLIT ++ if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs")) ++ refresh_rootfs_split(mtd); ++#endif ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(mtd_device_refresh); ++ + static DEFINE_SPINLOCK(part_parser_lock); + static LIST_HEAD(part_parsers); + +--- a/drivers/mtd/mtdchar.c ++++ b/drivers/mtd/mtdchar.c +@@ -1012,6 +1012,12 @@ static int mtdchar_ioctl(struct file *fi + break; + } + ++ case MTDREFRESH: ++ { ++ ret = 
mtd_device_refresh(mtd); ++ break; ++ } ++ + default: + ret = -ENOTTY; + } +--- a/include/linux/mtd/mtd.h ++++ b/include/linux/mtd/mtd.h +@@ -114,6 +114,7 @@ struct nand_ecclayout { + + struct module; /* only needed for owner field in mtd_info */ + ++struct mtd_info; + struct mtd_info { + u_char type; + uint32_t flags; +@@ -226,6 +227,9 @@ struct mtd_info { + int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); + int (*_suspend) (struct mtd_info *mtd); + void (*_resume) (struct mtd_info *mtd); ++ int (*refresh_device)(struct mtd_info *mtd); ++ struct mtd_info *split; ++ + /* + * If the driver is something smart, like UBI, it may need to maintain + * its own reference counting. The below functions are only for driver. +@@ -368,6 +372,7 @@ extern int mtd_device_parse_register(str + int defnr_parts); + #define mtd_device_register(master, parts, nr_parts) \ + mtd_device_parse_register(master, NULL, NULL, parts, nr_parts) ++extern int mtd_device_refresh(struct mtd_info *master); + extern int mtd_device_unregister(struct mtd_info *master); + extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); + extern int __get_mtd_device(struct mtd_info *mtd); +--- a/include/linux/mtd/partitions.h ++++ b/include/linux/mtd/partitions.h +@@ -36,12 +36,14 @@ + * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). + */ + ++struct mtd_partition; + struct mtd_partition { + char *name; /* identifier string */ + uint64_t size; /* partition size */ + uint64_t offset; /* offset within the master MTD space */ + uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ ++ int (*refresh_partition)(struct mtd_info *); + }; + + #define MTDPART_OFS_RETAIN (-3) +--- a/include/uapi/mtd/mtd-abi.h ++++ b/include/uapi/mtd/mtd-abi.h +@@ -202,6 +202,7 @@ struct otp_info { + * without OOB, e.g., NOR flash. + */ + #define MEMWRITE _IOWR('M', 24, struct mtd_write_req) ++#define MTDREFRESH _IO('M', 50) + + /* + * Obsolete legacy interface. 
Keep it in order not to break userspace diff --git a/target/linux/generic/patches-3.8/401-partial_eraseblock_write.patch b/target/linux/generic/patches-3.8/401-partial_eraseblock_write.patch new file mode 100644 index 0000000000..f2fa3676fb --- /dev/null +++ b/target/linux/generic/patches-3.8/401-partial_eraseblock_write.patch @@ -0,0 +1,145 @@ +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -35,6 +35,8 @@ + + #include "mtdcore.h" + ++#define MTD_ERASE_PARTIAL 0x8000 /* partition only covers parts of an erase block */ ++ + /* Our partition linked list */ + static LIST_HEAD(mtd_partitions); + static DEFINE_MUTEX(mtd_partitions_mutex); +@@ -230,13 +232,60 @@ static int part_erase(struct mtd_info *m + struct mtd_part *part = PART(mtd); + int ret; + ++ ++ instr->partial_start = false; ++ if (mtd->flags & MTD_ERASE_PARTIAL) { ++ size_t readlen = 0; ++ u64 mtd_ofs; ++ ++ instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC); ++ if (!instr->erase_buf) ++ return -ENOMEM; ++ ++ mtd_ofs = part->offset + instr->addr; ++ instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize); ++ ++ if (instr->erase_buf_ofs > 0) { ++ instr->addr -= instr->erase_buf_ofs; ++ ret = mtd_read(part->master, ++ instr->addr + part->offset, ++ part->master->erasesize, ++ &readlen, instr->erase_buf); ++ ++ instr->partial_start = true; ++ } else { ++ mtd_ofs = part->offset + part->mtd.size; ++ instr->erase_buf_ofs = part->master->erasesize - ++ do_div(mtd_ofs, part->master->erasesize); ++ ++ if (instr->erase_buf_ofs > 0) { ++ instr->len += instr->erase_buf_ofs; ++ ret = mtd_read(part->master, ++ part->offset + instr->addr + ++ instr->len - part->master->erasesize, ++ part->master->erasesize, &readlen, ++ instr->erase_buf); ++ } else { ++ ret = 0; ++ } ++ } ++ if (ret < 0) { ++ kfree(instr->erase_buf); ++ return ret; ++ } ++ ++ } ++ + instr->addr += part->offset; + ret = part->master->_erase(part->master, instr); + if (ret) { + if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) + instr->fail_addr -= part->offset; + instr->addr -= part->offset; ++ if (mtd->flags & MTD_ERASE_PARTIAL) ++ kfree(instr->erase_buf); + } ++ + return ret; + } + +@@ -244,7 +293,25 @@ void mtd_erase_callback(struct erase_inf + { + if (instr->mtd->_erase == part_erase) { + struct mtd_part *part = PART(instr->mtd); ++ size_t wrlen = 0; + ++ if (instr->mtd->flags & MTD_ERASE_PARTIAL) { ++ if (instr->partial_start) { ++ part->master->_write(part->master, ++ instr->addr, instr->erase_buf_ofs, ++ &wrlen, instr->erase_buf); ++ instr->addr += instr->erase_buf_ofs; ++ } else { ++ instr->len -= instr->erase_buf_ofs; ++ part->master->_write(part->master, ++ instr->addr + instr->len, ++ instr->erase_buf_ofs, &wrlen, ++ instr->erase_buf + ++ part->master->erasesize - ++ instr->erase_buf_ofs); ++ } ++ kfree(instr->erase_buf); ++ } + if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) + instr->fail_addr -= part->offset; + instr->addr -= part->offset; +@@ -504,18 +571,24 @@ static struct mtd_part *allocate_partiti + if ((slave->mtd.flags & MTD_WRITEABLE) && + mtd_mod_by_eb(slave->offset, &slave->mtd)) { + /* Doesn't start on a boundary of major erase size */ +- /* FIXME: Let it be writable if it is on a boundary of +- * _minor_ erase size though */ +- slave->mtd.flags &= ~MTD_WRITEABLE; +- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n", +- part->name); ++ slave->mtd.flags |= MTD_ERASE_PARTIAL; ++ if (((u32) slave->mtd.size) > master->erasesize) ++ slave->mtd.flags &= ~MTD_WRITEABLE; ++ else ++ 
slave->mtd.erasesize = slave->mtd.size; + } + if ((slave->mtd.flags & MTD_WRITEABLE) && +- mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { +- slave->mtd.flags &= ~MTD_WRITEABLE; +- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", +- part->name); ++ mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) { ++ slave->mtd.flags |= MTD_ERASE_PARTIAL; ++ ++ if ((u32) slave->mtd.size > master->erasesize) ++ slave->mtd.flags &= ~MTD_WRITEABLE; ++ else ++ slave->mtd.erasesize = slave->mtd.size; + } ++ if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL) ++ printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n", ++ part->name); + + slave->mtd.ecclayout = master->ecclayout; + slave->mtd.ecc_strength = master->ecc_strength; +--- a/include/linux/mtd/mtd.h ++++ b/include/linux/mtd/mtd.h +@@ -58,6 +58,10 @@ struct erase_info { + u_long priv; + u_char state; + struct erase_info *next; ++ ++ u8 *erase_buf; ++ u32 erase_buf_ofs; ++ bool partial_start; + }; + + struct mtd_erase_region_info { diff --git a/target/linux/generic/patches-3.8/410-mtd_info_move_forward_decl.patch b/target/linux/generic/patches-3.8/410-mtd_info_move_forward_decl.patch new file mode 100644 index 0000000000..251f522e44 --- /dev/null +++ b/target/linux/generic/patches-3.8/410-mtd_info_move_forward_decl.patch @@ -0,0 +1,18 @@ +--- a/include/linux/mtd/partitions.h ++++ b/include/linux/mtd/partitions.h +@@ -35,6 +35,7 @@ + * Note: writeable partitions require their size and offset be + * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). + */ ++struct mtd_info; + + struct mtd_partition; + struct mtd_partition { +@@ -52,7 +53,6 @@ struct mtd_partition { + #define MTDPART_SIZ_FULL (0) + + +-struct mtd_info; + struct device_node; + + /** diff --git a/target/linux/generic/patches-3.8/420-redboot_space.patch b/target/linux/generic/patches-3.8/420-redboot_space.patch new file mode 100644 index 0000000000..f74affcef7 --- /dev/null +++ b/target/linux/generic/patches-3.8/420-redboot_space.patch @@ -0,0 +1,30 @@ +--- a/drivers/mtd/redboot.c ++++ b/drivers/mtd/redboot.c +@@ -265,14 +265,21 @@ static int parse_redboot_partitions(stru + #endif + names += strlen(names)+1; + +-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED + if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) { +- i++; +- parts[i].offset = parts[i-1].size + parts[i-1].offset; +- parts[i].size = fl->next->img->flash_base - parts[i].offset; +- parts[i].name = nullname; +- } ++ if (!strcmp(parts[i].name, "rootfs")) { ++ parts[i].size = fl->next->img->flash_base; ++ parts[i].size &= ~(master->erasesize - 1); ++ parts[i].size -= parts[i].offset; ++#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED ++ nrparts--; ++ } else { ++ i++; ++ parts[i].offset = parts[i-1].size + parts[i-1].offset; ++ parts[i].size = fl->next->img->flash_base - parts[i].offset; ++ parts[i].name = nullname; + #endif ++ } ++ } + tmp_fl = fl; + fl = fl->next; + kfree(tmp_fl); diff --git a/target/linux/generic/patches-3.8/430-mtd_myloader_partition_parser.patch b/target/linux/generic/patches-3.8/430-mtd_myloader_partition_parser.patch new file mode 100644 index 0000000000..0f3379897d --- /dev/null +++ b/target/linux/generic/patches-3.8/430-mtd_myloader_partition_parser.patch @@ -0,0 +1,35 @@ +--- a/drivers/mtd/Kconfig ++++ b/drivers/mtd/Kconfig +@@ -163,6 +163,22 @@ config MTD_BCM47XX_PARTS + This provides 
partitions parser for devices based on BCM47xx + boards. + ++config MTD_MYLOADER_PARTS ++ tristate "MyLoader partition parsing" ++ depends on ADM5120 || ATHEROS_AR231X || ATHEROS_AR71XX || ATH79 ++ ---help--- ++ MyLoader is a bootloader which allows the user to define partitions ++ in flash devices, by putting a table in the second erase block ++ on the device, similar to a partition table. This table gives the ++ offsets and lengths of the user defined partitions. ++ ++ If you need code which can detect and parse these tables, and ++ register MTD 'partitions' corresponding to each image detected, ++ enable this option. ++ ++ You will still need the parsing functions to be called by the driver ++ for your particular device. It won't happen automatically. ++ + comment "User Modules And Translation Layers" + + config MTD_CHAR +--- a/drivers/mtd/Makefile ++++ b/drivers/mtd/Makefile +@@ -13,6 +13,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o + obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o + obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o + obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o ++obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o + + # 'Users' - code which presents functionality to userspace. + obj-$(CONFIG_MTD_CHAR) += mtdchar.o diff --git a/target/linux/generic/patches-3.8/440-block2mtd_init.patch b/target/linux/generic/patches-3.8/440-block2mtd_init.patch new file mode 100644 index 0000000000..7ea827db85 --- /dev/null +++ b/target/linux/generic/patches-3.8/440-block2mtd_init.patch @@ -0,0 +1,116 @@ +--- a/drivers/mtd/devices/block2mtd.c ++++ b/drivers/mtd/devices/block2mtd.c +@@ -14,6 +14,7 @@ + #include <linux/list.h> + #include <linux/init.h> + #include <linux/mtd/mtd.h> ++#include <linux/mtd/partitions.h> + #include <linux/mutex.h> + #include <linux/mount.h> + #include <linux/slab.h> +@@ -210,11 +211,12 @@ static void block2mtd_free_device(struct + + + /* FIXME: ensure that mtd->size % erase_size == 0 */ +-static struct block2mtd_dev *add_device(char *devname, int erase_size) ++static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname) + { + const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; + struct block_device *bdev; + struct block2mtd_dev *dev; ++ struct mtd_partition *part; + char *name; + + if (!devname) +@@ -253,13 +255,16 @@ static struct block2mtd_dev *add_device( + + /* Setup the MTD structure */ + /* make the name contain the block device in */ +- name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname); ++ if (!mtdname) ++ mtdname = devname; ++ name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL); + if (!name) + goto devinit_err; + ++ strcpy(name, mtdname); + dev->mtd.name = name; + +- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; ++ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1); + dev->mtd.erasesize = erase_size; + dev->mtd.writesize = 1; + dev->mtd.writebufsize = PAGE_SIZE; +@@ -272,14 +277,17 @@ static struct block2mtd_dev *add_device( + dev->mtd.priv = dev; + dev->mtd.owner = THIS_MODULE; + +- if (mtd_device_register(&dev->mtd, NULL, 0)) { ++ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL); ++ part->name = name; ++ part->offset = 0; ++ part->size = dev->mtd.size; ++ if (mtd_device_register(&dev->mtd, part, 1)) { + /* Device didn't get added, so free the entry */ + goto devinit_err; + } + list_add(&dev->list, &blkmtd_device_list); + INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index, +- dev->mtd.name + strlen("block2mtd: "), +- dev->mtd.erasesize >> 10, dev->mtd.erasesize); ++ 
mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize); + return dev; + + devinit_err: +@@ -352,9 +360,9 @@ static char block2mtd_paramline[80 + 12] + + static int block2mtd_setup2(const char *val) + { +- char buf[80 + 12]; /* 80 for device, 12 for erase size */ ++ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */ + char *str = buf; +- char *token[2]; ++ char *token[3]; + char *name; + size_t erase_size = PAGE_SIZE; + int i, ret; +@@ -365,7 +373,7 @@ static int block2mtd_setup2(const char * + strcpy(str, val); + kill_final_newline(str); + +- for (i = 0; i < 2; i++) ++ for (i = 0; i < 3; i++) + token[i] = strsep(&str, ","); + + if (str) +@@ -384,8 +392,10 @@ static int block2mtd_setup2(const char * + parse_err("illegal erase size"); + } + } ++ if (token[2] && (strlen(token[2]) + 1 > 80)) ++ parse_err("mtd device name too long"); + +- add_device(name, erase_size); ++ add_device(name, erase_size, token[2]); + + return 0; + } +@@ -419,7 +429,7 @@ static int block2mtd_setup(const char *v + + + module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200); +-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\""); ++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\""); + + static int __init block2mtd_init(void) + { +--- a/block/partition-generic.c ++++ b/block/partition-generic.c +@@ -548,6 +548,7 @@ int invalidate_partitions(struct gendisk + + return 0; + } ++EXPORT_SYMBOL(rescan_partitions); + + unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) + { diff --git a/target/linux/generic/patches-3.8/441-block2mtd_refresh.patch b/target/linux/generic/patches-3.8/441-block2mtd_refresh.patch new file mode 100644 index 0000000000..11b743db2e --- /dev/null +++ b/target/linux/generic/patches-3.8/441-block2mtd_refresh.patch @@ -0,0 +1,268 @@ +--- a/drivers/mtd/devices/block2mtd.c ++++ b/drivers/mtd/devices/block2mtd.c +@@ -29,6 +29,8 @@ struct block2mtd_dev { + struct block_device *blkdev; + struct mtd_info mtd; + struct mutex write_mutex; ++ rwlock_t bdev_mutex; ++ char devname[0]; + }; + + +@@ -80,6 +82,12 @@ static int block2mtd_erase(struct mtd_in + size_t len = instr->len; + int err; + ++ read_lock(&dev->bdev_mutex); ++ if (!dev->blkdev) { ++ err = -EINVAL; ++ goto done; ++ } ++ + instr->state = MTD_ERASING; + mutex_lock(&dev->write_mutex); + err = _block2mtd_erase(dev, from, len); +@@ -91,6 +99,10 @@ static int block2mtd_erase(struct mtd_in + instr->state = MTD_ERASE_DONE; + + mtd_erase_callback(instr); ++ ++done: ++ read_unlock(&dev->bdev_mutex); ++ + return err; + } + +@@ -102,7 +114,13 @@ static int block2mtd_read(struct mtd_inf + struct page *page; + int index = from >> PAGE_SHIFT; + int offset = from & (PAGE_SIZE-1); +- int cpylen; ++ int cpylen, err = 0; ++ ++ read_lock(&dev->bdev_mutex); ++ if (!dev->blkdev || (from > mtd->size)) { ++ err = -EINVAL; ++ goto done; ++ } + + while (len) { + if ((offset + len) > PAGE_SIZE) +@@ -112,8 +130,10 @@ static int block2mtd_read(struct mtd_inf + len = len - cpylen; + + page = page_read(dev->blkdev->bd_inode->i_mapping, index); +- if (IS_ERR(page)) ++ if (IS_ERR(page)) { + return PTR_ERR(page); ++ goto done; ++ } + + memcpy(buf, page_address(page) + offset, cpylen); + page_cache_release(page); +@@ -124,7 +144,10 @@ static int block2mtd_read(struct mtd_inf + offset = 0; + index++; + } +- return 0; ++ ++done: ++ read_unlock(&dev->bdev_mutex); ++ return err; + } + + +@@ -173,13 +196,22 @@ static int block2mtd_write(struct mtd_in + size_t 
*retlen, const u_char *buf) + { + struct block2mtd_dev *dev = mtd->priv; +- int err; ++ int err = 0; ++ ++ read_lock(&dev->bdev_mutex); ++ if (!dev->blkdev) { ++ err = -EINVAL; ++ goto done; ++ } + + mutex_lock(&dev->write_mutex); + err = _block2mtd_write(dev, buf, to, len, retlen); + mutex_unlock(&dev->write_mutex); + if (err > 0) + err = 0; ++ ++done: ++ read_unlock(&dev->bdev_mutex); + return err; + } + +@@ -188,33 +220,110 @@ static int block2mtd_write(struct mtd_in + static void block2mtd_sync(struct mtd_info *mtd) + { + struct block2mtd_dev *dev = mtd->priv; ++ read_lock(&dev->bdev_mutex); ++ if (dev->blkdev) + sync_blockdev(dev->blkdev); ++ read_unlock(&dev->bdev_mutex); ++ + return; + } + + ++static int _open_bdev(struct block2mtd_dev *dev) ++{ ++ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; ++ struct block_device *bdev; ++ ++ /* Get a handle on the device */ ++ bdev = blkdev_get_by_path(dev->devname, mode, dev); ++#ifndef MODULE ++ if (IS_ERR(bdev)) { ++ dev_t devt; ++ ++ /* We might not have rootfs mounted at this point. Try ++ to resolve the device name by other means. */ ++ ++ devt = name_to_dev_t(dev->devname); ++ if (devt) ++ bdev = blkdev_get_by_dev(devt, mode, dev); ++ } ++#endif ++ ++ if (IS_ERR(bdev)) { ++ ERROR("error: cannot open device %s", dev->devname); ++ return 1; ++ } ++ dev->blkdev = bdev; ++ ++ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { ++ ERROR("attempting to use an MTD device as a block device"); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static void _close_bdev(struct block2mtd_dev *dev) ++{ ++ struct block_device *bdev; ++ ++ if (!dev->blkdev) ++ return; ++ ++ bdev = dev->blkdev; ++ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1); ++ blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); ++ dev->blkdev = NULL; ++} ++ + static void block2mtd_free_device(struct block2mtd_dev *dev) + { + if (!dev) + return; + + kfree(dev->mtd.name); +- +- if (dev->blkdev) { +- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, +- 0, -1); +- blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); +- } +- ++ _close_bdev(dev); + kfree(dev); + } + + +-/* FIXME: ensure that mtd->size % erase_size == 0 */ +-static struct block2mtd_dev *add_device(char *devname, int erase_size, const char *mtdname) ++static int block2mtd_refresh(struct mtd_info *mtd) + { +- const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; ++ struct block2mtd_dev *dev = mtd->priv; + struct block_device *bdev; ++ dev_t devt; ++ int err = 0; ++ ++ /* no other mtd function can run at this point */ ++ write_lock(&dev->bdev_mutex); ++ ++ /* get the device number for the whole disk */ ++ devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0); ++ ++ /* close the old block device */ ++ _close_bdev(dev); ++ ++ /* open the whole disk, issue a partition rescan, then */ ++ bdev = blkdev_get_by_dev(devt, FMODE_WRITE | FMODE_READ, mtd); ++ if (!bdev || !bdev->bd_disk) ++ err = -EINVAL; ++#ifndef CONFIG_MTD_BLOCK2MTD_MODULE ++ else ++ err = rescan_partitions(bdev->bd_disk, bdev); ++#endif ++ if (bdev) ++ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); ++ ++ /* try to open the partition block device again */ ++ _open_bdev(dev); ++ write_unlock(&dev->bdev_mutex); ++ ++ return err; ++} ++ ++/* FIXME: ensure that mtd->size % erase_size == 0 */ ++static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname) ++{ + struct block2mtd_dev *dev; + struct mtd_partition *part; + char *name; +@@ -222,36 +331,17 @@ static struct block2mtd_dev *add_device( + if 
(!devname) + return NULL; + +- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL); ++ dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL); + if (!dev) + return NULL; + +- /* Get a handle on the device */ +- bdev = blkdev_get_by_path(devname, mode, dev); +-#ifndef MODULE +- if (IS_ERR(bdev)) { ++ strcpy(dev->devname, devname); + +- /* We might not have rootfs mounted at this point. Try +- to resolve the device name by other means. */ +- +- dev_t devt = name_to_dev_t(devname); +- if (devt) +- bdev = blkdev_get_by_dev(devt, mode, dev); +- } +-#endif +- +- if (IS_ERR(bdev)) { +- ERROR("error: cannot open device %s", devname); +- goto devinit_err; +- } +- dev->blkdev = bdev; +- +- if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { +- ERROR("attempting to use an MTD device as a block device"); ++ if (_open_bdev(dev)) + goto devinit_err; +- } + + mutex_init(&dev->write_mutex); ++ rwlock_init(&dev->bdev_mutex); + + /* Setup the MTD structure */ + /* make the name contain the block device in */ +@@ -276,6 +366,7 @@ static struct block2mtd_dev *add_device( + dev->mtd._read = block2mtd_read; + dev->mtd.priv = dev; + dev->mtd.owner = THIS_MODULE; ++ dev->mtd.refresh_device = block2mtd_refresh; + + part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL); + part->name = name; diff --git a/target/linux/generic/patches-3.8/442-block2mtd_probe.patch b/target/linux/generic/patches-3.8/442-block2mtd_probe.patch new file mode 100644 index 0000000000..244c31e1f2 --- /dev/null +++ b/target/linux/generic/patches-3.8/442-block2mtd_probe.patch @@ -0,0 +1,10 @@ +--- a/drivers/mtd/devices/block2mtd.c ++++ b/drivers/mtd/devices/block2mtd.c +@@ -243,6 +243,7 @@ static int _open_bdev(struct block2mtd_d + /* We might not have rootfs mounted at this point. Try + to resolve the device name by other means. 
*/ + ++ wait_for_device_probe(); + devt = name_to_dev_t(dev->devname); + if (devt) + bdev = blkdev_get_by_dev(devt, mode, dev); diff --git a/target/linux/generic/patches-3.8/450-mtd_plat_nand_chip_fixup.patch b/target/linux/generic/patches-3.8/450-mtd_plat_nand_chip_fixup.patch new file mode 100644 index 0000000000..f1783d0de3 --- /dev/null +++ b/target/linux/generic/patches-3.8/450-mtd_plat_nand_chip_fixup.patch @@ -0,0 +1,37 @@ +--- + drivers/mtd/nand/plat_nand.c | 13 ++++++++++++- + include/linux/mtd/nand.h | 1 + + 2 files changed, 13 insertions(+), 1 deletion(-) + +--- a/include/linux/mtd/nand.h ++++ b/include/linux/mtd/nand.h +@@ -647,6 +647,7 @@ struct platform_nand_chip { + unsigned int options; + unsigned int bbt_options; + const char **part_probe_types; ++ int (*chip_fixup)(struct mtd_info *mtd); + }; + + /* Keep gcc happy */ +--- a/drivers/mtd/nand/plat_nand.c ++++ b/drivers/mtd/nand/plat_nand.c +@@ -103,7 +103,18 @@ static int plat_nand_probe(struct platfo + } + + /* Scan to find existence of the device */ +- if (nand_scan(&data->mtd, pdata->chip.nr_chips)) { ++ if (nand_scan_ident(&data->mtd, pdata->chip.nr_chips, NULL)) { ++ err = -ENXIO; ++ goto out; ++ } ++ ++ if (pdata->chip.chip_fixup) { ++ err = pdata->chip.chip_fixup(&data->mtd); ++ if (err) ++ goto out; ++ } ++ ++ if (nand_scan_tail(&data->mtd)) { + err = -ENXIO; + goto out; + } diff --git a/target/linux/generic/patches-3.8/451-mtd_fix_nand_correct_data_return_code.patch b/target/linux/generic/patches-3.8/451-mtd_fix_nand_correct_data_return_code.patch new file mode 100644 index 0000000000..2f72d8594d --- /dev/null +++ b/target/linux/generic/patches-3.8/451-mtd_fix_nand_correct_data_return_code.patch @@ -0,0 +1,12 @@ +--- a/drivers/mtd/nand/nand_ecc.c ++++ b/drivers/mtd/nand/nand_ecc.c +@@ -507,8 +507,7 @@ int __nand_correct_data(unsigned char *b + if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) + return 1; /* error in ECC data; no action needed */ + +- printk(KERN_ERR "uncorrectable error : "); +- return -1; ++ return -EBADMSG; + } + EXPORT_SYMBOL(__nand_correct_data); + diff --git a/target/linux/generic/patches-3.8/460-cfi_cmdset_0002_no_erase_suspend.patch b/target/linux/generic/patches-3.8/460-cfi_cmdset_0002_no_erase_suspend.patch new file mode 100644 index 0000000000..d64f340d1d --- /dev/null +++ b/target/linux/generic/patches-3.8/460-cfi_cmdset_0002_no_erase_suspend.patch @@ -0,0 +1,11 @@ +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -762,7 +762,7 @@ static int get_chip(struct map_info *map + return 0; + + case FL_ERASING: +- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || ++ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) || + !(mode == FL_READY || mode == FL_POINT || + (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) + goto sleep; diff --git a/target/linux/generic/patches-3.8/470-mtd_m25p80_add_pm25lv_flash_support.patch b/target/linux/generic/patches-3.8/470-mtd_m25p80_add_pm25lv_flash_support.patch new file mode 100644 index 0000000000..36d9caac7b --- /dev/null +++ b/target/linux/generic/patches-3.8/470-mtd_m25p80_add_pm25lv_flash_support.patch @@ -0,0 +1,39 @@ +--- a/drivers/mtd/devices/m25p80.c ++++ b/drivers/mtd/devices/m25p80.c +@@ -45,6 +45,7 @@ + #define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ + #define OPCODE_BE_32K 0x52 /* Erase 32KiB block */ + #define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */ ++#define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips*/ + #define OPCODE_SE 0xd8 /* Sector erase 
(usually 64KiB) */ + #define OPCODE_RDID 0x9f /* Read JEDEC ID */ + +@@ -591,6 +592,7 @@ struct flash_info { + u16 flags; + #define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */ + #define M25P_NO_ERASE 0x02 /* No erase command needed */ ++#define SECT_4K_PMC 0x04 /* OPCODE_BE_4K_PMC works uniformly */ + }; + + #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ +@@ -665,6 +667,10 @@ static const struct spi_device_id m25p_i + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, + { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, + ++ /* PMC -- pm25x "blocks" are 32K, sectors are 4K */ ++ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, ++ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) }, ++ + /* Spansion -- single (large) sector size only, at least + * for the chips listed here (without boot sectors). + */ +@@ -909,6 +915,9 @@ static int m25p_probe(struct spi_device + if (info->flags & SECT_4K) { + flash->erase_opcode = OPCODE_BE_4K; + flash->mtd.erasesize = 4096; ++ } else if (info->flags & SECT_4K_PMC) { ++ flash->erase_opcode = OPCODE_BE_4K_PMC; ++ flash->mtd.erasesize = 4096; + } else { + flash->erase_opcode = OPCODE_SE; + flash->mtd.erasesize = info->sector_size; diff --git a/target/linux/generic/patches-3.8/473-mtd_m25p80_add_w25q128.patch b/target/linux/generic/patches-3.8/473-mtd_m25p80_add_w25q128.patch new file mode 100644 index 0000000000..515ec10e2a --- /dev/null +++ b/target/linux/generic/patches-3.8/473-mtd_m25p80_add_w25q128.patch @@ -0,0 +1,10 @@ +--- a/drivers/mtd/devices/m25p80.c ++++ b/drivers/mtd/devices/m25p80.c +@@ -750,6 +750,7 @@ static const struct spi_device_id m25p_i + { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, + { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, ++ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, + { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, + + /* Catalyst / On Semiconductor -- non-JEDEC */ diff --git a/target/linux/generic/patches-3.8/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch b/target/linux/generic/patches-3.8/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch new file mode 100644 index 0000000000..ea5d958165 --- /dev/null +++ b/target/linux/generic/patches-3.8/475-mtd_cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch @@ -0,0 +1,18 @@ +From: George Kashperko <george@znau.edu.ua> + +Issue map read after Write Buffer Load command to ensure chip is ready +to receive data. 
+Signed-off-by: George Kashperko <george@znau.edu.ua> +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 1 + + 1 file changed, 1 insertion(+) +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -1480,6 +1480,7 @@ static int __xipram do_write_buffer(stru + + /* Write Buffer Load */ + map_write(map, CMD(0x25), cmd_adr); ++ (void) map_read(map, cmd_adr); + + chip->state = FL_WRITING_TO_BUFFER; + diff --git a/target/linux/generic/patches-3.8/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch b/target/linux/generic/patches-3.8/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch new file mode 100644 index 0000000000..ba1526ce52 --- /dev/null +++ b/target/linux/generic/patches-3.8/476-mtd-m25p80-allow-to-disable-small-sector-erase.patch @@ -0,0 +1,41 @@ +--- a/drivers/mtd/devices/Kconfig ++++ b/drivers/mtd/devices/Kconfig +@@ -110,6 +110,14 @@ config MTD_SPEAR_SMI + help + This enable SNOR support on SPEAR platforms using SMI controller + ++config M25PXX_PREFER_SMALL_SECTOR_ERASE ++ bool "Prefer small sector erase" ++ depends on MTD_M25P80 ++ default y ++ help ++ This option enables use of the small erase sectors if that is ++ supported by the flash chip. ++ + config MTD_SST25L + tristate "Support SST25L (non JEDEC) SPI Flash chips" + depends on SPI_MASTER +--- a/drivers/mtd/devices/m25p80.c ++++ b/drivers/mtd/devices/m25p80.c +@@ -76,6 +76,12 @@ + + #define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16) + ++#ifdef CONFIG_M25PXX_PREFER_SMALL_SECTOR_ERASE ++#define PREFER_SMALL_SECTOR_ERASE 1 ++#else ++#define PREFER_SMALL_SECTOR_ERASE 0 ++#endif ++ + /****************************************************************************/ + + struct m25p { +@@ -913,7 +919,7 @@ static int m25p_probe(struct spi_device + flash->mtd._write = m25p80_write; + + /* prefer "small sector" erase if possible */ +- if (info->flags & SECT_4K) { ++ if (PREFER_SMALL_SECTOR_ERASE && (info->flags & SECT_4K)) { + flash->erase_opcode = OPCODE_BE_4K; + flash->mtd.erasesize = 4096; + } else if (info->flags & SECT_4K_PMC) { diff --git a/target/linux/generic/patches-3.8/478-mtd-partial_eraseblock_unlock.patch b/target/linux/generic/patches-3.8/478-mtd-partial_eraseblock_unlock.patch new file mode 100644 index 0000000000..0fc4154fa6 --- /dev/null +++ b/target/linux/generic/patches-3.8/478-mtd-partial_eraseblock_unlock.patch @@ -0,0 +1,18 @@ +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -330,7 +330,14 @@ static int part_lock(struct mtd_info *mt + static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) + { + struct mtd_part *part = PART(mtd); +- return part->master->_unlock(part->master, ofs + part->offset, len); ++ ++ ofs += part->offset; ++ if (mtd->flags & MTD_ERASE_PARTIAL) { ++ /* round up len to next erasesize and round down offset to prev block */ ++ len = (mtd_div_by_eb(len, part->master) + 1) * part->master->erasesize; ++ ofs &= ~(part->master->erasesize - 1); ++ } ++ return part->master->_unlock(part->master, ofs, len); + } + + static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) diff --git a/target/linux/generic/patches-3.8/500-yaffs-Kbuild-integration.patch b/target/linux/generic/patches-3.8/500-yaffs-Kbuild-integration.patch new file mode 100644 index 0000000000..3940c1050e --- /dev/null +++ b/target/linux/generic/patches-3.8/500-yaffs-Kbuild-integration.patch @@ -0,0 +1,18 @@ +--- a/fs/Kconfig ++++ b/fs/Kconfig +@@ -39,6 +39,7 @@ source "fs/gfs2/Kconfig" + source "fs/ocfs2/Kconfig" + source "fs/btrfs/Kconfig" + source 
"fs/nilfs2/Kconfig" ++source "fs/yaffs2/Kconfig" + + endif # BLOCK + +--- a/fs/Makefile ++++ b/fs/Makefile +@@ -128,3 +128,5 @@ obj-$(CONFIG_F2FS_FS) += f2fs/ + obj-y += exofs/ # Multiple modules + obj-$(CONFIG_CEPH_FS) += ceph/ + obj-$(CONFIG_PSTORE) += pstore/ ++obj-$(CONFIG_YAFFS_FS) += yaffs2/ ++ diff --git a/target/linux/generic/patches-3.8/501-yaffs-Fix-directory-unlinking-in-yaffs1-mode.patch b/target/linux/generic/patches-3.8/501-yaffs-Fix-directory-unlinking-in-yaffs1-mode.patch new file mode 100644 index 0000000000..d2bad01016 --- /dev/null +++ b/target/linux/generic/patches-3.8/501-yaffs-Fix-directory-unlinking-in-yaffs1-mode.patch @@ -0,0 +1,31 @@ +From 2505e8b0a13d3d5c5bbeaaae4eb889864f44c9df Mon Sep 17 00:00:00 2001 +From: Charles Manning <cdhmanning@gmail.com> +Date: Thu, 3 Feb 2011 05:55:30 +1300 +Subject: [PATCH] yaffs: Fix directory unlinking in yaffs1 mode + +commit 964b3425a71890e6701c830e38b04d8557c04f49 upstream. + +Treat both yaffs2 and yaffs1 paths the same. + +Signed-off-by: Charles Manning <cdhmanning@gmail.com> +--- + yaffs_guts.c | 8 +------- + 1 file changed, 1 insertion(+), 7 deletions(-) + +--- a/fs/yaffs2/yaffs_guts.c ++++ b/fs/yaffs2/yaffs_guts.c +@@ -1708,13 +1708,7 @@ static int yaffs_change_obj_name(yaffs_o + YBUG(); + } + +- /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */ +- if (obj->my_dev->param.is_yaffs2) +- unlinkOp = (new_dir == obj->my_dev->unlinked_dir); +- else +- unlinkOp = (new_dir == obj->my_dev->unlinked_dir +- && obj->variant_type == YAFFS_OBJECT_TYPE_FILE); +- ++ unlinkOp = (new_dir == obj->my_dev->unlinked_dir); + deleteOp = (new_dir == obj->my_dev->del_dir); + + existingTarget = yaffs_find_by_name(new_dir, new_name); diff --git a/target/linux/generic/patches-3.8/502-yaffs-Switch-from-semaphores-to-mutexes.patch b/target/linux/generic/patches-3.8/502-yaffs-Switch-from-semaphores-to-mutexes.patch new file mode 100644 index 0000000000..8d1872e3fb --- /dev/null +++ b/target/linux/generic/patches-3.8/502-yaffs-Switch-from-semaphores-to-mutexes.patch @@ -0,0 +1,138 @@ +From c0c289363e84c53b5872f7c0c5069045096dca07 Mon Sep 17 00:00:00 2001 +From: Charles Manning <cdhmanning@gmail.com> +Date: Wed, 3 Nov 2010 16:01:12 +1300 +Subject: [PATCH] yaffs: Switch from semaphores to mutexes + +commit 73c54aa8c1de3f61a4c211cd47431293a6092f18 upstream. + +Mutex is faster and init_MUTEX has been deprecated, so we'll just switch +to mutexes. + +Signed-off-by: Charles Manning <cdhmanning@gmail.com> +--- + yaffs_linux.h | 2 +- + yaffs_vfs.c | 24 ++++++++++++------------ + yaffs_vfs_multi.c | 26 +++++++++++++------------- + 3 files changed, 26 insertions(+), 26 deletions(-) + +--- a/fs/yaffs2/yaffs_linux.h ++++ b/fs/yaffs2/yaffs_linux.h +@@ -25,7 +25,7 @@ struct yaffs_LinuxContext { + struct super_block * superBlock; + struct task_struct *bgThread; /* Background thread for this device */ + int bgRunning; +- struct semaphore grossLock; /* Gross locking semaphore */ ++ struct mutex grossLock; /* Gross locking mutex*/ + __u8 *spareBuffer; /* For mtdif2 use. Don't know the size of the buffer + * at compile time so we have to allocate it. 
+ */ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -515,14 +515,14 @@ static unsigned yaffs_gc_control_callbac + static void yaffs_gross_lock(yaffs_dev_t *dev) + { + T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current)); +- down(&(yaffs_dev_to_lc(dev)->grossLock)); ++ mutex_lock(&(yaffs_dev_to_lc(dev)->grossLock)); + T(YAFFS_TRACE_LOCK, (TSTR("yaffs locked %p\n"), current)); + } + + static void yaffs_gross_unlock(yaffs_dev_t *dev) + { + T(YAFFS_TRACE_LOCK, (TSTR("yaffs unlocking %p\n"), current)); +- up(&(yaffs_dev_to_lc(dev)->grossLock)); ++ mutex_unlock(&(yaffs_dev_to_lc(dev)->grossLock)); + } + + #ifdef YAFFS_COMPILE_EXPORTFS +@@ -2542,7 +2542,7 @@ static void yaffs_read_inode(struct inod + #endif + + static YLIST_HEAD(yaffs_context_list); +-struct semaphore yaffs_context_lock; ++struct mutex yaffs_context_lock; + + static void yaffs_put_super(struct super_block *sb) + { +@@ -2568,9 +2568,9 @@ static void yaffs_put_super(struct super + + yaffs_gross_unlock(dev); + +- down(&yaffs_context_lock); ++ mutex_lock(&yaffs_context_lock); + ylist_del_init(&(yaffs_dev_to_lc(dev)->contextList)); +- up(&yaffs_context_lock); ++ mutex_unlock(&yaffs_context_lock); + + if (yaffs_dev_to_lc(dev)->spareBuffer) { + YFREE(yaffs_dev_to_lc(dev)->spareBuffer); +@@ -3016,7 +3016,7 @@ static struct super_block *yaffs_interna + param->skip_checkpt_rd = options.skip_checkpoint_read; + param->skip_checkpt_wr = options.skip_checkpoint_write; + +- down(&yaffs_context_lock); ++ mutex_lock(&yaffs_context_lock); + /* Get a mount id */ + found = 0; + for(mount_id=0; ! found; mount_id++){ +@@ -3030,13 +3030,13 @@ static struct super_block *yaffs_interna + context->mount_id = mount_id; + + ylist_add_tail(&(yaffs_dev_to_lc(dev)->contextList), &yaffs_context_list); +- up(&yaffs_context_lock); ++ mutex_unlock(&yaffs_context_lock); + + /* Directory search handling...*/ + YINIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->searchContexts)); + param->remove_obj_fn = yaffs_remove_obj_callback; + +- init_MUTEX(&(yaffs_dev_to_lc(dev)->grossLock)); ++ mutex_init(&(yaffs_dev_to_lc(dev)->grossLock)); + + yaffs_gross_lock(dev); + +@@ -3268,7 +3268,7 @@ static int yaffs_proc_read(char *page, + else { + step-=2; + +- down(&yaffs_context_lock); ++ mutex_lock(&yaffs_context_lock); + + /* Locate and print the Nth entry. Order N-squared but N is small. */ + ylist_for_each(item, &yaffs_context_list) { +@@ -3287,7 +3287,7 @@ static int yaffs_proc_read(char *page, + + break; + } +- up(&yaffs_context_lock); ++ mutex_unlock(&yaffs_context_lock); + } + + return buf - page < count ? buf - page : count; +@@ -3301,7 +3301,7 @@ static int yaffs_stats_proc_read(char *p + char *buf = page; + int n = 0; + +- down(&yaffs_context_lock); ++ mutex_lock(&yaffs_context_lock); + + /* Locate and print the Nth entry. Order N-squared but N is small. */ + ylist_for_each(item, &yaffs_context_list) { +@@ -3317,7 +3317,7 @@ static int yaffs_stats_proc_read(char *p + dev->bg_gcs, dev->oldest_dirty_gc_count, + dev->n_obj, dev->n_tnodes); + } +- up(&yaffs_context_lock); ++ mutex_unlock(&yaffs_context_lock); + + + return buf - page < count ? 
buf - page : count; +@@ -3494,7 +3494,7 @@ static int __init init_yaffs_fs(void) + + + +- init_MUTEX(&yaffs_context_lock); ++ mutex_init(&yaffs_context_lock); + + /* Install the proc_fs entries */ + my_proc_entry = create_proc_entry("yaffs", diff --git a/target/linux/generic/patches-3.8/503-yaffs-Replace-yaffs_dir_llseek-with-Linux-generic-ll.patch b/target/linux/generic/patches-3.8/503-yaffs-Replace-yaffs_dir_llseek-with-Linux-generic-ll.patch new file mode 100644 index 0000000000..d283e8559c --- /dev/null +++ b/target/linux/generic/patches-3.8/503-yaffs-Replace-yaffs_dir_llseek-with-Linux-generic-ll.patch @@ -0,0 +1,72 @@ +From cd6657c4bde20886b0805ea9f2cbac7ec25ac2e5 Mon Sep 17 00:00:00 2001 +From: Charles Manning <cdhmanning@gmail.com> +Date: Tue, 30 Nov 2010 16:01:28 +1300 +Subject: [PATCH 1/2] yaffs: Replace yaffs_dir_llseek with Linux generic + llseek + +commit ed8188fb7659cfb65b5adbe154d143190ade0324 upstream. + +There was not much point in having the yaffs version as it is +functionally equivalent to the kernel one. + +This also gets rid of using BKL in yaffs2. + +Signed-off-by: Charles Manning <cdhmanning@gmail.com> +--- + yaffs_vfs.c | 30 +----------------------------- + yaffs_vfs_multi.c | 30 +----------------------------- + 2 files changed, 2 insertions(+), 58 deletions(-) + +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -342,8 +342,6 @@ static int yaffs_follow_link(struct dent + + static void yaffs_touch_super(yaffs_dev_t *dev); + +-static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin); +- + static int yaffs_vfs_setattr(struct inode *, struct iattr *); + + +@@ -460,7 +458,7 @@ static const struct file_operations yaff + .read = generic_read_dir, + .readdir = yaffs_readdir, + .fsync = yaffs_sync_object, +- .llseek = yaffs_dir_llseek, ++ .llseek = generic_file_llseek, + }; + + static const struct super_operations yaffs_super_ops = { +@@ -1534,32 +1532,6 @@ static void yaffs_release_space(struct f + } + + +-static loff_t yaffs_dir_llseek(struct file *file, loff_t offset, int origin) +-{ +- long long retval; +- +- lock_kernel(); +- +- switch (origin){ +- case 2: +- offset += i_size_read(file->f_path.dentry->d_inode); +- break; +- case 1: +- offset += file->f_pos; +- } +- retval = -EINVAL; +- +- if (offset >= 0){ +- if (offset != file->f_pos) +- file->f_pos = offset; +- +- retval = offset; +- } +- unlock_kernel(); +- return retval; +-} +- +- + static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir) + { + yaffs_obj_t *obj; diff --git a/target/linux/generic/patches-3.8/504-yaffs-Mods-for-Linux-3.0-and-fix-a-typo.patch b/target/linux/generic/patches-3.8/504-yaffs-Mods-for-Linux-3.0-and-fix-a-typo.patch new file mode 100644 index 0000000000..1b814e97fd --- /dev/null +++ b/target/linux/generic/patches-3.8/504-yaffs-Mods-for-Linux-3.0-and-fix-a-typo.patch @@ -0,0 +1,110 @@ +From e1537a700c2e750c5eacc5ad93f30821f1e94424 Mon Sep 17 00:00:00 2001 +From: Charles Manning <cdhmanning@gmail.com> +Date: Mon, 15 Aug 2011 11:40:30 +1200 +Subject: [PATCH 2/2] Mods for Linux 3.0 and fix a typo + +commit a7b5dcf904ba6f7890e4b77ce1f56388b855d0f6 upstream. + +Roll in NCB's patch and some other changes for Linux 3.0. 
+Also fix a dumb type retired_writes->retried_writes + +Signed-off-by: Charles Manning <cdhmanning@gmail.com> +--- + patch-ker.sh | 2 +- + yaffs_vfs_glue.c | 42 ++++++++++++++++++++++++++++++++++-------- + 2 files changed, 35 insertions(+), 9 deletions(-) + +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -72,7 +72,9 @@ + #include <linux/init.h> + #include <linux/fs.h> + #include <linux/proc_fs.h> ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) + #include <linux/smp_lock.h> ++#endif + #include <linux/pagemap.h> + #include <linux/mtd/mtd.h> + #include <linux/interrupt.h> +@@ -236,7 +238,9 @@ static int yaffs_file_flush(struct file + static int yaffs_file_flush(struct file *file); + #endif + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync); ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) + static int yaffs_sync_object(struct file *file, int datasync); + #else + static int yaffs_sync_object(struct file *file, struct dentry *dentry, +@@ -1864,7 +1868,9 @@ static int yaffs_symlink(struct inode *d + return -ENOMEM; + } + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34)) + static int yaffs_sync_object(struct file *file, int datasync) + #else + static int yaffs_sync_object(struct file *file, struct dentry *dentry, +@@ -3067,7 +3073,13 @@ static int yaffs_internal_read_super_mtd + return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL; + } + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags, ++ const char *dev_name, void *data) ++{ ++ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd); ++} ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + static int yaffs_read_super(struct file_system_type *fs, + int flags, const char *dev_name, + void *data, struct vfsmount *mnt) +@@ -3090,8 +3102,12 @@ static struct super_block *yaffs_read_su + static struct file_system_type yaffs_fs_type = { + .owner = THIS_MODULE, + .name = "yaffs", +- .get_sb = yaffs_read_super, +- .kill_sb = kill_block_super, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++ .mount = yaffs_mount, ++#else ++ .get_sb = yaffs_read_super, ++#endif ++ .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, + }; + #else +@@ -3115,7 +3131,13 @@ static int yaffs2_internal_read_super_mt + return yaffs_internal_read_super(2, sb, data, silent) ? 
0 : -EINVAL; + } + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags, ++ const char *dev_name, void *data) ++{ ++ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd); ++} ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + static int yaffs2_read_super(struct file_system_type *fs, + int flags, const char *dev_name, void *data, + struct vfsmount *mnt) +@@ -3137,8 +3159,12 @@ static struct super_block *yaffs2_read_s + static struct file_system_type yaffs2_fs_type = { + .owner = THIS_MODULE, + .name = "yaffs2", +- .get_sb = yaffs2_read_super, +- .kill_sb = kill_block_super, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) ++ .mount = yaffs2_mount, ++#else ++ .get_sb = yaffs2_read_super, ++#endif ++ .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, + }; + #else diff --git a/target/linux/generic/patches-3.8/505-yaffs-3.2-use-MTD_OPS_AUTO_OOB.patch b/target/linux/generic/patches-3.8/505-yaffs-3.2-use-MTD_OPS_AUTO_OOB.patch new file mode 100644 index 0000000000..463f4a208b --- /dev/null +++ b/target/linux/generic/patches-3.8/505-yaffs-3.2-use-MTD_OPS_AUTO_OOB.patch @@ -0,0 +1,54 @@ +--- a/fs/yaffs2/yaffs_mtdif1.c ++++ b/fs/yaffs2/yaffs_mtdif1.c +@@ -127,7 +127,7 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya + #endif + + memset(&ops, 0, sizeof(ops)); +- ops.mode = MTD_OOB_AUTO; ++ ops.mode = MTD_OPS_AUTO_OOB; + ops.len = (data) ? chunkBytes : 0; + ops.ooblen = YTAG1_SIZE; + ops.datbuf = (__u8 *)data; +@@ -179,7 +179,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y + int deleted; + + memset(&ops, 0, sizeof(ops)); +- ops.mode = MTD_OOB_AUTO; ++ ops.mode = MTD_OPS_AUTO_OOB; + ops.len = (data) ? chunkBytes : 0; + ops.ooblen = YTAG1_SIZE; + ops.datbuf = data; +--- a/fs/yaffs2/yaffs_mtdif2.c ++++ b/fs/yaffs2/yaffs_mtdif2.c +@@ -71,7 +71,7 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya + yaffs_PackTags2(&pt, tags, !dev->param.no_tags_ecc); + + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) +- ops.mode = MTD_OOB_AUTO; ++ ops.mode = MTD_OPS_AUTO_OOB; + ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size; + ops.len = dev->param.total_bytes_per_chunk; + ops.ooboffs = 0; +@@ -136,7 +136,7 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y + retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk, + &dummy, data); + else if (tags) { +- ops.mode = MTD_OOB_AUTO; ++ ops.mode = MTD_OPS_AUTO_OOB; + ops.ooblen = packed_tags_size; + ops.len = data ? 
dev->data_bytes_per_chunk : packed_tags_size; + ops.ooboffs = 0; +--- a/fs/yaffs2/yaffs_mtdif.h ++++ b/fs/yaffs2/yaffs_mtdif.h +@@ -24,4 +24,11 @@ extern struct nand_oobinfo yaffs_noeccin + #endif + int nandmtd_EraseBlockInNAND(yaffs_dev_t *dev, int blockNumber); + int nandmtd_InitialiseNAND(yaffs_dev_t *dev); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) ++#include <mtd/mtd-abi.h> ++#else ++#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO ++#endif ++ + #endif diff --git a/target/linux/generic/patches-3.8/506-yaffs-3.2-dont-use-i_nlink-directly.patch b/target/linux/generic/patches-3.8/506-yaffs-3.2-dont-use-i_nlink-directly.patch new file mode 100644 index 0000000000..91e8281196 --- /dev/null +++ b/target/linux/generic/patches-3.8/506-yaffs-3.2-dont-use-i_nlink-directly.patch @@ -0,0 +1,78 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -220,6 +220,29 @@ static struct inode *yaffs_iget(struct s + #define yaffs_SuperToDevice(sb) ((yaffs_dev_t *)sb->u.generic_sbp) + #endif + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) ++static inline void yaffs_set_nlink(struct inode *inode, unsigned int nlink) ++{ ++ set_nlink(inode, nlink); ++} ++ ++static inline void yaffs_dec_link_count(struct inode *inode) ++{ ++ inode_dec_link_count(inode); ++} ++#else ++static inline void yaffs_set_nlink(struct inode *inode, unsigned int nlink) ++{ ++ inode->i_nlink = nlink; ++} ++ ++static inline void yaffs_dec_link_count(struct inode *inode) ++{ ++ inode->i_nlink--; ++ mark_inode_dirty(inode) ++} ++#endif ++ + + #define update_dir_time(dir) do {\ + (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \ +@@ -1362,7 +1385,7 @@ static void yaffs_fill_inode_from_obj(st + inode->i_size = yaffs_get_obj_length(obj); + inode->i_blocks = (inode->i_size + 511) >> 9; + +- inode->i_nlink = yaffs_get_obj_link_count(obj); ++ yaffs_set_nlink(inode, yaffs_get_obj_link_count(obj)); + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_fill_inode mode %x uid %d gid %d size %d count %d\n"), +@@ -1784,10 +1807,9 @@ static int yaffs_unlink(struct inode *di + retVal = yaffs_unlinker(obj, dentry->d_name.name); + + if (retVal == YAFFS_OK) { +- dentry->d_inode->i_nlink--; ++ yaffs_dec_link_count(dentry->d_inode); + dir->i_version++; + yaffs_gross_unlock(dev); +- mark_inode_dirty(dentry->d_inode); + update_dir_time(dir); + return 0; + } +@@ -1818,7 +1840,8 @@ static int yaffs_link(struct dentry *old + obj); + + if (link) { +- old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj); ++ yaffs_set_nlink(old_dentry->d_inode, ++ yaffs_get_obj_link_count(obj)); + d_instantiate(dentry, old_dentry->d_inode); + atomic_inc(&old_dentry->d_inode->i_count); + T(YAFFS_TRACE_OS, +@@ -1937,11 +1960,9 @@ static int yaffs_rename(struct inode *ol + yaffs_gross_unlock(dev); + + if (retVal == YAFFS_OK) { +- if (target) { +- new_dentry->d_inode->i_nlink--; +- mark_inode_dirty(new_dentry->d_inode); +- } +- ++ if (target) ++ yaffs_dec_link_count(new_dentry->d_inode); ++ + update_dir_time(old_dir); + if(old_dir != new_dir) + update_dir_time(new_dir); diff --git a/target/linux/generic/patches-3.8/507-yaffs-3.3_fix.patch b/target/linux/generic/patches-3.8/507-yaffs-3.3_fix.patch new file mode 100644 index 0000000000..d823fc49c4 --- /dev/null +++ b/target/linux/generic/patches-3.8/507-yaffs-3.3_fix.patch @@ -0,0 +1,71 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -273,8 +273,13 @@ static int yaffs_sync_object(struct file + static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir); + + #if 
(LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ struct nameidata *n); ++#else + static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *n); ++#endif + static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *n); + #else +@@ -286,9 +291,17 @@ static int yaffs_link(struct dentry *old + static int yaffs_unlink(struct inode *dir, struct dentry *dentry); + static int yaffs_symlink(struct inode *dir, struct dentry *dentry, + const char *symname); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); ++#else + static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode); ++#endif + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, ++ dev_t dev); ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, + dev_t dev); + #else +@@ -1679,7 +1692,10 @@ out: + #define YCRED(x) (x->cred) + #endif + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, ++ dev_t rdev) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode, + dev_t rdev) + #else +@@ -1769,7 +1785,11 @@ static int yaffs_mknod(struct inode *dir + return error; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) ++#else + static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) ++#endif + { + int retVal; + T(YAFFS_TRACE_OS, (TSTR("yaffs_mkdir\n"))); +@@ -1777,7 +1797,10 @@ static int yaffs_mkdir(struct inode *dir + return retVal; + } + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ struct nameidata *n) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *n) + #else diff --git a/target/linux/generic/patches-3.8/508-yaffs-3.3-use-mtd_-helpers.patch b/target/linux/generic/patches-3.8/508-yaffs-3.3-use-mtd_-helpers.patch new file mode 100644 index 0000000000..65d45a0be1 --- /dev/null +++ b/target/linux/generic/patches-3.8/508-yaffs-3.3-use-mtd_-helpers.patch @@ -0,0 +1,160 @@ +--- a/fs/yaffs2/yaffs_mtdif1.c ++++ b/fs/yaffs2/yaffs_mtdif1.c +@@ -133,7 +133,7 @@ int nandmtd1_WriteChunkWithTagsToNAND(ya + ops.datbuf = (__u8 *)data; + ops.oobbuf = (__u8 *)&pt1; + +- retval = mtd->write_oob(mtd, addr, &ops); ++ retval = mtd_write_oob(mtd, addr, &ops); + if (retval) { + T(YAFFS_TRACE_MTD, + (TSTR("write_oob failed, chunk %d, mtd error %d"TENDSTR), +@@ -194,7 +194,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y + /* Read page and oob using MTD. + * Check status and determine ECC result. 
+ */ +- retval = mtd->read_oob(mtd, addr, &ops); ++ retval = mtd_read_oob(mtd, addr, &ops); + if (retval) { + T(YAFFS_TRACE_MTD, + (TSTR("read_oob failed, chunk %d, mtd error %d"TENDSTR), +@@ -218,7 +218,7 @@ int nandmtd1_ReadChunkWithTagsFromNAND(y + /* fall into... */ + default: + rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0); +- etags->block_bad = (mtd->block_isbad)(mtd, addr); ++ etags->block_bad = mtd_block_isbad(mtd, addr); + return YAFFS_FAIL; + } + +@@ -286,7 +286,7 @@ int nandmtd1_MarkNANDBlockBad(struct yaf + + T(YAFFS_TRACE_BAD_BLOCKS,(TSTR("marking block %d bad"TENDSTR), block_no)); + +- retval = mtd->block_markbad(mtd, (loff_t)blocksize * block_no); ++ retval = mtd_block_markbad(mtd, (loff_t)blocksize * block_no); + return (retval) ? YAFFS_FAIL : YAFFS_OK; + } + +@@ -336,7 +336,7 @@ int nandmtd1_QueryNANDBlock(struct yaffs + return YAFFS_FAIL; + + retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags); +- etags.block_bad = (mtd->block_isbad)(mtd, addr); ++ etags.block_bad = mtd_block_isbad(mtd, addr); + if (etags.block_bad) { + T(YAFFS_TRACE_BAD_BLOCKS, + (TSTR("block %d is marked bad"TENDSTR), block_no)); +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -2607,8 +2607,8 @@ static void yaffs_MTDPutSuper(struct sup + { + struct mtd_info *mtd = yaffs_dev_to_mtd(yaffs_SuperToDevice(sb)); + +- if (mtd->sync) +- mtd->sync(mtd); ++ if (mtd) ++ mtd_sync(mtd); + + put_mtd_device(mtd); + } +--- a/fs/yaffs2/yaffs_mtdif2.c ++++ b/fs/yaffs2/yaffs_mtdif2.c +@@ -77,7 +77,7 @@ int nandmtd2_WriteChunkWithTagsToNAND(ya + ops.ooboffs = 0; + ops.datbuf = (__u8 *)data; + ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr; +- retval = mtd->write_oob(mtd, addr, &ops); ++ retval = mtd_write_oob(mtd, addr, &ops); + + #else + if (!dev->param.inband_tags) { +@@ -133,7 +133,7 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y + + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)) + if (dev->param.inband_tags || (data && !tags)) +- retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk, ++ retval = mtd_read(mtd, addr, dev->param.total_bytes_per_chunk, + &dummy, data); + else if (tags) { + ops.mode = MTD_OPS_AUTO_OOB; +@@ -142,7 +142,7 @@ int nandmtd2_ReadChunkWithTagsFromNAND(y + ops.ooboffs = 0; + ops.datbuf = data; + ops.oobbuf = yaffs_dev_to_lc(dev)->spareBuffer; +- retval = mtd->read_oob(mtd, addr, &ops); ++ retval = mtd_read_oob(mtd, addr, &ops); + } + #else + if (!dev->param.inband_tags && data && tags) { +@@ -201,7 +201,7 @@ int nandmtd2_MarkNANDBlockBad(struct yaf + (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), block_no)); + + retval = +- mtd->block_markbad(mtd, ++ mtd_block_markbad(mtd, + block_no * dev->param.chunks_per_block * + dev->param.total_bytes_per_chunk); + +@@ -221,7 +221,7 @@ int nandmtd2_QueryNANDBlock(struct yaffs + T(YAFFS_TRACE_MTD, + (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), block_no)); + retval = +- mtd->block_isbad(mtd, ++ mtd_block_isbad(mtd, + block_no * dev->param.chunks_per_block * + dev->param.total_bytes_per_chunk); + +--- a/fs/yaffs2/yaffs_mtdif.h ++++ b/fs/yaffs2/yaffs_mtdif.h +@@ -31,4 +31,39 @@ int nandmtd_InitialiseNAND(yaffs_dev_t * + #define MTD_OPS_AUTO_OOB MTD_OOB_AUTO + #endif + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) ++static inline int mtd_erase(struct mdt_info *mtd, struct erase_info *ei) ++{ ++ return mtd->erase(mtd, ei); ++} ++ ++static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) ++{ ++ return mtd->block_mark_bad(mtd, ofs); ++} ++ ++static inline int 
mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) ++{ ++ return mtd->block_is_bad(mtd, ofs); ++} ++ ++static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, ++ struct mtd_oob_ops *ops) ++{ ++ return mtd->read_oob(mtd, from, ops); ++} ++ ++static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, ++ struct mtd_oob_ops *ops) ++{ ++ return mtd->write_oob(mtd, to, ops); ++} ++ ++static inline void mtd_sync(struct mtd_info *mtd) ++{ ++ if (mtd->sync) ++ mtd->sync(mtd); ++} ++#endif ++ + #endif +--- a/fs/yaffs2/yaffs_mtdif.c ++++ b/fs/yaffs2/yaffs_mtdif.c +@@ -41,7 +41,7 @@ int nandmtd_EraseBlockInNAND(yaffs_dev_t + ei.callback = NULL; + ei.priv = (u_long) dev; + +- retval = mtd->erase(mtd, &ei); ++ retval = mtd_erase(mtd, &ei); + + if (retval == 0) + return YAFFS_OK; diff --git a/target/linux/generic/patches-3.8/509-yaffs-3.4-add-underscore-to-mtd-internal-names.patch b/target/linux/generic/patches-3.8/509-yaffs-3.4-add-underscore-to-mtd-internal-names.patch new file mode 100644 index 0000000000..14f342da80 --- /dev/null +++ b/target/linux/generic/patches-3.8/509-yaffs-3.4-add-underscore-to-mtd-internal-names.patch @@ -0,0 +1,72 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -2793,6 +2793,15 @@ static struct super_block *yaffs_interna + return NULL; + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) ++ T(YAFFS_TRACE_OS, (TSTR(" erase %p\n"), mtd->_erase)); ++ T(YAFFS_TRACE_OS, (TSTR(" read %p\n"), mtd->_read)); ++ T(YAFFS_TRACE_OS, (TSTR(" write %p\n"), mtd->_write)); ++ T(YAFFS_TRACE_OS, (TSTR(" readoob %p\n"), mtd->_read_oob)); ++ T(YAFFS_TRACE_OS, (TSTR(" writeoob %p\n"), mtd->_write_oob)); ++ T(YAFFS_TRACE_OS, (TSTR(" block_isbad %p\n"), mtd->_block_isbad)); ++ T(YAFFS_TRACE_OS, (TSTR(" block_markbad %p\n"), mtd->_block_markbad)); ++#else + T(YAFFS_TRACE_OS, (TSTR(" erase %p\n"), mtd->erase)); + T(YAFFS_TRACE_OS, (TSTR(" read %p\n"), mtd->read)); + T(YAFFS_TRACE_OS, (TSTR(" write %p\n"), mtd->write)); +@@ -2800,6 +2809,7 @@ static struct super_block *yaffs_interna + T(YAFFS_TRACE_OS, (TSTR(" writeoob %p\n"), mtd->write_oob)); + T(YAFFS_TRACE_OS, (TSTR(" block_isbad %p\n"), mtd->block_isbad)); + T(YAFFS_TRACE_OS, (TSTR(" block_markbad %p\n"), mtd->block_markbad)); ++#endif + T(YAFFS_TRACE_OS, (TSTR(" %s %d\n"), WRITE_SIZE_STR, WRITE_SIZE(mtd))); + T(YAFFS_TRACE_OS, (TSTR(" oobsize %d\n"), mtd->oobsize)); + T(YAFFS_TRACE_OS, (TSTR(" erasesize %d\n"), mtd->erasesize)); +@@ -2828,6 +2838,15 @@ static struct super_block *yaffs_interna + + if (yaffs_version == 2) { + /* Check for version 2 style functions */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) ++ if (!mtd->_erase || ++ !mtd->_block_isbad || ++ !mtd->_block_markbad || ++ !mtd->_read || ++ !mtd->_write || ++ !mtd->_read_oob || ++ !mtd->_write_oob) { ++#else + if (!mtd->erase || + !mtd->block_isbad || + !mtd->block_markbad || +@@ -2839,6 +2858,7 @@ static struct super_block *yaffs_interna + !mtd->write_ecc || + !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) { + #endif ++#endif + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device does not support required " + "functions\n"))); +@@ -2855,6 +2875,13 @@ static struct super_block *yaffs_interna + } + } else { + /* Check for V1 style functions */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) ++ if (!mtd->_erase || ++ !mtd->_read || ++ !mtd->_write || ++ !mtd->_read_oob || ++ !mtd->_write_oob) { ++#else + if (!mtd->erase || + !mtd->read || + !mtd->write || +@@ -2864,6 +2891,7 @@ static struct super_block *yaffs_interna + 
!mtd->write_ecc || + !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) { + #endif ++#endif + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs: MTD device does not support required " + "functions\n"))); diff --git a/target/linux/generic/patches-3.8/510-yaffs-3.4-use-d_make_root.patch b/target/linux/generic/patches-3.8/510-yaffs-3.4-use-d_make_root.patch new file mode 100644 index 0000000000..0ef59d606e --- /dev/null +++ b/target/linux/generic/patches-3.8/510-yaffs-3.4-use-d_make_root.patch @@ -0,0 +1,14 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -3119,7 +3119,11 @@ static struct super_block *yaffs_interna + + T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: got root inode\n"))); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) ++ root = d_make_root(inode); ++#else + root = d_alloc_root(inode); ++#endif + + T(YAFFS_TRACE_OS, (TSTR("yaffs_read_super: d_alloc_root done\n"))); + diff --git a/target/linux/generic/patches-3.8/511-yaffs-3.5-use-clear_inode.patch b/target/linux/generic/patches-3.8/511-yaffs-3.5-use-clear_inode.patch new file mode 100644 index 0000000000..f4479675a2 --- /dev/null +++ b/target/linux/generic/patches-3.8/511-yaffs-3.5-use-clear_inode.patch @@ -0,0 +1,15 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -924,7 +924,11 @@ static void yaffs_evict_inode( struct in + if (!inode->i_nlink && !is_bad_inode(inode)) + deleteme = 1; + truncate_inode_pages(&inode->i_data,0); +- end_writeback(inode); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++ clear_inode(inode); ++#else ++ end_writeback(inode); ++#endif + + if(deleteme && obj){ + dev = obj->my_dev; diff --git a/target/linux/generic/patches-3.8/512-yaffs-3.5-convert-to-use-kuid_t-kgid_t.patch b/target/linux/generic/patches-3.8/512-yaffs-3.5-convert-to-use-kuid_t-kgid_t.patch new file mode 100644 index 0000000000..1ca189d2ee --- /dev/null +++ b/target/linux/generic/patches-3.8/512-yaffs-3.5-convert-to-use-kuid_t-kgid_t.patch @@ -0,0 +1,570 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -243,11 +243,10 @@ static inline void yaffs_dec_link_count( + } + #endif + +- + #define update_dir_time(dir) do {\ + (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \ + } while(0) +- ++ + static void yaffs_put_super(struct super_block *sb); + + static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n, +@@ -397,6 +396,33 @@ static struct address_space_operations y + #endif + }; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) ++#define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid()) ++#define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid()) ++#else ++#define YCRED_FSUID() YCRED(current)->fsuid ++#define YCRED_FSGID() YCRED(current)->fsgid ++ ++static inline uid_t i_uid_read(const struct inode *inode) ++{ ++ return inode->i_uid; ++} ++ ++static inline gid_t i_gid_read(const struct inode *inode) ++{ ++ return inode->i_gid; ++} ++ ++static inline void i_uid_write(struct inode *inode, uid_t uid) ++{ ++ inode->i_uid = uid; ++} ++ ++static inline void i_gid_write(struct inode *inode, gid_t gid) ++{ ++ inode->i_gid = gid; ++} ++#endif + + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)) + static const struct file_operations yaffs_file_operations = { +@@ -549,7 +575,7 @@ static unsigned yaffs_gc_control_callbac + { + return yaffs_gc_control; + } +- ++ + static void yaffs_gross_lock(yaffs_dev_t *dev) + { + T(YAFFS_TRACE_LOCK, (TSTR("yaffs locking %p\n"), current)); +@@ -1379,8 +1405,8 @@ static void yaffs_fill_inode_from_obj(st + + 
inode->i_ino = obj->obj_id; + inode->i_mode = obj->yst_mode; +- inode->i_uid = obj->yst_uid; +- inode->i_gid = obj->yst_gid; ++ i_uid_write(inode, obj->yst_uid); ++ i_gid_write(inode, obj->yst_gid); + #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)) + inode->i_blksize = inode->i_sb->s_blocksize; + #endif +@@ -1406,7 +1432,7 @@ static void yaffs_fill_inode_from_obj(st + + T(YAFFS_TRACE_OS, + (TSTR("yaffs_fill_inode mode %x uid %d gid %d size %d count %d\n"), +- inode->i_mode, inode->i_uid, inode->i_gid, ++ inode->i_mode, i_uid_read(inode), i_gid_read(inode), + (int)inode->i_size, atomic_read(&inode->i_count))); + + switch (obj->yst_mode & S_IFMT) { +@@ -1715,8 +1741,8 @@ static int yaffs_mknod(struct inode *dir + yaffs_obj_t *parent = yaffs_InodeToObject(dir); + + int error = -ENOSPC; +- uid_t uid = YCRED(current)->fsuid; +- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid; ++ uid_t uid = YCRED_FSUID(); ++ gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID(); + + if ((dir->i_mode & S_ISGID) && S_ISDIR(mode)) + mode |= S_ISGID; +@@ -1892,8 +1918,8 @@ static int yaffs_symlink(struct inode *d + { + yaffs_obj_t *obj; + yaffs_dev_t *dev; +- uid_t uid = YCRED(current)->fsuid; +- gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid; ++ uid_t uid = YCRED_FSUID(); ++ gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID(); + + T(YAFFS_TRACE_OS, (TSTR("yaffs_symlink\n"))); + +@@ -2009,7 +2035,7 @@ static int yaffs_setattr(struct dentry * + (TSTR("yaffs_setattr of object %d\n"), + yaffs_InodeToObject(inode)->obj_id)); + +- /* Fail if a requested resize >= 2GB */ ++ /* Fail if a requested resize >= 2GB */ + if (attr->ia_valid & ATTR_SIZE && + (attr->ia_size >> 31)) + error = -EINVAL; +@@ -2240,7 +2266,7 @@ static void yaffs_flush_inodes(struct su + { + struct inode *iptr; + yaffs_obj_t *obj; +- ++ + list_for_each_entry(iptr,&sb->s_inodes, i_sb_list){ + obj = yaffs_InodeToObject(iptr); + if(obj){ +@@ -2254,10 +2280,10 @@ static void yaffs_flush_inodes(struct su + + static void yaffs_flush_super(struct super_block *sb, int do_checkpoint) + { +- yaffs_dev_t *dev = yaffs_SuperToDevice(sb); ++ yaffs_dev_t *dev = yaffs_SuperToDevice(sb); + if(!dev) + return; +- ++ + yaffs_flush_inodes(sb); + yaffs_update_dirty_dirs(dev); + yaffs_flush_whole_cache(dev); +@@ -2325,7 +2351,7 @@ static int yaffs_do_sync_fs(struct super + * yaffs_bg_start() launches the background thread. + * yaffs_bg_stop() cleans up the background thread. + * +- * NB: ++ * NB: + * The thread should only run after the yaffs is initialised + * The thread should be stopped before yaffs is unmounted. + * The thread should not do any writing while the fs is in read only. +@@ -2924,7 +2950,7 @@ static struct super_block *yaffs_interna + + dev = kmalloc(sizeof(yaffs_dev_t), GFP_KERNEL); + context = kmalloc(sizeof(struct yaffs_LinuxContext),GFP_KERNEL); +- ++ + if(!dev || !context ){ + if(dev) + kfree(dev); +@@ -2957,7 +2983,7 @@ static struct super_block *yaffs_interna + #else + sb->u.generic_sbp = dev; + #endif +- ++ + dev->driver_context = mtd; + param->name = mtd->name; + +@@ -3057,7 +3083,7 @@ static struct super_block *yaffs_interna + param->gc_control = yaffs_gc_control_callback; + + yaffs_dev_to_lc(dev)->superBlock= sb; +- ++ + + #ifndef CONFIG_YAFFS_DOES_ECC + param->use_nand_ecc = 1; +@@ -3099,10 +3125,10 @@ static struct super_block *yaffs_interna + T(YAFFS_TRACE_OS, + (TSTR("yaffs_read_super: guts initialised %s\n"), + (err == YAFFS_OK) ? 
"OK" : "FAILED")); +- ++ + if(err == YAFFS_OK) + yaffs_bg_start(dev); +- ++ + if(!context->bgThread) + param->defered_dir_update = 0; + +@@ -3345,7 +3371,7 @@ static int yaffs_proc_read(char *page, + buf += sprintf(buf,"\n"); + else { + step-=2; +- ++ + mutex_lock(&yaffs_context_lock); + + /* Locate and print the Nth entry. Order N-squared but N is small. */ +@@ -3362,7 +3388,7 @@ static int yaffs_proc_read(char *page, + buf = yaffs_dump_dev_part0(buf, dev); + } else + buf = yaffs_dump_dev_part1(buf, dev); +- ++ + break; + } + mutex_unlock(&yaffs_context_lock); +@@ -3389,7 +3415,7 @@ static int yaffs_stats_proc_read(char *p + int erasedChunks; + + erasedChunks = dev->n_erased_blocks * dev->param.chunks_per_block; +- ++ + buf += sprintf(buf,"%d, %d, %d, %u, %u, %u, %u\n", + n, dev->n_free_chunks, erasedChunks, + dev->bg_gcs, dev->oldest_dirty_gc_count, +--- a/fs/yaffs2/yaffs_guts.c ++++ b/fs/yaffs2/yaffs_guts.c +@@ -370,7 +370,7 @@ static int yaffs_verify_chunk_written(ya + yaffs_ext_tags tempTags; + __u8 *buffer = yaffs_get_temp_buffer(dev,__LINE__); + int result; +- ++ + result = yaffs_rd_chunk_tags_nand(dev,nand_chunk,buffer,&tempTags); + if(memcmp(buffer,data,dev->data_bytes_per_chunk) || + tempTags.obj_id != tags->obj_id || +@@ -424,7 +424,7 @@ static int yaffs_write_new_chunk(struct + * lot of checks that are most likely not needed. + * + * Mods to the above +- * If an erase check fails or the write fails we skip the ++ * If an erase check fails or the write fails we skip the + * rest of the block. + */ + +@@ -486,7 +486,7 @@ static int yaffs_write_new_chunk(struct + } + + +- ++ + /* + * Block retiring for handling a broken block. + */ +@@ -496,7 +496,7 @@ static void yaffs_retire_block(yaffs_dev + yaffs_block_info_t *bi = yaffs_get_block_info(dev, flash_block); + + yaffs2_checkpt_invalidate(dev); +- ++ + yaffs2_clear_oldest_dirty_seq(dev,bi); + + if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) { +@@ -899,7 +899,7 @@ static int yaffs_find_chunk_in_group(yaf + for (j = 0; theChunk && j < dev->chunk_grp_size; j++) { + if (yaffs_check_chunk_bit(dev, theChunk / dev->param.chunks_per_block, + theChunk % dev->param.chunks_per_block)) { +- ++ + if(dev->chunk_grp_size == 1) + return theChunk; + else { +@@ -1802,7 +1802,7 @@ int yaffs_rename_obj(yaffs_obj_t *old_di + yaffs_update_parent(old_dir); + if(new_dir != old_dir) + yaffs_update_parent(new_dir); +- ++ + return result; + } + return YAFFS_FAIL; +@@ -2125,7 +2125,7 @@ static int yaffs_gc_block(yaffs_dev_t *d + + if(bi->block_state == YAFFS_BLOCK_STATE_FULL) + bi->block_state = YAFFS_BLOCK_STATE_COLLECTING; +- ++ + bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */ + + dev->gc_disable = 1; +@@ -2207,7 +2207,7 @@ static int yaffs_gc_block(yaffs_dev_t *d + * No need to copy this, just forget about it and + * fix up the object. + */ +- ++ + /* Free chunks already includes softdeleted chunks. + * How ever this chunk is going to soon be really deleted + * which will increment free chunks. +@@ -2752,7 +2752,7 @@ int yaffs_put_chunk_in_file(yaffs_obj_t + NULL); + if (!tn) + return YAFFS_FAIL; +- ++ + if(!nand_chunk) + /* Dummy insert, bail now */ + return YAFFS_OK; +@@ -2881,7 +2881,7 @@ void yaffs_chunk_del(yaffs_dev_t *dev, i + chunk_id)); + + bi = yaffs_get_block_info(dev, block); +- ++ + yaffs2_update_oldest_dirty_seq(dev, block, bi); + + T(YAFFS_TRACE_DELETION, +@@ -2966,8 +2966,8 @@ static int yaffs_wr_data_obj(yaffs_obj_t + (TSTR("Writing %d bytes to chunk!!!!!!!!!" 
TENDSTR), n_bytes)); + YBUG(); + } +- +- ++ ++ + newChunkId = + yaffs_write_new_chunk(dev, buffer, &newTags, + useReserve); +@@ -3795,14 +3795,14 @@ int yaffs_resize_file(yaffs_obj_t *in, l + + if (new_size == oldFileSize) + return YAFFS_OK; +- ++ + if(new_size > oldFileSize){ + yaffs2_handle_hole(in,new_size); + in->variant.file_variant.file_size = new_size; + } else { +- /* new_size < oldFileSize */ ++ /* new_size < oldFileSize */ + yaffs_resize_file_down(in, new_size); +- } ++ } + + /* Write a new object header to reflect the resize. + * show we've shrunk the file, if need be +@@ -4231,7 +4231,7 @@ static void yaffs_strip_deleted_objs(yaf + * This fixes the problem where directories might have inadvertently been deleted + * leaving the object "hanging" without being rooted in the directory tree. + */ +- ++ + static int yaffs_has_null_parent(yaffs_dev_t *dev, yaffs_obj_t *obj) + { + return (obj == dev->del_dir || +@@ -4262,7 +4262,7 @@ static void yaffs_fix_hanging_objs(yaffs + if (lh) { + obj = ylist_entry(lh, yaffs_obj_t, hash_link); + parent= obj->parent; +- ++ + if(yaffs_has_null_parent(dev,obj)){ + /* These directories are not hanging */ + hanging = 0; +@@ -4311,7 +4311,7 @@ static void yaffs_del_dir_contents(yaffs + + if(dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) + YBUG(); +- ++ + ylist_for_each_safe(lh, n, &dir->variant.dir_variant.children) { + if (lh) { + obj = ylist_entry(lh, yaffs_obj_t, siblings); +@@ -4325,10 +4325,10 @@ static void yaffs_del_dir_contents(yaffs + /* Need to use UnlinkObject since Delete would not handle + * hardlinked objects correctly. + */ +- yaffs_unlink_obj(obj); ++ yaffs_unlink_obj(obj); + } + } +- ++ + } + + static void yaffs_empty_l_n_f(yaffs_dev_t *dev) +@@ -4410,7 +4410,7 @@ static void yaffs_check_obj_details_load + * If the directory updating is defered then yaffs_update_dirty_dirs must be + * called periodically. + */ +- ++ + static void yaffs_update_parent(yaffs_obj_t *obj) + { + yaffs_dev_t *dev; +@@ -4422,8 +4422,8 @@ static void yaffs_update_parent(yaffs_ob + obj->dirty = 1; + obj->yst_mtime = obj->yst_ctime = Y_CURRENT_TIME; + if(dev->param.defered_dir_update){ +- struct ylist_head *link = &obj->variant.dir_variant.dirty; +- ++ struct ylist_head *link = &obj->variant.dir_variant.dirty; ++ + if(ylist_empty(link)){ + ylist_add(link,&dev->dirty_dirs); + T(YAFFS_TRACE_BACKGROUND, (TSTR("Added object %d to dirty directories" TENDSTR),obj->obj_id)); +@@ -4446,7 +4446,7 @@ void yaffs_update_dirty_dirs(yaffs_dev_t + while(!ylist_empty(&dev->dirty_dirs)){ + link = dev->dirty_dirs.next; + ylist_del_init(link); +- ++ + dS=ylist_entry(link,yaffs_dir_s,dirty); + oV = ylist_entry(dS,yaffs_obj_variant,dir_variant); + obj = ylist_entry(oV,yaffs_obj_t,variant); +@@ -4474,7 +4474,7 @@ static void yaffs_remove_obj_from_dir(ya + + ylist_del_init(&obj->siblings); + obj->parent = NULL; +- ++ + yaffs_verify_dir(parent); + } + +@@ -4645,7 +4645,7 @@ yaffs_obj_t *yaffs_get_equivalent_obj(ya + * system to share files. + * + * These automatic unicode are stored slightly differently... +- * - If the name can fit in the ASCII character space then they are saved as ++ * - If the name can fit in the ASCII character space then they are saved as + * ascii names as per above. + * - If the name needs Unicode then the name is saved in Unicode + * starting at oh->name[1]. 
+@@ -4686,7 +4686,7 @@ static void yaffs_load_name_from_oh(yaff + asciiOhName++; + n--; + } +- } else ++ } else + yaffs_strncpy(name,ohName+1, bufferSize -1); + } else + #endif +@@ -4705,7 +4705,7 @@ static void yaffs_load_oh_from_name(yaff + + isAscii = 1; + w = name; +- ++ + /* Figure out if the name will fit in ascii character set */ + while(isAscii && *w){ + if((*w) & 0xff00) +@@ -4729,7 +4729,7 @@ static void yaffs_load_oh_from_name(yaff + yaffs_strncpy(ohName+1,name, YAFFS_MAX_NAME_LENGTH -2); + } + } +- else ++ else + #endif + yaffs_strncpy(ohName,name, YAFFS_MAX_NAME_LENGTH - 1); + +@@ -4738,12 +4738,12 @@ static void yaffs_load_oh_from_name(yaff + int yaffs_get_obj_name(yaffs_obj_t * obj, YCHAR * name, int buffer_size) + { + memset(name, 0, buffer_size * sizeof(YCHAR)); +- ++ + yaffs_check_obj_details_loaded(obj); + + if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) { + yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1); +- } ++ } + #ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM + else if (obj->short_name[0]) { + yaffs_strcpy(name, obj->short_name); +@@ -4861,9 +4861,9 @@ int yaffs_set_attribs(yaffs_obj_t *obj, + if (valid & ATTR_MODE) + obj->yst_mode = attr->ia_mode; + if (valid & ATTR_UID) +- obj->yst_uid = attr->ia_uid; ++ obj->yst_uid = ia_uid_read(attr); + if (valid & ATTR_GID) +- obj->yst_gid = attr->ia_gid; ++ obj->yst_gid = ia_gid_read(attr); + + if (valid & ATTR_ATIME) + obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime); +@@ -4886,9 +4886,9 @@ int yaffs_get_attribs(yaffs_obj_t *obj, + + attr->ia_mode = obj->yst_mode; + valid |= ATTR_MODE; +- attr->ia_uid = obj->yst_uid; ++ ia_uid_write(attr, obj->yst_uid); + valid |= ATTR_UID; +- attr->ia_gid = obj->yst_gid; ++ ia_gid_write(attr, obj->yst_gid); + valid |= ATTR_GID; + + Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime; +--- a/fs/yaffs2/yportenv.h ++++ b/fs/yaffs2/yportenv.h +@@ -170,7 +170,7 @@ + #define O_RDWR 02 + #endif + +-#ifndef O_CREAT ++#ifndef O_CREAT + #define O_CREAT 0100 + #endif + +@@ -218,7 +218,7 @@ + #define EACCES 13 + #endif + +-#ifndef EXDEV ++#ifndef EXDEV + #define EXDEV 18 + #endif + +@@ -281,7 +281,7 @@ + #define S_IFREG 0100000 + #endif + +-#ifndef S_IREAD ++#ifndef S_IREAD + #define S_IREAD 0000400 + #endif + +--- a/fs/yaffs2/devextras.h ++++ b/fs/yaffs2/devextras.h +@@ -87,6 +87,8 @@ struct iattr { + unsigned int ia_attr_flags; + }; + ++/* TODO: add ia_* functions */ ++ + #endif + + #else +@@ -95,7 +97,48 @@ struct iattr { + #include <linux/fs.h> + #include <linux/stat.h> + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) ++static inline uid_t ia_uid_read(const struct iattr *iattr) ++{ ++ return from_kuid(&init_user_ns, iattr->ia_uid); ++} ++ ++static inline gid_t ia_gid_read(const struct iattr *iattr) ++{ ++ return from_kgid(&init_user_ns, iattr->ia_gid); ++} ++ ++static inline void ia_uid_write(struct iattr *iattr, uid_t uid) ++{ ++ iattr->ia_uid = make_kuid(&init_user_ns, uid); ++} ++ ++static inline void ia_gid_write(struct iattr *iattr, gid_t gid) ++{ ++ iattr->ia_gid = make_kgid(&init_user_ns, gid); ++} ++#else ++static inline uid_t ia_uid_read(const struct iattr *iattr) ++{ ++ return iattr->ia_uid; ++} ++ ++static inline gid_t ia_gid_read(const struct iattr *inode) ++{ ++ return iattr->ia_gid; ++} ++ ++static inline void ia_uid_write(struct iattr *iattr, uid_t uid) ++{ ++ iattr->ia_uid = uid; ++} ++ ++static inline void ia_gid_write(struct iattr *iattr, gid_t gid) ++{ ++ iattr->ia_gid = gid; ++} + #endif + ++#endif + + #endif diff --git 
a/target/linux/generic/patches-3.8/513-yaffs-3.6-fix-dir_inode-ops.patch b/target/linux/generic/patches-3.8/513-yaffs-3.6-fix-dir_inode-ops.patch new file mode 100644 index 0000000000..220927a535 --- /dev/null +++ b/target/linux/generic/patches-3.8/513-yaffs-3.6-fix-dir_inode-ops.patch @@ -0,0 +1,60 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -271,20 +271,29 @@ static int yaffs_sync_object(struct file + + static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir); + +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ bool excl); ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, + struct nameidata *n); +-#else ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *n); ++#else ++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode); + #endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, ++ unsigned int flags); ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *n); + #else +-static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode); + static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry); + #endif ++ + static int yaffs_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry); + static int yaffs_unlink(struct inode *dir, struct dentry *dentry); +@@ -837,7 +846,10 @@ struct inode *yaffs_get_inode(struct sup + /* + * Lookup is used to find objects in the fs + */ +-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, ++ unsigned int flags) ++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) + + static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *n) +@@ -1827,7 +1839,10 @@ static int yaffs_mkdir(struct inode *dir + return retVal; + } + +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, ++ bool excl) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode, + struct nameidata *n) + #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) diff --git a/target/linux/generic/patches-3.8/514-yaffs-3.6-use-delayed-work-instead-of-write_super.patch b/target/linux/generic/patches-3.8/514-yaffs-3.6-use-delayed-work-instead-of-write_super.patch new file mode 100644 index 0000000000..c6ecddf0b2 --- /dev/null +++ b/target/linux/generic/patches-3.8/514-yaffs-3.6-use-delayed-work-instead-of-write_super.patch @@ -0,0 +1,180 @@ +--- a/fs/yaffs2/yaffs_vfs_glue.c ++++ b/fs/yaffs2/yaffs_vfs_glue.c +@@ -393,6 +393,84 @@ static void yaffs_touch_super(yaffs_dev_ + static int yaffs_vfs_setattr(struct inode *, struct iattr *); + + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++ ++#define yaffs_super_to_dev(sb) ((struct yaffs_dev_s 
*)sb->s_fs_info) ++ ++static inline struct yaffs_LinuxContext * ++yaffs_sb_to_ylc(struct super_block *sb) ++{ ++ struct yaffs_dev_s *ydev; ++ struct yaffs_LinuxContext *ylc; ++ ++ ydev = yaffs_super_to_dev(sb); ++ ylc = yaffs_dev_to_lc(ydev); ++ return ylc; ++} ++ ++static inline struct super_block *yaffs_work_to_sb(struct work_struct *work) ++{ ++ struct delayed_work *dwork; ++ struct yaffs_LinuxContext *ylc; ++ ++ dwork = container_of(work, struct delayed_work, work); ++ ylc = container_of(dwork, struct yaffs_LinuxContext, sb_sync_dwork); ++ return ylc->superBlock; ++} ++ ++static void yaffs_sb_sync_dwork_func(struct work_struct *work) ++{ ++ struct super_block *sb = yaffs_work_to_sb(work); ++ ++ yaffs_write_super(sb); ++} ++ ++static void yaffs_init_sb_sync_dwork(struct yaffs_LinuxContext *ylc) ++{ ++ INIT_DELAYED_WORK(&ylc->sb_sync_dwork, yaffs_sb_sync_dwork_func); ++} ++ ++static void yaffs_cancel_sb_sync_dwork(struct super_block *sb) ++{ ++ struct yaffs_LinuxContext *ylc = yaffs_sb_to_ylc(sb); ++ ++ cancel_delayed_work_sync(&ylc->sb_sync_dwork); ++} ++ ++static inline bool yaffs_sb_is_dirty(struct super_block *sb) ++{ ++ struct yaffs_LinuxContext *ylc = yaffs_sb_to_ylc(sb); ++ ++ return !!ylc->sb_dirty; ++} ++ ++static inline void yaffs_sb_set_dirty(struct super_block *sb, int dirty) ++{ ++ struct yaffs_LinuxContext *ylc = yaffs_sb_to_ylc(sb); ++ ++ if (ylc->sb_dirty == dirty) ++ return; ++ ++ ylc->sb_dirty = dirty; ++ if (dirty) ++ queue_delayed_work(system_long_wq, &ylc->sb_sync_dwork, ++ msecs_to_jiffies(5000)); ++} ++#else ++static inline bool yaffs_sb_is_dirty(struct super_block *sb) ++{ ++ return !!sb->s_dirt; ++} ++ ++static inline void yaffs_sb_set_dirty(struct super_block *sb, int dirty) ++{ ++ sb->s_dirt = dirty; ++} ++ ++static inline void yaffs_init_sb_sync_dwork(struct yaffs_LinuxContext *ylc) {} ++static inline void yaffs_cancel_sb_sync_dwork(struct super_block *sb) {} ++#endif /* >= 3.6.0 */ ++ + static struct address_space_operations yaffs_file_address_operations = { + .readpage = yaffs_readpage, + .writepage = yaffs_writepage, +@@ -553,7 +631,9 @@ static const struct super_operations yaf + .clear_inode = yaffs_clear_inode, + #endif + .sync_fs = yaffs_sync_fs, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + .write_super = yaffs_write_super, ++#endif + }; + + +@@ -2340,7 +2420,7 @@ static int yaffs_do_sync_fs(struct super + T(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND, + (TSTR("yaffs_do_sync_fs: gc-urgency %d %s %s%s\n"), + gc_urgent, +- sb->s_dirt ? "dirty" : "clean", ++ yaffs_sb_is_dirty(sb) ? "dirty" : "clean", + request_checkpoint ? "checkpoint requested" : "no checkpoint", + oneshot_checkpoint ? 
" one-shot" : "" )); + +@@ -2349,9 +2429,9 @@ static int yaffs_do_sync_fs(struct super + oneshot_checkpoint) && + !dev->is_checkpointed; + +- if (sb->s_dirt || do_checkpoint) { ++ if (yaffs_sb_is_dirty(sb) || do_checkpoint) { + yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint); +- sb->s_dirt = 0; ++ yaffs_sb_set_dirty(sb, 0); + if(oneshot_checkpoint) + yaffs_auto_checkpoint &= ~4; + } +@@ -2627,6 +2707,8 @@ static void yaffs_put_super(struct super + + yaffs_flush_super(sb,1); + ++ yaffs_cancel_sb_sync_dwork(sb); ++ + if (yaffs_dev_to_lc(dev)->putSuperFunc) + yaffs_dev_to_lc(dev)->putSuperFunc(sb); + +@@ -2665,7 +2747,7 @@ static void yaffs_touch_super(yaffs_dev_ + + T(YAFFS_TRACE_OS, (TSTR("yaffs_touch_super() sb = %p\n"), sb)); + if (sb) +- sb->s_dirt = 1; ++ yaffs_sb_set_dirty(sb, 1); + } + + typedef struct { +@@ -2991,6 +3073,8 @@ static struct super_block *yaffs_interna + context->dev = dev; + context->superBlock = sb; + ++ yaffs_init_sb_sync_dwork(context); ++ + dev->read_only = read_only; + + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)) +@@ -3177,7 +3261,7 @@ static struct super_block *yaffs_interna + return NULL; + } + sb->s_root = root; +- sb->s_dirt = !dev->is_checkpointed; ++ yaffs_sb_set_dirty(sb, !dev->is_checkpointed); + T(YAFFS_TRACE_ALWAYS, + (TSTR("yaffs_read_super: is_checkpointed %d\n"), + dev->is_checkpointed)); +--- a/fs/yaffs2/yaffs_linux.h ++++ b/fs/yaffs2/yaffs_linux.h +@@ -34,6 +34,11 @@ struct yaffs_LinuxContext { + + struct task_struct *readdirProcess; + unsigned mount_id; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++ struct delayed_work sb_sync_dwork; /* superblock write-out work */ ++ int sb_dirty; /* superblock is dirty */ ++#endif + }; + + #define yaffs_dev_to_lc(dev) ((struct yaffs_LinuxContext *)((dev)->os_context)) +--- a/fs/yaffs2/yportenv.h ++++ b/fs/yaffs2/yportenv.h +@@ -49,6 +49,9 @@ + #include <linux/slab.h> + #include <linux/vmalloc.h> + #include <linux/xattr.h> ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) ++#include <linux/workqueue.h> ++#endif + + #define YCHAR char + #define YUCHAR unsigned char diff --git a/target/linux/generic/patches-3.8/520-squashfs_update_xz_comp_opts.patch b/target/linux/generic/patches-3.8/520-squashfs_update_xz_comp_opts.patch new file mode 100644 index 0000000000..523b89f9ac --- /dev/null +++ b/target/linux/generic/patches-3.8/520-squashfs_update_xz_comp_opts.patch @@ -0,0 +1,25 @@ +From f31b7c0efa255dd17a5f584022a319387f09b0d8 Mon Sep 17 00:00:00 2001 +From: Jonas Gorski <jonas.gorski@gmail.com> +Date: Tue, 12 Apr 2011 19:55:41 +0200 +Subject: [PATCH] squashfs: update xz compressor options struct. + +Update the xz compressor options struct to match the squashfs userspace +one. 
+--- + fs/squashfs/xz_wrapper.c | 4 +++- + 1 files changed, 3 insertions(+), 1 deletions(-) + +--- a/fs/squashfs/xz_wrapper.c ++++ b/fs/squashfs/xz_wrapper.c +@@ -39,8 +39,10 @@ struct squashfs_xz { + }; + + struct comp_opts { +- __le32 dictionary_size; + __le32 flags; ++ __le16 bit_opts; ++ __le16 fb; ++ __le32 dictionary_size; + }; + + static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff, diff --git a/target/linux/generic/patches-3.8/530-jffs2_make_lzma_available.patch b/target/linux/generic/patches-3.8/530-jffs2_make_lzma_available.patch new file mode 100644 index 0000000000..dae8092e6a --- /dev/null +++ b/target/linux/generic/patches-3.8/530-jffs2_make_lzma_available.patch @@ -0,0 +1,5142 @@ +--- a/fs/jffs2/Kconfig ++++ b/fs/jffs2/Kconfig +@@ -139,6 +139,15 @@ config JFFS2_LZO + This feature was added in July, 2007. Say 'N' if you need + compatibility with older bootloaders or kernels. + ++config JFFS2_LZMA ++ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS ++ select LZMA_COMPRESS ++ select LZMA_DECOMPRESS ++ depends on JFFS2_FS ++ default n ++ help ++ JFFS2 wrapper to the LZMA C SDK ++ + config JFFS2_RTIME + bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS + depends on JFFS2_FS +--- a/fs/jffs2/Makefile ++++ b/fs/jffs2/Makefile +@@ -18,4 +18,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub + jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o + jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o + jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o ++jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o + jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o ++ ++CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma +--- a/fs/jffs2/compr.c ++++ b/fs/jffs2/compr.c +@@ -378,6 +378,9 @@ int __init jffs2_compressors_init(void) + #ifdef CONFIG_JFFS2_LZO + jffs2_lzo_init(); + #endif ++#ifdef CONFIG_JFFS2_LZMA ++ jffs2_lzma_init(); ++#endif + /* Setting default compression mode */ + #ifdef CONFIG_JFFS2_CMODE_NONE + jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; +@@ -401,6 +404,9 @@ int __init jffs2_compressors_init(void) + int jffs2_compressors_exit(void) + { + /* Unregistering compressors */ ++#ifdef CONFIG_JFFS2_LZMA ++ jffs2_lzma_exit(); ++#endif + #ifdef CONFIG_JFFS2_LZO + jffs2_lzo_exit(); + #endif +--- a/fs/jffs2/compr.h ++++ b/fs/jffs2/compr.h +@@ -29,9 +29,9 @@ + #define JFFS2_DYNRUBIN_PRIORITY 20 + #define JFFS2_LZARI_PRIORITY 30 + #define JFFS2_RTIME_PRIORITY 50 +-#define JFFS2_ZLIB_PRIORITY 60 +-#define JFFS2_LZO_PRIORITY 80 +- ++#define JFFS2_LZMA_PRIORITY 70 ++#define JFFS2_ZLIB_PRIORITY 80 ++#define JFFS2_LZO_PRIORITY 90 + + #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */ + #define JFFS2_DYNRUBIN_DISABLED /* for decompression */ +@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void); + int jffs2_lzo_init(void); + void jffs2_lzo_exit(void); + #endif ++#ifdef CONFIG_JFFS2_LZMA ++int jffs2_lzma_init(void); ++void jffs2_lzma_exit(void); ++#endif + + #endif /* __JFFS2_COMPR_H__ */ +--- /dev/null ++++ b/fs/jffs2/compr_lzma.c +@@ -0,0 +1,128 @@ ++/* ++ * JFFS2 -- Journalling Flash File System, Version 2. ++ * ++ * For licensing information, see the file 'LICENCE' in this directory. 
++ * ++ * JFFS2 wrapper to the LZMA C SDK ++ * ++ */ ++ ++#include <linux/lzma.h> ++#include "compr.h" ++ ++#ifdef __KERNEL__ ++ static DEFINE_MUTEX(deflate_mutex); ++#endif ++ ++CLzmaEncHandle *p; ++Byte propsEncoded[LZMA_PROPS_SIZE]; ++SizeT propsSize = sizeof(propsEncoded); ++ ++STATIC void lzma_free_workspace(void) ++{ ++ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc); ++} ++ ++STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props) ++{ ++ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL) ++ { ++ PRINT_ERROR("Failed to allocate lzma deflate workspace\n"); ++ return -ENOMEM; ++ } ++ ++ if (LzmaEnc_SetProps(p, props) != SZ_OK) ++ { ++ lzma_free_workspace(); ++ return -1; ++ } ++ ++ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK) ++ { ++ lzma_free_workspace(); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out, ++ uint32_t *sourcelen, uint32_t *dstlen) ++{ ++ SizeT compress_size = (SizeT)(*dstlen); ++ int ret; ++ ++ #ifdef __KERNEL__ ++ mutex_lock(&deflate_mutex); ++ #endif ++ ++ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen, ++ 0, NULL, &lzma_alloc, &lzma_alloc); ++ ++ #ifdef __KERNEL__ ++ mutex_unlock(&deflate_mutex); ++ #endif ++ ++ if (ret != SZ_OK) ++ return -1; ++ ++ *dstlen = (uint32_t)compress_size; ++ ++ return 0; ++} ++ ++STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out, ++ uint32_t srclen, uint32_t destlen) ++{ ++ int ret; ++ SizeT dl = (SizeT)destlen; ++ SizeT sl = (SizeT)srclen; ++ ELzmaStatus status; ++ ++ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded, ++ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc); ++ ++ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen) ++ return -1; ++ ++ return 0; ++} ++ ++static struct jffs2_compressor jffs2_lzma_comp = { ++ .priority = JFFS2_LZMA_PRIORITY, ++ .name = "lzma", ++ .compr = JFFS2_COMPR_LZMA, ++ .compress = &jffs2_lzma_compress, ++ .decompress = &jffs2_lzma_decompress, ++ .disabled = 0, ++}; ++ ++int INIT jffs2_lzma_init(void) ++{ ++ int ret; ++ CLzmaEncProps props; ++ LzmaEncProps_Init(&props); ++ ++ props.dictSize = LZMA_BEST_DICT(0x2000); ++ props.level = LZMA_BEST_LEVEL; ++ props.lc = LZMA_BEST_LC; ++ props.lp = LZMA_BEST_LP; ++ props.pb = LZMA_BEST_PB; ++ props.fb = LZMA_BEST_FB; ++ ++ ret = lzma_alloc_workspace(&props); ++ if (ret < 0) ++ return ret; ++ ++ ret = jffs2_register_compressor(&jffs2_lzma_comp); ++ if (ret) ++ lzma_free_workspace(); ++ ++ return ret; ++} ++ ++void jffs2_lzma_exit(void) ++{ ++ jffs2_unregister_compressor(&jffs2_lzma_comp); ++ lzma_free_workspace(); ++} +--- a/fs/jffs2/super.c ++++ b/fs/jffs2/super.c +@@ -373,14 +373,41 @@ static int __init init_jffs2_fs(void) + BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68); + BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32); + +- pr_info("version 2.2." 
++ pr_info("version 2.2" + #ifdef CONFIG_JFFS2_FS_WRITEBUFFER + " (NAND)" + #endif + #ifdef CONFIG_JFFS2_SUMMARY +- " (SUMMARY) " ++ " (SUMMARY)" + #endif +- " © 2001-2006 Red Hat, Inc.\n"); ++#ifdef CONFIG_JFFS2_ZLIB ++ " (ZLIB)" ++#endif ++#ifdef CONFIG_JFFS2_LZO ++ " (LZO)" ++#endif ++#ifdef CONFIG_JFFS2_LZMA ++ " (LZMA)" ++#endif ++#ifdef CONFIG_JFFS2_RTIME ++ " (RTIME)" ++#endif ++#ifdef CONFIG_JFFS2_RUBIN ++ " (RUBIN)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_NONE ++ " (CMODE_NONE)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_PRIORITY ++ " (CMODE_PRIORITY)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_SIZE ++ " (CMODE_SIZE)" ++#endif ++#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO ++ " (CMODE_FAVOURLZO)" ++#endif ++ " (c) 2001-2006 Red Hat, Inc.\n"); + + jffs2_inode_cachep = kmem_cache_create("jffs2_i", + sizeof(struct jffs2_inode_info), +--- a/include/uapi/linux/jffs2.h ++++ b/include/uapi/linux/jffs2.h +@@ -46,6 +46,7 @@ + #define JFFS2_COMPR_DYNRUBIN 0x05 + #define JFFS2_COMPR_ZLIB 0x06 + #define JFFS2_COMPR_LZO 0x07 ++#define JFFS2_COMPR_LZMA 0x08 + /* Compatibility flags. */ + #define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */ + #define JFFS2_NODE_ACCURATE 0x2000 +--- /dev/null ++++ b/include/linux/lzma.h +@@ -0,0 +1,62 @@ ++#ifndef __LZMA_H__ ++#define __LZMA_H__ ++ ++#ifdef __KERNEL__ ++ #include <linux/kernel.h> ++ #include <linux/sched.h> ++ #include <linux/slab.h> ++ #include <linux/vmalloc.h> ++ #include <linux/init.h> ++ #define LZMA_MALLOC vmalloc ++ #define LZMA_FREE vfree ++ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg) ++ #define INIT __init ++ #define STATIC static ++#else ++ #include <stdint.h> ++ #include <stdlib.h> ++ #include <stdio.h> ++ #include <unistd.h> ++ #include <string.h> ++ #include <asm/types.h> ++ #include <errno.h> ++ #include <linux/jffs2.h> ++ #ifndef PAGE_SIZE ++ extern int page_size; ++ #define PAGE_SIZE page_size ++ #endif ++ #define LZMA_MALLOC malloc ++ #define LZMA_FREE free ++ #define PRINT_ERROR(msg) fprintf(stderr, msg) ++ #define INIT ++ #define STATIC ++#endif ++ ++#include "lzma/LzmaDec.h" ++#include "lzma/LzmaEnc.h" ++ ++#define LZMA_BEST_LEVEL (9) ++#define LZMA_BEST_LC (0) ++#define LZMA_BEST_LP (0) ++#define LZMA_BEST_PB (0) ++#define LZMA_BEST_FB (273) ++ ++#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2) ++ ++static void *p_lzma_malloc(void *p, size_t size) ++{ ++ if (size == 0) ++ return NULL; ++ ++ return LZMA_MALLOC(size); ++} ++ ++static void p_lzma_free(void *p, void *address) ++{ ++ if (address != NULL) ++ LZMA_FREE(address); ++} ++ ++static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free}; ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzFind.h +@@ -0,0 +1,115 @@ ++/* LzFind.h -- Match finder for LZ algorithms ++2009-04-22 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZ_FIND_H ++#define __LZ_FIND_H ++ ++#include "Types.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++typedef UInt32 CLzRef; ++ ++typedef struct _CMatchFinder ++{ ++ Byte *buffer; ++ UInt32 pos; ++ UInt32 posLimit; ++ UInt32 streamPos; ++ UInt32 lenLimit; ++ ++ UInt32 cyclicBufferPos; ++ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */ ++ ++ UInt32 matchMaxLen; ++ CLzRef *hash; ++ CLzRef *son; ++ UInt32 hashMask; ++ UInt32 cutValue; ++ ++ Byte *bufferBase; ++ ISeqInStream *stream; ++ int streamEndWasReached; ++ ++ UInt32 blockSize; ++ UInt32 keepSizeBefore; ++ UInt32 keepSizeAfter; ++ ++ UInt32 numHashBytes; ++ int directInput; ++ size_t directInputRem; ++ int btMode; ++ int bigHash; ++ UInt32 historySize; ++ UInt32 
fixedHashSize; ++ UInt32 hashSizeSum; ++ UInt32 numSons; ++ SRes result; ++ UInt32 crc[256]; ++} CMatchFinder; ++ ++#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer) ++#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)]) ++ ++#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos) ++ ++int MatchFinder_NeedMove(CMatchFinder *p); ++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); ++void MatchFinder_MoveBlock(CMatchFinder *p); ++void MatchFinder_ReadIfRequired(CMatchFinder *p); ++ ++void MatchFinder_Construct(CMatchFinder *p); ++ ++/* Conditions: ++ historySize <= 3 GB ++ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB ++*/ ++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, ++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ++ ISzAlloc *alloc); ++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc); ++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems); ++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue); ++ ++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue, ++ UInt32 *distances, UInt32 maxLen); ++ ++/* ++Conditions: ++ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func. ++ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function ++*/ ++ ++typedef void (*Mf_Init_Func)(void *object); ++typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index); ++typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object); ++typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object); ++typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances); ++typedef void (*Mf_Skip_Func)(void *object, UInt32); ++ ++typedef struct _IMatchFinder ++{ ++ Mf_Init_Func Init; ++ Mf_GetIndexByte_Func GetIndexByte; ++ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes; ++ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos; ++ Mf_GetMatches_Func GetMatches; ++ Mf_Skip_Func Skip; ++} IMatchFinder; ++ ++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable); ++ ++void MatchFinder_Init(CMatchFinder *p); ++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); ++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); ++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); ++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzHash.h +@@ -0,0 +1,54 @@ ++/* LzHash.h -- HASH functions for LZ algorithms ++2009-02-07 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZ_HASH_H ++#define __LZ_HASH_H ++ ++#define kHash2Size (1 << 10) ++#define kHash3Size (1 << 16) ++#define kHash4Size (1 << 20) ++ ++#define kFix3HashSize (kHash2Size) ++#define kFix4HashSize (kHash2Size + kHash3Size) ++#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size) ++ ++#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8); ++ ++#define HASH3_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; } ++ ++#define HASH4_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ ++ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ 
(p->crc[cur[3]] << 5)) & p->hashMask; } ++ ++#define HASH5_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ ++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \ ++ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \ ++ hash4Value &= (kHash4Size - 1); } ++ ++/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */ ++#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF; ++ ++ ++#define MT_HASH2_CALC \ ++ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1); ++ ++#define MT_HASH3_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); } ++ ++#define MT_HASH4_CALC { \ ++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ ++ hash2Value = temp & (kHash2Size - 1); \ ++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \ ++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); } ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzmaDec.h +@@ -0,0 +1,231 @@ ++/* LzmaDec.h -- LZMA Decoder ++2009-02-07 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZMA_DEC_H ++#define __LZMA_DEC_H ++ ++#include "Types.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/* #define _LZMA_PROB32 */ ++/* _LZMA_PROB32 can increase the speed on some CPUs, ++ but memory usage for CLzmaDec::probs will be doubled in that case */ ++ ++#ifdef _LZMA_PROB32 ++#define CLzmaProb UInt32 ++#else ++#define CLzmaProb UInt16 ++#endif ++ ++ ++/* ---------- LZMA Properties ---------- */ ++ ++#define LZMA_PROPS_SIZE 5 ++ ++typedef struct _CLzmaProps ++{ ++ unsigned lc, lp, pb; ++ UInt32 dicSize; ++} CLzmaProps; ++ ++/* LzmaProps_Decode - decodes properties ++Returns: ++ SZ_OK ++ SZ_ERROR_UNSUPPORTED - Unsupported properties ++*/ ++ ++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size); ++ ++ ++/* ---------- LZMA Decoder state ---------- */ ++ ++/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case. ++ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */ ++ ++#define LZMA_REQUIRED_INPUT_MAX 20 ++ ++typedef struct ++{ ++ CLzmaProps prop; ++ CLzmaProb *probs; ++ Byte *dic; ++ const Byte *buf; ++ UInt32 range, code; ++ SizeT dicPos; ++ SizeT dicBufSize; ++ UInt32 processedPos; ++ UInt32 checkDicSize; ++ unsigned state; ++ UInt32 reps[4]; ++ unsigned remainLen; ++ int needFlush; ++ int needInitState; ++ UInt32 numProbs; ++ unsigned tempBufSize; ++ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX]; ++} CLzmaDec; ++ ++#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; } ++ ++void LzmaDec_Init(CLzmaDec *p); ++ ++/* There are two types of LZMA streams: ++ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size. ++ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */ ++ ++typedef enum ++{ ++ LZMA_FINISH_ANY, /* finish at any point */ ++ LZMA_FINISH_END /* block must be finished at the end */ ++} ELzmaFinishMode; ++ ++/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!! ++ ++ You must use LZMA_FINISH_END, when you know that current output buffer ++ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY. 
++ ++ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK, ++ and output value of destLen will be less than output buffer size limit. ++ You can check status result also. ++ ++ You can use multiple checks to test data integrity after full decompression: ++ 1) Check Result and "status" variable. ++ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize. ++ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize. ++ You must use correct finish mode in that case. */ ++ ++typedef enum ++{ ++ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */ ++ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */ ++ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */ ++ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */ ++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */ ++} ELzmaStatus; ++ ++/* ELzmaStatus is used only as output value for function call */ ++ ++ ++/* ---------- Interfaces ---------- */ ++ ++/* There are 3 levels of interfaces: ++ 1) Dictionary Interface ++ 2) Buffer Interface ++ 3) One Call Interface ++ You can select any of these interfaces, but don't mix functions from different ++ groups for same object. */ ++ ++ ++/* There are two variants to allocate state for Dictionary Interface: ++ 1) LzmaDec_Allocate / LzmaDec_Free ++ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs ++ You can use variant 2, if you set dictionary buffer manually. ++ For Buffer Interface you must always use variant 1. ++ ++LzmaDec_Allocate* can return: ++ SZ_OK ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_UNSUPPORTED - Unsupported properties ++*/ ++ ++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc); ++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc); ++ ++SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc); ++void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc); ++ ++/* ---------- Dictionary Interface ---------- */ ++ ++/* You can use it, if you want to eliminate the overhead for data copying from ++ dictionary to some other external buffer. ++ You must work with CLzmaDec variables directly in this interface. ++ ++ STEPS: ++ LzmaDec_Constr() ++ LzmaDec_Allocate() ++ for (each new stream) ++ { ++ LzmaDec_Init() ++ while (it needs more decompression) ++ { ++ LzmaDec_DecodeToDic() ++ use data from CLzmaDec::dic and update CLzmaDec::dicPos ++ } ++ } ++ LzmaDec_Free() ++*/ ++ ++/* LzmaDec_DecodeToDic ++ ++ The decoding to internal dictionary buffer (CLzmaDec::dic). ++ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!! ++ ++finishMode: ++ It has meaning only if the decoding reaches output limit (dicLimit). ++ LZMA_FINISH_ANY - Decode just dicLimit bytes. ++ LZMA_FINISH_END - Stream must be finished after dicLimit. ++ ++Returns: ++ SZ_OK ++ status: ++ LZMA_STATUS_FINISHED_WITH_MARK ++ LZMA_STATUS_NOT_FINISHED ++ LZMA_STATUS_NEEDS_MORE_INPUT ++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK ++ SZ_ERROR_DATA - Data error ++*/ ++ ++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, ++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); ++ ++ ++/* ---------- Buffer Interface ---------- */ ++ ++/* It's zlib-like interface. 
++ See LzmaDec_DecodeToDic description for information about STEPS and return results, ++ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need ++ to work with CLzmaDec variables manually. ++ ++finishMode: ++ It has meaning only if the decoding reaches output limit (*destLen). ++ LZMA_FINISH_ANY - Decode just destLen bytes. ++ LZMA_FINISH_END - Stream must be finished after (*destLen). ++*/ ++ ++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, ++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); ++ ++ ++/* ---------- One Call Interface ---------- */ ++ ++/* LzmaDecode ++ ++finishMode: ++ It has meaning only if the decoding reaches output limit (*destLen). ++ LZMA_FINISH_ANY - Decode just destLen bytes. ++ LZMA_FINISH_END - Stream must be finished after (*destLen). ++ ++Returns: ++ SZ_OK ++ status: ++ LZMA_STATUS_FINISHED_WITH_MARK ++ LZMA_STATUS_NOT_FINISHED ++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK ++ SZ_ERROR_DATA - Data error ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_UNSUPPORTED - Unsupported properties ++ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src). ++*/ ++ ++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, ++ ELzmaStatus *status, ISzAlloc *alloc); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/LzmaEnc.h +@@ -0,0 +1,80 @@ ++/* LzmaEnc.h -- LZMA Encoder ++2009-02-07 : Igor Pavlov : Public domain */ ++ ++#ifndef __LZMA_ENC_H ++#define __LZMA_ENC_H ++ ++#include "Types.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define LZMA_PROPS_SIZE 5 ++ ++typedef struct _CLzmaEncProps ++{ ++ int level; /* 0 <= level <= 9 */ ++ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version ++ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version ++ default = (1 << 24) */ ++ int lc; /* 0 <= lc <= 8, default = 3 */ ++ int lp; /* 0 <= lp <= 4, default = 0 */ ++ int pb; /* 0 <= pb <= 4, default = 2 */ ++ int algo; /* 0 - fast, 1 - normal, default = 1 */ ++ int fb; /* 5 <= fb <= 273, default = 32 */ ++ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */ ++ int numHashBytes; /* 2, 3 or 4, default = 4 */ ++ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */ ++ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */ ++ int numThreads; /* 1 or 2, default = 2 */ ++} CLzmaEncProps; ++ ++void LzmaEncProps_Init(CLzmaEncProps *p); ++void LzmaEncProps_Normalize(CLzmaEncProps *p); ++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2); ++ ++ ++/* ---------- CLzmaEncHandle Interface ---------- */ ++ ++/* LzmaEnc_* functions can return the following exit codes: ++Returns: ++ SZ_OK - OK ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_PARAM - Incorrect paramater in props ++ SZ_ERROR_WRITE - Write callback error. 
++ SZ_ERROR_PROGRESS - some break from progress callback ++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) ++*/ ++ ++typedef void * CLzmaEncHandle; ++ ++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc); ++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig); ++SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props); ++SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size); ++SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream, ++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); ++SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); ++ ++/* ---------- One Call Interface ---------- */ ++ ++/* LzmaEncode ++Return code: ++ SZ_OK - OK ++ SZ_ERROR_MEM - Memory allocation error ++ SZ_ERROR_PARAM - Incorrect paramater ++ SZ_ERROR_OUTPUT_EOF - output buffer overflow ++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) ++*/ ++ ++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, ++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/include/linux/lzma/Types.h +@@ -0,0 +1,226 @@ ++/* Types.h -- Basic types ++2009-11-23 : Igor Pavlov : Public domain */ ++ ++#ifndef __7Z_TYPES_H ++#define __7Z_TYPES_H ++ ++#include <stddef.h> ++ ++#ifdef _WIN32 ++#include <windows.h> ++#endif ++ ++#ifndef EXTERN_C_BEGIN ++#ifdef __cplusplus ++#define EXTERN_C_BEGIN extern "C" { ++#define EXTERN_C_END } ++#else ++#define EXTERN_C_BEGIN ++#define EXTERN_C_END ++#endif ++#endif ++ ++EXTERN_C_BEGIN ++ ++#define SZ_OK 0 ++ ++#define SZ_ERROR_DATA 1 ++#define SZ_ERROR_MEM 2 ++#define SZ_ERROR_CRC 3 ++#define SZ_ERROR_UNSUPPORTED 4 ++#define SZ_ERROR_PARAM 5 ++#define SZ_ERROR_INPUT_EOF 6 ++#define SZ_ERROR_OUTPUT_EOF 7 ++#define SZ_ERROR_READ 8 ++#define SZ_ERROR_WRITE 9 ++#define SZ_ERROR_PROGRESS 10 ++#define SZ_ERROR_FAIL 11 ++#define SZ_ERROR_THREAD 12 ++ ++#define SZ_ERROR_ARCHIVE 16 ++#define SZ_ERROR_NO_ARCHIVE 17 ++ ++typedef int SRes; ++ ++#ifdef _WIN32 ++typedef DWORD WRes; ++#else ++typedef int WRes; ++#endif ++ ++#ifndef RINOK ++#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; } ++#endif ++ ++typedef unsigned char Byte; ++typedef short Int16; ++typedef unsigned short UInt16; ++ ++#ifdef _LZMA_UINT32_IS_ULONG ++typedef long Int32; ++typedef unsigned long UInt32; ++#else ++typedef int Int32; ++typedef unsigned int UInt32; ++#endif ++ ++#ifdef _SZ_NO_INT_64 ++ ++/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers. ++ NOTES: Some code will work incorrectly in that case! 
*/ ++ ++typedef long Int64; ++typedef unsigned long UInt64; ++ ++#else ++ ++#if defined(_MSC_VER) || defined(__BORLANDC__) ++typedef __int64 Int64; ++typedef unsigned __int64 UInt64; ++#else ++typedef long long int Int64; ++typedef unsigned long long int UInt64; ++#endif ++ ++#endif ++ ++#ifdef _LZMA_NO_SYSTEM_SIZE_T ++typedef UInt32 SizeT; ++#else ++typedef size_t SizeT; ++#endif ++ ++typedef int Bool; ++#define True 1 ++#define False 0 ++ ++ ++#ifdef _WIN32 ++#define MY_STD_CALL __stdcall ++#else ++#define MY_STD_CALL ++#endif ++ ++#ifdef _MSC_VER ++ ++#if _MSC_VER >= 1300 ++#define MY_NO_INLINE __declspec(noinline) ++#else ++#define MY_NO_INLINE ++#endif ++ ++#define MY_CDECL __cdecl ++#define MY_FAST_CALL __fastcall ++ ++#else ++ ++#define MY_CDECL ++#define MY_FAST_CALL ++ ++#endif ++ ++ ++/* The following interfaces use first parameter as pointer to structure */ ++ ++typedef struct ++{ ++ SRes (*Read)(void *p, void *buf, size_t *size); ++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. ++ (output(*size) < input(*size)) is allowed */ ++} ISeqInStream; ++ ++/* it can return SZ_ERROR_INPUT_EOF */ ++SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size); ++SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType); ++SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf); ++ ++typedef struct ++{ ++ size_t (*Write)(void *p, const void *buf, size_t size); ++ /* Returns: result - the number of actually written bytes. ++ (result < size) means error */ ++} ISeqOutStream; ++ ++typedef enum ++{ ++ SZ_SEEK_SET = 0, ++ SZ_SEEK_CUR = 1, ++ SZ_SEEK_END = 2 ++} ESzSeek; ++ ++typedef struct ++{ ++ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */ ++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); ++} ISeekInStream; ++ ++typedef struct ++{ ++ SRes (*Look)(void *p, void **buf, size_t *size); ++ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. ++ (output(*size) > input(*size)) is not allowed ++ (output(*size) < input(*size)) is allowed */ ++ SRes (*Skip)(void *p, size_t offset); ++ /* offset must be <= output(*size) of Look */ ++ ++ SRes (*Read)(void *p, void *buf, size_t *size); ++ /* reads directly (without buffer). It's same as ISeqInStream::Read */ ++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin); ++} ILookInStream; ++ ++SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size); ++SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset); ++ ++/* reads via ILookInStream::Read */ ++SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType); ++SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size); ++ ++#define LookToRead_BUF_SIZE (1 << 14) ++ ++typedef struct ++{ ++ ILookInStream s; ++ ISeekInStream *realStream; ++ size_t pos; ++ size_t size; ++ Byte buf[LookToRead_BUF_SIZE]; ++} CLookToRead; ++ ++void LookToRead_CreateVTable(CLookToRead *p, int lookahead); ++void LookToRead_Init(CLookToRead *p); ++ ++typedef struct ++{ ++ ISeqInStream s; ++ ILookInStream *realStream; ++} CSecToLook; ++ ++void SecToLook_CreateVTable(CSecToLook *p); ++ ++typedef struct ++{ ++ ISeqInStream s; ++ ILookInStream *realStream; ++} CSecToRead; ++ ++void SecToRead_CreateVTable(CSecToRead *p); ++ ++typedef struct ++{ ++ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize); ++ /* Returns: result. (result != SZ_OK) means break. ++ Value (UInt64)(Int64)-1 for size means unknown value. 
*/ ++} ICompressProgress; ++ ++typedef struct ++{ ++ void *(*Alloc)(void *p, size_t size); ++ void (*Free)(void *p, void *address); /* address can be 0 */ ++} ISzAlloc; ++ ++#define IAlloc_Alloc(p, size) (p)->Alloc((p), size) ++#define IAlloc_Free(p, a) (p)->Free((p), a) ++ ++EXTERN_C_END ++ ++#endif +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -191,6 +191,12 @@ config LZO_DECOMPRESS + + source "lib/xz/Kconfig" + ++config LZMA_COMPRESS ++ tristate ++ ++config LZMA_DECOMPRESS ++ tristate ++ + # + # These all provide a common interface (hence the apparent duplication with + # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.) +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -2,6 +2,16 @@ + # Makefile for some libs needed in the kernel. + # + ++ifdef CONFIG_JFFS2_ZLIB ++ CONFIG_ZLIB_INFLATE:=y ++ CONFIG_ZLIB_DEFLATE:=y ++endif ++ ++ifdef CONFIG_JFFS2_LZMA ++ CONFIG_LZMA_DECOMPRESS:=y ++ CONFIG_LZMA_COMPRESS:=y ++endif ++ + ifdef CONFIG_FUNCTION_TRACER + ORIG_CFLAGS := $(KBUILD_CFLAGS) + KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) +@@ -74,6 +84,8 @@ obj-$(CONFIG_LZO_COMPRESS) += lzo/ + obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ + obj-$(CONFIG_XZ_DEC) += xz/ + obj-$(CONFIG_RAID6_PQ) += raid6/ ++obj-$(CONFIG_LZMA_COMPRESS) += lzma/ ++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/ + + lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o + lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o +--- /dev/null ++++ b/lib/lzma/LzFind.c +@@ -0,0 +1,761 @@ ++/* LzFind.c -- Match finder for LZ algorithms ++2009-04-22 : Igor Pavlov : Public domain */ ++ ++#include <string.h> ++ ++#include "LzFind.h" ++#include "LzHash.h" ++ ++#define kEmptyHashValue 0 ++#define kMaxValForNormalize ((UInt32)0xFFFFFFFF) ++#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */ ++#define kNormalizeMask (~(kNormalizeStepMin - 1)) ++#define kMaxHistorySize ((UInt32)3 << 30) ++ ++#define kStartMaxLen 3 ++ ++static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc) ++{ ++ if (!p->directInput) ++ { ++ alloc->Free(alloc, p->bufferBase); ++ p->bufferBase = 0; ++ } ++} ++ ++/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */ ++ ++static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc) ++{ ++ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv; ++ if (p->directInput) ++ { ++ p->blockSize = blockSize; ++ return 1; ++ } ++ if (p->bufferBase == 0 || p->blockSize != blockSize) ++ { ++ LzInWindow_Free(p, alloc); ++ p->blockSize = blockSize; ++ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize); ++ } ++ return (p->bufferBase != 0); ++} ++ ++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; } ++Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; } ++ ++UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; } ++ ++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue) ++{ ++ p->posLimit -= subValue; ++ p->pos -= subValue; ++ p->streamPos -= subValue; ++} ++ ++static void MatchFinder_ReadBlock(CMatchFinder *p) ++{ ++ if (p->streamEndWasReached || p->result != SZ_OK) ++ return; ++ if (p->directInput) ++ { ++ UInt32 curSize = 0xFFFFFFFF - p->streamPos; ++ if (curSize > p->directInputRem) ++ curSize = (UInt32)p->directInputRem; ++ p->directInputRem -= curSize; ++ p->streamPos += curSize; ++ if (p->directInputRem == 0) ++ p->streamEndWasReached = 1; ++ return; ++ } ++ for (;;) ++ { ++ Byte *dest = p->buffer + (p->streamPos - p->pos); ++ size_t size = (p->bufferBase + 
p->blockSize - dest); ++ if (size == 0) ++ return; ++ p->result = p->stream->Read(p->stream, dest, &size); ++ if (p->result != SZ_OK) ++ return; ++ if (size == 0) ++ { ++ p->streamEndWasReached = 1; ++ return; ++ } ++ p->streamPos += (UInt32)size; ++ if (p->streamPos - p->pos > p->keepSizeAfter) ++ return; ++ } ++} ++ ++void MatchFinder_MoveBlock(CMatchFinder *p) ++{ ++ memmove(p->bufferBase, ++ p->buffer - p->keepSizeBefore, ++ (size_t)(p->streamPos - p->pos + p->keepSizeBefore)); ++ p->buffer = p->bufferBase + p->keepSizeBefore; ++} ++ ++int MatchFinder_NeedMove(CMatchFinder *p) ++{ ++ if (p->directInput) ++ return 0; ++ /* if (p->streamEndWasReached) return 0; */ ++ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter); ++} ++ ++void MatchFinder_ReadIfRequired(CMatchFinder *p) ++{ ++ if (p->streamEndWasReached) ++ return; ++ if (p->keepSizeAfter >= p->streamPos - p->pos) ++ MatchFinder_ReadBlock(p); ++} ++ ++static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p) ++{ ++ if (MatchFinder_NeedMove(p)) ++ MatchFinder_MoveBlock(p); ++ MatchFinder_ReadBlock(p); ++} ++ ++static void MatchFinder_SetDefaultSettings(CMatchFinder *p) ++{ ++ p->cutValue = 32; ++ p->btMode = 1; ++ p->numHashBytes = 4; ++ p->bigHash = 0; ++} ++ ++#define kCrcPoly 0xEDB88320 ++ ++void MatchFinder_Construct(CMatchFinder *p) ++{ ++ UInt32 i; ++ p->bufferBase = 0; ++ p->directInput = 0; ++ p->hash = 0; ++ MatchFinder_SetDefaultSettings(p); ++ ++ for (i = 0; i < 256; i++) ++ { ++ UInt32 r = i; ++ int j; ++ for (j = 0; j < 8; j++) ++ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1)); ++ p->crc[i] = r; ++ } ++} ++ ++static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->hash); ++ p->hash = 0; ++} ++ ++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc) ++{ ++ MatchFinder_FreeThisClassMemory(p, alloc); ++ LzInWindow_Free(p, alloc); ++} ++ ++static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc) ++{ ++ size_t sizeInBytes = (size_t)num * sizeof(CLzRef); ++ if (sizeInBytes / sizeof(CLzRef) != num) ++ return 0; ++ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes); ++} ++ ++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, ++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ++ ISzAlloc *alloc) ++{ ++ UInt32 sizeReserv; ++ if (historySize > kMaxHistorySize) ++ { ++ MatchFinder_Free(p, alloc); ++ return 0; ++ } ++ sizeReserv = historySize >> 1; ++ if (historySize > ((UInt32)2 << 30)) ++ sizeReserv = historySize >> 2; ++ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19); ++ ++ p->keepSizeBefore = historySize + keepAddBufferBefore + 1; ++ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter; ++ /* we need one additional byte, since we use MoveBlock after pos++ and before dictionary using */ ++ if (LzInWindow_Create(p, sizeReserv, alloc)) ++ { ++ UInt32 newCyclicBufferSize = historySize + 1; ++ UInt32 hs; ++ p->matchMaxLen = matchMaxLen; ++ { ++ p->fixedHashSize = 0; ++ if (p->numHashBytes == 2) ++ hs = (1 << 16) - 1; ++ else ++ { ++ hs = historySize - 1; ++ hs |= (hs >> 1); ++ hs |= (hs >> 2); ++ hs |= (hs >> 4); ++ hs |= (hs >> 8); ++ hs >>= 1; ++ hs |= 0xFFFF; /* don't change it! 
It's required for Deflate */ ++ if (hs > (1 << 24)) ++ { ++ if (p->numHashBytes == 3) ++ hs = (1 << 24) - 1; ++ else ++ hs >>= 1; ++ } ++ } ++ p->hashMask = hs; ++ hs++; ++ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size; ++ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size; ++ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size; ++ hs += p->fixedHashSize; ++ } ++ ++ { ++ UInt32 prevSize = p->hashSizeSum + p->numSons; ++ UInt32 newSize; ++ p->historySize = historySize; ++ p->hashSizeSum = hs; ++ p->cyclicBufferSize = newCyclicBufferSize; ++ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize); ++ newSize = p->hashSizeSum + p->numSons; ++ if (p->hash != 0 && prevSize == newSize) ++ return 1; ++ MatchFinder_FreeThisClassMemory(p, alloc); ++ p->hash = AllocRefs(newSize, alloc); ++ if (p->hash != 0) ++ { ++ p->son = p->hash + p->hashSizeSum; ++ return 1; ++ } ++ } ++ } ++ MatchFinder_Free(p, alloc); ++ return 0; ++} ++ ++static void MatchFinder_SetLimits(CMatchFinder *p) ++{ ++ UInt32 limit = kMaxValForNormalize - p->pos; ++ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos; ++ if (limit2 < limit) ++ limit = limit2; ++ limit2 = p->streamPos - p->pos; ++ if (limit2 <= p->keepSizeAfter) ++ { ++ if (limit2 > 0) ++ limit2 = 1; ++ } ++ else ++ limit2 -= p->keepSizeAfter; ++ if (limit2 < limit) ++ limit = limit2; ++ { ++ UInt32 lenLimit = p->streamPos - p->pos; ++ if (lenLimit > p->matchMaxLen) ++ lenLimit = p->matchMaxLen; ++ p->lenLimit = lenLimit; ++ } ++ p->posLimit = p->pos + limit; ++} ++ ++void MatchFinder_Init(CMatchFinder *p) ++{ ++ UInt32 i; ++ for (i = 0; i < p->hashSizeSum; i++) ++ p->hash[i] = kEmptyHashValue; ++ p->cyclicBufferPos = 0; ++ p->buffer = p->bufferBase; ++ p->pos = p->streamPos = p->cyclicBufferSize; ++ p->result = SZ_OK; ++ p->streamEndWasReached = 0; ++ MatchFinder_ReadBlock(p); ++ MatchFinder_SetLimits(p); ++} ++ ++static UInt32 MatchFinder_GetSubValue(CMatchFinder *p) ++{ ++ return (p->pos - p->historySize - 1) & kNormalizeMask; ++} ++ ++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems) ++{ ++ UInt32 i; ++ for (i = 0; i < numItems; i++) ++ { ++ UInt32 value = items[i]; ++ if (value <= subValue) ++ value = kEmptyHashValue; ++ else ++ value -= subValue; ++ items[i] = value; ++ } ++} ++ ++static void MatchFinder_Normalize(CMatchFinder *p) ++{ ++ UInt32 subValue = MatchFinder_GetSubValue(p); ++ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons); ++ MatchFinder_ReduceOffsets(p, subValue); ++} ++ ++static void MatchFinder_CheckLimits(CMatchFinder *p) ++{ ++ if (p->pos == kMaxValForNormalize) ++ MatchFinder_Normalize(p); ++ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos) ++ MatchFinder_CheckAndMoveAndRead(p); ++ if (p->cyclicBufferPos == p->cyclicBufferSize) ++ p->cyclicBufferPos = 0; ++ MatchFinder_SetLimits(p); ++} ++ ++static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, ++ UInt32 *distances, UInt32 maxLen) ++{ ++ son[_cyclicBufferPos] = curMatch; ++ for (;;) ++ { ++ UInt32 delta = pos - curMatch; ++ if (cutValue-- == 0 || delta >= _cyclicBufferSize) ++ return distances; ++ { ++ const Byte *pb = cur - delta; ++ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? 
_cyclicBufferSize : 0)]; ++ if (pb[maxLen] == cur[maxLen] && *pb == *cur) ++ { ++ UInt32 len = 0; ++ while (++len != lenLimit) ++ if (pb[len] != cur[len]) ++ break; ++ if (maxLen < len) ++ { ++ *distances++ = maxLen = len; ++ *distances++ = delta - 1; ++ if (len == lenLimit) ++ return distances; ++ } ++ } ++ } ++ } ++} ++ ++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, ++ UInt32 *distances, UInt32 maxLen) ++{ ++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1; ++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1); ++ UInt32 len0 = 0, len1 = 0; ++ for (;;) ++ { ++ UInt32 delta = pos - curMatch; ++ if (cutValue-- == 0 || delta >= _cyclicBufferSize) ++ { ++ *ptr0 = *ptr1 = kEmptyHashValue; ++ return distances; ++ } ++ { ++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1); ++ const Byte *pb = cur - delta; ++ UInt32 len = (len0 < len1 ? len0 : len1); ++ if (pb[len] == cur[len]) ++ { ++ if (++len != lenLimit && pb[len] == cur[len]) ++ while (++len != lenLimit) ++ if (pb[len] != cur[len]) ++ break; ++ if (maxLen < len) ++ { ++ *distances++ = maxLen = len; ++ *distances++ = delta - 1; ++ if (len == lenLimit) ++ { ++ *ptr1 = pair[0]; ++ *ptr0 = pair[1]; ++ return distances; ++ } ++ } ++ } ++ if (pb[len] < cur[len]) ++ { ++ *ptr1 = curMatch; ++ ptr1 = pair + 1; ++ curMatch = *ptr1; ++ len1 = len; ++ } ++ else ++ { ++ *ptr0 = curMatch; ++ ptr0 = pair; ++ curMatch = *ptr0; ++ len0 = len; ++ } ++ } ++ } ++} ++ ++static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, ++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue) ++{ ++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1; ++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1); ++ UInt32 len0 = 0, len1 = 0; ++ for (;;) ++ { ++ UInt32 delta = pos - curMatch; ++ if (cutValue-- == 0 || delta >= _cyclicBufferSize) ++ { ++ *ptr0 = *ptr1 = kEmptyHashValue; ++ return; ++ } ++ { ++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1); ++ const Byte *pb = cur - delta; ++ UInt32 len = (len0 < len1 ? 
len0 : len1); ++ if (pb[len] == cur[len]) ++ { ++ while (++len != lenLimit) ++ if (pb[len] != cur[len]) ++ break; ++ { ++ if (len == lenLimit) ++ { ++ *ptr1 = pair[0]; ++ *ptr0 = pair[1]; ++ return; ++ } ++ } ++ } ++ if (pb[len] < cur[len]) ++ { ++ *ptr1 = curMatch; ++ ptr1 = pair + 1; ++ curMatch = *ptr1; ++ len1 = len; ++ } ++ else ++ { ++ *ptr0 = curMatch; ++ ptr0 = pair; ++ curMatch = *ptr0; ++ len0 = len; ++ } ++ } ++ } ++} ++ ++#define MOVE_POS \ ++ ++p->cyclicBufferPos; \ ++ p->buffer++; \ ++ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p); ++ ++#define MOVE_POS_RET MOVE_POS return offset; ++ ++static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; } ++ ++#define GET_MATCHES_HEADER2(minLen, ret_op) \ ++ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \ ++ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \ ++ cur = p->buffer; ++ ++#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0) ++#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue) ++ ++#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue ++ ++#define GET_MATCHES_FOOTER(offset, maxLen) \ ++ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \ ++ distances + offset, maxLen) - distances); MOVE_POS_RET; ++ ++#define SKIP_FOOTER \ ++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS; ++ ++static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 offset; ++ GET_MATCHES_HEADER(2) ++ HASH2_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ offset = 0; ++ GET_MATCHES_FOOTER(offset, 1) ++} ++ ++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 offset; ++ GET_MATCHES_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ offset = 0; ++ GET_MATCHES_FOOTER(offset, 2) ++} ++ ++static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 hash2Value, delta2, maxLen, offset; ++ GET_MATCHES_HEADER(3) ++ ++ HASH3_CALC; ++ ++ delta2 = p->pos - p->hash[hash2Value]; ++ curMatch = p->hash[kFix3HashSize + hashValue]; ++ ++ p->hash[hash2Value] = ++ p->hash[kFix3HashSize + hashValue] = p->pos; ++ ++ ++ maxLen = 2; ++ offset = 0; ++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) ++ { ++ for (; maxLen != lenLimit; maxLen++) ++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) ++ break; ++ distances[0] = maxLen; ++ distances[1] = delta2 - 1; ++ offset = 2; ++ if (maxLen == lenLimit) ++ { ++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); ++ MOVE_POS_RET; ++ } ++ } ++ GET_MATCHES_FOOTER(offset, maxLen) ++} ++ ++static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset; ++ GET_MATCHES_HEADER(4) ++ ++ HASH4_CALC; ++ ++ delta2 = p->pos - p->hash[ hash2Value]; ++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value]; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ ++ maxLen = 1; ++ offset = 0; ++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) ++ { ++ distances[0] = maxLen = 2; ++ distances[1] = delta2 - 1; ++ offset = 2; ++ } ++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur) ++ { ++ maxLen = 3; ++ distances[offset + 1] = delta3 - 1; ++ offset += 2; 
++ delta2 = delta3; ++ } ++ if (offset != 0) ++ { ++ for (; maxLen != lenLimit; maxLen++) ++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) ++ break; ++ distances[offset - 2] = maxLen; ++ if (maxLen == lenLimit) ++ { ++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); ++ MOVE_POS_RET; ++ } ++ } ++ if (maxLen < 3) ++ maxLen = 3; ++ GET_MATCHES_FOOTER(offset, maxLen) ++} ++ ++static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset; ++ GET_MATCHES_HEADER(4) ++ ++ HASH4_CALC; ++ ++ delta2 = p->pos - p->hash[ hash2Value]; ++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value]; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ ++ maxLen = 1; ++ offset = 0; ++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur) ++ { ++ distances[0] = maxLen = 2; ++ distances[1] = delta2 - 1; ++ offset = 2; ++ } ++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur) ++ { ++ maxLen = 3; ++ distances[offset + 1] = delta3 - 1; ++ offset += 2; ++ delta2 = delta3; ++ } ++ if (offset != 0) ++ { ++ for (; maxLen != lenLimit; maxLen++) ++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen]) ++ break; ++ distances[offset - 2] = maxLen; ++ if (maxLen == lenLimit) ++ { ++ p->son[p->cyclicBufferPos] = curMatch; ++ MOVE_POS_RET; ++ } ++ } ++ if (maxLen < 3) ++ maxLen = 3; ++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), ++ distances + offset, maxLen) - (distances)); ++ MOVE_POS_RET ++} ++ ++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++{ ++ UInt32 offset; ++ GET_MATCHES_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), ++ distances, 2) - (distances)); ++ MOVE_POS_RET ++} ++ ++static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ SKIP_HEADER(2) ++ HASH2_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ SKIP_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ UInt32 hash2Value; ++ SKIP_HEADER(3) ++ HASH3_CALC; ++ curMatch = p->hash[kFix3HashSize + hashValue]; ++ p->hash[hash2Value] = ++ p->hash[kFix3HashSize + hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ UInt32 hash2Value, hash3Value; ++ SKIP_HEADER(4) ++ HASH4_CALC; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = p->pos; ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ SKIP_FOOTER ++ } ++ while (--num != 0); ++} ++ ++static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ UInt32 hash2Value, hash3Value; ++ SKIP_HEADER(4) ++ HASH4_CALC; ++ curMatch = p->hash[kFix4HashSize + hashValue]; ++ p->hash[ hash2Value] = ++ p->hash[kFix3HashSize + hash3Value] = ++ p->hash[kFix4HashSize + hashValue] = p->pos; ++ p->son[p->cyclicBufferPos] = curMatch; ++ MOVE_POS ++ } ++ while (--num != 0); ++} ++ ++void 
Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++{ ++ do ++ { ++ SKIP_HEADER(3) ++ HASH_ZIP_CALC; ++ curMatch = p->hash[hashValue]; ++ p->hash[hashValue] = p->pos; ++ p->son[p->cyclicBufferPos] = curMatch; ++ MOVE_POS ++ } ++ while (--num != 0); ++} ++ ++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable) ++{ ++ vTable->Init = (Mf_Init_Func)MatchFinder_Init; ++ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte; ++ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes; ++ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos; ++ if (!p->btMode) ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip; ++ } ++ else if (p->numHashBytes == 2) ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip; ++ } ++ else if (p->numHashBytes == 3) ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip; ++ } ++ else ++ { ++ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches; ++ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip; ++ } ++} +--- /dev/null ++++ b/lib/lzma/LzmaDec.c +@@ -0,0 +1,999 @@ ++/* LzmaDec.c -- LZMA Decoder ++2009-09-20 : Igor Pavlov : Public domain */ ++ ++#include "LzmaDec.h" ++ ++#include <string.h> ++ ++#define kNumTopBits 24 ++#define kTopValue ((UInt32)1 << kNumTopBits) ++ ++#define kNumBitModelTotalBits 11 ++#define kBitModelTotal (1 << kNumBitModelTotalBits) ++#define kNumMoveBits 5 ++ ++#define RC_INIT_SIZE 5 ++ ++#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); } ++ ++#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound) ++#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits)); ++#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); ++#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \ ++ { UPDATE_0(p); i = (i + i); A0; } else \ ++ { UPDATE_1(p); i = (i + i) + 1; A1; } ++#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;) ++ ++#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); } ++#define TREE_DECODE(probs, limit, i) \ ++ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; } ++ ++/* #define _LZMA_SIZE_OPT */ ++ ++#ifdef _LZMA_SIZE_OPT ++#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i) ++#else ++#define TREE_6_DECODE(probs, i) \ ++ { i = 1; \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ TREE_GET_BIT(probs, i); \ ++ i -= 0x40; } ++#endif ++ ++#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); } ++ ++#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound) ++#define UPDATE_0_CHECK range = bound; ++#define UPDATE_1_CHECK range -= bound; code -= bound; ++#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \ ++ { UPDATE_0_CHECK; i = (i + i); A0; } else \ ++ { UPDATE_1_CHECK; i = (i + i) + 1; A1; } ++#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;) ++#define TREE_DECODE_CHECK(probs, limit, i) \ ++ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); 
i -= limit; } ++ ++ ++#define kNumPosBitsMax 4 ++#define kNumPosStatesMax (1 << kNumPosBitsMax) ++ ++#define kLenNumLowBits 3 ++#define kLenNumLowSymbols (1 << kLenNumLowBits) ++#define kLenNumMidBits 3 ++#define kLenNumMidSymbols (1 << kLenNumMidBits) ++#define kLenNumHighBits 8 ++#define kLenNumHighSymbols (1 << kLenNumHighBits) ++ ++#define LenChoice 0 ++#define LenChoice2 (LenChoice + 1) ++#define LenLow (LenChoice2 + 1) ++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits)) ++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits)) ++#define kNumLenProbs (LenHigh + kLenNumHighSymbols) ++ ++ ++#define kNumStates 12 ++#define kNumLitStates 7 ++ ++#define kStartPosModelIndex 4 ++#define kEndPosModelIndex 14 ++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) ++ ++#define kNumPosSlotBits 6 ++#define kNumLenToPosStates 4 ++ ++#define kNumAlignBits 4 ++#define kAlignTableSize (1 << kNumAlignBits) ++ ++#define kMatchMinLen 2 ++#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols) ++ ++#define IsMatch 0 ++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax)) ++#define IsRepG0 (IsRep + kNumStates) ++#define IsRepG1 (IsRepG0 + kNumStates) ++#define IsRepG2 (IsRepG1 + kNumStates) ++#define IsRep0Long (IsRepG2 + kNumStates) ++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax)) ++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits)) ++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex) ++#define LenCoder (Align + kAlignTableSize) ++#define RepLenCoder (LenCoder + kNumLenProbs) ++#define Literal (RepLenCoder + kNumLenProbs) ++ ++#define LZMA_BASE_SIZE 1846 ++#define LZMA_LIT_SIZE 768 ++ ++#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp))) ++ ++#if Literal != LZMA_BASE_SIZE ++StopCompilingDueBUG ++#endif ++ ++#define LZMA_DIC_MIN (1 << 12) ++ ++/* First LZMA-symbol is always decoded. ++And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization ++Out: ++ Result: ++ SZ_OK - OK ++ SZ_ERROR_DATA - Error ++ p->remainLen: ++ < kMatchSpecLenStart : normal remain ++ = kMatchSpecLenStart : finished ++ = kMatchSpecLenStart + 1 : Flush marker ++ = kMatchSpecLenStart + 2 : State Init Marker ++*/ ++ ++static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit) ++{ ++ CLzmaProb *probs = p->probs; ++ ++ unsigned state = p->state; ++ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3]; ++ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1; ++ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1; ++ unsigned lc = p->prop.lc; ++ ++ Byte *dic = p->dic; ++ SizeT dicBufSize = p->dicBufSize; ++ SizeT dicPos = p->dicPos; ++ ++ UInt32 processedPos = p->processedPos; ++ UInt32 checkDicSize = p->checkDicSize; ++ unsigned len = 0; ++ ++ const Byte *buf = p->buf; ++ UInt32 range = p->range; ++ UInt32 code = p->code; ++ ++ do ++ { ++ CLzmaProb *prob; ++ UInt32 bound; ++ unsigned ttt; ++ unsigned posState = processedPos & pbMask; ++ ++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0(prob) ++ { ++ unsigned symbol; ++ UPDATE_0(prob); ++ prob = probs + Literal; ++ if (checkDicSize != 0 || processedPos != 0) ++ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) + ++ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc)))); ++ ++ if (state < kNumLitStates) ++ { ++ state -= (state < 4) ? 
state : 3; ++ symbol = 1; ++ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100); ++ } ++ else ++ { ++ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)]; ++ unsigned offs = 0x100; ++ state -= (state < 10) ? 3 : 6; ++ symbol = 1; ++ do ++ { ++ unsigned bit; ++ CLzmaProb *probLit; ++ matchByte <<= 1; ++ bit = (matchByte & offs); ++ probLit = prob + offs + bit + symbol; ++ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit) ++ } ++ while (symbol < 0x100); ++ } ++ dic[dicPos++] = (Byte)symbol; ++ processedPos++; ++ continue; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ prob = probs + IsRep + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ state += kNumStates; ++ prob = probs + LenCoder; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ if (checkDicSize == 0 && processedPos == 0) ++ return SZ_ERROR_DATA; ++ prob = probs + IsRepG0 + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)]; ++ dicPos++; ++ processedPos++; ++ state = state < kNumLitStates ? 9 : 11; ++ continue; ++ } ++ UPDATE_1(prob); ++ } ++ else ++ { ++ UInt32 distance; ++ UPDATE_1(prob); ++ prob = probs + IsRepG1 + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ distance = rep1; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ prob = probs + IsRepG2 + state; ++ IF_BIT_0(prob) ++ { ++ UPDATE_0(prob); ++ distance = rep2; ++ } ++ else ++ { ++ UPDATE_1(prob); ++ distance = rep3; ++ rep3 = rep2; ++ } ++ rep2 = rep1; ++ } ++ rep1 = rep0; ++ rep0 = distance; ++ } ++ state = state < kNumLitStates ? 8 : 11; ++ prob = probs + RepLenCoder; ++ } ++ { ++ unsigned limit, offset; ++ CLzmaProb *probLen = prob + LenChoice; ++ IF_BIT_0(probLen) ++ { ++ UPDATE_0(probLen); ++ probLen = prob + LenLow + (posState << kLenNumLowBits); ++ offset = 0; ++ limit = (1 << kLenNumLowBits); ++ } ++ else ++ { ++ UPDATE_1(probLen); ++ probLen = prob + LenChoice2; ++ IF_BIT_0(probLen) ++ { ++ UPDATE_0(probLen); ++ probLen = prob + LenMid + (posState << kLenNumMidBits); ++ offset = kLenNumLowSymbols; ++ limit = (1 << kLenNumMidBits); ++ } ++ else ++ { ++ UPDATE_1(probLen); ++ probLen = prob + LenHigh; ++ offset = kLenNumLowSymbols + kLenNumMidSymbols; ++ limit = (1 << kLenNumHighBits); ++ } ++ } ++ TREE_DECODE(probLen, limit, len); ++ len += offset; ++ } ++ ++ if (state >= kNumStates) ++ { ++ UInt32 distance; ++ prob = probs + PosSlot + ++ ((len < kNumLenToPosStates ? 
len : kNumLenToPosStates - 1) << kNumPosSlotBits); ++ TREE_6_DECODE(prob, distance); ++ if (distance >= kStartPosModelIndex) ++ { ++ unsigned posSlot = (unsigned)distance; ++ int numDirectBits = (int)(((distance >> 1) - 1)); ++ distance = (2 | (distance & 1)); ++ if (posSlot < kEndPosModelIndex) ++ { ++ distance <<= numDirectBits; ++ prob = probs + SpecPos + distance - posSlot - 1; ++ { ++ UInt32 mask = 1; ++ unsigned i = 1; ++ do ++ { ++ GET_BIT2(prob + i, i, ; , distance |= mask); ++ mask <<= 1; ++ } ++ while (--numDirectBits != 0); ++ } ++ } ++ else ++ { ++ numDirectBits -= kNumAlignBits; ++ do ++ { ++ NORMALIZE ++ range >>= 1; ++ ++ { ++ UInt32 t; ++ code -= range; ++ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */ ++ distance = (distance << 1) + (t + 1); ++ code += range & t; ++ } ++ /* ++ distance <<= 1; ++ if (code >= range) ++ { ++ code -= range; ++ distance |= 1; ++ } ++ */ ++ } ++ while (--numDirectBits != 0); ++ prob = probs + Align; ++ distance <<= kNumAlignBits; ++ { ++ unsigned i = 1; ++ GET_BIT2(prob + i, i, ; , distance |= 1); ++ GET_BIT2(prob + i, i, ; , distance |= 2); ++ GET_BIT2(prob + i, i, ; , distance |= 4); ++ GET_BIT2(prob + i, i, ; , distance |= 8); ++ } ++ if (distance == (UInt32)0xFFFFFFFF) ++ { ++ len += kMatchSpecLenStart; ++ state -= kNumStates; ++ break; ++ } ++ } ++ } ++ rep3 = rep2; ++ rep2 = rep1; ++ rep1 = rep0; ++ rep0 = distance + 1; ++ if (checkDicSize == 0) ++ { ++ if (distance >= processedPos) ++ return SZ_ERROR_DATA; ++ } ++ else if (distance >= checkDicSize) ++ return SZ_ERROR_DATA; ++ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3; ++ } ++ ++ len += kMatchMinLen; ++ ++ if (limit == dicPos) ++ return SZ_ERROR_DATA; ++ { ++ SizeT rem = limit - dicPos; ++ unsigned curLen = ((rem < len) ? (unsigned)rem : len); ++ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0); ++ ++ processedPos += curLen; ++ ++ len -= curLen; ++ if (pos + curLen <= dicBufSize) ++ { ++ Byte *dest = dic + dicPos; ++ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos; ++ const Byte *lim = dest + curLen; ++ dicPos += curLen; ++ do ++ *(dest) = (Byte)*(dest + src); ++ while (++dest != lim); ++ } ++ else ++ { ++ do ++ { ++ dic[dicPos++] = dic[pos]; ++ if (++pos == dicBufSize) ++ pos = 0; ++ } ++ while (--curLen != 0); ++ } ++ } ++ } ++ } ++ while (dicPos < limit && buf < bufLimit); ++ NORMALIZE; ++ p->buf = buf; ++ p->range = range; ++ p->code = code; ++ p->remainLen = len; ++ p->dicPos = dicPos; ++ p->processedPos = processedPos; ++ p->reps[0] = rep0; ++ p->reps[1] = rep1; ++ p->reps[2] = rep2; ++ p->reps[3] = rep3; ++ p->state = state; ++ ++ return SZ_OK; ++} ++ ++static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit) ++{ ++ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart) ++ { ++ Byte *dic = p->dic; ++ SizeT dicPos = p->dicPos; ++ SizeT dicBufSize = p->dicBufSize; ++ unsigned len = p->remainLen; ++ UInt32 rep0 = p->reps[0]; ++ if (limit - dicPos < len) ++ len = (unsigned)(limit - dicPos); ++ ++ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len) ++ p->checkDicSize = p->prop.dicSize; ++ ++ p->processedPos += len; ++ p->remainLen -= len; ++ while (len-- != 0) ++ { ++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? 
dicBufSize : 0)]; ++ dicPos++; ++ } ++ p->dicPos = dicPos; ++ } ++} ++ ++static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit) ++{ ++ do ++ { ++ SizeT limit2 = limit; ++ if (p->checkDicSize == 0) ++ { ++ UInt32 rem = p->prop.dicSize - p->processedPos; ++ if (limit - p->dicPos > rem) ++ limit2 = p->dicPos + rem; ++ } ++ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit)); ++ if (p->processedPos >= p->prop.dicSize) ++ p->checkDicSize = p->prop.dicSize; ++ LzmaDec_WriteRem(p, limit); ++ } ++ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart); ++ ++ if (p->remainLen > kMatchSpecLenStart) ++ { ++ p->remainLen = kMatchSpecLenStart; ++ } ++ return 0; ++} ++ ++typedef enum ++{ ++ DUMMY_ERROR, /* unexpected end of input stream */ ++ DUMMY_LIT, ++ DUMMY_MATCH, ++ DUMMY_REP ++} ELzmaDummy; ++ ++static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize) ++{ ++ UInt32 range = p->range; ++ UInt32 code = p->code; ++ const Byte *bufLimit = buf + inSize; ++ CLzmaProb *probs = p->probs; ++ unsigned state = p->state; ++ ELzmaDummy res; ++ ++ { ++ CLzmaProb *prob; ++ UInt32 bound; ++ unsigned ttt; ++ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1); ++ ++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK ++ ++ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */ ++ ++ prob = probs + Literal; ++ if (p->checkDicSize != 0 || p->processedPos != 0) ++ prob += (LZMA_LIT_SIZE * ++ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) + ++ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc)))); ++ ++ if (state < kNumLitStates) ++ { ++ unsigned symbol = 1; ++ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100); ++ } ++ else ++ { ++ unsigned matchByte = p->dic[p->dicPos - p->reps[0] + ++ ((p->dicPos < p->reps[0]) ? 
p->dicBufSize : 0)]; ++ unsigned offs = 0x100; ++ unsigned symbol = 1; ++ do ++ { ++ unsigned bit; ++ CLzmaProb *probLit; ++ matchByte <<= 1; ++ bit = (matchByte & offs); ++ probLit = prob + offs + bit + symbol; ++ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit) ++ } ++ while (symbol < 0x100); ++ } ++ res = DUMMY_LIT; ++ } ++ else ++ { ++ unsigned len; ++ UPDATE_1_CHECK; ++ ++ prob = probs + IsRep + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ state = 0; ++ prob = probs + LenCoder; ++ res = DUMMY_MATCH; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ res = DUMMY_REP; ++ prob = probs + IsRepG0 + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ NORMALIZE_CHECK; ++ return DUMMY_REP; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ } ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ prob = probs + IsRepG1 + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ prob = probs + IsRepG2 + state; ++ IF_BIT_0_CHECK(prob) ++ { ++ UPDATE_0_CHECK; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ } ++ } ++ } ++ state = kNumStates; ++ prob = probs + RepLenCoder; ++ } ++ { ++ unsigned limit, offset; ++ CLzmaProb *probLen = prob + LenChoice; ++ IF_BIT_0_CHECK(probLen) ++ { ++ UPDATE_0_CHECK; ++ probLen = prob + LenLow + (posState << kLenNumLowBits); ++ offset = 0; ++ limit = 1 << kLenNumLowBits; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ probLen = prob + LenChoice2; ++ IF_BIT_0_CHECK(probLen) ++ { ++ UPDATE_0_CHECK; ++ probLen = prob + LenMid + (posState << kLenNumMidBits); ++ offset = kLenNumLowSymbols; ++ limit = 1 << kLenNumMidBits; ++ } ++ else ++ { ++ UPDATE_1_CHECK; ++ probLen = prob + LenHigh; ++ offset = kLenNumLowSymbols + kLenNumMidSymbols; ++ limit = 1 << kLenNumHighBits; ++ } ++ } ++ TREE_DECODE_CHECK(probLen, limit, len); ++ len += offset; ++ } ++ ++ if (state < 4) ++ { ++ unsigned posSlot; ++ prob = probs + PosSlot + ++ ((len < kNumLenToPosStates ? 
len : kNumLenToPosStates - 1) << ++ kNumPosSlotBits); ++ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot); ++ if (posSlot >= kStartPosModelIndex) ++ { ++ int numDirectBits = ((posSlot >> 1) - 1); ++ ++ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */ ++ ++ if (posSlot < kEndPosModelIndex) ++ { ++ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1; ++ } ++ else ++ { ++ numDirectBits -= kNumAlignBits; ++ do ++ { ++ NORMALIZE_CHECK ++ range >>= 1; ++ code -= range & (((code - range) >> 31) - 1); ++ /* if (code >= range) code -= range; */ ++ } ++ while (--numDirectBits != 0); ++ prob = probs + Align; ++ numDirectBits = kNumAlignBits; ++ } ++ { ++ unsigned i = 1; ++ do ++ { ++ GET_BIT_CHECK(prob + i, i); ++ } ++ while (--numDirectBits != 0); ++ } ++ } ++ } ++ } ++ } ++ NORMALIZE_CHECK; ++ return res; ++} ++ ++ ++static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data) ++{ ++ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]); ++ p->range = 0xFFFFFFFF; ++ p->needFlush = 0; ++} ++ ++void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState) ++{ ++ p->needFlush = 1; ++ p->remainLen = 0; ++ p->tempBufSize = 0; ++ ++ if (initDic) ++ { ++ p->processedPos = 0; ++ p->checkDicSize = 0; ++ p->needInitState = 1; ++ } ++ if (initState) ++ p->needInitState = 1; ++} ++ ++void LzmaDec_Init(CLzmaDec *p) ++{ ++ p->dicPos = 0; ++ LzmaDec_InitDicAndState(p, True, True); ++} ++ ++static void LzmaDec_InitStateReal(CLzmaDec *p) ++{ ++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp)); ++ UInt32 i; ++ CLzmaProb *probs = p->probs; ++ for (i = 0; i < numProbs; i++) ++ probs[i] = kBitModelTotal >> 1; ++ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1; ++ p->state = 0; ++ p->needInitState = 0; ++} ++ ++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen, ++ ELzmaFinishMode finishMode, ELzmaStatus *status) ++{ ++ SizeT inSize = *srcLen; ++ (*srcLen) = 0; ++ LzmaDec_WriteRem(p, dicLimit); ++ ++ *status = LZMA_STATUS_NOT_SPECIFIED; ++ ++ while (p->remainLen != kMatchSpecLenStart) ++ { ++ int checkEndMarkNow; ++ ++ if (p->needFlush != 0) ++ { ++ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--) ++ p->tempBuf[p->tempBufSize++] = *src++; ++ if (p->tempBufSize < RC_INIT_SIZE) ++ { ++ *status = LZMA_STATUS_NEEDS_MORE_INPUT; ++ return SZ_OK; ++ } ++ if (p->tempBuf[0] != 0) ++ return SZ_ERROR_DATA; ++ ++ LzmaDec_InitRc(p, p->tempBuf); ++ p->tempBufSize = 0; ++ } ++ ++ checkEndMarkNow = 0; ++ if (p->dicPos >= dicLimit) ++ { ++ if (p->remainLen == 0 && p->code == 0) ++ { ++ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK; ++ return SZ_OK; ++ } ++ if (finishMode == LZMA_FINISH_ANY) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_OK; ++ } ++ if (p->remainLen != 0) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_ERROR_DATA; ++ } ++ checkEndMarkNow = 1; ++ } ++ ++ if (p->needInitState) ++ LzmaDec_InitStateReal(p); ++ ++ if (p->tempBufSize == 0) ++ { ++ SizeT processed; ++ const Byte *bufLimit; ++ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) ++ { ++ int dummyRes = LzmaDec_TryDummy(p, src, inSize); ++ if (dummyRes == DUMMY_ERROR) ++ { ++ memcpy(p->tempBuf, src, inSize); ++ p->tempBufSize = (unsigned)inSize; ++ (*srcLen) += inSize; ++ *status = LZMA_STATUS_NEEDS_MORE_INPUT; ++ return SZ_OK; ++ } ++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_ERROR_DATA; 
++ } ++ bufLimit = src; ++ } ++ else ++ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX; ++ p->buf = src; ++ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0) ++ return SZ_ERROR_DATA; ++ processed = (SizeT)(p->buf - src); ++ (*srcLen) += processed; ++ src += processed; ++ inSize -= processed; ++ } ++ else ++ { ++ unsigned rem = p->tempBufSize, lookAhead = 0; ++ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize) ++ p->tempBuf[rem++] = src[lookAhead++]; ++ p->tempBufSize = rem; ++ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) ++ { ++ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem); ++ if (dummyRes == DUMMY_ERROR) ++ { ++ (*srcLen) += lookAhead; ++ *status = LZMA_STATUS_NEEDS_MORE_INPUT; ++ return SZ_OK; ++ } ++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH) ++ { ++ *status = LZMA_STATUS_NOT_FINISHED; ++ return SZ_ERROR_DATA; ++ } ++ } ++ p->buf = p->tempBuf; ++ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0) ++ return SZ_ERROR_DATA; ++ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf)); ++ (*srcLen) += lookAhead; ++ src += lookAhead; ++ inSize -= lookAhead; ++ p->tempBufSize = 0; ++ } ++ } ++ if (p->code == 0) ++ *status = LZMA_STATUS_FINISHED_WITH_MARK; ++ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA; ++} ++ ++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) ++{ ++ SizeT outSize = *destLen; ++ SizeT inSize = *srcLen; ++ *srcLen = *destLen = 0; ++ for (;;) ++ { ++ SizeT inSizeCur = inSize, outSizeCur, dicPos; ++ ELzmaFinishMode curFinishMode; ++ SRes res; ++ if (p->dicPos == p->dicBufSize) ++ p->dicPos = 0; ++ dicPos = p->dicPos; ++ if (outSize > p->dicBufSize - dicPos) ++ { ++ outSizeCur = p->dicBufSize; ++ curFinishMode = LZMA_FINISH_ANY; ++ } ++ else ++ { ++ outSizeCur = dicPos + outSize; ++ curFinishMode = finishMode; ++ } ++ ++ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status); ++ src += inSizeCur; ++ inSize -= inSizeCur; ++ *srcLen += inSizeCur; ++ outSizeCur = p->dicPos - dicPos; ++ memcpy(dest, p->dic + dicPos, outSizeCur); ++ dest += outSizeCur; ++ outSize -= outSizeCur; ++ *destLen += outSizeCur; ++ if (res != 0) ++ return res; ++ if (outSizeCur == 0 || outSize == 0) ++ return SZ_OK; ++ } ++} ++ ++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->probs); ++ p->probs = 0; ++} ++ ++static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->dic); ++ p->dic = 0; ++} ++ ++void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc) ++{ ++ LzmaDec_FreeProbs(p, alloc); ++ LzmaDec_FreeDict(p, alloc); ++} ++ ++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size) ++{ ++ UInt32 dicSize; ++ Byte d; ++ ++ if (size < LZMA_PROPS_SIZE) ++ return SZ_ERROR_UNSUPPORTED; ++ else ++ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24); ++ ++ if (dicSize < LZMA_DIC_MIN) ++ dicSize = LZMA_DIC_MIN; ++ p->dicSize = dicSize; ++ ++ d = data[0]; ++ if (d >= (9 * 5 * 5)) ++ return SZ_ERROR_UNSUPPORTED; ++ ++ p->lc = d % 9; ++ d /= 9; ++ p->pb = d / 5; ++ p->lp = d % 5; ++ ++ return SZ_OK; ++} ++ ++static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc) ++{ ++ UInt32 numProbs = LzmaProps_GetNumProbs(propNew); ++ if (p->probs == 0 || numProbs != p->numProbs) ++ { ++ LzmaDec_FreeProbs(p, alloc); ++ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb)); ++ p->numProbs = numProbs; ++ if (p->probs 
== 0) ++ return SZ_ERROR_MEM; ++ } ++ return SZ_OK; ++} ++ ++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) ++{ ++ CLzmaProps propNew; ++ RINOK(LzmaProps_Decode(&propNew, props, propsSize)); ++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); ++ p->prop = propNew; ++ return SZ_OK; ++} ++ ++SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) ++{ ++ CLzmaProps propNew; ++ SizeT dicBufSize; ++ RINOK(LzmaProps_Decode(&propNew, props, propsSize)); ++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); ++ dicBufSize = propNew.dicSize; ++ if (p->dic == 0 || dicBufSize != p->dicBufSize) ++ { ++ LzmaDec_FreeDict(p, alloc); ++ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize); ++ if (p->dic == 0) ++ { ++ LzmaDec_FreeProbs(p, alloc); ++ return SZ_ERROR_MEM; ++ } ++ } ++ p->dicBufSize = dicBufSize; ++ p->prop = propNew; ++ return SZ_OK; ++} ++ ++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, ++ ELzmaStatus *status, ISzAlloc *alloc) ++{ ++ CLzmaDec p; ++ SRes res; ++ SizeT inSize = *srcLen; ++ SizeT outSize = *destLen; ++ *srcLen = *destLen = 0; ++ if (inSize < RC_INIT_SIZE) ++ return SZ_ERROR_INPUT_EOF; ++ ++ LzmaDec_Construct(&p); ++ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc); ++ if (res != 0) ++ return res; ++ p.dic = dest; ++ p.dicBufSize = outSize; ++ ++ LzmaDec_Init(&p); ++ ++ *srcLen = inSize; ++ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status); ++ ++ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT) ++ res = SZ_ERROR_INPUT_EOF; ++ ++ (*destLen) = p.dicPos; ++ LzmaDec_FreeProbs(&p, alloc); ++ return res; ++} +--- /dev/null ++++ b/lib/lzma/LzmaEnc.c +@@ -0,0 +1,2271 @@ ++/* LzmaEnc.c -- LZMA Encoder ++2009-11-24 : Igor Pavlov : Public domain */ ++ ++#include <string.h> ++ ++/* #define SHOW_STAT */ ++/* #define SHOW_STAT2 */ ++ ++#if defined(SHOW_STAT) || defined(SHOW_STAT2) ++#include <stdio.h> ++#endif ++ ++#include "LzmaEnc.h" ++ ++/* disable MT */ ++#define _7ZIP_ST ++ ++#include "LzFind.h" ++#ifndef _7ZIP_ST ++#include "LzFindMt.h" ++#endif ++ ++#ifdef SHOW_STAT ++static int ttt = 0; ++#endif ++ ++#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1) ++ ++#define kBlockSize (9 << 10) ++#define kUnpackBlockSize (1 << 18) ++#define kMatchArraySize (1 << 21) ++#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX) ++ ++#define kNumMaxDirectBits (31) ++ ++#define kNumTopBits 24 ++#define kTopValue ((UInt32)1 << kNumTopBits) ++ ++#define kNumBitModelTotalBits 11 ++#define kBitModelTotal (1 << kNumBitModelTotalBits) ++#define kNumMoveBits 5 ++#define kProbInitValue (kBitModelTotal >> 1) ++ ++#define kNumMoveReducingBits 4 ++#define kNumBitPriceShiftBits 4 ++#define kBitPrice (1 << kNumBitPriceShiftBits) ++ ++void LzmaEncProps_Init(CLzmaEncProps *p) ++{ ++ p->level = 5; ++ p->dictSize = p->mc = 0; ++ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1; ++ p->writeEndMark = 0; ++} ++ ++void LzmaEncProps_Normalize(CLzmaEncProps *p) ++{ ++ int level = p->level; ++ if (level < 0) level = 5; ++ p->level = level; ++ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26))); ++ if (p->lc < 0) p->lc = 3; ++ if (p->lp < 0) p->lp = 0; ++ if (p->pb < 0) p->pb = 2; ++ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1); ++ if (p->fb < 0) p->fb = (level < 7 ? 
32 : 64); ++ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1); ++ if (p->numHashBytes < 0) p->numHashBytes = 4; ++ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1); ++ if (p->numThreads < 0) ++ p->numThreads = ++ #ifndef _7ZIP_ST ++ ((p->btMode && p->algo) ? 2 : 1); ++ #else ++ 1; ++ #endif ++} ++ ++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2) ++{ ++ CLzmaEncProps props = *props2; ++ LzmaEncProps_Normalize(&props); ++ return props.dictSize; ++} ++ ++/* #define LZMA_LOG_BSR */ ++/* Define it for Intel's CPU */ ++ ++ ++#ifdef LZMA_LOG_BSR ++ ++#define kDicLogSizeMaxCompress 30 ++ ++#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); } ++ ++UInt32 GetPosSlot1(UInt32 pos) ++{ ++ UInt32 res; ++ BSR2_RET(pos, res); ++ return res; ++} ++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } ++#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); } ++ ++#else ++ ++#define kNumLogBits (9 + (int)sizeof(size_t) / 2) ++#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7) ++ ++void LzmaEnc_FastPosInit(Byte *g_FastPos) ++{ ++ int c = 2, slotFast; ++ g_FastPos[0] = 0; ++ g_FastPos[1] = 1; ++ ++ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++) ++ { ++ UInt32 k = (1 << ((slotFast >> 1) - 1)); ++ UInt32 j; ++ for (j = 0; j < k; j++, c++) ++ g_FastPos[c] = (Byte)slotFast; ++ } ++} ++ ++#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \ ++ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \ ++ res = p->g_FastPos[pos >> i] + (i * 2); } ++/* ++#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \ ++ p->g_FastPos[pos >> 6] + 12 : \ ++ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; } ++*/ ++ ++#define GetPosSlot1(pos) p->g_FastPos[pos] ++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } ++#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); } ++ ++#endif ++ ++ ++#define LZMA_NUM_REPS 4 ++ ++typedef unsigned CState; ++ ++typedef struct ++{ ++ UInt32 price; ++ ++ CState state; ++ int prev1IsChar; ++ int prev2; ++ ++ UInt32 posPrev2; ++ UInt32 backPrev2; ++ ++ UInt32 posPrev; ++ UInt32 backPrev; ++ UInt32 backs[LZMA_NUM_REPS]; ++} COptimal; ++ ++#define kNumOpts (1 << 12) ++ ++#define kNumLenToPosStates 4 ++#define kNumPosSlotBits 6 ++#define kDicLogSizeMin 0 ++#define kDicLogSizeMax 32 ++#define kDistTableSizeMax (kDicLogSizeMax * 2) ++ ++ ++#define kNumAlignBits 4 ++#define kAlignTableSize (1 << kNumAlignBits) ++#define kAlignMask (kAlignTableSize - 1) ++ ++#define kStartPosModelIndex 4 ++#define kEndPosModelIndex 14 ++#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex) ++ ++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) ++ ++#ifdef _LZMA_PROB32 ++#define CLzmaProb UInt32 ++#else ++#define CLzmaProb UInt16 ++#endif ++ ++#define LZMA_PB_MAX 4 ++#define LZMA_LC_MAX 8 ++#define LZMA_LP_MAX 4 ++ ++#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX) ++ ++ ++#define kLenNumLowBits 3 ++#define kLenNumLowSymbols (1 << kLenNumLowBits) ++#define kLenNumMidBits 3 ++#define kLenNumMidSymbols (1 << kLenNumMidBits) ++#define kLenNumHighBits 8 ++#define kLenNumHighSymbols (1 << kLenNumHighBits) ++ ++#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols) ++ ++#define LZMA_MATCH_LEN_MIN 2 ++#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1) ++ ++#define kNumStates 12 ++ ++typedef 
struct ++{ ++ CLzmaProb choice; ++ CLzmaProb choice2; ++ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits]; ++ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits]; ++ CLzmaProb high[kLenNumHighSymbols]; ++} CLenEnc; ++ ++typedef struct ++{ ++ CLenEnc p; ++ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal]; ++ UInt32 tableSize; ++ UInt32 counters[LZMA_NUM_PB_STATES_MAX]; ++} CLenPriceEnc; ++ ++typedef struct ++{ ++ UInt32 range; ++ Byte cache; ++ UInt64 low; ++ UInt64 cacheSize; ++ Byte *buf; ++ Byte *bufLim; ++ Byte *bufBase; ++ ISeqOutStream *outStream; ++ UInt64 processed; ++ SRes res; ++} CRangeEnc; ++ ++typedef struct ++{ ++ CLzmaProb *litProbs; ++ ++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ CLzmaProb isRep[kNumStates]; ++ CLzmaProb isRepG0[kNumStates]; ++ CLzmaProb isRepG1[kNumStates]; ++ CLzmaProb isRepG2[kNumStates]; ++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ ++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; ++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex]; ++ CLzmaProb posAlignEncoder[1 << kNumAlignBits]; ++ ++ CLenPriceEnc lenEnc; ++ CLenPriceEnc repLenEnc; ++ ++ UInt32 reps[LZMA_NUM_REPS]; ++ UInt32 state; ++} CSaveState; ++ ++typedef struct ++{ ++ IMatchFinder matchFinder; ++ void *matchFinderObj; ++ ++ #ifndef _7ZIP_ST ++ Bool mtMode; ++ CMatchFinderMt matchFinderMt; ++ #endif ++ ++ CMatchFinder matchFinderBase; ++ ++ #ifndef _7ZIP_ST ++ Byte pad[128]; ++ #endif ++ ++ UInt32 optimumEndIndex; ++ UInt32 optimumCurrentIndex; ++ ++ UInt32 longestMatchLength; ++ UInt32 numPairs; ++ UInt32 numAvail; ++ COptimal opt[kNumOpts]; ++ ++ #ifndef LZMA_LOG_BSR ++ Byte g_FastPos[1 << kNumLogBits]; ++ #endif ++ ++ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits]; ++ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1]; ++ UInt32 numFastBytes; ++ UInt32 additionalOffset; ++ UInt32 reps[LZMA_NUM_REPS]; ++ UInt32 state; ++ ++ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax]; ++ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances]; ++ UInt32 alignPrices[kAlignTableSize]; ++ UInt32 alignPriceCount; ++ ++ UInt32 distTableSize; ++ ++ unsigned lc, lp, pb; ++ unsigned lpMask, pbMask; ++ ++ CLzmaProb *litProbs; ++ ++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ CLzmaProb isRep[kNumStates]; ++ CLzmaProb isRepG0[kNumStates]; ++ CLzmaProb isRepG1[kNumStates]; ++ CLzmaProb isRepG2[kNumStates]; ++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; ++ ++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; ++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex]; ++ CLzmaProb posAlignEncoder[1 << kNumAlignBits]; ++ ++ CLenPriceEnc lenEnc; ++ CLenPriceEnc repLenEnc; ++ ++ unsigned lclp; ++ ++ Bool fastMode; ++ ++ CRangeEnc rc; ++ ++ Bool writeEndMark; ++ UInt64 nowPos64; ++ UInt32 matchPriceCount; ++ Bool finished; ++ Bool multiThread; ++ ++ SRes result; ++ UInt32 dictSize; ++ UInt32 matchFinderCycles; ++ ++ int needInit; ++ ++ CSaveState saveState; ++} CLzmaEnc; ++ ++void LzmaEnc_SaveState(CLzmaEncHandle pp) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ CSaveState *dest = &p->saveState; ++ int i; ++ dest->lenEnc = p->lenEnc; ++ dest->repLenEnc = p->repLenEnc; ++ dest->state = p->state; ++ ++ for (i = 0; i < kNumStates; i++) ++ { ++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i])); ++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i])); ++ } ++ for (i = 0; i < kNumLenToPosStates; i++) ++ memcpy(dest->posSlotEncoder[i], 
p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i])); ++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep)); ++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0)); ++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1)); ++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2)); ++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders)); ++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder)); ++ memcpy(dest->reps, p->reps, sizeof(p->reps)); ++ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb)); ++} ++ ++void LzmaEnc_RestoreState(CLzmaEncHandle pp) ++{ ++ CLzmaEnc *dest = (CLzmaEnc *)pp; ++ const CSaveState *p = &dest->saveState; ++ int i; ++ dest->lenEnc = p->lenEnc; ++ dest->repLenEnc = p->repLenEnc; ++ dest->state = p->state; ++ ++ for (i = 0; i < kNumStates; i++) ++ { ++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i])); ++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i])); ++ } ++ for (i = 0; i < kNumLenToPosStates; i++) ++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i])); ++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep)); ++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0)); ++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1)); ++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2)); ++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders)); ++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder)); ++ memcpy(dest->reps, p->reps, sizeof(p->reps)); ++ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb)); ++} ++ ++SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ CLzmaEncProps props = *props2; ++ LzmaEncProps_Normalize(&props); ++ ++ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX || ++ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30)) ++ return SZ_ERROR_PARAM; ++ p->dictSize = props.dictSize; ++ p->matchFinderCycles = props.mc; ++ { ++ unsigned fb = props.fb; ++ if (fb < 5) ++ fb = 5; ++ if (fb > LZMA_MATCH_LEN_MAX) ++ fb = LZMA_MATCH_LEN_MAX; ++ p->numFastBytes = fb; ++ } ++ p->lc = props.lc; ++ p->lp = props.lp; ++ p->pb = props.pb; ++ p->fastMode = (props.algo == 0); ++ p->matchFinderBase.btMode = props.btMode; ++ { ++ UInt32 numHashBytes = 4; ++ if (props.btMode) ++ { ++ if (props.numHashBytes < 2) ++ numHashBytes = 2; ++ else if (props.numHashBytes < 4) ++ numHashBytes = props.numHashBytes; ++ } ++ p->matchFinderBase.numHashBytes = numHashBytes; ++ } ++ ++ p->matchFinderBase.cutValue = props.mc; ++ ++ p->writeEndMark = props.writeEndMark; ++ ++ #ifndef _7ZIP_ST ++ /* ++ if (newMultiThread != _multiThread) ++ { ++ ReleaseMatchFinder(); ++ _multiThread = newMultiThread; ++ } ++ */ ++ p->multiThread = (props.numThreads > 1); ++ #endif ++ ++ return SZ_OK; ++} ++ ++static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5}; ++static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10}; ++static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11}; ++static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11}; ++ ++#define IsCharState(s) ((s) < 7) ++ ++#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? 
(len) - 2 : kNumLenToPosStates - 1) ++ ++#define kInfinityPrice (1 << 30) ++ ++static void RangeEnc_Construct(CRangeEnc *p) ++{ ++ p->outStream = 0; ++ p->bufBase = 0; ++} ++ ++#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize) ++ ++#define RC_BUF_SIZE (1 << 16) ++static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc) ++{ ++ if (p->bufBase == 0) ++ { ++ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE); ++ if (p->bufBase == 0) ++ return 0; ++ p->bufLim = p->bufBase + RC_BUF_SIZE; ++ } ++ return 1; ++} ++ ++static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc) ++{ ++ alloc->Free(alloc, p->bufBase); ++ p->bufBase = 0; ++} ++ ++static void RangeEnc_Init(CRangeEnc *p) ++{ ++ /* Stream.Init(); */ ++ p->low = 0; ++ p->range = 0xFFFFFFFF; ++ p->cacheSize = 1; ++ p->cache = 0; ++ ++ p->buf = p->bufBase; ++ ++ p->processed = 0; ++ p->res = SZ_OK; ++} ++ ++static void RangeEnc_FlushStream(CRangeEnc *p) ++{ ++ size_t num; ++ if (p->res != SZ_OK) ++ return; ++ num = p->buf - p->bufBase; ++ if (num != p->outStream->Write(p->outStream, p->bufBase, num)) ++ p->res = SZ_ERROR_WRITE; ++ p->processed += num; ++ p->buf = p->bufBase; ++} ++ ++static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p) ++{ ++ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0) ++ { ++ Byte temp = p->cache; ++ do ++ { ++ Byte *buf = p->buf; ++ *buf++ = (Byte)(temp + (Byte)(p->low >> 32)); ++ p->buf = buf; ++ if (buf == p->bufLim) ++ RangeEnc_FlushStream(p); ++ temp = 0xFF; ++ } ++ while (--p->cacheSize != 0); ++ p->cache = (Byte)((UInt32)p->low >> 24); ++ } ++ p->cacheSize++; ++ p->low = (UInt32)p->low << 8; ++} ++ ++static void RangeEnc_FlushData(CRangeEnc *p) ++{ ++ int i; ++ for (i = 0; i < 5; i++) ++ RangeEnc_ShiftLow(p); ++} ++ ++static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits) ++{ ++ do ++ { ++ p->range >>= 1; ++ p->low += p->range & (0 - ((value >> --numBits) & 1)); ++ if (p->range < kTopValue) ++ { ++ p->range <<= 8; ++ RangeEnc_ShiftLow(p); ++ } ++ } ++ while (numBits != 0); ++} ++ ++static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol) ++{ ++ UInt32 ttt = *prob; ++ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt; ++ if (symbol == 0) ++ { ++ p->range = newBound; ++ ttt += (kBitModelTotal - ttt) >> kNumMoveBits; ++ } ++ else ++ { ++ p->low += newBound; ++ p->range -= newBound; ++ ttt -= ttt >> kNumMoveBits; ++ } ++ *prob = (CLzmaProb)ttt; ++ if (p->range < kTopValue) ++ { ++ p->range <<= 8; ++ RangeEnc_ShiftLow(p); ++ } ++} ++ ++static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol) ++{ ++ symbol |= 0x100; ++ do ++ { ++ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1); ++ symbol <<= 1; ++ } ++ while (symbol < 0x10000); ++} ++ ++static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte) ++{ ++ UInt32 offs = 0x100; ++ symbol |= 0x100; ++ do ++ { ++ matchByte <<= 1; ++ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1); ++ symbol <<= 1; ++ offs &= ~(matchByte ^ symbol); ++ } ++ while (symbol < 0x10000); ++} ++ ++void LzmaEnc_InitPriceTables(UInt32 *ProbPrices) ++{ ++ UInt32 i; ++ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits)) ++ { ++ const int kCyclesBits = kNumBitPriceShiftBits; ++ UInt32 w = i; ++ UInt32 bitCount = 0; ++ int j; ++ for (j = 0; j < kCyclesBits; j++) ++ { ++ w = w * w; ++ bitCount <<= 1; ++ while (w >= ((UInt32)1 << 
16)) ++ { ++ w >>= 1; ++ bitCount++; ++ } ++ } ++ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount); ++ } ++} ++ ++ ++#define GET_PRICE(prob, symbol) \ ++ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]; ++ ++#define GET_PRICEa(prob, symbol) \ ++ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]; ++ ++#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits] ++#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] ++ ++#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits] ++#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] ++ ++static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ symbol |= 0x100; ++ do ++ { ++ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1); ++ symbol <<= 1; ++ } ++ while (symbol < 0x10000); ++ return price; ++} ++ ++static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ UInt32 offs = 0x100; ++ symbol |= 0x100; ++ do ++ { ++ matchByte <<= 1; ++ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1); ++ symbol <<= 1; ++ offs &= ~(matchByte ^ symbol); ++ } ++ while (symbol < 0x10000); ++ return price; ++} ++ ++ ++static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol) ++{ ++ UInt32 m = 1; ++ int i; ++ for (i = numBitLevels; i != 0;) ++ { ++ UInt32 bit; ++ i--; ++ bit = (symbol >> i) & 1; ++ RangeEnc_EncodeBit(rc, probs + m, bit); ++ m = (m << 1) | bit; ++ } ++} ++ ++static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol) ++{ ++ UInt32 m = 1; ++ int i; ++ for (i = 0; i < numBitLevels; i++) ++ { ++ UInt32 bit = symbol & 1; ++ RangeEnc_EncodeBit(rc, probs + m, bit); ++ m = (m << 1) | bit; ++ symbol >>= 1; ++ } ++} ++ ++static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ symbol |= (1 << numBitLevels); ++ while (symbol != 1) ++ { ++ price += GET_PRICEa(probs[symbol >> 1], symbol & 1); ++ symbol >>= 1; ++ } ++ return price; ++} ++ ++static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices) ++{ ++ UInt32 price = 0; ++ UInt32 m = 1; ++ int i; ++ for (i = numBitLevels; i != 0; i--) ++ { ++ UInt32 bit = symbol & 1; ++ symbol >>= 1; ++ price += GET_PRICEa(probs[m], bit); ++ m = (m << 1) | bit; ++ } ++ return price; ++} ++ ++ ++static void LenEnc_Init(CLenEnc *p) ++{ ++ unsigned i; ++ p->choice = p->choice2 = kProbInitValue; ++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++) ++ p->low[i] = kProbInitValue; ++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++) ++ p->mid[i] = kProbInitValue; ++ for (i = 0; i < kLenNumHighSymbols; i++) ++ p->high[i] = kProbInitValue; ++} ++ ++static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState) ++{ ++ if (symbol < kLenNumLowSymbols) ++ { ++ RangeEnc_EncodeBit(rc, &p->choice, 0); ++ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol); ++ } ++ else ++ { ++ RangeEnc_EncodeBit(rc, &p->choice, 1); ++ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols) ++ { ++ RangeEnc_EncodeBit(rc, &p->choice2, 0); ++ RcTree_Encode(rc, 
p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols); ++ } ++ else ++ { ++ RangeEnc_EncodeBit(rc, &p->choice2, 1); ++ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols); ++ } ++ } ++} ++ ++static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices) ++{ ++ UInt32 a0 = GET_PRICE_0a(p->choice); ++ UInt32 a1 = GET_PRICE_1a(p->choice); ++ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2); ++ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2); ++ UInt32 i = 0; ++ for (i = 0; i < kLenNumLowSymbols; i++) ++ { ++ if (i >= numSymbols) ++ return; ++ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices); ++ } ++ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++) ++ { ++ if (i >= numSymbols) ++ return; ++ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices); ++ } ++ for (; i < numSymbols; i++) ++ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices); ++} ++ ++static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices) ++{ ++ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices); ++ p->counters[posState] = p->tableSize; ++} ++ ++static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices) ++{ ++ UInt32 posState; ++ for (posState = 0; posState < numPosStates; posState++) ++ LenPriceEnc_UpdateTable(p, posState, ProbPrices); ++} ++ ++static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices) ++{ ++ LenEnc_Encode(&p->p, rc, symbol, posState); ++ if (updatePrice) ++ if (--p->counters[posState] == 0) ++ LenPriceEnc_UpdateTable(p, posState, ProbPrices); ++} ++ ++ ++ ++ ++static void MovePos(CLzmaEnc *p, UInt32 num) ++{ ++ #ifdef SHOW_STAT ++ ttt += num; ++ printf("\n MovePos %d", num); ++ #endif ++ if (num != 0) ++ { ++ p->additionalOffset += num; ++ p->matchFinder.Skip(p->matchFinderObj, num); ++ } ++} ++ ++static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes) ++{ ++ UInt32 lenRes = 0, numPairs; ++ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); ++ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches); ++ #ifdef SHOW_STAT ++ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2); ++ ttt++; ++ { ++ UInt32 i; ++ for (i = 0; i < numPairs; i += 2) ++ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]); ++ } ++ #endif ++ if (numPairs > 0) ++ { ++ lenRes = p->matches[numPairs - 2]; ++ if (lenRes == p->numFastBytes) ++ { ++ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ UInt32 distance = p->matches[numPairs - 1] + 1; ++ UInt32 numAvail = p->numAvail; ++ if (numAvail > LZMA_MATCH_LEN_MAX) ++ numAvail = LZMA_MATCH_LEN_MAX; ++ { ++ const Byte *pby2 = pby - distance; ++ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++); ++ } ++ } ++ } ++ p->additionalOffset++; ++ *numDistancePairsRes = numPairs; ++ return lenRes; ++} ++ ++ ++#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False; ++#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False; ++#define IsShortRep(p) ((p)->backPrev == 0) ++ ++static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState) ++{ ++ return ++ GET_PRICE_0(p->isRepG0[state]) + ++ 
GET_PRICE_0(p->isRep0Long[state][posState]); ++} ++ ++static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState) ++{ ++ UInt32 price; ++ if (repIndex == 0) ++ { ++ price = GET_PRICE_0(p->isRepG0[state]); ++ price += GET_PRICE_1(p->isRep0Long[state][posState]); ++ } ++ else ++ { ++ price = GET_PRICE_1(p->isRepG0[state]); ++ if (repIndex == 1) ++ price += GET_PRICE_0(p->isRepG1[state]); ++ else ++ { ++ price += GET_PRICE_1(p->isRepG1[state]); ++ price += GET_PRICE(p->isRepG2[state], repIndex - 2); ++ } ++ } ++ return price; ++} ++ ++static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState) ++{ ++ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] + ++ GetPureRepPrice(p, repIndex, state, posState); ++} ++ ++static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur) ++{ ++ UInt32 posMem = p->opt[cur].posPrev; ++ UInt32 backMem = p->opt[cur].backPrev; ++ p->optimumEndIndex = cur; ++ do ++ { ++ if (p->opt[cur].prev1IsChar) ++ { ++ MakeAsChar(&p->opt[posMem]) ++ p->opt[posMem].posPrev = posMem - 1; ++ if (p->opt[cur].prev2) ++ { ++ p->opt[posMem - 1].prev1IsChar = False; ++ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2; ++ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2; ++ } ++ } ++ { ++ UInt32 posPrev = posMem; ++ UInt32 backCur = backMem; ++ ++ backMem = p->opt[posPrev].backPrev; ++ posMem = p->opt[posPrev].posPrev; ++ ++ p->opt[posPrev].backPrev = backCur; ++ p->opt[posPrev].posPrev = cur; ++ cur = posPrev; ++ } ++ } ++ while (cur != 0); ++ *backRes = p->opt[0].backPrev; ++ p->optimumCurrentIndex = p->opt[0].posPrev; ++ return p->optimumCurrentIndex; ++} ++ ++#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300) ++ ++static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes) ++{ ++ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur; ++ UInt32 matchPrice, repMatchPrice, normalMatchPrice; ++ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS]; ++ UInt32 *matches; ++ const Byte *data; ++ Byte curByte, matchByte; ++ if (p->optimumEndIndex != p->optimumCurrentIndex) ++ { ++ const COptimal *opt = &p->opt[p->optimumCurrentIndex]; ++ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex; ++ *backRes = opt->backPrev; ++ p->optimumCurrentIndex = opt->posPrev; ++ return lenRes; ++ } ++ p->optimumCurrentIndex = p->optimumEndIndex = 0; ++ ++ if (p->additionalOffset == 0) ++ mainLen = ReadMatchDistances(p, &numPairs); ++ else ++ { ++ mainLen = p->longestMatchLength; ++ numPairs = p->numPairs; ++ } ++ ++ numAvail = p->numAvail; ++ if (numAvail < 2) ++ { ++ *backRes = (UInt32)(-1); ++ return 1; ++ } ++ if (numAvail > LZMA_MATCH_LEN_MAX) ++ numAvail = LZMA_MATCH_LEN_MAX; ++ ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ repMaxIndex = 0; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 lenTest; ++ const Byte *data2; ++ reps[i] = p->reps[i]; ++ data2 = data - (reps[i] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ { ++ repLens[i] = 0; ++ continue; ++ } ++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++); ++ repLens[i] = lenTest; ++ if (lenTest > repLens[repMaxIndex]) ++ repMaxIndex = i; ++ } ++ if (repLens[repMaxIndex] >= p->numFastBytes) ++ { ++ UInt32 lenRes; ++ *backRes = repMaxIndex; ++ lenRes = repLens[repMaxIndex]; ++ MovePos(p, lenRes - 1); ++ return lenRes; ++ } ++ ++ matches = p->matches; ++ if (mainLen >= 
p->numFastBytes) ++ { ++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS; ++ MovePos(p, mainLen - 1); ++ return mainLen; ++ } ++ curByte = *data; ++ matchByte = *(data - (reps[0] + 1)); ++ ++ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2) ++ { ++ *backRes = (UInt32)-1; ++ return 1; ++ } ++ ++ p->opt[0].state = (CState)p->state; ++ ++ posState = (position & p->pbMask); ++ ++ { ++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); ++ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) + ++ (!IsCharState(p->state) ? ++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) : ++ LitEnc_GetPrice(probs, curByte, p->ProbPrices)); ++ } ++ ++ MakeAsChar(&p->opt[1]); ++ ++ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]); ++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]); ++ ++ if (matchByte == curByte) ++ { ++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState); ++ if (shortRepPrice < p->opt[1].price) ++ { ++ p->opt[1].price = shortRepPrice; ++ MakeAsShortRep(&p->opt[1]); ++ } ++ } ++ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]); ++ ++ if (lenEnd < 2) ++ { ++ *backRes = p->opt[1].backPrev; ++ return 1; ++ } ++ ++ p->opt[1].posPrev = 0; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ p->opt[0].backs[i] = reps[i]; ++ ++ len = lenEnd; ++ do ++ p->opt[len--].price = kInfinityPrice; ++ while (len >= 2); ++ ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 repLen = repLens[i]; ++ UInt32 price; ++ if (repLen < 2) ++ continue; ++ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState); ++ do ++ { ++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2]; ++ COptimal *opt = &p->opt[repLen]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = 0; ++ opt->backPrev = i; ++ opt->prev1IsChar = False; ++ } ++ } ++ while (--repLen >= 2); ++ } ++ ++ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]); ++ ++ len = ((repLens[0] >= 2) ? 
repLens[0] + 1 : 2); ++ if (len <= mainLen) ++ { ++ UInt32 offs = 0; ++ while (len > matches[offs]) ++ offs += 2; ++ for (; ; len++) ++ { ++ COptimal *opt; ++ UInt32 distance = matches[offs + 1]; ++ ++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN]; ++ UInt32 lenToPosState = GetLenToPosState(len); ++ if (distance < kNumFullDistances) ++ curAndLenPrice += p->distancesPrices[lenToPosState][distance]; ++ else ++ { ++ UInt32 slot; ++ GetPosSlot2(distance, slot); ++ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot]; ++ } ++ opt = &p->opt[len]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = 0; ++ opt->backPrev = distance + LZMA_NUM_REPS; ++ opt->prev1IsChar = False; ++ } ++ if (len == matches[offs]) ++ { ++ offs += 2; ++ if (offs == numPairs) ++ break; ++ } ++ } ++ } ++ ++ cur = 0; ++ ++ #ifdef SHOW_STAT2 ++ if (position >= 0) ++ { ++ unsigned i; ++ printf("\n pos = %4X", position); ++ for (i = cur; i <= lenEnd; i++) ++ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price); ++ } ++ #endif ++ ++ for (;;) ++ { ++ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen; ++ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice; ++ Bool nextIsChar; ++ Byte curByte, matchByte; ++ const Byte *data; ++ COptimal *curOpt; ++ COptimal *nextOpt; ++ ++ cur++; ++ if (cur == lenEnd) ++ return Backward(p, backRes, cur); ++ ++ newLen = ReadMatchDistances(p, &numPairs); ++ if (newLen >= p->numFastBytes) ++ { ++ p->numPairs = numPairs; ++ p->longestMatchLength = newLen; ++ return Backward(p, backRes, cur); ++ } ++ position++; ++ curOpt = &p->opt[cur]; ++ posPrev = curOpt->posPrev; ++ if (curOpt->prev1IsChar) ++ { ++ posPrev--; ++ if (curOpt->prev2) ++ { ++ state = p->opt[curOpt->posPrev2].state; ++ if (curOpt->backPrev2 < LZMA_NUM_REPS) ++ state = kRepNextStates[state]; ++ else ++ state = kMatchNextStates[state]; ++ } ++ else ++ state = p->opt[posPrev].state; ++ state = kLiteralNextStates[state]; ++ } ++ else ++ state = p->opt[posPrev].state; ++ if (posPrev == cur - 1) ++ { ++ if (IsShortRep(curOpt)) ++ state = kShortRepNextStates[state]; ++ else ++ state = kLiteralNextStates[state]; ++ } ++ else ++ { ++ UInt32 pos; ++ const COptimal *prevOpt; ++ if (curOpt->prev1IsChar && curOpt->prev2) ++ { ++ posPrev = curOpt->posPrev2; ++ pos = curOpt->backPrev2; ++ state = kRepNextStates[state]; ++ } ++ else ++ { ++ pos = curOpt->backPrev; ++ if (pos < LZMA_NUM_REPS) ++ state = kRepNextStates[state]; ++ else ++ state = kMatchNextStates[state]; ++ } ++ prevOpt = &p->opt[posPrev]; ++ if (pos < LZMA_NUM_REPS) ++ { ++ UInt32 i; ++ reps[0] = prevOpt->backs[pos]; ++ for (i = 1; i <= pos; i++) ++ reps[i] = prevOpt->backs[i - 1]; ++ for (; i < LZMA_NUM_REPS; i++) ++ reps[i] = prevOpt->backs[i]; ++ } ++ else ++ { ++ UInt32 i; ++ reps[0] = (pos - LZMA_NUM_REPS); ++ for (i = 1; i < LZMA_NUM_REPS; i++) ++ reps[i] = prevOpt->backs[i - 1]; ++ } ++ } ++ curOpt->state = (CState)state; ++ ++ curOpt->backs[0] = reps[0]; ++ curOpt->backs[1] = reps[1]; ++ curOpt->backs[2] = reps[2]; ++ curOpt->backs[3] = reps[3]; ++ ++ curPrice = curOpt->price; ++ nextIsChar = False; ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ curByte = *data; ++ matchByte = *(data - (reps[0] + 1)); ++ ++ posState = (position & p->pbMask); ++ ++ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]); ++ { ++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); ++ 
curAnd1Price += ++ (!IsCharState(state) ? ++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) : ++ LitEnc_GetPrice(probs, curByte, p->ProbPrices)); ++ } ++ ++ nextOpt = &p->opt[cur + 1]; ++ ++ if (curAnd1Price < nextOpt->price) ++ { ++ nextOpt->price = curAnd1Price; ++ nextOpt->posPrev = cur; ++ MakeAsChar(nextOpt); ++ nextIsChar = True; ++ } ++ ++ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]); ++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]); ++ ++ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0)) ++ { ++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState); ++ if (shortRepPrice <= nextOpt->price) ++ { ++ nextOpt->price = shortRepPrice; ++ nextOpt->posPrev = cur; ++ MakeAsShortRep(nextOpt); ++ nextIsChar = True; ++ } ++ } ++ numAvailFull = p->numAvail; ++ { ++ UInt32 temp = kNumOpts - 1 - cur; ++ if (temp < numAvailFull) ++ numAvailFull = temp; ++ } ++ ++ if (numAvailFull < 2) ++ continue; ++ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes); ++ ++ if (!nextIsChar && matchByte != curByte) /* speed optimization */ ++ { ++ /* try Literal + rep0 */ ++ UInt32 temp; ++ UInt32 lenTest2; ++ const Byte *data2 = data - (reps[0] + 1); ++ UInt32 limit = p->numFastBytes + 1; ++ if (limit > numAvailFull) ++ limit = numAvailFull; ++ ++ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++); ++ lenTest2 = temp - 1; ++ if (lenTest2 >= 2) ++ { ++ UInt32 state2 = kLiteralNextStates[state]; ++ UInt32 posStateNext = (position + 1) & p->pbMask; ++ UInt32 nextRepMatchPrice = curAnd1Price + ++ GET_PRICE_1(p->isMatch[state2][posStateNext]) + ++ GET_PRICE_1(p->isRep[state2]); ++ /* for (; lenTest2 >= 2; lenTest2--) */ ++ { ++ UInt32 curAndLenPrice; ++ COptimal *opt; ++ UInt32 offset = cur + 1 + lenTest2; ++ while (lenEnd < offset) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); ++ opt = &p->opt[offset]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur + 1; ++ opt->backPrev = 0; ++ opt->prev1IsChar = True; ++ opt->prev2 = False; ++ } ++ } ++ } ++ } ++ ++ startLen = 2; /* speed optimization */ ++ { ++ UInt32 repIndex; ++ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++) ++ { ++ UInt32 lenTest; ++ UInt32 lenTestTemp; ++ UInt32 price; ++ const Byte *data2 = data - (reps[repIndex] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ continue; ++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++); ++ while (lenEnd < cur + lenTest) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ lenTestTemp = lenTest; ++ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState); ++ do ++ { ++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2]; ++ COptimal *opt = &p->opt[cur + lenTest]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur; ++ opt->backPrev = repIndex; ++ opt->prev1IsChar = False; ++ } ++ } ++ while (--lenTest >= 2); ++ lenTest = lenTestTemp; ++ ++ if (repIndex == 0) ++ startLen = lenTest + 1; ++ ++ /* if (_maxMode) */ ++ { ++ UInt32 lenTest2 = lenTest + 1; ++ UInt32 limit = lenTest2 + p->numFastBytes; ++ UInt32 nextRepMatchPrice; ++ if (limit > numAvailFull) ++ limit = numAvailFull; ++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++); ++ lenTest2 -= lenTest + 1; ++ if (lenTest2 >= 2) ++ { ++ UInt32 state2 = 
kRepNextStates[state]; ++ UInt32 posStateNext = (position + lenTest) & p->pbMask; ++ UInt32 curAndLenCharPrice = ++ price + p->repLenEnc.prices[posState][lenTest - 2] + ++ GET_PRICE_0(p->isMatch[state2][posStateNext]) + ++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]), ++ data[lenTest], data2[lenTest], p->ProbPrices); ++ state2 = kLiteralNextStates[state2]; ++ posStateNext = (position + lenTest + 1) & p->pbMask; ++ nextRepMatchPrice = curAndLenCharPrice + ++ GET_PRICE_1(p->isMatch[state2][posStateNext]) + ++ GET_PRICE_1(p->isRep[state2]); ++ ++ /* for (; lenTest2 >= 2; lenTest2--) */ ++ { ++ UInt32 curAndLenPrice; ++ COptimal *opt; ++ UInt32 offset = cur + lenTest + 1 + lenTest2; ++ while (lenEnd < offset) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); ++ opt = &p->opt[offset]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur + lenTest + 1; ++ opt->backPrev = 0; ++ opt->prev1IsChar = True; ++ opt->prev2 = True; ++ opt->posPrev2 = cur; ++ opt->backPrev2 = repIndex; ++ } ++ } ++ } ++ } ++ } ++ } ++ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */ ++ if (newLen > numAvail) ++ { ++ newLen = numAvail; ++ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2); ++ matches[numPairs] = newLen; ++ numPairs += 2; ++ } ++ if (newLen >= startLen) ++ { ++ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]); ++ UInt32 offs, curBack, posSlot; ++ UInt32 lenTest; ++ while (lenEnd < cur + newLen) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ ++ offs = 0; ++ while (startLen > matches[offs]) ++ offs += 2; ++ curBack = matches[offs + 1]; ++ GetPosSlot2(curBack, posSlot); ++ for (lenTest = /*2*/ startLen; ; lenTest++) ++ { ++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN]; ++ UInt32 lenToPosState = GetLenToPosState(lenTest); ++ COptimal *opt; ++ if (curBack < kNumFullDistances) ++ curAndLenPrice += p->distancesPrices[lenToPosState][curBack]; ++ else ++ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask]; ++ ++ opt = &p->opt[cur + lenTest]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur; ++ opt->backPrev = curBack + LZMA_NUM_REPS; ++ opt->prev1IsChar = False; ++ } ++ ++ if (/*_maxMode && */lenTest == matches[offs]) ++ { ++ /* Try Match + Literal + Rep0 */ ++ const Byte *data2 = data - (curBack + 1); ++ UInt32 lenTest2 = lenTest + 1; ++ UInt32 limit = lenTest2 + p->numFastBytes; ++ UInt32 nextRepMatchPrice; ++ if (limit > numAvailFull) ++ limit = numAvailFull; ++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++); ++ lenTest2 -= lenTest + 1; ++ if (lenTest2 >= 2) ++ { ++ UInt32 state2 = kMatchNextStates[state]; ++ UInt32 posStateNext = (position + lenTest) & p->pbMask; ++ UInt32 curAndLenCharPrice = curAndLenPrice + ++ GET_PRICE_0(p->isMatch[state2][posStateNext]) + ++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]), ++ data[lenTest], data2[lenTest], p->ProbPrices); ++ state2 = kLiteralNextStates[state2]; ++ posStateNext = (posStateNext + 1) & p->pbMask; ++ nextRepMatchPrice = curAndLenCharPrice + ++ GET_PRICE_1(p->isMatch[state2][posStateNext]) + ++ GET_PRICE_1(p->isRep[state2]); ++ ++ /* for (; lenTest2 >= 2; lenTest2--) */ ++ { ++ UInt32 offset = cur + lenTest + 1 + lenTest2; ++ UInt32 curAndLenPrice; ++ COptimal *opt; ++ while 
(lenEnd < offset) ++ p->opt[++lenEnd].price = kInfinityPrice; ++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext); ++ opt = &p->opt[offset]; ++ if (curAndLenPrice < opt->price) ++ { ++ opt->price = curAndLenPrice; ++ opt->posPrev = cur + lenTest + 1; ++ opt->backPrev = 0; ++ opt->prev1IsChar = True; ++ opt->prev2 = True; ++ opt->posPrev2 = cur; ++ opt->backPrev2 = curBack + LZMA_NUM_REPS; ++ } ++ } ++ } ++ offs += 2; ++ if (offs == numPairs) ++ break; ++ curBack = matches[offs + 1]; ++ if (curBack >= kNumFullDistances) ++ GetPosSlot2(curBack, posSlot); ++ } ++ } ++ } ++ } ++} ++ ++#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist)) ++ ++static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes) ++{ ++ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i; ++ const Byte *data; ++ const UInt32 *matches; ++ ++ if (p->additionalOffset == 0) ++ mainLen = ReadMatchDistances(p, &numPairs); ++ else ++ { ++ mainLen = p->longestMatchLength; ++ numPairs = p->numPairs; ++ } ++ ++ numAvail = p->numAvail; ++ *backRes = (UInt32)-1; ++ if (numAvail < 2) ++ return 1; ++ if (numAvail > LZMA_MATCH_LEN_MAX) ++ numAvail = LZMA_MATCH_LEN_MAX; ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ ++ repLen = repIndex = 0; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 len; ++ const Byte *data2 = data - (p->reps[i] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ continue; ++ for (len = 2; len < numAvail && data[len] == data2[len]; len++); ++ if (len >= p->numFastBytes) ++ { ++ *backRes = i; ++ MovePos(p, len - 1); ++ return len; ++ } ++ if (len > repLen) ++ { ++ repIndex = i; ++ repLen = len; ++ } ++ } ++ ++ matches = p->matches; ++ if (mainLen >= p->numFastBytes) ++ { ++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS; ++ MovePos(p, mainLen - 1); ++ return mainLen; ++ } ++ ++ mainDist = 0; /* for GCC */ ++ if (mainLen >= 2) ++ { ++ mainDist = matches[numPairs - 1]; ++ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1) ++ { ++ if (!ChangePair(matches[numPairs - 3], mainDist)) ++ break; ++ numPairs -= 2; ++ mainLen = matches[numPairs - 2]; ++ mainDist = matches[numPairs - 1]; ++ } ++ if (mainLen == 2 && mainDist >= 0x80) ++ mainLen = 1; ++ } ++ ++ if (repLen >= 2 && ( ++ (repLen + 1 >= mainLen) || ++ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) || ++ (repLen + 3 >= mainLen && mainDist >= (1 << 15)))) ++ { ++ *backRes = repIndex; ++ MovePos(p, repLen - 1); ++ return repLen; ++ } ++ ++ if (mainLen < 2 || numAvail <= 2) ++ return 1; ++ ++ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs); ++ if (p->longestMatchLength >= 2) ++ { ++ UInt32 newDistance = matches[p->numPairs - 1]; ++ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) || ++ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) || ++ (p->longestMatchLength > mainLen + 1) || ++ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist))) ++ return 1; ++ } ++ ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; ++ for (i = 0; i < LZMA_NUM_REPS; i++) ++ { ++ UInt32 len, limit; ++ const Byte *data2 = data - (p->reps[i] + 1); ++ if (data[0] != data2[0] || data[1] != data2[1]) ++ continue; ++ limit = mainLen - 1; ++ for (len = 2; len < limit && data[len] == data2[len]; len++); ++ if (len >= limit) ++ return 1; ++ } ++ *backRes = mainDist + LZMA_NUM_REPS; ++ MovePos(p, mainLen - 2); ++ return mainLen; ++} ++ ++static void WriteEndMarker(CLzmaEnc 
*p, UInt32 posState) ++{ ++ UInt32 len; ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1); ++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0); ++ p->state = kMatchNextStates[p->state]; ++ len = LZMA_MATCH_LEN_MIN; ++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); ++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1); ++ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits); ++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask); ++} ++ ++static SRes CheckErrors(CLzmaEnc *p) ++{ ++ if (p->result != SZ_OK) ++ return p->result; ++ if (p->rc.res != SZ_OK) ++ p->result = SZ_ERROR_WRITE; ++ if (p->matchFinderBase.result != SZ_OK) ++ p->result = SZ_ERROR_READ; ++ if (p->result != SZ_OK) ++ p->finished = True; ++ return p->result; ++} ++ ++static SRes Flush(CLzmaEnc *p, UInt32 nowPos) ++{ ++ /* ReleaseMFStream(); */ ++ p->finished = True; ++ if (p->writeEndMark) ++ WriteEndMarker(p, nowPos & p->pbMask); ++ RangeEnc_FlushData(&p->rc); ++ RangeEnc_FlushStream(&p->rc); ++ return CheckErrors(p); ++} ++ ++static void FillAlignPrices(CLzmaEnc *p) ++{ ++ UInt32 i; ++ for (i = 0; i < kAlignTableSize; i++) ++ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices); ++ p->alignPriceCount = 0; ++} ++ ++static void FillDistancesPrices(CLzmaEnc *p) ++{ ++ UInt32 tempPrices[kNumFullDistances]; ++ UInt32 i, lenToPosState; ++ for (i = kStartPosModelIndex; i < kNumFullDistances; i++) ++ { ++ UInt32 posSlot = GetPosSlot1(i); ++ UInt32 footerBits = ((posSlot >> 1) - 1); ++ UInt32 base = ((2 | (posSlot & 1)) << footerBits); ++ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices); ++ } ++ ++ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++) ++ { ++ UInt32 posSlot; ++ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState]; ++ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState]; ++ for (posSlot = 0; posSlot < p->distTableSize; posSlot++) ++ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices); ++ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++) ++ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits); ++ ++ { ++ UInt32 *distancesPrices = p->distancesPrices[lenToPosState]; ++ UInt32 i; ++ for (i = 0; i < kStartPosModelIndex; i++) ++ distancesPrices[i] = posSlotPrices[i]; ++ for (; i < kNumFullDistances; i++) ++ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i]; ++ } ++ } ++ p->matchPriceCount = 0; ++} ++ ++void LzmaEnc_Construct(CLzmaEnc *p) ++{ ++ RangeEnc_Construct(&p->rc); ++ MatchFinder_Construct(&p->matchFinderBase); ++ #ifndef _7ZIP_ST ++ MatchFinderMt_Construct(&p->matchFinderMt); ++ p->matchFinderMt.MatchFinder = &p->matchFinderBase; ++ #endif ++ ++ { ++ CLzmaEncProps props; ++ LzmaEncProps_Init(&props); ++ LzmaEnc_SetProps(p, &props); ++ } ++ ++ #ifndef LZMA_LOG_BSR ++ LzmaEnc_FastPosInit(p->g_FastPos); ++ #endif ++ ++ LzmaEnc_InitPriceTables(p->ProbPrices); ++ p->litProbs = 0; ++ p->saveState.litProbs = 0; ++} ++ ++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc) ++{ ++ void *p; ++ p = alloc->Alloc(alloc, sizeof(CLzmaEnc)); ++ if (p != 0) ++ LzmaEnc_Construct((CLzmaEnc *)p); ++ return p; ++} ++ ++void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc) ++{ ++ 
alloc->Free(alloc, p->litProbs); ++ alloc->Free(alloc, p->saveState.litProbs); ++ p->litProbs = 0; ++ p->saveState.litProbs = 0; ++} ++ ++void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ #ifndef _7ZIP_ST ++ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig); ++ #endif ++ MatchFinder_Free(&p->matchFinderBase, allocBig); ++ LzmaEnc_FreeLits(p, alloc); ++ RangeEnc_Free(&p->rc, alloc); ++} ++ ++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig); ++ alloc->Free(alloc, p); ++} ++ ++static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize) ++{ ++ UInt32 nowPos32, startPos32; ++ if (p->needInit) ++ { ++ p->matchFinder.Init(p->matchFinderObj); ++ p->needInit = 0; ++ } ++ ++ if (p->finished) ++ return p->result; ++ RINOK(CheckErrors(p)); ++ ++ nowPos32 = (UInt32)p->nowPos64; ++ startPos32 = nowPos32; ++ ++ if (p->nowPos64 == 0) ++ { ++ UInt32 numPairs; ++ Byte curByte; ++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) ++ return Flush(p, nowPos32); ++ ReadMatchDistances(p, &numPairs); ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0); ++ p->state = kLiteralNextStates[p->state]; ++ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset); ++ LitEnc_Encode(&p->rc, p->litProbs, curByte); ++ p->additionalOffset--; ++ nowPos32++; ++ } ++ ++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0) ++ for (;;) ++ { ++ UInt32 pos, len, posState; ++ ++ if (p->fastMode) ++ len = GetOptimumFast(p, &pos); ++ else ++ len = GetOptimum(p, nowPos32, &pos); ++ ++ #ifdef SHOW_STAT2 ++ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos); ++ #endif ++ ++ posState = nowPos32 & p->pbMask; ++ if (len == 1 && pos == (UInt32)-1) ++ { ++ Byte curByte; ++ CLzmaProb *probs; ++ const Byte *data; ++ ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0); ++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; ++ curByte = *data; ++ probs = LIT_PROBS(nowPos32, *(data - 1)); ++ if (IsCharState(p->state)) ++ LitEnc_Encode(&p->rc, probs, curByte); ++ else ++ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1)); ++ p->state = kLiteralNextStates[p->state]; ++ } ++ else ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1); ++ if (pos < LZMA_NUM_REPS) ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1); ++ if (pos == 0) ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0); ++ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 
0 : 1)); ++ } ++ else ++ { ++ UInt32 distance = p->reps[pos]; ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1); ++ if (pos == 1) ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0); ++ else ++ { ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1); ++ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2); ++ if (pos == 3) ++ p->reps[3] = p->reps[2]; ++ p->reps[2] = p->reps[1]; ++ } ++ p->reps[1] = p->reps[0]; ++ p->reps[0] = distance; ++ } ++ if (len == 1) ++ p->state = kShortRepNextStates[p->state]; ++ else ++ { ++ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); ++ p->state = kRepNextStates[p->state]; ++ } ++ } ++ else ++ { ++ UInt32 posSlot; ++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0); ++ p->state = kMatchNextStates[p->state]; ++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices); ++ pos -= LZMA_NUM_REPS; ++ GetPosSlot(pos, posSlot); ++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot); ++ ++ if (posSlot >= kStartPosModelIndex) ++ { ++ UInt32 footerBits = ((posSlot >> 1) - 1); ++ UInt32 base = ((2 | (posSlot & 1)) << footerBits); ++ UInt32 posReduced = pos - base; ++ ++ if (posSlot < kEndPosModelIndex) ++ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced); ++ else ++ { ++ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits); ++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask); ++ p->alignPriceCount++; ++ } ++ } ++ p->reps[3] = p->reps[2]; ++ p->reps[2] = p->reps[1]; ++ p->reps[1] = p->reps[0]; ++ p->reps[0] = pos; ++ p->matchPriceCount++; ++ } ++ } ++ p->additionalOffset -= len; ++ nowPos32 += len; ++ if (p->additionalOffset == 0) ++ { ++ UInt32 processed; ++ if (!p->fastMode) ++ { ++ if (p->matchPriceCount >= (1 << 7)) ++ FillDistancesPrices(p); ++ if (p->alignPriceCount >= kAlignTableSize) ++ FillAlignPrices(p); ++ } ++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) ++ break; ++ processed = nowPos32 - startPos32; ++ if (useLimits) ++ { ++ if (processed + kNumOpts + 300 >= maxUnpackSize || ++ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize) ++ break; ++ } ++ else if (processed >= (1 << 15)) ++ { ++ p->nowPos64 += nowPos32 - startPos32; ++ return CheckErrors(p); ++ } ++ } ++ } ++ p->nowPos64 += nowPos32 - startPos32; ++ return Flush(p, nowPos32); ++} ++ ++#define kBigHashDicLimit ((UInt32)1 << 24) ++ ++static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ UInt32 beforeSize = kNumOpts; ++ Bool btMode; ++ if (!RangeEnc_Alloc(&p->rc, alloc)) ++ return SZ_ERROR_MEM; ++ btMode = (p->matchFinderBase.btMode != 0); ++ #ifndef _7ZIP_ST ++ p->mtMode = (p->multiThread && !p->fastMode && btMode); ++ #endif ++ ++ { ++ unsigned lclp = p->lc + p->lp; ++ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp) ++ { ++ LzmaEnc_FreeLits(p, alloc); ++ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb)); ++ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb)); ++ if (p->litProbs == 0 || p->saveState.litProbs == 0) ++ { ++ LzmaEnc_FreeLits(p, alloc); ++ return SZ_ERROR_MEM; ++ } ++ p->lclp = lclp; ++ } ++ } ++ ++ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit); ++ ++ if (beforeSize + p->dictSize < keepWindowSize) ++ beforeSize = keepWindowSize 
- p->dictSize; ++ ++ #ifndef _7ZIP_ST ++ if (p->mtMode) ++ { ++ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig)); ++ p->matchFinderObj = &p->matchFinderMt; ++ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder); ++ } ++ else ++ #endif ++ { ++ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig)) ++ return SZ_ERROR_MEM; ++ p->matchFinderObj = &p->matchFinderBase; ++ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder); ++ } ++ return SZ_OK; ++} ++ ++void LzmaEnc_Init(CLzmaEnc *p) ++{ ++ UInt32 i; ++ p->state = 0; ++ for (i = 0 ; i < LZMA_NUM_REPS; i++) ++ p->reps[i] = 0; ++ ++ RangeEnc_Init(&p->rc); ++ ++ ++ for (i = 0; i < kNumStates; i++) ++ { ++ UInt32 j; ++ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++) ++ { ++ p->isMatch[i][j] = kProbInitValue; ++ p->isRep0Long[i][j] = kProbInitValue; ++ } ++ p->isRep[i] = kProbInitValue; ++ p->isRepG0[i] = kProbInitValue; ++ p->isRepG1[i] = kProbInitValue; ++ p->isRepG2[i] = kProbInitValue; ++ } ++ ++ { ++ UInt32 num = 0x300 << (p->lp + p->lc); ++ for (i = 0; i < num; i++) ++ p->litProbs[i] = kProbInitValue; ++ } ++ ++ { ++ for (i = 0; i < kNumLenToPosStates; i++) ++ { ++ CLzmaProb *probs = p->posSlotEncoder[i]; ++ UInt32 j; ++ for (j = 0; j < (1 << kNumPosSlotBits); j++) ++ probs[j] = kProbInitValue; ++ } ++ } ++ { ++ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++) ++ p->posEncoders[i] = kProbInitValue; ++ } ++ ++ LenEnc_Init(&p->lenEnc.p); ++ LenEnc_Init(&p->repLenEnc.p); ++ ++ for (i = 0; i < (1 << kNumAlignBits); i++) ++ p->posAlignEncoder[i] = kProbInitValue; ++ ++ p->optimumEndIndex = 0; ++ p->optimumCurrentIndex = 0; ++ p->additionalOffset = 0; ++ ++ p->pbMask = (1 << p->pb) - 1; ++ p->lpMask = (1 << p->lp) - 1; ++} ++ ++void LzmaEnc_InitPrices(CLzmaEnc *p) ++{ ++ if (!p->fastMode) ++ { ++ FillDistancesPrices(p); ++ FillAlignPrices(p); ++ } ++ ++ p->lenEnc.tableSize = ++ p->repLenEnc.tableSize = ++ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN; ++ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices); ++ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices); ++} ++ ++static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ UInt32 i; ++ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++) ++ if (p->dictSize <= ((UInt32)1 << i)) ++ break; ++ p->distTableSize = i * 2; ++ ++ p->finished = False; ++ p->result = SZ_OK; ++ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig)); ++ LzmaEnc_Init(p); ++ LzmaEnc_InitPrices(p); ++ p->nowPos64 = 0; ++ return SZ_OK; ++} ++ ++static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ++ ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ p->matchFinderBase.stream = inStream; ++ p->needInit = 1; ++ p->rc.outStream = outStream; ++ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig); ++} ++ ++SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ++ ISeqInStream *inStream, UInt32 keepWindowSize, ++ ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ p->matchFinderBase.stream = inStream; ++ p->needInit = 1; ++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); ++} ++ ++static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen) ++{ ++ p->matchFinderBase.directInput = 1; ++ p->matchFinderBase.bufferBase = (Byte *)src; ++ p->matchFinderBase.directInputRem = srcLen; ++} 
++ ++SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen, ++ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ LzmaEnc_SetInputBuf(p, src, srcLen); ++ p->needInit = 1; ++ ++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); ++} ++ ++void LzmaEnc_Finish(CLzmaEncHandle pp) ++{ ++ #ifndef _7ZIP_ST ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ if (p->mtMode) ++ MatchFinderMt_ReleaseStream(&p->matchFinderMt); ++ #else ++ pp = pp; ++ #endif ++} ++ ++typedef struct ++{ ++ ISeqOutStream funcTable; ++ Byte *data; ++ SizeT rem; ++ Bool overflow; ++} CSeqOutStreamBuf; ++ ++static size_t MyWrite(void *pp, const void *data, size_t size) ++{ ++ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp; ++ if (p->rem < size) ++ { ++ size = p->rem; ++ p->overflow = True; ++ } ++ memcpy(p->data, data, size); ++ p->rem -= size; ++ p->data += size; ++ return size; ++} ++ ++ ++UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp) ++{ ++ const CLzmaEnc *p = (CLzmaEnc *)pp; ++ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); ++} ++ ++const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp) ++{ ++ const CLzmaEnc *p = (CLzmaEnc *)pp; ++ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; ++} ++ ++SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit, ++ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ UInt64 nowPos64; ++ SRes res; ++ CSeqOutStreamBuf outStream; ++ ++ outStream.funcTable.Write = MyWrite; ++ outStream.data = dest; ++ outStream.rem = *destLen; ++ outStream.overflow = False; ++ ++ p->writeEndMark = False; ++ p->finished = False; ++ p->result = SZ_OK; ++ ++ if (reInit) ++ LzmaEnc_Init(p); ++ LzmaEnc_InitPrices(p); ++ nowPos64 = p->nowPos64; ++ RangeEnc_Init(&p->rc); ++ p->rc.outStream = &outStream.funcTable; ++ ++ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize); ++ ++ *unpackSize = (UInt32)(p->nowPos64 - nowPos64); ++ *destLen -= outStream.rem; ++ if (outStream.overflow) ++ return SZ_ERROR_OUTPUT_EOF; ++ ++ return res; ++} ++ ++static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress) ++{ ++ SRes res = SZ_OK; ++ ++ #ifndef _7ZIP_ST ++ Byte allocaDummy[0x300]; ++ int i = 0; ++ for (i = 0; i < 16; i++) ++ allocaDummy[i] = (Byte)i; ++ #endif ++ ++ for (;;) ++ { ++ res = LzmaEnc_CodeOneBlock(p, False, 0, 0); ++ if (res != SZ_OK || p->finished != 0) ++ break; ++ if (progress != 0) ++ { ++ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc)); ++ if (res != SZ_OK) ++ { ++ res = SZ_ERROR_PROGRESS; ++ break; ++ } ++ } ++ } ++ LzmaEnc_Finish(p); ++ return res; ++} ++ ++SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress, ++ ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig)); ++ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress); ++} ++ ++SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ int i; ++ UInt32 dictSize = p->dictSize; ++ if (*size < LZMA_PROPS_SIZE) ++ return SZ_ERROR_PARAM; ++ *size = LZMA_PROPS_SIZE; ++ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc); ++ ++ for (i = 11; i <= 30; i++) ++ { ++ if (dictSize <= ((UInt32)2 << i)) ++ { ++ dictSize = (2 << i); ++ break; ++ } ++ if (dictSize <= ((UInt32)3 << i)) ++ { ++ dictSize = (3 << i); ++ break; ++ } ++ } ++ ++ for (i = 0; i < 4; i++) ++ 
props[1 + i] = (Byte)(dictSize >> (8 * i)); ++ return SZ_OK; ++} ++ ++SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ SRes res; ++ CLzmaEnc *p = (CLzmaEnc *)pp; ++ ++ CSeqOutStreamBuf outStream; ++ ++ LzmaEnc_SetInputBuf(p, src, srcLen); ++ ++ outStream.funcTable.Write = MyWrite; ++ outStream.data = dest; ++ outStream.rem = *destLen; ++ outStream.overflow = False; ++ ++ p->writeEndMark = writeEndMark; ++ ++ p->rc.outStream = &outStream.funcTable; ++ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig); ++ if (res == SZ_OK) ++ res = LzmaEnc_Encode2(p, progress); ++ ++ *destLen -= outStream.rem; ++ if (outStream.overflow) ++ return SZ_ERROR_OUTPUT_EOF; ++ return res; ++} ++ ++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, ++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig) ++{ ++ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc); ++ SRes res; ++ if (p == 0) ++ return SZ_ERROR_MEM; ++ ++ res = LzmaEnc_SetProps(p, props); ++ if (res == SZ_OK) ++ { ++ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize); ++ if (res == SZ_OK) ++ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen, ++ writeEndMark, progress, alloc, allocBig); ++ } ++ ++ LzmaEnc_Destroy(p, alloc, allocBig); ++ return res; ++} +--- /dev/null ++++ b/lib/lzma/Makefile +@@ -0,0 +1,7 @@ ++lzma_compress-objs := LzFind.o LzmaEnc.o ++lzma_decompress-objs := LzmaDec.o ++ ++obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o ++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o ++ ++EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h diff --git a/target/linux/generic/patches-3.8/531-debloat_lzma.patch b/target/linux/generic/patches-3.8/531-debloat_lzma.patch new file mode 100644 index 0000000000..1e41661ebb --- /dev/null +++ b/target/linux/generic/patches-3.8/531-debloat_lzma.patch @@ -0,0 +1,485 @@ +--- a/include/linux/lzma/LzmaDec.h ++++ b/include/linux/lzma/LzmaDec.h +@@ -31,14 +31,6 @@ typedef struct _CLzmaProps + UInt32 dicSize; + } CLzmaProps; + +-/* LzmaProps_Decode - decodes properties +-Returns: +- SZ_OK +- SZ_ERROR_UNSUPPORTED - Unsupported properties +-*/ +- +-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size); +- + + /* ---------- LZMA Decoder state ---------- */ + +@@ -70,8 +62,6 @@ typedef struct + + #define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; } + +-void LzmaDec_Init(CLzmaDec *p); +- + /* There are two types of LZMA streams: + 0) Stream with end mark. That end mark adds about 6 bytes to compressed size. + 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */ +@@ -108,97 +98,6 @@ typedef enum + + /* ELzmaStatus is used only as output value for function call */ + +- +-/* ---------- Interfaces ---------- */ +- +-/* There are 3 levels of interfaces: +- 1) Dictionary Interface +- 2) Buffer Interface +- 3) One Call Interface +- You can select any of these interfaces, but don't mix functions from different +- groups for same object. */ +- +- +-/* There are two variants to allocate state for Dictionary Interface: +- 1) LzmaDec_Allocate / LzmaDec_Free +- 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs +- You can use variant 2, if you set dictionary buffer manually. +- For Buffer Interface you must always use variant 1. 
+- +-LzmaDec_Allocate* can return: +- SZ_OK +- SZ_ERROR_MEM - Memory allocation error +- SZ_ERROR_UNSUPPORTED - Unsupported properties +-*/ +- +-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc); +-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc); +- +-SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc); +-void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc); +- +-/* ---------- Dictionary Interface ---------- */ +- +-/* You can use it, if you want to eliminate the overhead for data copying from +- dictionary to some other external buffer. +- You must work with CLzmaDec variables directly in this interface. +- +- STEPS: +- LzmaDec_Constr() +- LzmaDec_Allocate() +- for (each new stream) +- { +- LzmaDec_Init() +- while (it needs more decompression) +- { +- LzmaDec_DecodeToDic() +- use data from CLzmaDec::dic and update CLzmaDec::dicPos +- } +- } +- LzmaDec_Free() +-*/ +- +-/* LzmaDec_DecodeToDic +- +- The decoding to internal dictionary buffer (CLzmaDec::dic). +- You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!! +- +-finishMode: +- It has meaning only if the decoding reaches output limit (dicLimit). +- LZMA_FINISH_ANY - Decode just dicLimit bytes. +- LZMA_FINISH_END - Stream must be finished after dicLimit. +- +-Returns: +- SZ_OK +- status: +- LZMA_STATUS_FINISHED_WITH_MARK +- LZMA_STATUS_NOT_FINISHED +- LZMA_STATUS_NEEDS_MORE_INPUT +- LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK +- SZ_ERROR_DATA - Data error +-*/ +- +-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, +- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); +- +- +-/* ---------- Buffer Interface ---------- */ +- +-/* It's zlib-like interface. +- See LzmaDec_DecodeToDic description for information about STEPS and return results, +- but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need +- to work with CLzmaDec variables manually. +- +-finishMode: +- It has meaning only if the decoding reaches output limit (*destLen). +- LZMA_FINISH_ANY - Decode just destLen bytes. +- LZMA_FINISH_END - Stream must be finished after (*destLen). +-*/ +- +-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, +- const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); +- +- + /* ---------- One Call Interface ---------- */ + + /* LzmaDecode +--- a/lib/lzma/LzmaDec.c ++++ b/lib/lzma/LzmaDec.c +@@ -682,7 +682,7 @@ static void LzmaDec_InitRc(CLzmaDec *p, + p->needFlush = 0; + } + +-void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState) ++static void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState) + { + p->needFlush = 1; + p->remainLen = 0; +@@ -698,7 +698,7 @@ void LzmaDec_InitDicAndState(CLzmaDec *p + p->needInitState = 1; + } + +-void LzmaDec_Init(CLzmaDec *p) ++static void LzmaDec_Init(CLzmaDec *p) + { + p->dicPos = 0; + LzmaDec_InitDicAndState(p, True, True); +@@ -716,7 +716,7 @@ static void LzmaDec_InitStateReal(CLzmaD + p->needInitState = 0; + } + +-SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen, ++static SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen, + ELzmaFinishMode finishMode, ELzmaStatus *status) + { + SizeT inSize = *srcLen; +@@ -837,7 +837,7 @@ SRes LzmaDec_DecodeToDic(CLzmaDec *p, Si + return (p->code == 0) ? 
SZ_OK : SZ_ERROR_DATA; + } + +-SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) ++static __maybe_unused SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) + { + SizeT outSize = *destLen; + SizeT inSize = *srcLen; +@@ -877,7 +877,7 @@ SRes LzmaDec_DecodeToBuf(CLzmaDec *p, By + } + } + +-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc) ++static void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc) + { + alloc->Free(alloc, p->probs); + p->probs = 0; +@@ -889,13 +889,13 @@ static void LzmaDec_FreeDict(CLzmaDec *p + p->dic = 0; + } + +-void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc) ++static void __maybe_unused LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc) + { + LzmaDec_FreeProbs(p, alloc); + LzmaDec_FreeDict(p, alloc); + } + +-SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size) ++static SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size) + { + UInt32 dicSize; + Byte d; +@@ -935,7 +935,7 @@ static SRes LzmaDec_AllocateProbs2(CLzma + return SZ_OK; + } + +-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) ++static SRes __maybe_unused LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) + { + CLzmaProps propNew; + RINOK(LzmaProps_Decode(&propNew, props, propsSize)); +@@ -944,7 +944,7 @@ SRes LzmaDec_AllocateProbs(CLzmaDec *p, + return SZ_OK; + } + +-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) ++static SRes __maybe_unused LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc) + { + CLzmaProps propNew; + SizeT dicBufSize; +--- a/include/linux/lzma/LzmaEnc.h ++++ b/include/linux/lzma/LzmaEnc.h +@@ -31,9 +31,6 @@ typedef struct _CLzmaEncProps + } CLzmaEncProps; + + void LzmaEncProps_Init(CLzmaEncProps *p); +-void LzmaEncProps_Normalize(CLzmaEncProps *p); +-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2); +- + + /* ---------- CLzmaEncHandle Interface ---------- */ + +@@ -53,26 +50,9 @@ CLzmaEncHandle LzmaEnc_Create(ISzAlloc * + void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig); + SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props); + SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size); +-SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream, +- ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); + SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, + int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); + +-/* ---------- One Call Interface ---------- */ +- +-/* LzmaEncode +-Return code: +- SZ_OK - OK +- SZ_ERROR_MEM - Memory allocation error +- SZ_ERROR_PARAM - Incorrect paramater +- SZ_ERROR_OUTPUT_EOF - output buffer overflow +- SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) +-*/ +- +-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, +- const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, +- ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig); +- + #ifdef __cplusplus + } + #endif +--- a/lib/lzma/LzmaEnc.c ++++ b/lib/lzma/LzmaEnc.c +@@ -53,7 +53,7 @@ void LzmaEncProps_Init(CLzmaEncProps *p) + p->writeEndMark = 0; + } 
+ +-void LzmaEncProps_Normalize(CLzmaEncProps *p) ++static void LzmaEncProps_Normalize(CLzmaEncProps *p) + { + int level = p->level; + if (level < 0) level = 5; +@@ -76,7 +76,7 @@ void LzmaEncProps_Normalize(CLzmaEncProp + #endif + } + +-UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2) ++static UInt32 __maybe_unused LzmaEncProps_GetDictSize(const CLzmaEncProps *props2) + { + CLzmaEncProps props = *props2; + LzmaEncProps_Normalize(&props); +@@ -93,7 +93,7 @@ UInt32 LzmaEncProps_GetDictSize(const CL + + #define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); } + +-UInt32 GetPosSlot1(UInt32 pos) ++static UInt32 GetPosSlot1(UInt32 pos) + { + UInt32 res; + BSR2_RET(pos, res); +@@ -107,7 +107,7 @@ UInt32 GetPosSlot1(UInt32 pos) + #define kNumLogBits (9 + (int)sizeof(size_t) / 2) + #define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7) + +-void LzmaEnc_FastPosInit(Byte *g_FastPos) ++static void LzmaEnc_FastPosInit(Byte *g_FastPos) + { + int c = 2, slotFast; + g_FastPos[0] = 0; +@@ -339,7 +339,7 @@ typedef struct + CSaveState saveState; + } CLzmaEnc; + +-void LzmaEnc_SaveState(CLzmaEncHandle pp) ++static void __maybe_unused LzmaEnc_SaveState(CLzmaEncHandle pp) + { + CLzmaEnc *p = (CLzmaEnc *)pp; + CSaveState *dest = &p->saveState; +@@ -365,7 +365,7 @@ void LzmaEnc_SaveState(CLzmaEncHandle pp + memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb)); + } + +-void LzmaEnc_RestoreState(CLzmaEncHandle pp) ++static void __maybe_unused LzmaEnc_RestoreState(CLzmaEncHandle pp) + { + CLzmaEnc *dest = (CLzmaEnc *)pp; + const CSaveState *p = &dest->saveState; +@@ -600,7 +600,7 @@ static void LitEnc_EncodeMatched(CRangeE + while (symbol < 0x10000); + } + +-void LzmaEnc_InitPriceTables(UInt32 *ProbPrices) ++static void LzmaEnc_InitPriceTables(UInt32 *ProbPrices) + { + UInt32 i; + for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits)) +@@ -1676,7 +1676,7 @@ static void FillDistancesPrices(CLzmaEnc + p->matchPriceCount = 0; + } + +-void LzmaEnc_Construct(CLzmaEnc *p) ++static void LzmaEnc_Construct(CLzmaEnc *p) + { + RangeEnc_Construct(&p->rc); + MatchFinder_Construct(&p->matchFinderBase); +@@ -1709,7 +1709,7 @@ CLzmaEncHandle LzmaEnc_Create(ISzAlloc * + return p; + } + +-void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc) ++static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc) + { + alloc->Free(alloc, p->litProbs); + alloc->Free(alloc, p->saveState.litProbs); +@@ -2074,7 +2074,7 @@ SRes LzmaEnc_MemPrepare(CLzmaEncHandle p + return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); + } + +-void LzmaEnc_Finish(CLzmaEncHandle pp) ++static void LzmaEnc_Finish(CLzmaEncHandle pp) + { + #ifndef _7ZIP_ST + CLzmaEnc *p = (CLzmaEnc *)pp; +@@ -2108,7 +2108,7 @@ static size_t MyWrite(void *pp, const vo + } + + +-UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp) ++static UInt32 __maybe_unused LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp) + { + const CLzmaEnc *p = (CLzmaEnc *)pp; + return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); +@@ -2120,7 +2120,7 @@ const Byte *LzmaEnc_GetCurBuf(CLzmaEncHa + return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; + } + +-SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit, ++static SRes __maybe_unused LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit, + Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize) + { + CLzmaEnc *p = (CLzmaEnc *)pp; +@@ -2248,7 +2248,7 
@@ SRes LzmaEnc_MemEncode(CLzmaEncHandle pp + return res; + } + +-SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, ++static __maybe_unused SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, + const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, + ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig) + { +--- a/include/linux/lzma/LzFind.h ++++ b/include/linux/lzma/LzFind.h +@@ -55,11 +55,6 @@ typedef struct _CMatchFinder + + #define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos) + +-int MatchFinder_NeedMove(CMatchFinder *p); +-Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); +-void MatchFinder_MoveBlock(CMatchFinder *p); +-void MatchFinder_ReadIfRequired(CMatchFinder *p); +- + void MatchFinder_Construct(CMatchFinder *p); + + /* Conditions: +@@ -70,12 +65,6 @@ int MatchFinder_Create(CMatchFinder *p, + UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, + ISzAlloc *alloc); + void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc); +-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems); +-void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue); +- +-UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son, +- UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue, +- UInt32 *distances, UInt32 maxLen); + + /* + Conditions: +@@ -102,12 +91,6 @@ typedef struct _IMatchFinder + + void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable); + +-void MatchFinder_Init(CMatchFinder *p); +-UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); +-UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); +-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); +-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); +- + #ifdef __cplusplus + } + #endif +--- a/lib/lzma/LzFind.c ++++ b/lib/lzma/LzFind.c +@@ -42,12 +42,12 @@ static int LzInWindow_Create(CMatchFinde + return (p->bufferBase != 0); + } + +-Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; } +-Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; } ++static Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; } ++static Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; } + +-UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; } ++static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; } + +-void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue) ++static void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue) + { + p->posLimit -= subValue; + p->pos -= subValue; +@@ -268,7 +268,7 @@ static void MatchFinder_SetLimits(CMatch + p->posLimit = p->pos + limit; + } + +-void MatchFinder_Init(CMatchFinder *p) ++static void MatchFinder_Init(CMatchFinder *p) + { + UInt32 i; + for (i = 0; i < p->hashSizeSum; i++) +@@ -287,7 +287,7 @@ static UInt32 MatchFinder_GetSubValue(CM + return (p->pos - p->historySize - 1) & kNormalizeMask; + } + +-void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems) ++static void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems) + { + UInt32 i; + for (i = 0; i < numItems; i++) +@@ -350,7 +350,7 @@ static UInt32 * Hc_GetMatchesSpec(UInt32 + } + } + +-UInt32 * GetMatchesSpec1(UInt32 
lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, ++static UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, + UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, + UInt32 *distances, UInt32 maxLen) + { +@@ -492,7 +492,7 @@ static UInt32 Bt2_MatchFinder_GetMatches + GET_MATCHES_FOOTER(offset, 1) + } + +-UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++static __maybe_unused UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) + { + UInt32 offset; + GET_MATCHES_HEADER(3) +@@ -632,7 +632,7 @@ static UInt32 Hc4_MatchFinder_GetMatches + MOVE_POS_RET + } + +-UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) ++static __maybe_unused UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) + { + UInt32 offset; + GET_MATCHES_HEADER(3) +@@ -657,7 +657,7 @@ static void Bt2_MatchFinder_Skip(CMatchF + while (--num != 0); + } + +-void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++static __maybe_unused void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) + { + do + { +@@ -718,7 +718,7 @@ static void Hc4_MatchFinder_Skip(CMatchF + while (--num != 0); + } + +-void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) ++static __maybe_unused void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) + { + do + { diff --git a/target/linux/generic/patches-3.8/532-jffs2_eofdetect.patch b/target/linux/generic/patches-3.8/532-jffs2_eofdetect.patch new file mode 100644 index 0000000000..f36131e99f --- /dev/null +++ b/target/linux/generic/patches-3.8/532-jffs2_eofdetect.patch @@ -0,0 +1,53 @@ +--- a/fs/jffs2/build.c ++++ b/fs/jffs2/build.c +@@ -114,6 +114,16 @@ static int jffs2_build_filesystem(struct + dbg_fsbuild("scanned flash completely\n"); + jffs2_dbg_dump_block_lists_nolock(c); + ++ if (c->flags & (1 << 7)) { ++ printk("%s(): unlocking the mtd device... ", __func__); ++ mtd_unlock(c->mtd, 0, c->mtd->size); ++ printk("done.\n"); ++ ++ printk("%s(): erasing all blocks after the end marker... ", __func__); ++ jffs2_erase_pending_blocks(c, -1); ++ printk("done.\n"); ++ } ++ + dbg_fsbuild("pass 1 starting\n"); + c->flags |= JFFS2_SB_FLAG_BUILDING; + /* Now scan the directory tree, increasing nlink according to every dirent found. */ +--- a/fs/jffs2/scan.c ++++ b/fs/jffs2/scan.c +@@ -148,8 +148,11 @@ int jffs2_scan_medium(struct jffs2_sb_in + /* reset summary info for next eraseblock scan */ + jffs2_sum_reset_collected(s); + +- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), +- buf_size, s); ++ if (c->flags & (1 << 7)) ++ ret = BLK_STATE_ALLFF; ++ else ++ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), ++ buf_size, s); + + if (ret < 0) + goto out; +@@ -556,6 +559,17 @@ static int jffs2_scan_eraseblock (struct + return err; + } + ++ if ((buf[0] == 0xde) && ++ (buf[1] == 0xad) && ++ (buf[2] == 0xc0) && ++ (buf[3] == 0xde)) { ++ /* end of filesystem. 
erase everything after this point */ ++ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset); ++ c->flags |= (1 << 7); ++ ++ return BLK_STATE_ALLFF; ++ } ++ + /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ + ofs = 0; + max_ofs = EMPTY_SCAN_SIZE(c->sector_size); diff --git a/target/linux/generic/patches-3.8/540-crypto-xz-decompression-support.patch b/target/linux/generic/patches-3.8/540-crypto-xz-decompression-support.patch new file mode 100644 index 0000000000..cf5a8d6129 --- /dev/null +++ b/target/linux/generic/patches-3.8/540-crypto-xz-decompression-support.patch @@ -0,0 +1,146 @@ +--- a/crypto/Kconfig ++++ b/crypto/Kconfig +@@ -1223,6 +1223,13 @@ config CRYPTO_842 + help + This is the 842 algorithm. + ++config CRYPTO_XZ ++ tristate "XZ compression algorithm" ++ select CRYPTO_ALGAPI ++ select XZ_DEC ++ help ++ This is the XZ algorithm. Only decompression is supported for now. ++ + comment "Random Number Generation" + + config CRYPTO_ANSI_CPRNG +--- a/crypto/Makefile ++++ b/crypto/Makefile +@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += mich + obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o + obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o + obj-$(CONFIG_CRYPTO_LZO) += lzo.o ++obj-$(CONFIG_CRYPTO_XZ) += xz.o + obj-$(CONFIG_CRYPTO_842) += 842.o + obj-$(CONFIG_CRYPTO_RNG2) += rng.o + obj-$(CONFIG_CRYPTO_RNG2) += krng.o +--- /dev/null ++++ b/crypto/xz.c +@@ -0,0 +1,117 @@ ++/* ++ * Cryptographic API. ++ * ++ * XZ decompression support. ++ * ++ * Copyright (c) 2012 Gabor Juhos <juhosg@openwrt.org> ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published by ++ * the Free Software Foundation. ++ * ++ */ ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/crypto.h> ++#include <linux/xz.h> ++#include <linux/interrupt.h> ++#include <linux/mm.h> ++#include <linux/net.h> ++ ++struct xz_comp_ctx { ++ struct xz_dec *decomp_state; ++ struct xz_buf decomp_buf; ++}; ++ ++static int crypto_xz_decomp_init(struct xz_comp_ctx *ctx) ++{ ++ ctx->decomp_state = xz_dec_init(XZ_SINGLE, 0); ++ if (!ctx->decomp_state) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void crypto_xz_decomp_exit(struct xz_comp_ctx *ctx) ++{ ++ xz_dec_end(ctx->decomp_state); ++} ++ ++static int crypto_xz_init(struct crypto_tfm *tfm) ++{ ++ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ return crypto_xz_decomp_init(ctx); ++} ++ ++static void crypto_xz_exit(struct crypto_tfm *tfm) ++{ ++ struct xz_comp_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ crypto_xz_decomp_exit(ctx); ++} ++ ++static int crypto_xz_compress(struct crypto_tfm *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static int crypto_xz_decompress(struct crypto_tfm *tfm, const u8 *src, ++ unsigned int slen, u8 *dst, unsigned int *dlen) ++{ ++ struct xz_comp_ctx *dctx = crypto_tfm_ctx(tfm); ++ struct xz_buf *xz_buf = &dctx->decomp_buf; ++ int ret; ++ ++ memset(xz_buf, '\0', sizeof(struct xz_buf)); ++ ++ xz_buf->in = (u8 *) src; ++ xz_buf->in_pos = 0; ++ xz_buf->in_size = slen; ++ xz_buf->out = (u8 *) dst; ++ xz_buf->out_pos = 0; ++ xz_buf->out_size = *dlen; ++ ++ ret = xz_dec_run(dctx->decomp_state, xz_buf); ++ if (ret != XZ_STREAM_END) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ *dlen = xz_buf->out_pos; ++ ret = 0; ++ ++out: ++ return ret; ++} ++ ++static struct crypto_alg crypto_xz_alg = { ++ .cra_name = "xz", ++ .cra_flags = 
CRYPTO_ALG_TYPE_COMPRESS, ++ .cra_ctxsize = sizeof(struct xz_comp_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_list = LIST_HEAD_INIT(crypto_xz_alg.cra_list), ++ .cra_init = crypto_xz_init, ++ .cra_exit = crypto_xz_exit, ++ .cra_u = { .compress = { ++ .coa_compress = crypto_xz_compress, ++ .coa_decompress = crypto_xz_decompress } } ++}; ++ ++static int __init crypto_xz_mod_init(void) ++{ ++ return crypto_register_alg(&crypto_xz_alg); ++} ++ ++static void __exit crypto_xz_mod_exit(void) ++{ ++ crypto_unregister_alg(&crypto_xz_alg); ++} ++ ++module_init(crypto_xz_mod_init); ++module_exit(crypto_xz_mod_exit); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Crypto XZ decompression support"); ++MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); diff --git a/target/linux/generic/patches-3.8/541-ubifs-xz-decompression-support.patch b/target/linux/generic/patches-3.8/541-ubifs-xz-decompression-support.patch new file mode 100644 index 0000000000..f85689c586 --- /dev/null +++ b/target/linux/generic/patches-3.8/541-ubifs-xz-decompression-support.patch @@ -0,0 +1,92 @@ +--- a/fs/ubifs/Kconfig ++++ b/fs/ubifs/Kconfig +@@ -5,8 +5,10 @@ config UBIFS_FS + select CRYPTO if UBIFS_FS_ADVANCED_COMPR + select CRYPTO if UBIFS_FS_LZO + select CRYPTO if UBIFS_FS_ZLIB ++ select CRYPTO if UBIFS_FS_XZ + select CRYPTO_LZO if UBIFS_FS_LZO + select CRYPTO_DEFLATE if UBIFS_FS_ZLIB ++ select CRYPTO_XZ if UBIFS_FS_XZ + depends on MTD_UBI + help + UBIFS is a file system for flash devices which works on top of UBI. +@@ -35,3 +37,12 @@ config UBIFS_FS_ZLIB + default y + help + Zlib compresses better than LZO but it is slower. Say 'Y' if unsure. ++ ++config UBIFS_FS_XZ ++ bool "XZ decompression support" if UBIFS_FS_ADVANCED_COMPR ++ depends on UBIFS_FS ++ default y ++ help ++ XZ compresses better the ZLIB but it is slower.. ++ Say 'Y' if unsure. 
++ +--- a/fs/ubifs/compress.c ++++ b/fs/ubifs/compress.c +@@ -71,6 +71,24 @@ static struct ubifs_compressor zlib_comp + }; + #endif + ++#ifdef CONFIG_UBIFS_FS_XZ ++static DEFINE_MUTEX(xz_enc_mutex); ++static DEFINE_MUTEX(xz_dec_mutex); ++ ++static struct ubifs_compressor xz_compr = { ++ .compr_type = UBIFS_COMPR_XZ, ++ .comp_mutex = &xz_enc_mutex, ++ .decomp_mutex = &xz_dec_mutex, ++ .name = "xz", ++ .capi_name = "xz", ++}; ++#else ++static struct ubifs_compressor xz_compr = { ++ .compr_type = UBIFS_COMPR_XZ, ++ .name = "xz", ++}; ++#endif ++ + /* All UBIFS compressors */ + struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; + +@@ -232,9 +250,15 @@ int __init ubifs_compressors_init(void) + if (err) + goto out_lzo; + ++ err = compr_init(&xz_compr); ++ if (err) ++ goto out_zlib; ++ + ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr; + return 0; + ++out_zlib: ++ compr_exit(&zlib_compr); + out_lzo: + compr_exit(&lzo_compr); + return err; +@@ -247,4 +271,5 @@ void ubifs_compressors_exit(void) + { + compr_exit(&lzo_compr); + compr_exit(&zlib_compr); ++ compr_exit(&xz_compr); + } +--- a/fs/ubifs/ubifs-media.h ++++ b/fs/ubifs/ubifs-media.h +@@ -332,12 +332,14 @@ enum { + * UBIFS_COMPR_NONE: no compression + * UBIFS_COMPR_LZO: LZO compression + * UBIFS_COMPR_ZLIB: ZLIB compression ++ * UBIFS_COMPR_XZ: XZ compression + * UBIFS_COMPR_TYPES_CNT: count of supported compression types + */ + enum { + UBIFS_COMPR_NONE, + UBIFS_COMPR_LZO, + UBIFS_COMPR_ZLIB, ++ UBIFS_COMPR_XZ, + UBIFS_COMPR_TYPES_CNT, + }; + diff --git a/target/linux/generic/patches-3.8/550-ubifs-symlink-xattr-support.patch b/target/linux/generic/patches-3.8/550-ubifs-symlink-xattr-support.patch new file mode 100644 index 0000000000..609aca9994 --- /dev/null +++ b/target/linux/generic/patches-3.8/550-ubifs-symlink-xattr-support.patch @@ -0,0 +1,67 @@ +--- a/fs/ubifs/file.c ++++ b/fs/ubifs/file.c +@@ -1574,6 +1574,12 @@ const struct inode_operations ubifs_syml + .follow_link = ubifs_follow_link, + .setattr = ubifs_setattr, + .getattr = ubifs_getattr, ++#ifdef CONFIG_UBIFS_FS_XATTR ++ .setxattr = ubifs_setxattr, ++ .getxattr = ubifs_getxattr, ++ .listxattr = ubifs_listxattr, ++ .removexattr = ubifs_removexattr, ++#endif + }; + + const struct file_operations ubifs_file_operations = { +--- a/fs/ubifs/journal.c ++++ b/fs/ubifs/journal.c +@@ -553,7 +553,8 @@ int ubifs_jnl_update(struct ubifs_info * + + dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu", + inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino); +- ubifs_assert(dir_ui->data_len == 0); ++ if (!xent) ++ ubifs_assert(dir_ui->data_len == 0); + ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex)); + + dlen = UBIFS_DENT_NODE_SZ + nm->len + 1; +@@ -573,6 +574,13 @@ int ubifs_jnl_update(struct ubifs_info * + aligned_dlen = ALIGN(dlen, 8); + aligned_ilen = ALIGN(ilen, 8); + len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ; ++ if (xent) { ++ /* ++ * Make sure to account for dir_ui->data_len in ++ * length calculation in case there is extended attribute. 
++ */
++ len += dir_ui->data_len;
++ }
+ dent = kmalloc(len, GFP_NOFS);
+ if (!dent)
+ return -ENOMEM;
+@@ -649,7 +657,8 @@ int ubifs_jnl_update(struct ubifs_info *
+
+ ino_key_init(c, &ino_key, dir->i_ino);
+ ino_offs += aligned_ilen;
+- err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
++ err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
++ UBIFS_INO_NODE_SZ + dir_ui->data_len);
+ if (err)
+ goto out_ro;
+
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -209,12 +209,12 @@ static int change_xattr(struct ubifs_inf
+ goto out_free;
+ }
+ inode->i_size = ui->ui_size = size;
+- ui->data_len = size;
+
+ mutex_lock(&host_ui->ui_mutex);
+ host->i_ctime = ubifs_current_time(host);
+ host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_size += CALC_XATTR_BYTES(size);
++ ui->data_len = size;
+
+ /*
+ * It is important to write the host inode after the xattr inode
diff --git a/target/linux/generic/patches-3.8/600-netfilter_layer7_2.22.patch b/target/linux/generic/patches-3.8/600-netfilter_layer7_2.22.patch
new file mode 100644
index 0000000000..02b0403833
--- /dev/null
+++ b/target/linux/generic/patches-3.8/600-netfilter_layer7_2.22.patch
@@ -0,0 +1,2141 @@
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -1176,6 +1176,27 @@ config NETFILTER_XT_MATCH_STATE
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_MATCH_LAYER7
++ tristate '"layer7" match support'
++ depends on NETFILTER_XTABLES
++ depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK)
++ depends on NETFILTER_ADVANCED
++ help
++ Say Y if you want to be able to classify connections (and their
++ packets) based on regular expression matching of their application
++ layer data. This is one way to classify applications such as
++ peer-to-peer filesharing systems that do not always use the same
++ port.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config NETFILTER_XT_MATCH_LAYER7_DEBUG
++ bool 'Layer 7 debugging output'
++ depends on NETFILTER_XT_MATCH_LAYER7
++ help
++ Say Y to get lots of debugging output.
++
++
+ config NETFILTER_XT_MATCH_STATISTIC
+ tristate '"statistic" match support'
+ depends on NETFILTER_ADVANCED
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -131,6 +131,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT)
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_SOCKET) += xt_socket.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_LAYER7) += xt_layer7.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
+--- /dev/null
++++ b/net/netfilter/xt_layer7.c
+@@ -0,0 +1,666 @@
++/*
++ Kernel module to match application layer (OSI layer 7) data in connections.
++
++ http://l7-filter.sf.net
++
++ (C) 2003-2009 Matthew Strait and Ethan Sommer.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License
++ as published by the Free Software Foundation; either version
++ 2 of the License, or (at your option) any later version.
++ http://www.gnu.org/licenses/gpl.txt
++
++ Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>,
++ xt_helper.c (C) 2002 Harald Welte and cls_layer7.c (C) 2003 Matthew Strait,
++ Ethan Sommer, Justin Levandoski.
++*/ ++ ++#include <linux/spinlock.h> ++#include <linux/version.h> ++#include <net/ip.h> ++#include <net/tcp.h> ++#include <linux/module.h> ++#include <linux/skbuff.h> ++#include <linux/netfilter.h> ++#include <net/netfilter/nf_conntrack.h> ++#include <net/netfilter/nf_conntrack_core.h> ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) ++#include <net/netfilter/nf_conntrack_extend.h> ++#include <net/netfilter/nf_conntrack_acct.h> ++#endif ++#include <linux/netfilter/x_tables.h> ++#include <linux/netfilter/xt_layer7.h> ++#include <linux/ctype.h> ++#include <linux/proc_fs.h> ++ ++#include "regexp/regexp.c" ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>"); ++MODULE_DESCRIPTION("iptables application layer match module"); ++MODULE_ALIAS("ipt_layer7"); ++MODULE_VERSION("2.21"); ++ ++static int maxdatalen = 2048; // this is the default ++module_param(maxdatalen, int, 0444); ++MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter"); ++#ifdef CONFIG_NETFILTER_XT_MATCH_LAYER7_DEBUG ++ #define DPRINTK(format,args...) printk(format,##args) ++#else ++ #define DPRINTK(format,args...) ++#endif ++ ++/* Number of packets whose data we look at. ++This can be modified through /proc/net/layer7_numpackets */ ++static int num_packets = 10; ++ ++static struct pattern_cache { ++ char * regex_string; ++ regexp * pattern; ++ struct pattern_cache * next; ++} * first_pattern_cache = NULL; ++ ++DEFINE_SPINLOCK(l7_lock); ++ ++static int total_acct_packets(struct nf_conn *ct) ++{ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 26) ++ BUG_ON(ct == NULL); ++ return (ct->counters[IP_CT_DIR_ORIGINAL].packets + ct->counters[IP_CT_DIR_REPLY].packets); ++#else ++ struct nf_conn_counter *acct; ++ ++ BUG_ON(ct == NULL); ++ acct = nf_conn_acct_find(ct); ++ if (!acct) ++ return 0; ++ return (atomic64_read(&acct[IP_CT_DIR_ORIGINAL].packets) + atomic64_read(&acct[IP_CT_DIR_REPLY].packets)); ++#endif ++} ++ ++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG ++/* Converts an unfriendly string into a friendly one by ++replacing unprintables with periods and all whitespace with " ". */ ++static char * friendly_print(unsigned char * s) ++{ ++ char * f = kmalloc(strlen(s) + 1, GFP_ATOMIC); ++ int i; ++ ++ if(!f) { ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in " ++ "friendly_print, bailing.\n"); ++ return NULL; ++ } ++ ++ for(i = 0; i < strlen(s); i++){ ++ if(isprint(s[i]) && s[i] < 128) f[i] = s[i]; ++ else if(isspace(s[i])) f[i] = ' '; ++ else f[i] = '.'; ++ } ++ f[i] = '\0'; ++ return f; ++} ++ ++static char dec2hex(int i) ++{ ++ switch (i) { ++ case 0 ... 9: ++ return (i + '0'); ++ break; ++ case 10 ... 15: ++ return (i - 10 + 'a'); ++ break; ++ default: ++ if (net_ratelimit()) ++ printk("layer7: Problem in dec2hex\n"); ++ return '\0'; ++ } ++} ++ ++static char * hex_print(unsigned char * s) ++{ ++ char * g = kmalloc(strlen(s)*3 + 1, GFP_ATOMIC); ++ int i; ++ ++ if(!g) { ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in hex_print, " ++ "bailing.\n"); ++ return NULL; ++ } ++ ++ for(i = 0; i < strlen(s); i++) { ++ g[i*3 ] = dec2hex(s[i]/16); ++ g[i*3 + 1] = dec2hex(s[i]%16); ++ g[i*3 + 2] = ' '; ++ } ++ g[i*3] = '\0'; ++ ++ return g; ++} ++#endif // DEBUG ++ ++/* Use instead of regcomp. As we expect to be seeing the same regexps over and ++over again, it make sense to cache the results. 
*/ ++static regexp * compile_and_cache(const char * regex_string, ++ const char * protocol) ++{ ++ struct pattern_cache * node = first_pattern_cache; ++ struct pattern_cache * last_pattern_cache = first_pattern_cache; ++ struct pattern_cache * tmp; ++ unsigned int len; ++ ++ while (node != NULL) { ++ if (!strcmp(node->regex_string, regex_string)) ++ return node->pattern; ++ ++ last_pattern_cache = node;/* points at the last non-NULL node */ ++ node = node->next; ++ } ++ ++ /* If we reach the end of the list, then we have not yet cached ++ the pattern for this regex. Let's do that now. ++ Be paranoid about running out of memory to avoid list corruption. */ ++ tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC); ++ ++ if(!tmp) { ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in " ++ "compile_and_cache, bailing.\n"); ++ return NULL; ++ } ++ ++ tmp->regex_string = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC); ++ tmp->pattern = kmalloc(sizeof(struct regexp), GFP_ATOMIC); ++ tmp->next = NULL; ++ ++ if(!tmp->regex_string || !tmp->pattern) { ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in " ++ "compile_and_cache, bailing.\n"); ++ kfree(tmp->regex_string); ++ kfree(tmp->pattern); ++ kfree(tmp); ++ return NULL; ++ } ++ ++ /* Ok. The new node is all ready now. */ ++ node = tmp; ++ ++ if(first_pattern_cache == NULL) /* list is empty */ ++ first_pattern_cache = node; /* make node the beginning */ ++ else ++ last_pattern_cache->next = node; /* attach node to the end */ ++ ++ /* copy the string and compile the regex */ ++ len = strlen(regex_string); ++ DPRINTK("About to compile this: \"%s\"\n", regex_string); ++ node->pattern = regcomp((char *)regex_string, &len); ++ if ( !node->pattern ) { ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: Error compiling regexp " ++ "\"%s\" (%s)\n", ++ regex_string, protocol); ++ /* pattern is now cached as NULL, so we won't try again. */ ++ } ++ ++ strcpy(node->regex_string, regex_string); ++ return node->pattern; ++} ++ ++static int can_handle(const struct sk_buff *skb) ++{ ++ if(!ip_hdr(skb)) /* not IP */ ++ return 0; ++ if(ip_hdr(skb)->protocol != IPPROTO_TCP && ++ ip_hdr(skb)->protocol != IPPROTO_UDP && ++ ip_hdr(skb)->protocol != IPPROTO_ICMP) ++ return 0; ++ return 1; ++} ++ ++/* Returns offset the into the skb->data that the application data starts */ ++static int app_data_offset(const struct sk_buff *skb) ++{ ++ /* In case we are ported somewhere (ebtables?) where ip_hdr(skb) ++ isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */ ++ int ip_hl = 4*ip_hdr(skb)->ihl; ++ ++ if( ip_hdr(skb)->protocol == IPPROTO_TCP ) { ++ /* 12 == offset into TCP header for the header length field. ++ Can't get this with skb->h.th->doff because the tcphdr ++ struct doesn't get set when routing (this is confirmed to be ++ true in Netfilter as well as QoS.) 
*/ ++ int tcp_hl = 4*(skb->data[ip_hl + 12] >> 4); ++ ++ return ip_hl + tcp_hl; ++ } else if( ip_hdr(skb)->protocol == IPPROTO_UDP ) { ++ return ip_hl + 8; /* UDP header is always 8 bytes */ ++ } else if( ip_hdr(skb)->protocol == IPPROTO_ICMP ) { ++ return ip_hl + 8; /* ICMP header is 8 bytes */ ++ } else { ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: tried to handle unknown " ++ "protocol!\n"); ++ return ip_hl + 8; /* something reasonable */ ++ } ++} ++ ++/* handles whether there's a match when we aren't appending data anymore */ ++static int match_no_append(struct nf_conn * conntrack, ++ struct nf_conn * master_conntrack, ++ enum ip_conntrack_info ctinfo, ++ enum ip_conntrack_info master_ctinfo, ++ const struct xt_layer7_info * info) ++{ ++ /* If we're in here, throw the app data away */ ++ if(master_conntrack->layer7.app_data != NULL) { ++ ++ #ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG ++ if(!master_conntrack->layer7.app_proto) { ++ char * f = ++ friendly_print(master_conntrack->layer7.app_data); ++ char * g = ++ hex_print(master_conntrack->layer7.app_data); ++ DPRINTK("\nl7-filter gave up after %d bytes " ++ "(%d packets):\n%s\n", ++ strlen(f), total_acct_packets(master_conntrack), f); ++ kfree(f); ++ DPRINTK("In hex: %s\n", g); ++ kfree(g); ++ } ++ #endif ++ ++ kfree(master_conntrack->layer7.app_data); ++ master_conntrack->layer7.app_data = NULL; /* don't free again */ ++ } ++ ++ if(master_conntrack->layer7.app_proto){ ++ /* Here child connections set their .app_proto (for /proc) */ ++ if(!conntrack->layer7.app_proto) { ++ conntrack->layer7.app_proto = ++ kmalloc(strlen(master_conntrack->layer7.app_proto)+1, ++ GFP_ATOMIC); ++ if(!conntrack->layer7.app_proto){ ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory " ++ "in match_no_append, " ++ "bailing.\n"); ++ return 1; ++ } ++ strcpy(conntrack->layer7.app_proto, ++ master_conntrack->layer7.app_proto); ++ } ++ ++ return (!strcmp(master_conntrack->layer7.app_proto, ++ info->protocol)); ++ } ++ else { ++ /* If not classified, set to "unknown" to distinguish from ++ connections that are still being tested. */ ++ master_conntrack->layer7.app_proto = ++ kmalloc(strlen("unknown")+1, GFP_ATOMIC); ++ if(!master_conntrack->layer7.app_proto){ ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in " ++ "match_no_append, bailing.\n"); ++ return 1; ++ } ++ strcpy(master_conntrack->layer7.app_proto, "unknown"); ++ return 0; ++ } ++} ++ ++/* add the new app data to the conntrack. Return number of bytes added. */ ++static int add_data(struct nf_conn * master_conntrack, ++ char * app_data, int appdatalen) ++{ ++ int length = 0, i; ++ int oldlength = master_conntrack->layer7.app_data_len; ++ ++ /* This is a fix for a race condition by Deti Fliegl. However, I'm not ++ clear on whether the race condition exists or whether this really ++ fixes it. I might just be being dense... Anyway, if it's not really ++ a fix, all it does is waste a very small amount of time. */ ++ if(!master_conntrack->layer7.app_data) return 0; ++ ++ /* Strip nulls. Make everything lower case (our regex lib doesn't ++ do case insensitivity). Add it to the end of the current data. */ ++ for(i = 0; i < maxdatalen-oldlength-1 && ++ i < appdatalen; i++) { ++ if(app_data[i] != '\0') { ++ /* the kernel version of tolower mungs 'upper ascii' */ ++ master_conntrack->layer7.app_data[length+oldlength] = ++ isascii(app_data[i])? 
++ tolower(app_data[i]) : app_data[i]; ++ length++; ++ } ++ } ++ ++ master_conntrack->layer7.app_data[length+oldlength] = '\0'; ++ master_conntrack->layer7.app_data_len = length + oldlength; ++ ++ return length; ++} ++ ++/* taken from drivers/video/modedb.c */ ++static int my_atoi(const char *s) ++{ ++ int val = 0; ++ ++ for (;; s++) { ++ switch (*s) { ++ case '0'...'9': ++ val = 10*val+(*s-'0'); ++ break; ++ default: ++ return val; ++ } ++ } ++} ++ ++/* write out num_packets to userland. */ ++static int layer7_read_proc(char* page, char ** start, off_t off, int count, ++ int* eof, void * data) ++{ ++ if(num_packets > 99 && net_ratelimit()) ++ printk(KERN_ERR "layer7: NOT REACHED. num_packets too big\n"); ++ ++ page[0] = num_packets/10 + '0'; ++ page[1] = num_packets%10 + '0'; ++ page[2] = '\n'; ++ page[3] = '\0'; ++ ++ *eof=1; ++ ++ return 3; ++} ++ ++/* Read in num_packets from userland */ ++static int layer7_write_proc(struct file* file, const char* buffer, ++ unsigned long count, void *data) ++{ ++ char * foo = kmalloc(count, GFP_ATOMIC); ++ ++ if(!foo){ ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory, bailing. " ++ "num_packets unchanged.\n"); ++ return count; ++ } ++ ++ if(copy_from_user(foo, buffer, count)) { ++ return -EFAULT; ++ } ++ ++ ++ num_packets = my_atoi(foo); ++ kfree (foo); ++ ++ /* This has an arbitrary limit to make the math easier. I'm lazy. ++ But anyway, 99 is a LOT! If you want more, you're doing it wrong! */ ++ if(num_packets > 99) { ++ printk(KERN_WARNING "layer7: num_packets can't be > 99.\n"); ++ num_packets = 99; ++ } else if(num_packets < 1) { ++ printk(KERN_WARNING "layer7: num_packets can't be < 1.\n"); ++ num_packets = 1; ++ } ++ ++ return count; ++} ++ ++static bool ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++match(const struct sk_buff *skbin, const struct xt_match_param *par) ++#else ++match(const struct sk_buff *skbin, ++ const struct net_device *in, ++ const struct net_device *out, ++ const struct xt_match *match, ++ const void *matchinfo, ++ int offset, ++ unsigned int protoff, ++ bool *hotdrop) ++#endif ++{ ++ /* sidestep const without getting a compiler warning... */ ++ struct sk_buff * skb = (struct sk_buff *)skbin; ++ ++ const struct xt_layer7_info * info = ++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++ par->matchinfo; ++ #else ++ matchinfo; ++ #endif ++ ++ enum ip_conntrack_info master_ctinfo, ctinfo; ++ struct nf_conn *master_conntrack, *conntrack; ++ unsigned char * app_data; ++ unsigned int pattern_result, appdatalen; ++ regexp * comppattern; ++ ++ /* Be paranoid/incompetent - lock the entire match function. */ ++ spin_lock_bh(&l7_lock); ++ ++ if(!can_handle(skb)){ ++ DPRINTK("layer7: This is some protocol I can't handle.\n"); ++ spin_unlock_bh(&l7_lock); ++ return info->invert; ++ } ++ ++ /* Treat parent & all its children together as one connection, except ++ for the purpose of setting conntrack->layer7.app_proto in the actual ++ connection. This makes /proc/net/ip_conntrack more satisfying. */ ++ if(!(conntrack = nf_ct_get(skb, &ctinfo)) || ++ !(master_conntrack=nf_ct_get(skb,&master_ctinfo))){ ++ DPRINTK("layer7: couldn't get conntrack.\n"); ++ spin_unlock_bh(&l7_lock); ++ return info->invert; ++ } ++ ++ /* Try to get a master conntrack (and its master etc) for FTP, etc. 
*/ ++ while (master_ct(master_conntrack) != NULL) ++ master_conntrack = master_ct(master_conntrack); ++ ++ /* if we've classified it or seen too many packets */ ++ if(total_acct_packets(master_conntrack) > num_packets || ++ master_conntrack->layer7.app_proto) { ++ ++ pattern_result = match_no_append(conntrack, master_conntrack, ++ ctinfo, master_ctinfo, info); ++ ++ /* skb->cb[0] == seen. Don't do things twice if there are ++ multiple l7 rules. I'm not sure that using cb for this purpose ++ is correct, even though it says "put your private variables ++ there". But it doesn't look like it is being used for anything ++ else in the skbs that make it here. */ ++ skb->cb[0] = 1; /* marking it seen here's probably irrelevant */ ++ ++ spin_unlock_bh(&l7_lock); ++ return (pattern_result ^ info->invert); ++ } ++ ++ if(skb_is_nonlinear(skb)){ ++ if(skb_linearize(skb) != 0){ ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: failed to linearize " ++ "packet, bailing.\n"); ++ spin_unlock_bh(&l7_lock); ++ return info->invert; ++ } ++ } ++ ++ /* now that the skb is linearized, it's safe to set these. */ ++ app_data = skb->data + app_data_offset(skb); ++ appdatalen = skb_tail_pointer(skb) - app_data; ++ ++ /* the return value gets checked later, when we're ready to use it */ ++ comppattern = compile_and_cache(info->pattern, info->protocol); ++ ++ /* On the first packet of a connection, allocate space for app data */ ++ if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] && ++ !master_conntrack->layer7.app_data){ ++ master_conntrack->layer7.app_data = ++ kmalloc(maxdatalen, GFP_ATOMIC); ++ if(!master_conntrack->layer7.app_data){ ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in " ++ "match, bailing.\n"); ++ spin_unlock_bh(&l7_lock); ++ return info->invert; ++ } ++ ++ master_conntrack->layer7.app_data[0] = '\0'; ++ } ++ ++ /* Can be here, but unallocated, if numpackets is increased near ++ the beginning of a connection */ ++ if(master_conntrack->layer7.app_data == NULL){ ++ spin_unlock_bh(&l7_lock); ++ return info->invert; /* unmatched */ ++ } ++ ++ if(!skb->cb[0]){ ++ int newbytes; ++ newbytes = add_data(master_conntrack, app_data, appdatalen); ++ ++ if(newbytes == 0) { /* didn't add any data */ ++ skb->cb[0] = 1; ++ /* Didn't match before, not going to match now */ ++ spin_unlock_bh(&l7_lock); ++ return info->invert; ++ } ++ } ++ ++ /* If looking for "unknown", then never match. "Unknown" means that ++ we've given up; we're still trying with these packets. */ ++ if(!strcmp(info->protocol, "unknown")) { ++ pattern_result = 0; ++ /* If looking for "unset", then always match. "Unset" means that we ++ haven't yet classified the connection. 
*/ ++ } else if(!strcmp(info->protocol, "unset")) { ++ pattern_result = 2; ++ DPRINTK("layer7: matched unset: not yet classified " ++ "(%d/%d packets)\n", ++ total_acct_packets(master_conntrack), num_packets); ++ /* If the regexp failed to compile, don't bother running it */ ++ } else if(comppattern && ++ regexec(comppattern, master_conntrack->layer7.app_data)){ ++ DPRINTK("layer7: matched %s\n", info->protocol); ++ pattern_result = 1; ++ } else pattern_result = 0; ++ ++ if(pattern_result == 1) { ++ master_conntrack->layer7.app_proto = ++ kmalloc(strlen(info->protocol)+1, GFP_ATOMIC); ++ if(!master_conntrack->layer7.app_proto){ ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in " ++ "match, bailing.\n"); ++ spin_unlock_bh(&l7_lock); ++ return (pattern_result ^ info->invert); ++ } ++ strcpy(master_conntrack->layer7.app_proto, info->protocol); ++ } else if(pattern_result > 1) { /* cleanup from "unset" */ ++ pattern_result = 1; ++ } ++ ++ /* mark the packet seen */ ++ skb->cb[0] = 1; ++ ++ spin_unlock_bh(&l7_lock); ++ return (pattern_result ^ info->invert); ++} ++ ++// load nf_conntrack_ipv4 ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++static bool check(const struct xt_mtchk_param *par) ++{ ++ if (nf_ct_l3proto_try_module_get(par->match->family) < 0) { ++ printk(KERN_WARNING "can't load conntrack support for " ++ "proto=%d\n", par->match->family); ++#else ++static bool check(const char *tablename, const void *inf, ++ const struct xt_match *match, void *matchinfo, ++ unsigned int hook_mask) ++{ ++ if (nf_ct_l3proto_try_module_get(match->family) < 0) { ++ printk(KERN_WARNING "can't load conntrack support for " ++ "proto=%d\n", match->family); ++#endif ++ return 0; ++ } ++ return 1; ++} ++ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++ static void destroy(const struct xt_mtdtor_param *par) ++ { ++ nf_ct_l3proto_module_put(par->match->family); ++ } ++#else ++ static void destroy(const struct xt_match *match, void *matchinfo) ++ { ++ nf_ct_l3proto_module_put(match->family); ++ } ++#endif ++ ++static struct xt_match xt_layer7_match[] __read_mostly = { ++{ ++ .name = "layer7", ++ .family = AF_INET, ++ .checkentry = check, ++ .match = match, ++ .destroy = destroy, ++ .matchsize = sizeof(struct xt_layer7_info), ++ .me = THIS_MODULE ++} ++}; ++ ++static void layer7_cleanup_proc(void) ++{ ++ remove_proc_entry("layer7_numpackets", init_net.proc_net); ++} ++ ++/* register the proc file */ ++static void layer7_init_proc(void) ++{ ++ struct proc_dir_entry* entry; ++ entry = create_proc_entry("layer7_numpackets", 0644, init_net.proc_net); ++ entry->read_proc = layer7_read_proc; ++ entry->write_proc = layer7_write_proc; ++} ++ ++static int __init xt_layer7_init(void) ++{ ++ need_conntrack(); ++ ++ layer7_init_proc(); ++ if(maxdatalen < 1) { ++ printk(KERN_WARNING "layer7: maxdatalen can't be < 1, " ++ "using 1\n"); ++ maxdatalen = 1; ++ } ++ /* This is not a hard limit. It's just here to prevent people from ++ bringing their slow machines to a grinding halt. 
*/ ++ else if(maxdatalen > 65536) { ++ printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, " ++ "using 65536\n"); ++ maxdatalen = 65536; ++ } ++ return xt_register_matches(xt_layer7_match, ++ ARRAY_SIZE(xt_layer7_match)); ++} ++ ++static void __exit xt_layer7_fini(void) ++{ ++ layer7_cleanup_proc(); ++ xt_unregister_matches(xt_layer7_match, ARRAY_SIZE(xt_layer7_match)); ++} ++ ++module_init(xt_layer7_init); ++module_exit(xt_layer7_fini); +--- /dev/null ++++ b/net/netfilter/regexp/regexp.c +@@ -0,0 +1,1197 @@ ++/* ++ * regcomp and regexec -- regsub and regerror are elsewhere ++ * @(#)regexp.c 1.3 of 18 April 87 ++ * ++ * Copyright (c) 1986 by University of Toronto. ++ * Written by Henry Spencer. Not derived from licensed software. ++ * ++ * Permission is granted to anyone to use this software for any ++ * purpose on any computer system, and to redistribute it freely, ++ * subject to the following restrictions: ++ * ++ * 1. The author is not responsible for the consequences of use of ++ * this software, no matter how awful, even if they arise ++ * from defects in it. ++ * ++ * 2. The origin of this software must not be misrepresented, either ++ * by explicit claim or by omission. ++ * ++ * 3. Altered versions must be plainly marked as such, and must not ++ * be misrepresented as being the original software. ++ * ++ * Beware that some of this code is subtly aware of the way operator ++ * precedence is structured in regular expressions. Serious changes in ++ * regular-expression syntax might require a total rethink. ++ * ++ * This code was modified by Ethan Sommer to work within the kernel ++ * (it now uses kmalloc etc..) ++ * ++ * Modified slightly by Matthew Strait to use more modern C. ++ */ ++ ++#include "regexp.h" ++#include "regmagic.h" ++ ++/* added by ethan and matt. Lets it work in both kernel and user space. ++(So iptables can use it, for instance.) Yea, it goes both ways... */ ++#if __KERNEL__ ++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC) ++#else ++ #define printk(format,args...) printf(format,##args) ++#endif ++ ++void regerror(char * s) ++{ ++ printk("<3>Regexp: %s\n", s); ++ /* NOTREACHED */ ++} ++ ++/* ++ * The "internal use only" fields in regexp.h are present to pass info from ++ * compile to execute that permits the execute phase to run lots faster on ++ * simple cases. They are: ++ * ++ * regstart char that must begin a match; '\0' if none obvious ++ * reganch is the match anchored (at beginning-of-line only)? ++ * regmust string (pointer into program) that match must include, or NULL ++ * regmlen length of regmust string ++ * ++ * Regstart and reganch permit very fast decisions on suitable starting points ++ * for a match, cutting down the work a lot. Regmust permits fast rejection ++ * of lines that cannot possibly match. The regmust tests are costly enough ++ * that regcomp() supplies a regmust only if the r.e. contains something ++ * potentially expensive (at present, the only such thing detected is * or + ++ * at the start of the r.e., which can involve a lot of backup). Regmlen is ++ * supplied because the test in regexec() needs it and regcomp() is computing ++ * it anyway. ++ */ ++ ++/* ++ * Structure for regexp "program". This is essentially a linear encoding ++ * of a nondeterministic finite-state machine (aka syntax charts or ++ * "railroad normal form" in parsing technology). Each node is an opcode ++ * plus a "next" pointer, possibly plus an operand. 
"Next" pointers of ++ * all nodes except BRANCH implement concatenation; a "next" pointer with ++ * a BRANCH on both ends of it is connecting two alternatives. (Here we ++ * have one of the subtle syntax dependencies: an individual BRANCH (as ++ * opposed to a collection of them) is never concatenated with anything ++ * because of operator precedence.) The operand of some types of node is ++ * a literal string; for others, it is a node leading into a sub-FSM. In ++ * particular, the operand of a BRANCH node is the first node of the branch. ++ * (NB this is *not* a tree structure: the tail of the branch connects ++ * to the thing following the set of BRANCHes.) The opcodes are: ++ */ ++ ++/* definition number opnd? meaning */ ++#define END 0 /* no End of program. */ ++#define BOL 1 /* no Match "" at beginning of line. */ ++#define EOL 2 /* no Match "" at end of line. */ ++#define ANY 3 /* no Match any one character. */ ++#define ANYOF 4 /* str Match any character in this string. */ ++#define ANYBUT 5 /* str Match any character not in this string. */ ++#define BRANCH 6 /* node Match this alternative, or the next... */ ++#define BACK 7 /* no Match "", "next" ptr points backward. */ ++#define EXACTLY 8 /* str Match this string. */ ++#define NOTHING 9 /* no Match empty string. */ ++#define STAR 10 /* node Match this (simple) thing 0 or more times. */ ++#define PLUS 11 /* node Match this (simple) thing 1 or more times. */ ++#define OPEN 20 /* no Mark this point in input as start of #n. */ ++ /* OPEN+1 is number 1, etc. */ ++#define CLOSE 30 /* no Analogous to OPEN. */ ++ ++/* ++ * Opcode notes: ++ * ++ * BRANCH The set of branches constituting a single choice are hooked ++ * together with their "next" pointers, since precedence prevents ++ * anything being concatenated to any individual branch. The ++ * "next" pointer of the last BRANCH in a choice points to the ++ * thing following the whole choice. This is also where the ++ * final "next" pointer of each individual branch points; each ++ * branch starts with the operand node of a BRANCH node. ++ * ++ * BACK Normal "next" pointers all implicitly point forward; BACK ++ * exists to make loop structures possible. ++ * ++ * STAR,PLUS '?', and complex '*' and '+', are implemented as circular ++ * BRANCH structures using BACK. Simple cases (one character ++ * per match) are implemented with STAR and PLUS for speed ++ * and to minimize recursive plunges. ++ * ++ * OPEN,CLOSE ...are numbered at compile time. ++ */ ++ ++/* ++ * A node is one char of opcode followed by two chars of "next" pointer. ++ * "Next" pointers are stored as two 8-bit pieces, high order first. The ++ * value is a positive offset from the opcode of the node containing it. ++ * An operand, if any, simply follows the node. (Note that much of the ++ * code generation knows about this implicit relationship.) ++ * ++ * Using two bytes for the "next" pointer is vast overkill for most things, ++ * but allows patterns to get big without disasters. ++ */ ++#define OP(p) (*(p)) ++#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377)) ++#define OPERAND(p) ((p) + 3) ++ ++/* ++ * See regmagic.h for one further detail of program structure. ++ */ ++ ++ ++/* ++ * Utility definitions. 
++ */ ++#ifndef CHARBITS ++#define UCHARAT(p) ((int)*(unsigned char *)(p)) ++#else ++#define UCHARAT(p) ((int)*(p)&CHARBITS) ++#endif ++ ++#define FAIL(m) { regerror(m); return(NULL); } ++#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?') ++#define META "^$.[()|?+*\\" ++ ++/* ++ * Flags to be passed up and down. ++ */ ++#define HASWIDTH 01 /* Known never to match null string. */ ++#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */ ++#define SPSTART 04 /* Starts with * or +. */ ++#define WORST 0 /* Worst case. */ ++ ++/* ++ * Global work variables for regcomp(). ++ */ ++struct match_globals { ++char *reginput; /* String-input pointer. */ ++char *regbol; /* Beginning of input, for ^ check. */ ++char **regstartp; /* Pointer to startp array. */ ++char **regendp; /* Ditto for endp. */ ++char *regparse; /* Input-scan pointer. */ ++int regnpar; /* () count. */ ++char regdummy; ++char *regcode; /* Code-emit pointer; ®dummy = don't. */ ++long regsize; /* Code size. */ ++}; ++ ++/* ++ * Forward declarations for regcomp()'s friends. ++ */ ++#ifndef STATIC ++#define STATIC static ++#endif ++STATIC char *reg(struct match_globals *g, int paren,int *flagp); ++STATIC char *regbranch(struct match_globals *g, int *flagp); ++STATIC char *regpiece(struct match_globals *g, int *flagp); ++STATIC char *regatom(struct match_globals *g, int *flagp); ++STATIC char *regnode(struct match_globals *g, char op); ++STATIC char *regnext(struct match_globals *g, char *p); ++STATIC void regc(struct match_globals *g, char b); ++STATIC void reginsert(struct match_globals *g, char op, char *opnd); ++STATIC void regtail(struct match_globals *g, char *p, char *val); ++STATIC void regoptail(struct match_globals *g, char *p, char *val); ++ ++ ++__kernel_size_t my_strcspn(const char *s1,const char *s2) ++{ ++ char *scan1; ++ char *scan2; ++ int count; ++ ++ count = 0; ++ for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) { ++ for (scan2 = (char *)s2; *scan2 != '\0';) /* ++ moved down. */ ++ if (*scan1 == *scan2++) ++ return(count); ++ count++; ++ } ++ return(count); ++} ++ ++/* ++ - regcomp - compile a regular expression into internal code ++ * ++ * We can't allocate space until we know how big the compiled form will be, ++ * but we can't compile it (and thus know how big it is) until we've got a ++ * place to put the code. So we cheat: we compile it twice, once with code ++ * generation turned off and size counting turned on, and once "for real". ++ * This also means that we don't allocate space until we are sure that the ++ * thing really will compile successfully, and we never have to move the ++ * code and thus invalidate pointers into it. (Note that it has to be in ++ * one piece because free() must be able to free it all.) ++ * ++ * Beware that the optimization-preparation code in here knows about some ++ * of the structure of the compiled regexp. ++ */ ++regexp * ++regcomp(char *exp,int *patternsize) ++{ ++ register regexp *r; ++ register char *scan; ++ register char *longest; ++ register int len; ++ int flags; ++ struct match_globals g; ++ ++ /* commented out by ethan ++ extern char *malloc(); ++ */ ++ ++ if (exp == NULL) ++ FAIL("NULL argument"); ++ ++ /* First pass: determine size, legality. */ ++ g.regparse = exp; ++ g.regnpar = 1; ++ g.regsize = 0L; ++ g.regcode = &g.regdummy; ++ regc(&g, MAGIC); ++ if (reg(&g, 0, &flags) == NULL) ++ return(NULL); ++ ++ /* Small enough for pointer-storage convention? */ ++ if (g.regsize >= 32767L) /* Probably could be 65535L. 
*/ ++ FAIL("regexp too big"); ++ ++ /* Allocate space. */ ++ *patternsize=sizeof(regexp) + (unsigned)g.regsize; ++ r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize); ++ if (r == NULL) ++ FAIL("out of space"); ++ ++ /* Second pass: emit code. */ ++ g.regparse = exp; ++ g.regnpar = 1; ++ g.regcode = r->program; ++ regc(&g, MAGIC); ++ if (reg(&g, 0, &flags) == NULL) ++ return(NULL); ++ ++ /* Dig out information for optimizations. */ ++ r->regstart = '\0'; /* Worst-case defaults. */ ++ r->reganch = 0; ++ r->regmust = NULL; ++ r->regmlen = 0; ++ scan = r->program+1; /* First BRANCH. */ ++ if (OP(regnext(&g, scan)) == END) { /* Only one top-level choice. */ ++ scan = OPERAND(scan); ++ ++ /* Starting-point info. */ ++ if (OP(scan) == EXACTLY) ++ r->regstart = *OPERAND(scan); ++ else if (OP(scan) == BOL) ++ r->reganch++; ++ ++ /* ++ * If there's something expensive in the r.e., find the ++ * longest literal string that must appear and make it the ++ * regmust. Resolve ties in favor of later strings, since ++ * the regstart check works with the beginning of the r.e. ++ * and avoiding duplication strengthens checking. Not a ++ * strong reason, but sufficient in the absence of others. ++ */ ++ if (flags&SPSTART) { ++ longest = NULL; ++ len = 0; ++ for (; scan != NULL; scan = regnext(&g, scan)) ++ if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) { ++ longest = OPERAND(scan); ++ len = strlen(OPERAND(scan)); ++ } ++ r->regmust = longest; ++ r->regmlen = len; ++ } ++ } ++ ++ return(r); ++} ++ ++/* ++ - reg - regular expression, i.e. main body or parenthesized thing ++ * ++ * Caller must absorb opening parenthesis. ++ * ++ * Combining parenthesis handling with the base level of regular expression ++ * is a trifle forced, but the need to tie the tails of the branches to what ++ * follows makes it hard to avoid. ++ */ ++static char * ++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ ) ++{ ++ register char *ret; ++ register char *br; ++ register char *ender; ++ register int parno = 0; /* 0 makes gcc happy */ ++ int flags; ++ ++ *flagp = HASWIDTH; /* Tentatively. */ ++ ++ /* Make an OPEN node, if parenthesized. */ ++ if (paren) { ++ if (g->regnpar >= NSUBEXP) ++ FAIL("too many ()"); ++ parno = g->regnpar; ++ g->regnpar++; ++ ret = regnode(g, OPEN+parno); ++ } else ++ ret = NULL; ++ ++ /* Pick up the branches, linking them together. */ ++ br = regbranch(g, &flags); ++ if (br == NULL) ++ return(NULL); ++ if (ret != NULL) ++ regtail(g, ret, br); /* OPEN -> first. */ ++ else ++ ret = br; ++ if (!(flags&HASWIDTH)) ++ *flagp &= ~HASWIDTH; ++ *flagp |= flags&SPSTART; ++ while (*g->regparse == '|') { ++ g->regparse++; ++ br = regbranch(g, &flags); ++ if (br == NULL) ++ return(NULL); ++ regtail(g, ret, br); /* BRANCH -> BRANCH. */ ++ if (!(flags&HASWIDTH)) ++ *flagp &= ~HASWIDTH; ++ *flagp |= flags&SPSTART; ++ } ++ ++ /* Make a closing node, and hook it on the end. */ ++ ender = regnode(g, (paren) ? CLOSE+parno : END); ++ regtail(g, ret, ender); ++ ++ /* Hook the tails of the branches to the closing node. */ ++ for (br = ret; br != NULL; br = regnext(g, br)) ++ regoptail(g, br, ender); ++ ++ /* Check for proper termination. */ ++ if (paren && *g->regparse++ != ')') { ++ FAIL("unmatched ()"); ++ } else if (!paren && *g->regparse != '\0') { ++ if (*g->regparse == ')') { ++ FAIL("unmatched ()"); ++ } else ++ FAIL("junk on end"); /* "Can't happen". 
*/ ++ /* NOTREACHED */ ++ } ++ ++ return(ret); ++} ++ ++/* ++ - regbranch - one alternative of an | operator ++ * ++ * Implements the concatenation operator. ++ */ ++static char * ++regbranch(struct match_globals *g, int *flagp) ++{ ++ register char *ret; ++ register char *chain; ++ register char *latest; ++ int flags; ++ ++ *flagp = WORST; /* Tentatively. */ ++ ++ ret = regnode(g, BRANCH); ++ chain = NULL; ++ while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') { ++ latest = regpiece(g, &flags); ++ if (latest == NULL) ++ return(NULL); ++ *flagp |= flags&HASWIDTH; ++ if (chain == NULL) /* First piece. */ ++ *flagp |= flags&SPSTART; ++ else ++ regtail(g, chain, latest); ++ chain = latest; ++ } ++ if (chain == NULL) /* Loop ran zero times. */ ++ (void) regnode(g, NOTHING); ++ ++ return(ret); ++} ++ ++/* ++ - regpiece - something followed by possible [*+?] ++ * ++ * Note that the branching code sequences used for ? and the general cases ++ * of * and + are somewhat optimized: they use the same NOTHING node as ++ * both the endmarker for their branch list and the body of the last branch. ++ * It might seem that this node could be dispensed with entirely, but the ++ * endmarker role is not redundant. ++ */ ++static char * ++regpiece(struct match_globals *g, int *flagp) ++{ ++ register char *ret; ++ register char op; ++ register char *next; ++ int flags; ++ ++ ret = regatom(g, &flags); ++ if (ret == NULL) ++ return(NULL); ++ ++ op = *g->regparse; ++ if (!ISMULT(op)) { ++ *flagp = flags; ++ return(ret); ++ } ++ ++ if (!(flags&HASWIDTH) && op != '?') ++ FAIL("*+ operand could be empty"); ++ *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH); ++ ++ if (op == '*' && (flags&SIMPLE)) ++ reginsert(g, STAR, ret); ++ else if (op == '*') { ++ /* Emit x* as (x&|), where & means "self". */ ++ reginsert(g, BRANCH, ret); /* Either x */ ++ regoptail(g, ret, regnode(g, BACK)); /* and loop */ ++ regoptail(g, ret, ret); /* back */ ++ regtail(g, ret, regnode(g, BRANCH)); /* or */ ++ regtail(g, ret, regnode(g, NOTHING)); /* null. */ ++ } else if (op == '+' && (flags&SIMPLE)) ++ reginsert(g, PLUS, ret); ++ else if (op == '+') { ++ /* Emit x+ as x(&|), where & means "self". */ ++ next = regnode(g, BRANCH); /* Either */ ++ regtail(g, ret, next); ++ regtail(g, regnode(g, BACK), ret); /* loop back */ ++ regtail(g, next, regnode(g, BRANCH)); /* or */ ++ regtail(g, ret, regnode(g, NOTHING)); /* null. */ ++ } else if (op == '?') { ++ /* Emit x? as (x|) */ ++ reginsert(g, BRANCH, ret); /* Either x */ ++ regtail(g, ret, regnode(g, BRANCH)); /* or */ ++ next = regnode(g, NOTHING); /* null. */ ++ regtail(g, ret, next); ++ regoptail(g, ret, next); ++ } ++ g->regparse++; ++ if (ISMULT(*g->regparse)) ++ FAIL("nested *?+"); ++ ++ return(ret); ++} ++ ++/* ++ - regatom - the lowest level ++ * ++ * Optimization: gobbles an entire sequence of ordinary characters so that ++ * it can turn them into a single node, which is smaller to store and ++ * faster to run. Backslashed characters are exceptions, each becoming a ++ * separate node; the code is simpler that way and it's not worth fixing. ++ */ ++static char * ++regatom(struct match_globals *g, int *flagp) ++{ ++ register char *ret; ++ int flags; ++ ++ *flagp = WORST; /* Tentatively. 
*/ ++ ++ switch (*g->regparse++) { ++ case '^': ++ ret = regnode(g, BOL); ++ break; ++ case '$': ++ ret = regnode(g, EOL); ++ break; ++ case '.': ++ ret = regnode(g, ANY); ++ *flagp |= HASWIDTH|SIMPLE; ++ break; ++ case '[': { ++ register int class; ++ register int classend; ++ ++ if (*g->regparse == '^') { /* Complement of range. */ ++ ret = regnode(g, ANYBUT); ++ g->regparse++; ++ } else ++ ret = regnode(g, ANYOF); ++ if (*g->regparse == ']' || *g->regparse == '-') ++ regc(g, *g->regparse++); ++ while (*g->regparse != '\0' && *g->regparse != ']') { ++ if (*g->regparse == '-') { ++ g->regparse++; ++ if (*g->regparse == ']' || *g->regparse == '\0') ++ regc(g, '-'); ++ else { ++ class = UCHARAT(g->regparse-2)+1; ++ classend = UCHARAT(g->regparse); ++ if (class > classend+1) ++ FAIL("invalid [] range"); ++ for (; class <= classend; class++) ++ regc(g, class); ++ g->regparse++; ++ } ++ } else ++ regc(g, *g->regparse++); ++ } ++ regc(g, '\0'); ++ if (*g->regparse != ']') ++ FAIL("unmatched []"); ++ g->regparse++; ++ *flagp |= HASWIDTH|SIMPLE; ++ } ++ break; ++ case '(': ++ ret = reg(g, 1, &flags); ++ if (ret == NULL) ++ return(NULL); ++ *flagp |= flags&(HASWIDTH|SPSTART); ++ break; ++ case '\0': ++ case '|': ++ case ')': ++ FAIL("internal urp"); /* Supposed to be caught earlier. */ ++ break; ++ case '?': ++ case '+': ++ case '*': ++ FAIL("?+* follows nothing"); ++ break; ++ case '\\': ++ if (*g->regparse == '\0') ++ FAIL("trailing \\"); ++ ret = regnode(g, EXACTLY); ++ regc(g, *g->regparse++); ++ regc(g, '\0'); ++ *flagp |= HASWIDTH|SIMPLE; ++ break; ++ default: { ++ register int len; ++ register char ender; ++ ++ g->regparse--; ++ len = my_strcspn((const char *)g->regparse, (const char *)META); ++ if (len <= 0) ++ FAIL("internal disaster"); ++ ender = *(g->regparse+len); ++ if (len > 1 && ISMULT(ender)) ++ len--; /* Back off clear of ?+* operand. */ ++ *flagp |= HASWIDTH; ++ if (len == 1) ++ *flagp |= SIMPLE; ++ ret = regnode(g, EXACTLY); ++ while (len > 0) { ++ regc(g, *g->regparse++); ++ len--; ++ } ++ regc(g, '\0'); ++ } ++ break; ++ } ++ ++ return(ret); ++} ++ ++/* ++ - regnode - emit a node ++ */ ++static char * /* Location. */ ++regnode(struct match_globals *g, char op) ++{ ++ register char *ret; ++ register char *ptr; ++ ++ ret = g->regcode; ++ if (ret == &g->regdummy) { ++ g->regsize += 3; ++ return(ret); ++ } ++ ++ ptr = ret; ++ *ptr++ = op; ++ *ptr++ = '\0'; /* Null "next" pointer. */ ++ *ptr++ = '\0'; ++ g->regcode = ptr; ++ ++ return(ret); ++} ++ ++/* ++ - regc - emit (if appropriate) a byte of code ++ */ ++static void ++regc(struct match_globals *g, char b) ++{ ++ if (g->regcode != &g->regdummy) ++ *g->regcode++ = b; ++ else ++ g->regsize++; ++} ++ ++/* ++ - reginsert - insert an operator in front of already-emitted operand ++ * ++ * Means relocating the operand. ++ */ ++static void ++reginsert(struct match_globals *g, char op, char* opnd) ++{ ++ register char *src; ++ register char *dst; ++ register char *place; ++ ++ if (g->regcode == &g->regdummy) { ++ g->regsize += 3; ++ return; ++ } ++ ++ src = g->regcode; ++ g->regcode += 3; ++ dst = g->regcode; ++ while (src > opnd) ++ *--dst = *--src; ++ ++ place = opnd; /* Op node, where operand used to be. 
*/ ++ *place++ = op; ++ *place++ = '\0'; ++ *place++ = '\0'; ++} ++ ++/* ++ - regtail - set the next-pointer at the end of a node chain ++ */ ++static void ++regtail(struct match_globals *g, char *p, char *val) ++{ ++ register char *scan; ++ register char *temp; ++ register int offset; ++ ++ if (p == &g->regdummy) ++ return; ++ ++ /* Find last node. */ ++ scan = p; ++ for (;;) { ++ temp = regnext(g, scan); ++ if (temp == NULL) ++ break; ++ scan = temp; ++ } ++ ++ if (OP(scan) == BACK) ++ offset = scan - val; ++ else ++ offset = val - scan; ++ *(scan+1) = (offset>>8)&0377; ++ *(scan+2) = offset&0377; ++} ++ ++/* ++ - regoptail - regtail on operand of first argument; nop if operandless ++ */ ++static void ++regoptail(struct match_globals *g, char *p, char *val) ++{ ++ /* "Operandless" and "op != BRANCH" are synonymous in practice. */ ++ if (p == NULL || p == &g->regdummy || OP(p) != BRANCH) ++ return; ++ regtail(g, OPERAND(p), val); ++} ++ ++/* ++ * regexec and friends ++ */ ++ ++ ++/* ++ * Forwards. ++ */ ++STATIC int regtry(struct match_globals *g, regexp *prog, char *string); ++STATIC int regmatch(struct match_globals *g, char *prog); ++STATIC int regrepeat(struct match_globals *g, char *p); ++ ++#ifdef DEBUG ++int regnarrate = 0; ++void regdump(); ++STATIC char *regprop(char *op); ++#endif ++ ++/* ++ - regexec - match a regexp against a string ++ */ ++int ++regexec(regexp *prog, char *string) ++{ ++ register char *s; ++ struct match_globals g; ++ ++ /* Be paranoid... */ ++ if (prog == NULL || string == NULL) { ++ printk("<3>Regexp: NULL parameter\n"); ++ return(0); ++ } ++ ++ /* Check validity of program. */ ++ if (UCHARAT(prog->program) != MAGIC) { ++ printk("<3>Regexp: corrupted program\n"); ++ return(0); ++ } ++ ++ /* If there is a "must appear" string, look for it. */ ++ if (prog->regmust != NULL) { ++ s = string; ++ while ((s = strchr(s, prog->regmust[0])) != NULL) { ++ if (strncmp(s, prog->regmust, prog->regmlen) == 0) ++ break; /* Found it. */ ++ s++; ++ } ++ if (s == NULL) /* Not present. */ ++ return(0); ++ } ++ ++ /* Mark beginning of line for ^ . */ ++ g.regbol = string; ++ ++ /* Simplest case: anchored match need be tried only once. */ ++ if (prog->reganch) ++ return(regtry(&g, prog, string)); ++ ++ /* Messy cases: unanchored match. */ ++ s = string; ++ if (prog->regstart != '\0') ++ /* We know what char it must start with. */ ++ while ((s = strchr(s, prog->regstart)) != NULL) { ++ if (regtry(&g, prog, s)) ++ return(1); ++ s++; ++ } ++ else ++ /* We don't -- general case. */ ++ do { ++ if (regtry(&g, prog, s)) ++ return(1); ++ } while (*s++ != '\0'); ++ ++ /* Failure. */ ++ return(0); ++} ++ ++/* ++ - regtry - try match at specific point ++ */ ++static int /* 0 failure, 1 success */ ++regtry(struct match_globals *g, regexp *prog, char *string) ++{ ++ register int i; ++ register char **sp; ++ register char **ep; ++ ++ g->reginput = string; ++ g->regstartp = prog->startp; ++ g->regendp = prog->endp; ++ ++ sp = prog->startp; ++ ep = prog->endp; ++ for (i = NSUBEXP; i > 0; i--) { ++ *sp++ = NULL; ++ *ep++ = NULL; ++ } ++ if (regmatch(g, prog->program + 1)) { ++ prog->startp[0] = string; ++ prog->endp[0] = g->reginput; ++ return(1); ++ } else ++ return(0); ++} ++ ++/* ++ - regmatch - main matching routine ++ * ++ * Conceptually the strategy is simple: check to see whether the current ++ * node matches, call self recursively to see whether the rest matches, ++ * and then act accordingly. 
In practice we make some effort to avoid ++ * recursion, in particular by going through "ordinary" nodes (that don't ++ * need to know whether the rest of the match failed) by a loop instead of ++ * by recursion. ++ */ ++static int /* 0 failure, 1 success */ ++regmatch(struct match_globals *g, char *prog) ++{ ++ register char *scan = prog; /* Current node. */ ++ char *next; /* Next node. */ ++ ++#ifdef DEBUG ++ if (scan != NULL && regnarrate) ++ fprintf(stderr, "%s(\n", regprop(scan)); ++#endif ++ while (scan != NULL) { ++#ifdef DEBUG ++ if (regnarrate) ++ fprintf(stderr, "%s...\n", regprop(scan)); ++#endif ++ next = regnext(g, scan); ++ ++ switch (OP(scan)) { ++ case BOL: ++ if (g->reginput != g->regbol) ++ return(0); ++ break; ++ case EOL: ++ if (*g->reginput != '\0') ++ return(0); ++ break; ++ case ANY: ++ if (*g->reginput == '\0') ++ return(0); ++ g->reginput++; ++ break; ++ case EXACTLY: { ++ register int len; ++ register char *opnd; ++ ++ opnd = OPERAND(scan); ++ /* Inline the first character, for speed. */ ++ if (*opnd != *g->reginput) ++ return(0); ++ len = strlen(opnd); ++ if (len > 1 && strncmp(opnd, g->reginput, len) != 0) ++ return(0); ++ g->reginput += len; ++ } ++ break; ++ case ANYOF: ++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL) ++ return(0); ++ g->reginput++; ++ break; ++ case ANYBUT: ++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL) ++ return(0); ++ g->reginput++; ++ break; ++ case NOTHING: ++ case BACK: ++ break; ++ case OPEN+1: ++ case OPEN+2: ++ case OPEN+3: ++ case OPEN+4: ++ case OPEN+5: ++ case OPEN+6: ++ case OPEN+7: ++ case OPEN+8: ++ case OPEN+9: { ++ register int no; ++ register char *save; ++ ++ no = OP(scan) - OPEN; ++ save = g->reginput; ++ ++ if (regmatch(g, next)) { ++ /* ++ * Don't set startp if some later ++ * invocation of the same parentheses ++ * already has. ++ */ ++ if (g->regstartp[no] == NULL) ++ g->regstartp[no] = save; ++ return(1); ++ } else ++ return(0); ++ } ++ break; ++ case CLOSE+1: ++ case CLOSE+2: ++ case CLOSE+3: ++ case CLOSE+4: ++ case CLOSE+5: ++ case CLOSE+6: ++ case CLOSE+7: ++ case CLOSE+8: ++ case CLOSE+9: ++ { ++ register int no; ++ register char *save; ++ ++ no = OP(scan) - CLOSE; ++ save = g->reginput; ++ ++ if (regmatch(g, next)) { ++ /* ++ * Don't set endp if some later ++ * invocation of the same parentheses ++ * already has. ++ */ ++ if (g->regendp[no] == NULL) ++ g->regendp[no] = save; ++ return(1); ++ } else ++ return(0); ++ } ++ break; ++ case BRANCH: { ++ register char *save; ++ ++ if (OP(next) != BRANCH) /* No choice. */ ++ next = OPERAND(scan); /* Avoid recursion. */ ++ else { ++ do { ++ save = g->reginput; ++ if (regmatch(g, OPERAND(scan))) ++ return(1); ++ g->reginput = save; ++ scan = regnext(g, scan); ++ } while (scan != NULL && OP(scan) == BRANCH); ++ return(0); ++ /* NOTREACHED */ ++ } ++ } ++ break; ++ case STAR: ++ case PLUS: { ++ register char nextch; ++ register int no; ++ register char *save; ++ register int min; ++ ++ /* ++ * Lookahead to avoid useless match attempts ++ * when we know what character comes next. ++ */ ++ nextch = '\0'; ++ if (OP(next) == EXACTLY) ++ nextch = *OPERAND(next); ++ min = (OP(scan) == STAR) ? 0 : 1; ++ save = g->reginput; ++ no = regrepeat(g, OPERAND(scan)); ++ while (no >= min) { ++ /* If it could work, try it. */ ++ if (nextch == '\0' || *g->reginput == nextch) ++ if (regmatch(g, next)) ++ return(1); ++ /* Couldn't or didn't -- back up. 
*/ ++ no--; ++ g->reginput = save + no; ++ } ++ return(0); ++ } ++ break; ++ case END: ++ return(1); /* Success! */ ++ break; ++ default: ++ printk("<3>Regexp: memory corruption\n"); ++ return(0); ++ break; ++ } ++ ++ scan = next; ++ } ++ ++ /* ++ * We get here only if there's trouble -- normally "case END" is ++ * the terminating point. ++ */ ++ printk("<3>Regexp: corrupted pointers\n"); ++ return(0); ++} ++ ++/* ++ - regrepeat - repeatedly match something simple, report how many ++ */ ++static int ++regrepeat(struct match_globals *g, char *p) ++{ ++ register int count = 0; ++ register char *scan; ++ register char *opnd; ++ ++ scan = g->reginput; ++ opnd = OPERAND(p); ++ switch (OP(p)) { ++ case ANY: ++ count = strlen(scan); ++ scan += count; ++ break; ++ case EXACTLY: ++ while (*opnd == *scan) { ++ count++; ++ scan++; ++ } ++ break; ++ case ANYOF: ++ while (*scan != '\0' && strchr(opnd, *scan) != NULL) { ++ count++; ++ scan++; ++ } ++ break; ++ case ANYBUT: ++ while (*scan != '\0' && strchr(opnd, *scan) == NULL) { ++ count++; ++ scan++; ++ } ++ break; ++ default: /* Oh dear. Called inappropriately. */ ++ printk("<3>Regexp: internal foulup\n"); ++ count = 0; /* Best compromise. */ ++ break; ++ } ++ g->reginput = scan; ++ ++ return(count); ++} ++ ++/* ++ - regnext - dig the "next" pointer out of a node ++ */ ++static char* ++regnext(struct match_globals *g, char *p) ++{ ++ register int offset; ++ ++ if (p == &g->regdummy) ++ return(NULL); ++ ++ offset = NEXT(p); ++ if (offset == 0) ++ return(NULL); ++ ++ if (OP(p) == BACK) ++ return(p-offset); ++ else ++ return(p+offset); ++} ++ ++#ifdef DEBUG ++ ++STATIC char *regprop(); ++ ++/* ++ - regdump - dump a regexp onto stdout in vaguely comprehensible form ++ */ ++void ++regdump(regexp *r) ++{ ++ register char *s; ++ register char op = EXACTLY; /* Arbitrary non-END op. */ ++ register char *next; ++ /* extern char *strchr(); */ ++ ++ ++ s = r->program + 1; ++ while (op != END) { /* While that wasn't END last time... */ ++ op = OP(s); ++ printf("%2d%s", s-r->program, regprop(s)); /* Where, what. */ ++ next = regnext(s); ++ if (next == NULL) /* Next ptr. */ ++ printf("(0)"); ++ else ++ printf("(%d)", (s-r->program)+(next-s)); ++ s += 3; ++ if (op == ANYOF || op == ANYBUT || op == EXACTLY) { ++ /* Literal string, where present. */ ++ while (*s != '\0') { ++ putchar(*s); ++ s++; ++ } ++ s++; ++ } ++ putchar('\n'); ++ } ++ ++ /* Header fields of interest. 
*/ ++ if (r->regstart != '\0') ++ printf("start `%c' ", r->regstart); ++ if (r->reganch) ++ printf("anchored "); ++ if (r->regmust != NULL) ++ printf("must have \"%s\"", r->regmust); ++ printf("\n"); ++} ++ ++/* ++ - regprop - printable representation of opcode ++ */ ++static char * ++regprop(char *op) ++{ ++#define BUFLEN 50 ++ register char *p; ++ static char buf[BUFLEN]; ++ ++ strcpy(buf, ":"); ++ ++ switch (OP(op)) { ++ case BOL: ++ p = "BOL"; ++ break; ++ case EOL: ++ p = "EOL"; ++ break; ++ case ANY: ++ p = "ANY"; ++ break; ++ case ANYOF: ++ p = "ANYOF"; ++ break; ++ case ANYBUT: ++ p = "ANYBUT"; ++ break; ++ case BRANCH: ++ p = "BRANCH"; ++ break; ++ case EXACTLY: ++ p = "EXACTLY"; ++ break; ++ case NOTHING: ++ p = "NOTHING"; ++ break; ++ case BACK: ++ p = "BACK"; ++ break; ++ case END: ++ p = "END"; ++ break; ++ case OPEN+1: ++ case OPEN+2: ++ case OPEN+3: ++ case OPEN+4: ++ case OPEN+5: ++ case OPEN+6: ++ case OPEN+7: ++ case OPEN+8: ++ case OPEN+9: ++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN); ++ p = NULL; ++ break; ++ case CLOSE+1: ++ case CLOSE+2: ++ case CLOSE+3: ++ case CLOSE+4: ++ case CLOSE+5: ++ case CLOSE+6: ++ case CLOSE+7: ++ case CLOSE+8: ++ case CLOSE+9: ++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE); ++ p = NULL; ++ break; ++ case STAR: ++ p = "STAR"; ++ break; ++ case PLUS: ++ p = "PLUS"; ++ break; ++ default: ++ printk("<3>Regexp: corrupted opcode\n"); ++ break; ++ } ++ if (p != NULL) ++ strncat(buf, p, BUFLEN-strlen(buf)); ++ return(buf); ++} ++#endif ++ ++ +--- /dev/null ++++ b/net/netfilter/regexp/regexp.h +@@ -0,0 +1,41 @@ ++/* ++ * Definitions etc. for regexp(3) routines. ++ * ++ * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof], ++ * not the System V one. ++ */ ++ ++#ifndef REGEXP_H ++#define REGEXP_H ++ ++ ++/* ++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h , ++which contains a version of this library, says: ++ ++ * ++ * NSUBEXP must be at least 10, and no greater than 117 or the parser ++ * will not work properly. ++ * ++ ++However, it looks rather like this library is limited to 10. If you think ++otherwise, let us know. ++*/ ++ ++#define NSUBEXP 10 ++typedef struct regexp { ++ char *startp[NSUBEXP]; ++ char *endp[NSUBEXP]; ++ char regstart; /* Internal use only. */ ++ char reganch; /* Internal use only. */ ++ char *regmust; /* Internal use only. */ ++ int regmlen; /* Internal use only. */ ++ char program[1]; /* Unwarranted chumminess with compiler. */ ++} regexp; ++ ++regexp * regcomp(char *exp, int *patternsize); ++int regexec(regexp *prog, char *string); ++void regsub(regexp *prog, char *source, char *dest); ++void regerror(char *s); ++ ++#endif +--- /dev/null ++++ b/net/netfilter/regexp/regmagic.h +@@ -0,0 +1,5 @@ ++/* ++ * The first byte of the regexp internal "program" is actually this magic ++ * number; the start node begins in the second byte. ++ */ ++#define MAGIC 0234 +--- /dev/null ++++ b/net/netfilter/regexp/regsub.c +@@ -0,0 +1,95 @@ ++/* ++ * regsub ++ * @(#)regsub.c 1.3 of 2 April 86 ++ * ++ * Copyright (c) 1986 by University of Toronto. ++ * Written by Henry Spencer. Not derived from licensed software. ++ * ++ * Permission is granted to anyone to use this software for any ++ * purpose on any computer system, and to redistribute it freely, ++ * subject to the following restrictions: ++ * ++ * 1. The author is not responsible for the consequences of use of ++ * this software, no matter how awful, even if they arise ++ * from defects in it. 
++ * ++ * 2. The origin of this software must not be misrepresented, either ++ * by explicit claim or by omission. ++ * ++ * 3. Altered versions must be plainly marked as such, and must not ++ * be misrepresented as being the original software. ++ * ++ * ++ * This code was modified by Ethan Sommer to work within the kernel ++ * (it now uses kmalloc etc..) ++ * ++ */ ++#include "regexp.h" ++#include "regmagic.h" ++#include <linux/string.h> ++ ++ ++#ifndef CHARBITS ++#define UCHARAT(p) ((int)*(unsigned char *)(p)) ++#else ++#define UCHARAT(p) ((int)*(p)&CHARBITS) ++#endif ++ ++#if 0 ++//void regerror(char * s) ++//{ ++// printk("regexp(3): %s", s); ++// /* NOTREACHED */ ++//} ++#endif ++ ++/* ++ - regsub - perform substitutions after a regexp match ++ */ ++void ++regsub(regexp * prog, char * source, char * dest) ++{ ++ register char *src; ++ register char *dst; ++ register char c; ++ register int no; ++ register int len; ++ ++ /* Not necessary and gcc doesn't like it -MLS */ ++ /*extern char *strncpy();*/ ++ ++ if (prog == NULL || source == NULL || dest == NULL) { ++ regerror("NULL parm to regsub"); ++ return; ++ } ++ if (UCHARAT(prog->program) != MAGIC) { ++ regerror("damaged regexp fed to regsub"); ++ return; ++ } ++ ++ src = source; ++ dst = dest; ++ while ((c = *src++) != '\0') { ++ if (c == '&') ++ no = 0; ++ else if (c == '\\' && '0' <= *src && *src <= '9') ++ no = *src++ - '0'; ++ else ++ no = -1; ++ ++ if (no < 0) { /* Ordinary character. */ ++ if (c == '\\' && (*src == '\\' || *src == '&')) ++ c = *src++; ++ *dst++ = c; ++ } else if (prog->startp[no] != NULL && prog->endp[no] != NULL) { ++ len = prog->endp[no] - prog->startp[no]; ++ (void) strncpy(dst, prog->startp[no], len); ++ dst += len; ++ if (len != 0 && *(dst-1) == '\0') { /* strncpy hit NUL. */ ++ regerror("damaged match string"); ++ return; ++ } ++ } ++ } ++ *dst++ = '\0'; ++} +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -221,6 +221,13 @@ destroy_conntrack(struct nf_conntrack *n + * too. */ + nf_ct_remove_expectations(ct); + ++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE) ++ if(ct->layer7.app_proto) ++ kfree(ct->layer7.app_proto); ++ if(ct->layer7.app_data) ++ kfree(ct->layer7.app_data); ++#endif ++ + /* We overload first tuple to link into unconfirmed or dying list.*/ + BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); + hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -239,6 +239,12 @@ static int ct_seq_show(struct seq_file * + if (ct_show_delta_time(s, ct)) + goto release; + ++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE) ++ if(ct->layer7.app_proto && ++ seq_printf(s, "l7proto=%s ", ct->layer7.app_proto)) ++ return -ENOSPC; ++#endif ++ + if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) + goto release; + +--- a/include/net/netfilter/nf_conntrack.h ++++ b/include/net/netfilter/nf_conntrack.h +@@ -105,6 +105,22 @@ struct nf_conn { + struct net *ct_net; + #endif + ++#if defined(CONFIG_NETFILTER_XT_MATCH_LAYER7) || \ ++ defined(CONFIG_NETFILTER_XT_MATCH_LAYER7_MODULE) ++ struct { ++ /* ++ * e.g. "http". NULL before decision. "unknown" after decision ++ * if no match. ++ */ ++ char *app_proto; ++ /* ++ * application layer data so far. NULL after match decision. 
++ */ ++ char *app_data; ++ unsigned int app_data_len; ++ } layer7; ++#endif ++ + /* Storage reserved for other modules, must be the last member */ + union nf_conntrack_proto proto; + }; +--- /dev/null ++++ b/include/linux/netfilter/xt_layer7.h +@@ -0,0 +1,13 @@ ++#ifndef _XT_LAYER7_H ++#define _XT_LAYER7_H ++ ++#define MAX_PATTERN_LEN 8192 ++#define MAX_PROTOCOL_LEN 256 ++ ++struct xt_layer7_info { ++ char protocol[MAX_PROTOCOL_LEN]; ++ char pattern[MAX_PATTERN_LEN]; ++ u_int8_t invert; ++}; ++ ++#endif /* _XT_LAYER7_H */ +--- a/include/uapi/linux/netfilter/Kbuild ++++ b/include/uapi/linux/netfilter/Kbuild +@@ -51,6 +51,7 @@ header-y += xt_hashlimit.h + header-y += xt_helper.h + header-y += xt_iprange.h + header-y += xt_ipvs.h ++header-y += xt_layer7.h + header-y += xt_length.h + header-y += xt_limit.h + header-y += xt_mac.h diff --git a/target/linux/generic/patches-3.8/601-netfilter_layer7_pktmatch.patch b/target/linux/generic/patches-3.8/601-netfilter_layer7_pktmatch.patch new file mode 100644 index 0000000000..f65e301fd1 --- /dev/null +++ b/target/linux/generic/patches-3.8/601-netfilter_layer7_pktmatch.patch @@ -0,0 +1,108 @@ +--- a/include/linux/netfilter/xt_layer7.h ++++ b/include/linux/netfilter/xt_layer7.h +@@ -8,6 +8,7 @@ struct xt_layer7_info { + char protocol[MAX_PROTOCOL_LEN]; + char pattern[MAX_PATTERN_LEN]; + u_int8_t invert; ++ u_int8_t pkt; + }; + + #endif /* _XT_LAYER7_H */ +--- a/net/netfilter/xt_layer7.c ++++ b/net/netfilter/xt_layer7.c +@@ -314,33 +314,35 @@ static int match_no_append(struct nf_con + } + + /* add the new app data to the conntrack. Return number of bytes added. */ +-static int add_data(struct nf_conn * master_conntrack, +- char * app_data, int appdatalen) ++static int add_datastr(char *target, int offset, char *app_data, int len) + { + int length = 0, i; +- int oldlength = master_conntrack->layer7.app_data_len; +- +- /* This is a fix for a race condition by Deti Fliegl. However, I'm not +- clear on whether the race condition exists or whether this really +- fixes it. I might just be being dense... Anyway, if it's not really +- a fix, all it does is waste a very small amount of time. */ +- if(!master_conntrack->layer7.app_data) return 0; ++ if (!target) return 0; + + /* Strip nulls. Make everything lower case (our regex lib doesn't + do case insensitivity). Add it to the end of the current data. */ +- for(i = 0; i < maxdatalen-oldlength-1 && +- i < appdatalen; i++) { ++ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) { + if(app_data[i] != '\0') { + /* the kernel version of tolower mungs 'upper ascii' */ +- master_conntrack->layer7.app_data[length+oldlength] = ++ target[length+offset] = + isascii(app_data[i])? + tolower(app_data[i]) : app_data[i]; + length++; + } + } ++ target[length+offset] = '\0'; ++ ++ return length; ++} ++ ++/* add the new app data to the conntrack. Return number of bytes added. 
*/ ++static int add_data(struct nf_conn * master_conntrack, ++ char * app_data, int appdatalen) ++{ ++ int length; + +- master_conntrack->layer7.app_data[length+oldlength] = '\0'; +- master_conntrack->layer7.app_data_len = length + oldlength; ++ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen); ++ master_conntrack->layer7.app_data_len += length; + + return length; + } +@@ -438,7 +440,7 @@ match(const struct sk_buff *skbin, + + enum ip_conntrack_info master_ctinfo, ctinfo; + struct nf_conn *master_conntrack, *conntrack; +- unsigned char * app_data; ++ unsigned char *app_data, *tmp_data; + unsigned int pattern_result, appdatalen; + regexp * comppattern; + +@@ -466,8 +468,8 @@ match(const struct sk_buff *skbin, + master_conntrack = master_ct(master_conntrack); + + /* if we've classified it or seen too many packets */ +- if(total_acct_packets(master_conntrack) > num_packets || +- master_conntrack->layer7.app_proto) { ++ if(!info->pkt && (total_acct_packets(master_conntrack) > num_packets || ++ master_conntrack->layer7.app_proto)) { + + pattern_result = match_no_append(conntrack, master_conntrack, + ctinfo, master_ctinfo, info); +@@ -500,6 +502,25 @@ match(const struct sk_buff *skbin, + /* the return value gets checked later, when we're ready to use it */ + comppattern = compile_and_cache(info->pattern, info->protocol); + ++ if (info->pkt) { ++ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC); ++ if(!tmp_data){ ++ if (net_ratelimit()) ++ printk(KERN_ERR "layer7: out of memory in match, bailing.\n"); ++ return info->invert; ++ } ++ ++ tmp_data[0] = '\0'; ++ add_datastr(tmp_data, 0, app_data, appdatalen); ++ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0); ++ ++ kfree(tmp_data); ++ tmp_data = NULL; ++ spin_unlock_bh(&l7_lock); ++ ++ return (pattern_result ^ info->invert); ++ } ++ + /* On the first packet of a connection, allocate space for app data */ + if(total_acct_packets(master_conntrack) == 1 && !skb->cb[0] && + !master_conntrack->layer7.app_data){ diff --git a/target/linux/generic/patches-3.8/602-netfilter_layer7_match.patch b/target/linux/generic/patches-3.8/602-netfilter_layer7_match.patch new file mode 100644 index 0000000000..b2e48c824f --- /dev/null +++ b/target/linux/generic/patches-3.8/602-netfilter_layer7_match.patch @@ -0,0 +1,51 @@ +--- a/net/netfilter/xt_layer7.c ++++ b/net/netfilter/xt_layer7.c +@@ -415,7 +415,9 @@ static int layer7_write_proc(struct file + } + + static bool +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) ++match(const struct sk_buff *skbin, struct xt_action_param *par) ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + match(const struct sk_buff *skbin, const struct xt_match_param *par) + #else + match(const struct sk_buff *skbin, +@@ -597,14 +599,19 @@ match(const struct sk_buff *skbin, + } + + // load nf_conntrack_ipv4 ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) ++static int ++#else ++static bool ++#endif + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) +-static bool check(const struct xt_mtchk_param *par) ++check(const struct xt_mtchk_param *par) + { + if (nf_ct_l3proto_try_module_get(par->match->family) < 0) { + printk(KERN_WARNING "can't load conntrack support for " + "proto=%d\n", par->match->family); + #else +-static bool check(const char *tablename, const void *inf, ++check(const char *tablename, const void *inf, + const struct xt_match *match, void *matchinfo, + unsigned int hook_mask) + { 
+@@ -612,9 +619,15 @@ static bool check(const char *tablename, + printk(KERN_WARNING "can't load conntrack support for " + "proto=%d\n", match->family); + #endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) ++ return -EINVAL; ++ } ++ return 0; ++#else + return 0; + } + return 1; ++#endif + } + + diff --git a/target/linux/generic/patches-3.8/603-netfilter_layer7_2.6.36_fix.patch b/target/linux/generic/patches-3.8/603-netfilter_layer7_2.6.36_fix.patch new file mode 100644 index 0000000000..23a1b1b26d --- /dev/null +++ b/target/linux/generic/patches-3.8/603-netfilter_layer7_2.6.36_fix.patch @@ -0,0 +1,61 @@ +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -980,6 +980,27 @@ config NETFILTER_XT_MATCH_IPVS + + If unsure, say N. + ++config NETFILTER_XT_MATCH_LAYER7 ++ tristate '"layer7" match support' ++ depends on EXPERIMENTAL ++ depends on NETFILTER_XTABLES ++ depends on NETFILTER_ADVANCED ++ depends on NF_CONNTRACK ++ help ++ Say Y if you want to be able to classify connections (and their ++ packets) based on regular expression matching of their application ++ layer data. This is one way to classify applications such as ++ peer-to-peer filesharing systems that do not always use the same ++ port. ++ ++ To compile it as a module, choose M here. If unsure, say N. ++ ++config NETFILTER_XT_MATCH_LAYER7_DEBUG ++ bool 'Layer 7 debugging output' ++ depends on NETFILTER_XT_MATCH_LAYER7 ++ help ++ Say Y to get lots of debugging output. ++ + config NETFILTER_XT_MATCH_LENGTH + tristate '"length" match support' + depends on NETFILTER_ADVANCED +@@ -1176,26 +1197,11 @@ config NETFILTER_XT_MATCH_STATE + + To compile it as a module, choose M here. If unsure, say N. + +-config NETFILTER_XT_MATCH_LAYER7 +- tristate '"layer7" match support' +- depends on NETFILTER_XTABLES +- depends on EXPERIMENTAL && (IP_NF_CONNTRACK || NF_CONNTRACK) +- depends on NETFILTER_ADVANCED +- help +- Say Y if you want to be able to classify connections (and their +- packets) based on regular expression matching of their application +- layer data. This is one way to classify applications such as +- peer-to-peer filesharing systems that do not always use the same +- port. +- +- To compile it as a module, choose M here. If unsure, say N. +- + config NETFILTER_XT_MATCH_LAYER7_DEBUG +- bool 'Layer 7 debugging output' +- depends on NETFILTER_XT_MATCH_LAYER7 +- help +- Say Y to get lots of debugging output. +- ++ bool 'Layer 7 debugging output' ++ depends on NETFILTER_XT_MATCH_LAYER7 ++ help ++ Say Y to get lots of debugging output. 
+ + config NETFILTER_XT_MATCH_STATISTIC + tristate '"statistic" match support' diff --git a/target/linux/generic/patches-3.8/604-netfilter_cisco_794x_iphone.patch b/target/linux/generic/patches-3.8/604-netfilter_cisco_794x_iphone.patch new file mode 100644 index 0000000000..210f7fb6b3 --- /dev/null +++ b/target/linux/generic/patches-3.8/604-netfilter_cisco_794x_iphone.patch @@ -0,0 +1,118 @@ +--- a/include/linux/netfilter/nf_conntrack_sip.h ++++ b/include/linux/netfilter/nf_conntrack_sip.h +@@ -4,12 +4,15 @@ + + #include <net/netfilter/nf_conntrack_expect.h> + ++#include <linux/types.h> ++ + #define SIP_PORT 5060 + #define SIP_TIMEOUT 3600 + + struct nf_ct_sip_master { + unsigned int register_cseq; + unsigned int invite_cseq; ++ __be16 forced_dport; + }; + + enum sip_expectation_classes { +--- a/net/netfilter/nf_nat_sip.c ++++ b/net/netfilter/nf_nat_sip.c +@@ -95,6 +95,7 @@ static int map_addr(struct sk_buff *skb, + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); ++ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); + char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")]; + unsigned int buflen; + union nf_inet_addr newaddr; +@@ -107,7 +108,8 @@ static int map_addr(struct sk_buff *skb, + } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) && + ct->tuplehash[dir].tuple.dst.u.udp.port == port) { + newaddr = ct->tuplehash[!dir].tuple.src.u3; +- newport = ct->tuplehash[!dir].tuple.src.u.udp.port; ++ newport = ct_sip_info->forced_dport ? ct_sip_info->forced_dport : ++ ct->tuplehash[!dir].tuple.src.u.udp.port; + } else + return 1; + +@@ -144,6 +146,7 @@ static unsigned int nf_nat_sip(struct sk + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); ++ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); + unsigned int coff, matchoff, matchlen; + enum sip_header_types hdr; + union nf_inet_addr addr; +@@ -258,6 +261,20 @@ next: + !map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO)) + return NF_DROP; + ++ /* Mangle destination port for Cisco phones, then fix up checksums */ ++ if (dir == IP_CT_DIR_REPLY && ct_sip_info->forced_dport) { ++ struct udphdr *uh; ++ ++ if (!skb_make_writable(skb, skb->len)) ++ return NF_DROP; ++ ++ uh = (void *)skb->data + protoff; ++ uh->dest = ct_sip_info->forced_dport; ++ ++ if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, protoff, 0, 0, NULL, 0)) ++ return NF_DROP; ++ } ++ + return NF_ACCEPT; + } + +@@ -311,8 +328,10 @@ static unsigned int nf_nat_sip_expect(st + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); ++ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); + union nf_inet_addr newaddr; + u_int16_t port; ++ __be16 srcport; + char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")]; + unsigned int buflen; + +@@ -326,8 +345,9 @@ static unsigned int nf_nat_sip_expect(st + /* If the signalling port matches the connection's source port in the + * original direction, try to use the destination port in the opposite + * direction. */ +- if (exp->tuple.dst.u.udp.port == +- ct->tuplehash[dir].tuple.src.u.udp.port) ++ srcport = ct_sip_info->forced_dport ? 
ct_sip_info->forced_dport : ++ ct->tuplehash[dir].tuple.src.u.udp.port; ++ if (exp->tuple.dst.u.udp.port == srcport) + port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port); + else + port = ntohs(exp->tuple.dst.u.udp.port); +--- a/net/netfilter/nf_conntrack_sip.c ++++ b/net/netfilter/nf_conntrack_sip.c +@@ -1440,8 +1440,25 @@ static int process_sip_request(struct sk + { + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ++ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); ++ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + unsigned int matchoff, matchlen; + unsigned int cseq, i; ++ union nf_inet_addr addr; ++ __be16 port; ++ ++ /* Many Cisco IP phones use a high source port for SIP requests, but ++ * listen for the response on port 5060. If we are the local ++ * router for one of these phones, save the port number from the ++ * Via: header so that nf_nat_sip can redirect the responses to ++ * the correct port. ++ */ ++ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, ++ SIP_HDR_VIA_UDP, NULL, &matchoff, ++ &matchlen, &addr, &port) > 0 && ++ port != ct->tuplehash[dir].tuple.src.u.udp.port && ++ nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3)) ++ ct_sip_info->forced_dport = port; + + for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { + const struct sip_handler *handler; diff --git a/target/linux/generic/patches-3.8/610-netfilter_match_bypass_default_checks.patch b/target/linux/generic/patches-3.8/610-netfilter_match_bypass_default_checks.patch new file mode 100644 index 0000000000..0081da01df --- /dev/null +++ b/target/linux/generic/patches-3.8/610-netfilter_match_bypass_default_checks.patch @@ -0,0 +1,93 @@ +--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h ++++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h +@@ -87,6 +87,7 @@ struct ipt_ip { + #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ + #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ + #define IPT_F_MASK 0x03 /* All possible flag bits mask. */ ++#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */ + + /* Values for "inv" field in struct ipt_ip. */ + #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. 
*/ +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -81,6 +81,9 @@ ip_packet_match(const struct iphdr *ip, + + #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) + ++ if (ipinfo->flags & IPT_F_NO_DEF_MATCH) ++ return true; ++ + if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, + IPT_INV_SRCIP) || + FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, +@@ -134,6 +137,29 @@ ip_packet_match(const struct iphdr *ip, + return true; + } + ++static void ++ip_checkdefault(struct ipt_ip *ip) ++{ ++ static const char iface_mask[IFNAMSIZ] = {}; ++ ++ if (ip->invflags || ip->flags & IPT_F_FRAG) ++ return; ++ ++ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0) ++ return; ++ ++ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0) ++ return; ++ ++ if (ip->smsk.s_addr || ip->dmsk.s_addr) ++ return; ++ ++ if (ip->proto) ++ return; ++ ++ ip->flags |= IPT_F_NO_DEF_MATCH; ++} ++ + static bool + ip_checkentry(const struct ipt_ip *ip) + { +@@ -560,7 +586,7 @@ static void cleanup_match(struct xt_entr + } + + static int +-check_entry(const struct ipt_entry *e, const char *name) ++check_entry(struct ipt_entry *e, const char *name) + { + const struct xt_entry_target *t; + +@@ -569,6 +595,8 @@ check_entry(const struct ipt_entry *e, c + return -EINVAL; + } + ++ ip_checkdefault(&e->ip); ++ + if (e->target_offset + sizeof(struct xt_entry_target) > + e->next_offset) + return -EINVAL; +@@ -930,6 +958,7 @@ copy_entries_to_user(unsigned int total_ + const struct xt_table_info *private = table->private; + int ret = 0; + const void *loc_cpu_entry; ++ u8 flags; + + counters = alloc_counters(table); + if (IS_ERR(counters)) +@@ -960,6 +989,14 @@ copy_entries_to_user(unsigned int total_ + ret = -EFAULT; + goto free_counters; + } ++ ++ flags = e->ip.flags & IPT_F_MASK; ++ if (copy_to_user(userptr + off ++ + offsetof(struct ipt_entry, ip.flags), ++ &flags, sizeof(flags)) != 0) { ++ ret = -EFAULT; ++ goto free_counters; ++ } + + for (i = sizeof(struct ipt_entry); + i < e->target_offset; diff --git a/target/linux/generic/patches-3.8/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/patches-3.8/611-netfilter_match_bypass_default_table.patch new file mode 100644 index 0000000000..10f9250bb2 --- /dev/null +++ b/target/linux/generic/patches-3.8/611-netfilter_match_bypass_default_table.patch @@ -0,0 +1,81 @@ +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -309,6 +309,33 @@ struct ipt_entry *ipt_next_entry(const s + return (void *)entry + entry->next_offset; + } + ++static bool ++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict) ++{ ++ struct xt_entry_target *t; ++ struct xt_standard_target *st; ++ ++ if (e->target_offset != sizeof(struct ipt_entry)) ++ return false; ++ ++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH)) ++ return false; ++ ++ t = ipt_get_target(e); ++ if (t->u.kernel.target->target) ++ return false; ++ ++ st = (struct xt_standard_target *) t; ++ if (st->verdict == XT_RETURN) ++ return false; ++ ++ if (st->verdict >= 0) ++ return false; ++ ++ *verdict = (unsigned)(-st->verdict) - 1; ++ return true; ++} ++ + /* Returns one of the generic firewall policies, like NF_ACCEPT. */ + unsigned int + ipt_do_table(struct sk_buff *skb, +@@ -333,6 +360,25 @@ ipt_do_table(struct sk_buff *skb, + ip = ip_hdr(skb); + indev = in ? in->name : nulldevname; + outdev = out ? 
out->name : nulldevname; ++ ++ IP_NF_ASSERT(table->valid_hooks & (1 << hook)); ++ local_bh_disable(); ++ addend = xt_write_recseq_begin(); ++ private = table->private; ++ cpu = smp_processor_id(); ++ table_base = private->entries[cpu]; ++ jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; ++ stackptr = per_cpu_ptr(private->stackptr, cpu); ++ origptr = *stackptr; ++ ++ e = get_entry(table_base, private->hook_entry[hook]); ++ if (ipt_handle_default_rule(e, &verdict)) { ++ ADD_COUNTER(e->counters, skb->len, 1); ++ xt_write_recseq_end(addend); ++ local_bh_enable(); ++ return verdict; ++ } ++ + /* We handle fragments by dealing with the first fragment as + * if it was a normal packet. All other fragments are treated + * normally, except that they will NEVER match rules that ask +@@ -347,18 +393,6 @@ ipt_do_table(struct sk_buff *skb, + acpar.family = NFPROTO_IPV4; + acpar.hooknum = hook; + +- IP_NF_ASSERT(table->valid_hooks & (1 << hook)); +- local_bh_disable(); +- addend = xt_write_recseq_begin(); +- private = table->private; +- cpu = smp_processor_id(); +- table_base = private->entries[cpu]; +- jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; +- stackptr = per_cpu_ptr(private->stackptr, cpu); +- origptr = *stackptr; +- +- e = get_entry(table_base, private->hook_entry[hook]); +- + pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n", + table->name, hook, origptr, + get_entry(table_base, private->underflow[hook])); diff --git a/target/linux/generic/patches-3.8/612-netfilter_match_reduce_memory_access.patch b/target/linux/generic/patches-3.8/612-netfilter_match_reduce_memory_access.patch new file mode 100644 index 0000000000..f506165e1d --- /dev/null +++ b/target/linux/generic/patches-3.8/612-netfilter_match_reduce_memory_access.patch @@ -0,0 +1,16 @@ +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -84,9 +84,11 @@ ip_packet_match(const struct iphdr *ip, + if (ipinfo->flags & IPT_F_NO_DEF_MATCH) + return true; + +- if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, ++ if (FWINV(ipinfo->smsk.s_addr && ++ (ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, + IPT_INV_SRCIP) || +- FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, ++ FWINV(ipinfo->dmsk.s_addr && ++ (ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, + IPT_INV_DSTIP)) { + dprintf("Source or dest mismatch.\n"); + diff --git a/target/linux/generic/patches-3.8/613-netfilter_optional_tcp_window_check.patch b/target/linux/generic/patches-3.8/613-netfilter_optional_tcp_window_check.patch new file mode 100644 index 0000000000..a828ef425d --- /dev/null +++ b/target/linux/generic/patches-3.8/613-netfilter_optional_tcp_window_check.patch @@ -0,0 +1,36 @@ +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -29,6 +29,9 @@ + #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> + #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> + ++/* Do not check the TCP window for incoming packets */ ++static int nf_ct_tcp_no_window_check __read_mostly = 1; ++ + /* "Be conservative in what you do, + be liberal in what you accept from others." + If it's non-zero, we mark only out of window RST segments as INVALID. */ +@@ -526,6 +529,9 @@ static bool tcp_in_window(const struct n + s16 receiver_offset; + bool res; + ++ if (nf_ct_tcp_no_window_check) ++ return true; ++ + /* + * Get the required data from the packet. 
+ */ +@@ -1438,6 +1444,13 @@ static struct ctl_table tcp_sysctl_table + .mode = 0644, + .proc_handler = proc_dointvec, + }, ++ { ++ .procname = "nf_conntrack_tcp_no_window_check", ++ .data = &nf_ct_tcp_no_window_check, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec, ++ }, + { } + }; + diff --git a/target/linux/generic/patches-3.8/620-sched_esfq.patch b/target/linux/generic/patches-3.8/620-sched_esfq.patch new file mode 100644 index 0000000000..e037dcf38b --- /dev/null +++ b/target/linux/generic/patches-3.8/620-sched_esfq.patch @@ -0,0 +1,791 @@ +--- a/include/uapi/linux/pkt_sched.h ++++ b/include/uapi/linux/pkt_sched.h +@@ -214,6 +214,33 @@ struct tc_sfq_xstats { + __s32 allot; + }; + ++/* ESFQ section */ ++ ++enum ++{ ++ /* traditional */ ++ TCA_SFQ_HASH_CLASSIC, ++ TCA_SFQ_HASH_DST, ++ TCA_SFQ_HASH_SRC, ++ TCA_SFQ_HASH_FWMARK, ++ /* conntrack */ ++ TCA_SFQ_HASH_CTORIGDST, ++ TCA_SFQ_HASH_CTORIGSRC, ++ TCA_SFQ_HASH_CTREPLDST, ++ TCA_SFQ_HASH_CTREPLSRC, ++ TCA_SFQ_HASH_CTNATCHG, ++}; ++ ++struct tc_esfq_qopt ++{ ++ unsigned quantum; /* Bytes per round allocated to flow */ ++ int perturb_period; /* Period of hash perturbation */ ++ __u32 limit; /* Maximal packets in queue */ ++ unsigned divisor; /* Hash divisor */ ++ unsigned flows; /* Maximal number of flows */ ++ unsigned hash_kind; /* Hash function to use for flow identification */ ++}; ++ + /* RED section */ + + enum { +--- a/net/sched/Kconfig ++++ b/net/sched/Kconfig +@@ -148,6 +148,37 @@ config NET_SCH_SFQ + To compile this code as a module, choose M here: the + module will be called sch_sfq. + ++config NET_SCH_ESFQ ++ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)" ++ ---help--- ++ Say Y here if you want to use the Enhanced Stochastic Fairness ++ Queueing (ESFQ) packet scheduling algorithm for some of your network ++ devices or as a leaf discipline for a classful qdisc such as HTB or ++ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and ++ references to the SFQ algorithm). ++ ++ This is an enchanced SFQ version which allows you to control some ++ hardcoded values in the SFQ scheduler. ++ ++ ESFQ also adds control of the hash function used to identify packet ++ flows. The original SFQ discipline hashes by connection; ESFQ add ++ several other hashing methods, such as by src IP or by dst IP, which ++ can be more fair to users in some networking situations. ++ ++ To compile this code as a module, choose M here: the ++ module will be called sch_esfq. ++ ++config NET_SCH_ESFQ_NFCT ++ bool "Connection Tracking Hash Types" ++ depends on NET_SCH_ESFQ && NF_CONNTRACK ++ ---help--- ++ Say Y here to enable support for hashing based on netfilter connection ++ tracking information. This is useful for a router that is also using ++ NAT to connect privately-addressed hosts to the Internet. If you want ++ to provide fair distribution of upstream bandwidth, ESFQ must use ++ connection tracking information, since all outgoing packets will share ++ the same source address. 
++ + config NET_SCH_TEQL + tristate "True Link Equalizer (TEQL)" + ---help--- +--- a/net/sched/Makefile ++++ b/net/sched/Makefile +@@ -26,6 +26,7 @@ obj-$(CONFIG_NET_SCH_INGRESS) += sch_ing + obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o + obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o + obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o ++obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o + obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o + obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o + obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o +--- /dev/null ++++ b/net/sched/sch_esfq.c +@@ -0,0 +1,702 @@ ++/* ++ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ * ++ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> ++ * ++ * Changes: Alexander Atanasov, <alex@ssi.bg> ++ * Added dynamic depth,limit,divisor,hash_kind options. ++ * Added dst and src hashes. ++ * ++ * Alexander Clouter, <alex@digriz.org.uk> ++ * Ported ESFQ to Linux 2.6. ++ * ++ * Corey Hickey, <bugfood-c@fatooh.org> ++ * Maintenance of the Linux 2.6 port. ++ * Added fwmark hash (thanks to Robert Kurjata). ++ * Added usage of jhash. ++ * Added conntrack support. ++ * Added ctnatchg hash (thanks to Ben Pfountz). ++ */ ++ ++#include <linux/module.h> ++#include <asm/uaccess.h> ++#include <linux/bitops.h> ++#include <linux/types.h> ++#include <linux/kernel.h> ++#include <linux/jiffies.h> ++#include <linux/string.h> ++#include <linux/mm.h> ++#include <linux/socket.h> ++#include <linux/sockios.h> ++#include <linux/in.h> ++#include <linux/errno.h> ++#include <linux/interrupt.h> ++#include <linux/if_ether.h> ++#include <linux/inet.h> ++#include <linux/netdevice.h> ++#include <linux/etherdevice.h> ++#include <linux/notifier.h> ++#include <linux/init.h> ++#include <net/ip.h> ++#include <net/netlink.h> ++#include <linux/ipv6.h> ++#include <net/route.h> ++#include <linux/skbuff.h> ++#include <net/sock.h> ++#include <net/pkt_sched.h> ++#include <linux/jhash.h> ++#ifdef CONFIG_NET_SCH_ESFQ_NFCT ++#include <net/netfilter/nf_conntrack.h> ++#endif ++ ++/* Stochastic Fairness Queuing algorithm. ++ For more comments look at sch_sfq.c. ++ The difference is that you can change limit, depth, ++ hash table size and choose alternate hash types. 
++ ++ classic: same as in sch_sfq.c ++ dst: destination IP address ++ src: source IP address ++ fwmark: netfilter mark value ++ ctorigdst: original destination IP address ++ ctorigsrc: original source IP address ++ ctrepldst: reply destination IP address ++ ctreplsrc: reply source IP ++ ++*/ ++ ++#define ESFQ_HEAD 0 ++#define ESFQ_TAIL 1 ++ ++/* This type should contain at least SFQ_DEPTH*2 values */ ++typedef unsigned int esfq_index; ++ ++struct esfq_head ++{ ++ esfq_index next; ++ esfq_index prev; ++}; ++ ++struct esfq_sched_data ++{ ++/* Parameters */ ++ int perturb_period; ++ unsigned quantum; /* Allotment per round: MUST BE >= MTU */ ++ int limit; ++ unsigned depth; ++ unsigned hash_divisor; ++ unsigned hash_kind; ++/* Variables */ ++ struct timer_list perturb_timer; ++ int perturbation; ++ esfq_index tail; /* Index of current slot in round */ ++ esfq_index max_depth; /* Maximal depth */ ++ ++ esfq_index *ht; /* Hash table */ ++ esfq_index *next; /* Active slots link */ ++ short *allot; /* Current allotment per slot */ ++ unsigned short *hash; /* Hash value indexed by slots */ ++ struct sk_buff_head *qs; /* Slot queue */ ++ struct esfq_head *dep; /* Linked list of slots, indexed by depth */ ++}; ++ ++/* This contains the info we will hash. */ ++struct esfq_packet_info ++{ ++ u32 proto; /* protocol or port */ ++ u32 src; /* source from packet header */ ++ u32 dst; /* destination from packet header */ ++ u32 ctorigsrc; /* original source from conntrack */ ++ u32 ctorigdst; /* original destination from conntrack */ ++ u32 ctreplsrc; /* reply source from conntrack */ ++ u32 ctrepldst; /* reply destination from conntrack */ ++ u32 mark; /* netfilter mark (fwmark) */ ++}; ++ ++static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a) ++{ ++ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1); ++} ++ ++static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b) ++{ ++ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1); ++} ++ ++static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c) ++{ ++ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1); ++} ++ ++static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb) ++{ ++ struct esfq_packet_info info; ++#ifdef CONFIG_NET_SCH_ESFQ_NFCT ++ enum ip_conntrack_info ctinfo; ++ struct nf_conn *ct = nf_ct_get(skb, &ctinfo); ++#endif ++ ++ switch (skb->protocol) { ++ case __constant_htons(ETH_P_IP): ++ { ++ struct iphdr *iph = ip_hdr(skb); ++ info.dst = iph->daddr; ++ info.src = iph->saddr; ++ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && ++ (iph->protocol == IPPROTO_TCP || ++ iph->protocol == IPPROTO_UDP || ++ iph->protocol == IPPROTO_SCTP || ++ iph->protocol == IPPROTO_DCCP || ++ iph->protocol == IPPROTO_ESP)) ++ info.proto = *(((u32*)iph) + iph->ihl); ++ else ++ info.proto = iph->protocol; ++ break; ++ } ++ case __constant_htons(ETH_P_IPV6): ++ { ++ struct ipv6hdr *iph = ipv6_hdr(skb); ++ /* Hash ipv6 addresses into a u32. This isn't ideal, ++ * but the code is simple. 
*/ ++ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation); ++ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation); ++ if (iph->nexthdr == IPPROTO_TCP || ++ iph->nexthdr == IPPROTO_UDP || ++ iph->nexthdr == IPPROTO_SCTP || ++ iph->nexthdr == IPPROTO_DCCP || ++ iph->nexthdr == IPPROTO_ESP) ++ info.proto = *(u32*)&iph[1]; ++ else ++ info.proto = iph->nexthdr; ++ break; ++ } ++ default: ++ info.dst = (u32)(unsigned long)skb_dst(skb); ++ info.src = (u32)(unsigned long)skb->sk; ++ info.proto = skb->protocol; ++ } ++ ++ info.mark = skb->mark; ++ ++#ifdef CONFIG_NET_SCH_ESFQ_NFCT ++ /* defaults if there is no conntrack info */ ++ info.ctorigsrc = info.src; ++ info.ctorigdst = info.dst; ++ info.ctreplsrc = info.dst; ++ info.ctrepldst = info.src; ++ /* collect conntrack info */ ++ if (ct && ct != &nf_conntrack_untracked) { ++ if (skb->protocol == __constant_htons(ETH_P_IP)) { ++ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; ++ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip; ++ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip; ++ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip; ++ } ++ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) { ++ /* Again, hash ipv6 addresses into a single u32. */ ++ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation); ++ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation); ++ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation); ++ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation); ++ } ++ ++ } ++#endif ++ ++ switch(q->hash_kind) { ++ case TCA_SFQ_HASH_CLASSIC: ++ return esfq_jhash_3words(q, info.dst, info.src, info.proto); ++ case TCA_SFQ_HASH_DST: ++ return esfq_jhash_1word(q, info.dst); ++ case TCA_SFQ_HASH_SRC: ++ return esfq_jhash_1word(q, info.src); ++ case TCA_SFQ_HASH_FWMARK: ++ return esfq_jhash_1word(q, info.mark); ++#ifdef CONFIG_NET_SCH_ESFQ_NFCT ++ case TCA_SFQ_HASH_CTORIGDST: ++ return esfq_jhash_1word(q, info.ctorigdst); ++ case TCA_SFQ_HASH_CTORIGSRC: ++ return esfq_jhash_1word(q, info.ctorigsrc); ++ case TCA_SFQ_HASH_CTREPLDST: ++ return esfq_jhash_1word(q, info.ctrepldst); ++ case TCA_SFQ_HASH_CTREPLSRC: ++ return esfq_jhash_1word(q, info.ctreplsrc); ++ case TCA_SFQ_HASH_CTNATCHG: ++ { ++ if (info.ctorigdst == info.ctreplsrc) ++ return esfq_jhash_1word(q, info.ctorigsrc); ++ return esfq_jhash_1word(q, info.ctreplsrc); ++ } ++#endif ++ default: ++ if (net_ratelimit()) ++ printk(KERN_WARNING "ESFQ: Unknown hash method. 
Falling back to classic.\n"); ++ } ++ return esfq_jhash_3words(q, info.dst, info.src, info.proto); ++} ++ ++static inline void esfq_link(struct esfq_sched_data *q, esfq_index x) ++{ ++ esfq_index p, n; ++ int d = q->qs[x].qlen + q->depth; ++ ++ p = d; ++ n = q->dep[d].next; ++ q->dep[x].next = n; ++ q->dep[x].prev = p; ++ q->dep[p].next = q->dep[n].prev = x; ++} ++ ++static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x) ++{ ++ esfq_index p, n; ++ ++ n = q->dep[x].next; ++ p = q->dep[x].prev; ++ q->dep[p].next = n; ++ q->dep[n].prev = p; ++ ++ if (n == p && q->max_depth == q->qs[x].qlen + 1) ++ q->max_depth--; ++ ++ esfq_link(q, x); ++} ++ ++static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x) ++{ ++ esfq_index p, n; ++ int d; ++ ++ n = q->dep[x].next; ++ p = q->dep[x].prev; ++ q->dep[p].next = n; ++ q->dep[n].prev = p; ++ d = q->qs[x].qlen; ++ if (q->max_depth < d) ++ q->max_depth = d; ++ ++ esfq_link(q, x); ++} ++ ++static unsigned int esfq_drop(struct Qdisc *sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ esfq_index d = q->max_depth; ++ struct sk_buff *skb; ++ unsigned int len; ++ ++ /* Queue is full! Find the longest slot and ++ drop a packet from it */ ++ ++ if (d > 1) { ++ esfq_index x = q->dep[d+q->depth].next; ++ skb = q->qs[x].prev; ++ len = skb->len; ++ __skb_unlink(skb, &q->qs[x]); ++ kfree_skb(skb); ++ esfq_dec(q, x); ++ sch->q.qlen--; ++ sch->qstats.drops++; ++ sch->qstats.backlog -= len; ++ return len; ++ } ++ ++ if (d == 1) { ++ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */ ++ d = q->next[q->tail]; ++ q->next[q->tail] = q->next[d]; ++ q->allot[q->next[d]] += q->quantum; ++ skb = q->qs[d].prev; ++ len = skb->len; ++ __skb_unlink(skb, &q->qs[d]); ++ kfree_skb(skb); ++ esfq_dec(q, d); ++ sch->q.qlen--; ++ q->ht[q->hash[d]] = q->depth; ++ sch->qstats.drops++; ++ sch->qstats.backlog -= len; ++ return len; ++ } ++ ++ return 0; ++} ++ ++static void esfq_q_enqueue(struct sk_buff *skb, struct esfq_sched_data *q, unsigned int end) ++{ ++ unsigned hash = esfq_hash(q, skb); ++ unsigned depth = q->depth; ++ esfq_index x; ++ ++ x = q->ht[hash]; ++ if (x == depth) { ++ q->ht[hash] = x = q->dep[depth].next; ++ q->hash[x] = hash; ++ } ++ ++ if (end == ESFQ_TAIL) ++ __skb_queue_tail(&q->qs[x], skb); ++ else ++ __skb_queue_head(&q->qs[x], skb); ++ ++ esfq_inc(q, x); ++ if (q->qs[x].qlen == 1) { /* The flow is new */ ++ if (q->tail == depth) { /* It is the first flow */ ++ q->tail = x; ++ q->next[x] = x; ++ q->allot[x] = q->quantum; ++ } else { ++ q->next[x] = q->next[q->tail]; ++ q->next[q->tail] = x; ++ q->tail = x; ++ } ++ } ++} ++ ++static int esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ esfq_q_enqueue(skb, q, ESFQ_TAIL); ++ sch->qstats.backlog += skb->len; ++ if (++sch->q.qlen < q->limit-1) { ++ sch->bstats.bytes += skb->len; ++ sch->bstats.packets++; ++ return 0; ++ } ++ ++ sch->qstats.drops++; ++ esfq_drop(sch); ++ return NET_XMIT_CN; ++} ++ ++static struct sk_buff *esfq_peek(struct Qdisc* sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ esfq_index a; ++ ++ /* No active slots */ ++ if (q->tail == q->depth) ++ return NULL; ++ ++ a = q->next[q->tail]; ++ return skb_peek(&q->qs[a]); ++} ++ ++static struct sk_buff *esfq_q_dequeue(struct esfq_sched_data *q) ++{ ++ struct sk_buff *skb; ++ unsigned depth = q->depth; ++ esfq_index a, old_a; ++ ++ /* No active slots */ ++ if (q->tail == depth) ++ return NULL; ++ ++ a = old_a = q->next[q->tail]; ++ ++ /* Grab 
packet */ ++ skb = __skb_dequeue(&q->qs[a]); ++ esfq_dec(q, a); ++ ++ /* Is the slot empty? */ ++ if (q->qs[a].qlen == 0) { ++ q->ht[q->hash[a]] = depth; ++ a = q->next[a]; ++ if (a == old_a) { ++ q->tail = depth; ++ return skb; ++ } ++ q->next[q->tail] = a; ++ q->allot[a] += q->quantum; ++ } else if ((q->allot[a] -= skb->len) <= 0) { ++ q->tail = a; ++ a = q->next[a]; ++ q->allot[a] += q->quantum; ++ } ++ ++ return skb; ++} ++ ++static struct sk_buff *esfq_dequeue(struct Qdisc* sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ struct sk_buff *skb; ++ ++ skb = esfq_q_dequeue(q); ++ if (skb == NULL) ++ return NULL; ++ sch->q.qlen--; ++ sch->qstats.backlog -= skb->len; ++ return skb; ++} ++ ++static void esfq_q_destroy(struct esfq_sched_data *q) ++{ ++ del_timer(&q->perturb_timer); ++ if(q->ht) ++ kfree(q->ht); ++ if(q->dep) ++ kfree(q->dep); ++ if(q->next) ++ kfree(q->next); ++ if(q->allot) ++ kfree(q->allot); ++ if(q->hash) ++ kfree(q->hash); ++ if(q->qs) ++ kfree(q->qs); ++} ++ ++static void esfq_destroy(struct Qdisc *sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ esfq_q_destroy(q); ++} ++ ++ ++static void esfq_reset(struct Qdisc* sch) ++{ ++ struct sk_buff *skb; ++ ++ while ((skb = esfq_dequeue(sch)) != NULL) ++ kfree_skb(skb); ++} ++ ++static void esfq_perturbation(unsigned long arg) ++{ ++ struct Qdisc *sch = (struct Qdisc*)arg; ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ ++ q->perturbation = net_random()&0x1F; ++ ++ if (q->perturb_period) { ++ q->perturb_timer.expires = jiffies + q->perturb_period; ++ add_timer(&q->perturb_timer); ++ } ++} ++ ++static unsigned int esfq_check_hash(unsigned int kind) ++{ ++ switch (kind) { ++ case TCA_SFQ_HASH_CTORIGDST: ++ case TCA_SFQ_HASH_CTORIGSRC: ++ case TCA_SFQ_HASH_CTREPLDST: ++ case TCA_SFQ_HASH_CTREPLSRC: ++ case TCA_SFQ_HASH_CTNATCHG: ++#ifndef CONFIG_NET_SCH_ESFQ_NFCT ++ { ++ if (net_ratelimit()) ++ printk(KERN_WARNING "ESFQ: Conntrack hash types disabled in kernel config. Falling back to classic.\n"); ++ return TCA_SFQ_HASH_CLASSIC; ++ } ++#endif ++ case TCA_SFQ_HASH_CLASSIC: ++ case TCA_SFQ_HASH_DST: ++ case TCA_SFQ_HASH_SRC: ++ case TCA_SFQ_HASH_FWMARK: ++ return kind; ++ default: ++ { ++ if (net_ratelimit()) ++ printk(KERN_WARNING "ESFQ: Unknown hash type. Falling back to classic.\n"); ++ return TCA_SFQ_HASH_CLASSIC; ++ } ++ } ++} ++ ++static int esfq_q_init(struct esfq_sched_data *q, struct nlattr *opt) ++{ ++ struct tc_esfq_qopt *ctl = nla_data(opt); ++ esfq_index p = ~0U/2; ++ int i; ++ ++ if (opt && opt->nla_len < nla_attr_size(sizeof(*ctl))) ++ return -EINVAL; ++ ++ q->perturbation = 0; ++ q->hash_kind = TCA_SFQ_HASH_CLASSIC; ++ q->max_depth = 0; ++ if (opt == NULL) { ++ q->perturb_period = 0; ++ q->hash_divisor = 1024; ++ q->tail = q->limit = q->depth = 128; ++ ++ } else { ++ struct tc_esfq_qopt *ctl = nla_data(opt); ++ if (ctl->quantum) ++ q->quantum = ctl->quantum; ++ q->perturb_period = ctl->perturb_period*HZ; ++ q->hash_divisor = ctl->divisor ? : 1024; ++ q->tail = q->limit = q->depth = ctl->flows ? 
: 128; ++ ++ if ( q->depth > p - 1 ) ++ return -EINVAL; ++ ++ if (ctl->limit) ++ q->limit = min_t(u32, ctl->limit, q->depth); ++ ++ if (ctl->hash_kind) { ++ q->hash_kind = esfq_check_hash(ctl->hash_kind); ++ } ++ } ++ ++ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL); ++ if (!q->ht) ++ goto err_case; ++ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL); ++ if (!q->dep) ++ goto err_case; ++ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL); ++ if (!q->next) ++ goto err_case; ++ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL); ++ if (!q->allot) ++ goto err_case; ++ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL); ++ if (!q->hash) ++ goto err_case; ++ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL); ++ if (!q->qs) ++ goto err_case; ++ ++ for (i=0; i< q->hash_divisor; i++) ++ q->ht[i] = q->depth; ++ for (i=0; i<q->depth; i++) { ++ skb_queue_head_init(&q->qs[i]); ++ q->dep[i+q->depth].next = i+q->depth; ++ q->dep[i+q->depth].prev = i+q->depth; ++ } ++ ++ for (i=0; i<q->depth; i++) ++ esfq_link(q, i); ++ return 0; ++err_case: ++ esfq_q_destroy(q); ++ return -ENOBUFS; ++} ++ ++static int esfq_init(struct Qdisc *sch, struct nlattr *opt) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ int err; ++ ++ q->quantum = psched_mtu(qdisc_dev(sch)); /* default */ ++ if ((err = esfq_q_init(q, opt))) ++ return err; ++ ++ init_timer(&q->perturb_timer); ++ q->perturb_timer.data = (unsigned long)sch; ++ q->perturb_timer.function = esfq_perturbation; ++ if (q->perturb_period) { ++ q->perturb_timer.expires = jiffies + q->perturb_period; ++ add_timer(&q->perturb_timer); ++ } ++ ++ return 0; ++} ++ ++static int esfq_change(struct Qdisc *sch, struct nlattr *opt) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ struct esfq_sched_data new; ++ struct sk_buff *skb; ++ int err; ++ ++ /* set up new queue */ ++ memset(&new, 0, sizeof(struct esfq_sched_data)); ++ new.quantum = psched_mtu(qdisc_dev(sch)); /* default */ ++ if ((err = esfq_q_init(&new, opt))) ++ return err; ++ ++ /* copy all packets from the old queue to the new queue */ ++ sch_tree_lock(sch); ++ while ((skb = esfq_q_dequeue(q)) != NULL) ++ esfq_q_enqueue(skb, &new, ESFQ_TAIL); ++ ++ /* clean up the old queue */ ++ esfq_q_destroy(q); ++ ++ /* copy elements of the new queue into the old queue */ ++ q->perturb_period = new.perturb_period; ++ q->quantum = new.quantum; ++ q->limit = new.limit; ++ q->depth = new.depth; ++ q->hash_divisor = new.hash_divisor; ++ q->hash_kind = new.hash_kind; ++ q->tail = new.tail; ++ q->max_depth = new.max_depth; ++ q->ht = new.ht; ++ q->dep = new.dep; ++ q->next = new.next; ++ q->allot = new.allot; ++ q->hash = new.hash; ++ q->qs = new.qs; ++ ++ /* finish up */ ++ if (q->perturb_period) { ++ q->perturb_timer.expires = jiffies + q->perturb_period; ++ add_timer(&q->perturb_timer); ++ } else { ++ q->perturbation = 0; ++ } ++ sch_tree_unlock(sch); ++ return 0; ++} ++ ++static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ unsigned char *b = skb_tail_pointer(skb); ++ struct tc_esfq_qopt opt; ++ ++ opt.quantum = q->quantum; ++ opt.perturb_period = q->perturb_period/HZ; ++ ++ opt.limit = q->limit; ++ opt.divisor = q->hash_divisor; ++ opt.flows = q->depth; ++ opt.hash_kind = q->hash_kind; ++ ++ if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) ++ goto nla_put_failure; ++ ++ return skb->len; ++ ++nla_put_failure: ++ nlmsg_trim(skb, b); ++ return -1; ++} ++ ++static struct Qdisc_ops 
esfq_qdisc_ops = ++{ ++ .next = NULL, ++ .cl_ops = NULL, ++ .id = "esfq", ++ .priv_size = sizeof(struct esfq_sched_data), ++ .enqueue = esfq_enqueue, ++ .dequeue = esfq_dequeue, ++ .peek = esfq_peek, ++ .drop = esfq_drop, ++ .init = esfq_init, ++ .reset = esfq_reset, ++ .destroy = esfq_destroy, ++ .change = esfq_change, ++ .dump = esfq_dump, ++ .owner = THIS_MODULE, ++}; ++ ++static int __init esfq_module_init(void) ++{ ++ return register_qdisc(&esfq_qdisc_ops); ++} ++static void __exit esfq_module_exit(void) ++{ ++ unregister_qdisc(&esfq_qdisc_ops); ++} ++module_init(esfq_module_init) ++module_exit(esfq_module_exit) ++MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/patches-3.8/621-sched_act_connmark.patch b/target/linux/generic/patches-3.8/621-sched_act_connmark.patch new file mode 100644 index 0000000000..0393f15893 --- /dev/null +++ b/target/linux/generic/patches-3.8/621-sched_act_connmark.patch @@ -0,0 +1,172 @@ +--- /dev/null ++++ b/net/sched/act_connmark.c +@@ -0,0 +1,137 @@ ++/* ++ * Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org> ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++ * version 2, as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple ++ * Place - Suite 330, Boston, MA 02111-1307 USA. ++ */ ++ ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/kernel.h> ++#include <linux/skbuff.h> ++#include <linux/rtnetlink.h> ++#include <linux/pkt_cls.h> ++#include <linux/ip.h> ++#include <linux/ipv6.h> ++#include <net/netlink.h> ++#include <net/pkt_sched.h> ++#include <net/act_api.h> ++ ++#include <net/netfilter/nf_conntrack.h> ++#include <net/netfilter/nf_conntrack_core.h> ++ ++#define TCA_ACT_CONNMARK 20 ++ ++#define CONNMARK_TAB_MASK 3 ++static struct tcf_common *tcf_connmark_ht[CONNMARK_TAB_MASK + 1]; ++static u32 connmark_idx_gen; ++static DEFINE_RWLOCK(connmark_lock); ++ ++static struct tcf_hashinfo connmark_hash_info = { ++ .htab = tcf_connmark_ht, ++ .hmask = CONNMARK_TAB_MASK, ++ .lock = &connmark_lock, ++}; ++ ++static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a, ++ struct tcf_result *res) ++{ ++ struct nf_conn *c; ++ enum ip_conntrack_info ctinfo; ++ int proto; ++ int r; ++ ++ if (skb->protocol == htons(ETH_P_IP)) { ++ if (skb->len < sizeof(struct iphdr)) ++ goto out; ++ proto = PF_INET; ++ } else if (skb->protocol == htons(ETH_P_IPV6)) { ++ if (skb->len < sizeof(struct ipv6hdr)) ++ goto out; ++ proto = PF_INET6; ++ } else ++ goto out; ++ ++ r = nf_conntrack_in(dev_net(skb->dev), proto, NF_INET_PRE_ROUTING, skb); ++ if (r != NF_ACCEPT) ++ goto out; ++ ++ c = nf_ct_get(skb, &ctinfo); ++ if (!c) ++ goto out; ++ ++ skb->mark = c->mark; ++ nf_conntrack_put(skb->nfct); ++ skb->nfct = NULL; ++ ++out: ++ return TC_ACT_PIPE; ++} ++ ++static int tcf_connmark_init(struct nlattr *nla, struct nlattr *est, ++ struct tc_action *a, int ovr, int bind) ++{ ++ struct tcf_common *pc; ++ ++ pc = tcf_hash_create(0, est, a, sizeof(*pc), bind, ++ &connmark_idx_gen, &connmark_hash_info); ++ if (IS_ERR(pc)) ++ return PTR_ERR(pc); ++ ++ 
tcf_hash_insert(pc, &connmark_hash_info); ++ ++ return ACT_P_CREATED; ++} ++ ++static inline int tcf_connmark_cleanup(struct tc_action *a, int bind) ++{ ++ if (a->priv) ++ return tcf_hash_release(a->priv, bind, &connmark_hash_info); ++ return 0; ++} ++ ++static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, ++ int bind, int ref) ++{ ++ return skb->len; ++} ++ ++static struct tc_action_ops act_connmark_ops = { ++ .kind = "connmark", ++ .hinfo = &connmark_hash_info, ++ .type = TCA_ACT_CONNMARK, ++ .capab = TCA_CAP_NONE, ++ .owner = THIS_MODULE, ++ .act = tcf_connmark, ++ .dump = tcf_connmark_dump, ++ .cleanup = tcf_connmark_cleanup, ++ .init = tcf_connmark_init, ++ .walk = tcf_generic_walker, ++}; ++ ++MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>"); ++MODULE_DESCRIPTION("Connection tracking mark restoring"); ++MODULE_LICENSE("GPL"); ++ ++static int __init connmark_init_module(void) ++{ ++ return tcf_register_action(&act_connmark_ops); ++} ++ ++static void __exit connmark_cleanup_module(void) ++{ ++ tcf_unregister_action(&act_connmark_ops); ++} ++ ++module_init(connmark_init_module); ++module_exit(connmark_cleanup_module); +--- a/net/sched/Kconfig ++++ b/net/sched/Kconfig +@@ -670,6 +670,19 @@ config NET_ACT_CSUM + To compile this code as a module, choose M here: the + module will be called act_csum. + ++config NET_ACT_CONNMARK ++ tristate "Connection Tracking Marking" ++ depends on NET_CLS_ACT ++ depends on NF_CONNTRACK ++ depends on NF_CONNTRACK_MARK ++ ---help--- ++ Say Y here to restore the connmark from a scheduler action ++ ++ If unsure, say N. ++ ++ To compile this code as a module, choose M here: the ++ module will be called act_connmark. ++ + config NET_CLS_IND + bool "Incoming device classification" + depends on NET_CLS_U32 || NET_CLS_FW +--- a/net/sched/Makefile ++++ b/net/sched/Makefile +@@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit + obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o + obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o + obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o ++obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o + obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o + obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o + obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o diff --git a/target/linux/generic/patches-3.8/630-packet_socket_type.patch b/target/linux/generic/patches-3.8/630-packet_socket_type.patch new file mode 100644 index 0000000000..921c810f0e --- /dev/null +++ b/target/linux/generic/patches-3.8/630-packet_socket_type.patch @@ -0,0 +1,134 @@ +This patch allows the user to specify desired packet types (outgoing, +broadcast, unicast, etc.) on packet sockets via setsockopt. 
+This can reduce the load in situations where only a limited number +of packet types are necessary + +Signed-off-by: Felix Fietkau <nbd@openwrt.org> + +--- a/include/uapi/linux/if_packet.h ++++ b/include/uapi/linux/if_packet.h +@@ -29,6 +29,8 @@ struct sockaddr_ll { + /* These ones are invisible by user level */ + #define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */ + #define PACKET_FASTROUTE 6 /* Fastrouted frame */ ++#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */ ++ + + /* Packet socket options */ + +@@ -51,6 +53,7 @@ struct sockaddr_ll { + #define PACKET_TIMESTAMP 17 + #define PACKET_FANOUT 18 + #define PACKET_TX_HAS_OFF 19 ++#define PACKET_RECV_TYPE 20 + + #define PACKET_FANOUT_HASH 0 + #define PACKET_FANOUT_LB 1 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -1273,6 +1273,7 @@ static int packet_rcv_spkt(struct sk_buf + { + struct sock *sk; + struct sockaddr_pkt *spkt; ++ struct packet_sock *po; + + /* + * When we registered the protocol we saved the socket in the data +@@ -1280,6 +1281,7 @@ static int packet_rcv_spkt(struct sk_buf + */ + + sk = pt->af_packet_priv; ++ po = pkt_sk(sk); + + /* + * Yank back the headers [hope the device set this +@@ -1292,7 +1294,7 @@ static int packet_rcv_spkt(struct sk_buf + * so that this procedure is noop. + */ + +- if (skb->pkt_type == PACKET_LOOPBACK) ++ if (!(po->pkt_type & (1 << skb->pkt_type))) + goto out; + + if (!net_eq(dev_net(dev), sock_net(sk))) +@@ -1498,12 +1500,12 @@ static int packet_rcv(struct sk_buff *sk + int skb_len = skb->len; + unsigned int snaplen, res; + +- if (skb->pkt_type == PACKET_LOOPBACK) +- goto drop; +- + sk = pt->af_packet_priv; + po = pkt_sk(sk); + ++ if (!(po->pkt_type & (1 << skb->pkt_type))) ++ goto drop; ++ + if (!net_eq(dev_net(dev), sock_net(sk))) + goto drop; + +@@ -1622,12 +1624,12 @@ static int tpacket_rcv(struct sk_buff *s + struct timespec ts; + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + +- if (skb->pkt_type == PACKET_LOOPBACK) +- goto drop; +- + sk = pt->af_packet_priv; + po = pkt_sk(sk); + ++ if (!(po->pkt_type & (1 << skb->pkt_type))) ++ goto drop; ++ + if (!net_eq(dev_net(dev), sock_net(sk))) + goto drop; + +@@ -2537,6 +2539,7 @@ static int packet_create(struct net *net + spin_lock_init(&po->bind_lock); + mutex_init(&po->pg_vec_lock); + po->prot_hook.func = packet_rcv; ++ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK); + + if (sock->type == SOCK_PACKET) + po->prot_hook.func = packet_rcv_spkt; +@@ -3150,6 +3153,16 @@ packet_setsockopt(struct socket *sock, i + po->tp_tx_has_off = !!val; + return 0; + } ++ case PACKET_RECV_TYPE: ++ { ++ unsigned int val; ++ if (optlen != sizeof(val)) ++ return -EINVAL; ++ if (copy_from_user(&val, optval, sizeof(val))) ++ return -EFAULT; ++ po->pkt_type = val & ~PACKET_LOOPBACK; ++ return 0; ++ } + default: + return -ENOPROTOOPT; + } +@@ -3204,6 +3217,13 @@ static int packet_getsockopt(struct sock + case PACKET_VNET_HDR: + val = po->has_vnet_hdr; + break; ++ case PACKET_RECV_TYPE: ++ if (len > sizeof(unsigned int)) ++ len = sizeof(unsigned int); ++ val = po->pkt_type; ++ ++ data = &val; ++ break; + case PACKET_VERSION: + val = po->tp_version; + break; +--- a/net/packet/internal.h ++++ b/net/packet/internal.h +@@ -112,6 +112,7 @@ struct packet_sock { + unsigned int tp_tx_has_off:1; + unsigned int tp_tstamp; + struct packet_type prot_hook ____cacheline_aligned_in_smp; ++ unsigned int pkt_type; + }; + + static struct packet_sock *pkt_sk(struct sock *sk) diff --git 
a/target/linux/generic/patches-3.8/640-bridge_no_eap_forward.patch b/target/linux/generic/patches-3.8/640-bridge_no_eap_forward.patch new file mode 100644 index 0000000000..92982aba0b --- /dev/null +++ b/target/linux/generic/patches-3.8/640-bridge_no_eap_forward.patch @@ -0,0 +1,15 @@ +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -75,7 +75,11 @@ int br_handle_frame_finish(struct sk_buf + + dst = NULL; + +- if (is_broadcast_ether_addr(dest)) ++ if (skb->protocol == htons(ETH_P_PAE)) { ++ skb2 = skb; ++ /* Do not forward 802.1x/EAP frames */ ++ skb = NULL; ++ } else if (is_broadcast_ether_addr(dest)) + skb2 = skb; + else if (is_multicast_ether_addr(dest)) { + mdst = br_mdb_get(br, skb); diff --git a/target/linux/generic/patches-3.8/641-bridge_always_accept_eap.patch b/target/linux/generic/patches-3.8/641-bridge_always_accept_eap.patch new file mode 100644 index 0000000000..13824e07f1 --- /dev/null +++ b/target/linux/generic/patches-3.8/641-bridge_always_accept_eap.patch @@ -0,0 +1,11 @@ +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -62,7 +62,7 @@ int br_handle_frame_finish(struct sk_buf + br_multicast_rcv(br, p, skb)) + goto drop; + +- if (p->state == BR_STATE_LEARNING) ++ if ((p->state == BR_STATE_LEARNING) && skb->protocol != htons(ETH_P_PAE)) + goto drop; + + BR_INPUT_SKB_CB(skb)->brdev = br->dev; diff --git a/target/linux/generic/patches-3.8/642-bridge_port_isolate.patch b/target/linux/generic/patches-3.8/642-bridge_port_isolate.patch new file mode 100644 index 0000000000..395dd955ea --- /dev/null +++ b/target/linux/generic/patches-3.8/642-bridge_port_isolate.patch @@ -0,0 +1,103 @@ +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -139,6 +139,7 @@ struct net_bridge_port + #define BR_BPDU_GUARD 0x00000002 + #define BR_ROOT_BLOCK 0x00000004 + #define BR_MULTICAST_FAST_LEAVE 0x00000008 ++#define BR_ISOLATE_MODE 0x00000010 + + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING + u32 multicast_startup_queries_sent; +--- a/net/bridge/br_sysfs_if.c ++++ b/net/bridge/br_sysfs_if.c +@@ -159,6 +159,22 @@ BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPI + BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); + BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); + ++static ssize_t show_isolate_mode(struct net_bridge_port *p, char *buf) ++{ ++ int isolate_mode = (p->flags & BR_ISOLATE_MODE) ? 1 : 0; ++ return sprintf(buf, "%d\n", isolate_mode); ++} ++static ssize_t store_isolate_mode(struct net_bridge_port *p, unsigned long v) ++{ ++ if (v) ++ p->flags |= BR_ISOLATE_MODE; ++ else ++ p->flags &= ~BR_ISOLATE_MODE; ++ return 0; ++} ++static BRPORT_ATTR(isolate_mode, S_IRUGO | S_IWUSR, ++ show_isolate_mode, store_isolate_mode); ++ + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING + static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) + { +@@ -199,6 +215,7 @@ static const struct brport_attribute *br + &brport_attr_multicast_router, + &brport_attr_multicast_fast_leave, + #endif ++ &brport_attr_isolate_mode, + NULL + }; + +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -95,7 +95,8 @@ int br_handle_frame_finish(struct sk_buf + skb2 = skb; + + br->dev->stats.multicast++; +- } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { ++ } else if ((p->flags & BR_ISOLATE_MODE) || ++ ((dst = __br_fdb_get(br, dest)) && dst->is_local)) { + skb2 = skb; + /* Do not forward the packet since it's local. 
*/ + skb = NULL; +--- a/net/bridge/br_forward.c ++++ b/net/bridge/br_forward.c +@@ -110,7 +110,7 @@ void br_deliver(const struct net_bridge_ + /* called with rcu_read_lock */ + void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) + { +- if (should_deliver(to, skb)) { ++ if (should_deliver(to, skb) && !(to->flags & BR_ISOLATE_MODE)) { + if (skb0) + deliver_clone(to, skb, __br_forward); + else +@@ -165,7 +165,8 @@ out: + static void br_flood(struct net_bridge *br, struct sk_buff *skb, + struct sk_buff *skb0, + void (*__packet_hook)(const struct net_bridge_port *p, +- struct sk_buff *skb)) ++ struct sk_buff *skb), ++ bool forward) + { + struct net_bridge_port *p; + struct net_bridge_port *prev; +@@ -173,6 +174,9 @@ static void br_flood(struct net_bridge * + prev = NULL; + + list_for_each_entry_rcu(p, &br->port_list, list) { ++ if (forward && (p->flags & BR_ISOLATE_MODE)) ++ continue; ++ + prev = maybe_deliver(prev, p, skb, __packet_hook); + if (IS_ERR(prev)) + goto out; +@@ -196,14 +200,14 @@ out: + /* called with rcu_read_lock */ + void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb) + { +- br_flood(br, skb, NULL, __br_deliver); ++ br_flood(br, skb, NULL, __br_deliver, false); + } + + /* called under bridge lock */ + void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, + struct sk_buff *skb2) + { +- br_flood(br, skb, skb2, __br_forward); ++ br_flood(br, skb, skb2, __br_forward, true); + } + + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING diff --git a/target/linux/generic/patches-3.8/643-bridge_remove_ipv6_dependency.patch b/target/linux/generic/patches-3.8/643-bridge_remove_ipv6_dependency.patch new file mode 100644 index 0000000000..a1563f44e7 --- /dev/null +++ b/target/linux/generic/patches-3.8/643-bridge_remove_ipv6_dependency.patch @@ -0,0 +1,107 @@ +--- a/include/net/addrconf.h ++++ b/include/net/addrconf.h +@@ -92,6 +92,12 @@ extern void addrconf_join_solict(struc + extern void addrconf_leave_solict(struct inet6_dev *idev, + const struct in6_addr *addr); + ++extern int (*ipv6_dev_get_saddr_hook)(struct net *net, ++ struct net_device *dev, ++ const struct in6_addr *daddr, ++ unsigned int srcprefs, ++ struct in6_addr *saddr); ++ + static inline unsigned long addrconf_timeout_fixup(u32 timeout, + unsigned int unit) + { +--- a/net/bridge/Kconfig ++++ b/net/bridge/Kconfig +@@ -6,7 +6,6 @@ config BRIDGE + tristate "802.1d Ethernet Bridging" + select LLC + select STP +- depends on IPV6 || IPV6=n + ---help--- + If you say Y here, then your Linux box will be able to act as an + Ethernet bridge, which means that the different Ethernet segments it +--- a/net/ipv6/Makefile ++++ b/net/ipv6/Makefile +@@ -44,3 +44,4 @@ obj-y += addrconf_core.o exthdrs_core.o + obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) + + obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o ++obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_stubs.o +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -1246,7 +1246,7 @@ out: + return ret; + } + +-int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, ++static int __ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, + const struct in6_addr *daddr, unsigned int prefs, + struct in6_addr *saddr) + { +@@ -1371,7 +1371,6 @@ try_nextdev: + in6_ifa_put(hiscore->ifa); + return 0; + } +-EXPORT_SYMBOL(ipv6_dev_get_saddr); + + int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, + unsigned char banned_flags) +@@ -4949,6 +4948,9 @@ int __init addrconf_init(void) + + 
ipv6_addr_label_rtnl_register(); + ++ BUG_ON(ipv6_dev_get_saddr_hook != NULL); ++ rcu_assign_pointer(ipv6_dev_get_saddr_hook, __ipv6_dev_get_saddr); ++ + return 0; + errout: + rtnl_af_unregister(&inet6_ops); +@@ -4967,6 +4969,9 @@ void addrconf_cleanup(void) + struct net_device *dev; + int i; + ++ rcu_assign_pointer(ipv6_dev_get_saddr_hook, NULL); ++ synchronize_rcu(); ++ + unregister_netdevice_notifier(&ipv6_dev_notf); + unregister_pernet_subsys(&addrconf_ops); + ipv6_addr_label_cleanup(); +--- /dev/null ++++ b/net/ipv6/inet6_stubs.c +@@ -0,0 +1,33 @@ ++/* ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ */ ++#include <linux/export.h> ++#include <net/ipv6.h> ++ ++int (*ipv6_dev_get_saddr_hook)(struct net *net, struct net_device *dev, ++ const struct in6_addr *daddr, unsigned int srcprefs, ++ struct in6_addr *saddr); ++ ++EXPORT_SYMBOL(ipv6_dev_get_saddr_hook); ++ ++int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, ++ const struct in6_addr *daddr, unsigned int prefs, ++ struct in6_addr *saddr) ++{ ++ int ret = -EADDRNOTAVAIL; ++ typeof(ipv6_dev_get_saddr_hook) dev_get_saddr; ++ ++ rcu_read_lock(); ++ dev_get_saddr = rcu_dereference(ipv6_dev_get_saddr_hook); ++ ++ if (dev_get_saddr) ++ ret = dev_get_saddr(net, dst_dev, daddr, prefs, saddr); ++ ++ rcu_read_unlock(); ++ return ret; ++} ++EXPORT_SYMBOL(ipv6_dev_get_saddr); ++ diff --git a/target/linux/generic/patches-3.8/644-bridge_optimize_netfilter_hooks.patch b/target/linux/generic/patches-3.8/644-bridge_optimize_netfilter_hooks.patch new file mode 100644 index 0000000000..f03ceb65b0 --- /dev/null +++ b/target/linux/generic/patches-3.8/644-bridge_optimize_netfilter_hooks.patch @@ -0,0 +1,146 @@ +--- a/net/bridge/br_forward.c ++++ b/net/bridge/br_forward.c +@@ -56,7 +56,7 @@ int br_dev_queue_push_xmit(struct sk_buf + + int br_forward_finish(struct sk_buff *skb) + { +- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, ++ return BR_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, + br_dev_queue_push_xmit); + + } +@@ -75,7 +75,7 @@ static void __br_deliver(const struct ne + return; + } + +- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, ++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, + br_forward_finish); + } + +@@ -92,7 +92,7 @@ static void __br_forward(const struct ne + skb->dev = to->dev; + skb_forward_csum(skb); + +- NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, ++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, + br_forward_finish); + } + +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -37,7 +37,7 @@ static int br_pass_frame_up(struct sk_bu + indev = skb->dev; + skb->dev = brdev; + +- return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, ++ return BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, + netif_receive_skb); + } + +@@ -184,7 +184,7 @@ rx_handler_result_t br_handle_frame(stru + } + + /* Deliver packet to local host only */ +- if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, ++ if (BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, + NULL, br_handle_local_finish)) { + return RX_HANDLER_CONSUMED; /* consumed by filter */ + } else { +@@ -209,7 +209,7 @@ forward: + if (ether_addr_equal(p->br->dev->dev_addr, dest)) + skb->pkt_type = PACKET_HOST; + +- 
NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, ++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, + br_handle_frame_finish); + break; + default: +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -772,7 +772,7 @@ static void __br_multicast_send_query(st + if (port) { + __skb_push(skb, sizeof(struct ethhdr)); + skb->dev = port->dev; +- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, ++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, + dev_queue_xmit); + } else + netif_rx(skb); +--- a/net/bridge/br_netfilter.c ++++ b/net/bridge/br_netfilter.c +@@ -73,6 +73,15 @@ static int brnf_pass_vlan_indev __read_m + #define IS_ARP(skb) \ + (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP)) + ++int brnf_call_ebtables __read_mostly = 0; ++EXPORT_SYMBOL_GPL(brnf_call_ebtables); ++ ++bool br_netfilter_run_hooks(void) ++{ ++ return brnf_call_iptables | brnf_call_ip6tables | brnf_call_arptables | ++ brnf_call_ebtables; ++} ++ + static inline __be16 vlan_proto(const struct sk_buff *skb) + { + if (vlan_tx_tag_present(skb)) +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -531,15 +531,29 @@ static inline bool br_multicast_is_route + + /* br_netfilter.c */ + #ifdef CONFIG_BRIDGE_NETFILTER ++extern int brnf_call_ebtables; + extern int br_netfilter_init(void); + extern void br_netfilter_fini(void); + extern void br_netfilter_rtable_init(struct net_bridge *); ++extern bool br_netfilter_run_hooks(void); + #else + #define br_netfilter_init() (0) + #define br_netfilter_fini() do { } while(0) + #define br_netfilter_rtable_init(x) ++#define br_netfilter_run_hooks() false + #endif + ++static inline int ++BR_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb, ++ struct net_device *in, struct net_device *out, ++ int (*okfn)(struct sk_buff *)) ++{ ++ if (!br_netfilter_run_hooks()) ++ return okfn(skb); ++ ++ return NF_HOOK(pf, hook, skb, in, out, okfn); ++} ++ + /* br_stp.c */ + extern void br_log_state(const struct net_bridge_port *p); + extern struct net_bridge_port *br_get_port(struct net_bridge *br, +--- a/net/bridge/br_stp_bpdu.c ++++ b/net/bridge/br_stp_bpdu.c +@@ -52,7 +52,7 @@ static void br_send_bpdu(struct net_brid + + skb_reset_mac_header(skb); + +- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, ++ BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, + dev_queue_xmit); + } + +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -2403,11 +2403,13 @@ static int __init ebtables_init(void) + } + + printk(KERN_INFO "Ebtables v2.0 registered\n"); ++ brnf_call_ebtables = 1; + return 0; + } + + static void __exit ebtables_fini(void) + { ++ brnf_call_ebtables = 0; + nf_unregister_sockopt(&ebt_sockopts); + xt_unregister_target(&ebt_standard_target); + printk(KERN_INFO "Ebtables v2.0 unregistered\n"); diff --git a/target/linux/generic/patches-3.8/650-pppoe_header_pad.patch b/target/linux/generic/patches-3.8/650-pppoe_header_pad.patch new file mode 100644 index 0000000000..a89a9f3b74 --- /dev/null +++ b/target/linux/generic/patches-3.8/650-pppoe_header_pad.patch @@ -0,0 +1,20 @@ +--- a/drivers/net/ppp/pppoe.c ++++ b/drivers/net/ppp/pppoe.c +@@ -850,7 +850,7 @@ static int pppoe_sendmsg(struct kiocb *i + goto end; + + +- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, ++ skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32 + NET_SKB_PAD, + 0, GFP_KERNEL); + if (!skb) { + error = -ENOMEM; +@@ -858,7 +858,7 @@ static int pppoe_sendmsg(struct 
kiocb *i + } + + /* Reserve space for headers. */ +- skb_reserve(skb, dev->hard_header_len); ++ skb_reserve(skb, dev->hard_header_len + NET_SKB_PAD); + skb_reset_network_header(skb); + + skb->dev = dev; diff --git a/target/linux/generic/patches-3.8/651-wireless_mesh_header.patch b/target/linux/generic/patches-3.8/651-wireless_mesh_header.patch new file mode 100644 index 0000000000..ff3f0979f0 --- /dev/null +++ b/target/linux/generic/patches-3.8/651-wireless_mesh_header.patch @@ -0,0 +1,11 @@ +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -134,7 +134,7 @@ static inline bool dev_xmit_complete(int + */ + + #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) +-# if defined(CONFIG_MAC80211_MESH) ++# if 1 || defined(CONFIG_MAC80211_MESH) + # define LL_MAX_HEADER 128 + # else + # define LL_MAX_HEADER 96 diff --git a/target/linux/generic/patches-3.8/652-atm_header_changes.patch b/target/linux/generic/patches-3.8/652-atm_header_changes.patch new file mode 100644 index 0000000000..7e5685922f --- /dev/null +++ b/target/linux/generic/patches-3.8/652-atm_header_changes.patch @@ -0,0 +1,12 @@ +--- a/include/uapi/linux/atm.h ++++ b/include/uapi/linux/atm.h +@@ -139,6 +139,9 @@ struct atm_trafprm { + int min_pcr; /* minimum PCR in cells per second */ + int max_cdv; /* maximum CDV in microseconds */ + int max_sdu; /* maximum SDU in bytes */ ++ int scr; /* sustained rate in cells per second */ ++ int mbs; /* maximum burst size (MBS) in cells */ ++ int cdv; /* Cell delay varition */ + /* extra params for ABR */ + unsigned int icr; /* Initial Cell Rate (24-bit) */ + unsigned int tbe; /* Transient Buffer Exposure (24-bit) */ diff --git a/target/linux/generic/patches-3.8/653-disable_netlink_trim.patch b/target/linux/generic/patches-3.8/653-disable_netlink_trim.patch new file mode 100644 index 0000000000..870ae944a1 --- /dev/null +++ b/target/linux/generic/patches-3.8/653-disable_netlink_trim.patch @@ -0,0 +1,28 @@ +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -898,25 +898,7 @@ void netlink_detachskb(struct sock *sk, + + static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) + { +- int delta; +- + skb_orphan(skb); +- +- delta = skb->end - skb->tail; +- if (delta * 2 < skb->truesize) +- return skb; +- +- if (skb_shared(skb)) { +- struct sk_buff *nskb = skb_clone(skb, allocation); +- if (!nskb) +- return skb; +- consume_skb(skb); +- skb = nskb; +- } +- +- if (!pskb_expand_head(skb, 0, -delta, allocation)) +- skb->truesize -= delta; +- + return skb; + } + diff --git a/target/linux/generic/patches-3.8/655-increase_skb_pad.patch b/target/linux/generic/patches-3.8/655-increase_skb_pad.patch new file mode 100644 index 0000000000..e65ae0d461 --- /dev/null +++ b/target/linux/generic/patches-3.8/655-increase_skb_pad.patch @@ -0,0 +1,11 @@ +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -1722,7 +1722,7 @@ static inline int pskb_network_may_pull( + * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) + */ + #ifndef NET_SKB_PAD +-#define NET_SKB_PAD max(32, L1_CACHE_BYTES) ++#define NET_SKB_PAD max(48, L1_CACHE_BYTES) + #endif + + extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); diff --git a/target/linux/generic/patches-3.8/700-swconfig.patch b/target/linux/generic/patches-3.8/700-swconfig.patch new file mode 100644 index 0000000000..1c1111de06 --- /dev/null +++ b/target/linux/generic/patches-3.8/700-swconfig.patch @@ -0,0 +1,39 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -13,6 +13,16 @@ 
menuconfig PHYLIB + + if PHYLIB + ++config SWCONFIG ++ tristate "Switch configuration API" ++ ---help--- ++ Switch configuration API using netlink. This allows ++ you to configure the VLAN features of certain switches. ++ ++config SWCONFIG_LEDS ++ bool "Switch LED trigger support" ++ depends on (SWCONFIG && LEDS_TRIGGERS) ++ + comment "MII PHY device drivers" + + config AT803X_PHY +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -3,6 +3,7 @@ + libphy-objs := phy.o phy_device.o mdio_bus.o + + obj-$(CONFIG_PHYLIB) += libphy.o ++obj-$(CONFIG_SWCONFIG) += swconfig.o + obj-$(CONFIG_MARVELL_PHY) += marvell.o + obj-$(CONFIG_DAVICOM_PHY) += davicom.o + obj-$(CONFIG_CICADA_PHY) += cicada.o +--- a/include/uapi/linux/Kbuild ++++ b/include/uapi/linux/Kbuild +@@ -356,6 +356,7 @@ header-y += stddef.h + header-y += string.h + header-y += suspend_ioctls.h + header-y += swab.h ++header-y += switch.h + header-y += synclink.h + header-y += sysctl.h + header-y += sysinfo.h diff --git a/target/linux/generic/patches-3.8/701-phy_extension.patch b/target/linux/generic/patches-3.8/701-phy_extension.patch new file mode 100644 index 0000000000..45464ac753 --- /dev/null +++ b/target/linux/generic/patches-3.8/701-phy_extension.patch @@ -0,0 +1,72 @@ +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -301,6 +301,50 @@ int phy_ethtool_gset(struct phy_device * + } + EXPORT_SYMBOL(phy_ethtool_gset); + ++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr) ++{ ++ u32 cmd; ++ int tmp; ++ struct ethtool_cmd ecmd = { ETHTOOL_GSET }; ++ struct ethtool_value edata = { ETHTOOL_GLINK }; ++ ++ if (get_user(cmd, (u32 *) useraddr)) ++ return -EFAULT; ++ ++ switch (cmd) { ++ case ETHTOOL_GSET: ++ phy_ethtool_gset(phydev, &ecmd); ++ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd))) ++ return -EFAULT; ++ return 0; ++ ++ case ETHTOOL_SSET: ++ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd))) ++ return -EFAULT; ++ return phy_ethtool_sset(phydev, &ecmd); ++ ++ case ETHTOOL_NWAY_RST: ++ /* if autoneg is off, it's an error */ ++ tmp = phy_read(phydev, MII_BMCR); ++ if (tmp & BMCR_ANENABLE) { ++ tmp |= (BMCR_ANRESTART); ++ phy_write(phydev, MII_BMCR, tmp); ++ return 0; ++ } ++ return -EINVAL; ++ ++ case ETHTOOL_GLINK: ++ edata.data = (phy_read(phydev, ++ MII_BMSR) & BMSR_LSTATUS) ? 
1 : 0; ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++ } ++ ++ return -EOPNOTSUPP; ++} ++EXPORT_SYMBOL(phy_ethtool_ioctl); ++ + /** + * phy_mii_ioctl - generic PHY MII ioctl interface + * @phydev: the phy_device struct +@@ -476,7 +520,7 @@ static void phy_force_reduction(struct p + int idx; + + idx = phy_find_setting(phydev->speed, phydev->duplex); +- ++ + idx++; + + idx = phy_find_valid(idx, phydev->supported); +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -542,6 +542,7 @@ void phy_start_machine(struct phy_device + void phy_stop_machine(struct phy_device *phydev); + int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); + int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); ++int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr); + int phy_mii_ioctl(struct phy_device *phydev, + struct ifreq *ifr, int cmd); + int phy_start_interrupts(struct phy_device *phydev); diff --git a/target/linux/generic/patches-3.8/702-phy_add_aneg_done_function.patch b/target/linux/generic/patches-3.8/702-phy_add_aneg_done_function.patch new file mode 100644 index 0000000000..8e4443604f --- /dev/null +++ b/target/linux/generic/patches-3.8/702-phy_add_aneg_done_function.patch @@ -0,0 +1,45 @@ +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -408,9 +408,18 @@ struct phy_driver { + */ + int (*config_aneg)(struct phy_device *phydev); + ++ /* Determine if autonegotiation is done */ ++ int (*aneg_done)(struct phy_device *phydev); ++ + /* Determines the negotiated speed and duplex */ + int (*read_status)(struct phy_device *phydev); + ++ /* ++ * Update the value in phydev->link to reflect the ++ * current link value ++ */ ++ int (*update_link)(struct phy_device *phydev); ++ + /* Clears any pending interrupts */ + int (*ack_interrupt)(struct phy_device *phydev); + +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -796,6 +796,9 @@ int genphy_update_link(struct phy_device + { + int status; + ++ if (phydev->drv->update_link) ++ return phydev->drv->update_link(phydev); ++ + /* Do a fake read */ + status = phy_read(phydev, MII_BMSR); + +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -108,6 +108,9 @@ static inline int phy_aneg_done(struct p + { + int retval; + ++ if (phydev->drv->aneg_done) ++ return phydev->drv->aneg_done(phydev); ++ + retval = phy_read(phydev, MII_BMSR); + + return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE); diff --git a/target/linux/generic/patches-3.8/710-phy-add-mdio_register_board_info.patch b/target/linux/generic/patches-3.8/710-phy-add-mdio_register_board_info.patch new file mode 100644 index 0000000000..a1c5b1d4bb --- /dev/null +++ b/target/linux/generic/patches-3.8/710-phy-add-mdio_register_board_info.patch @@ -0,0 +1,191 @@ +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -41,6 +41,8 @@ + #include <asm/irq.h> + #include <asm/uaccess.h> + ++#include "mdio-boardinfo.h" ++ + /** + * mdiobus_alloc_size - allocate a mii_bus structure + * @size: extra amount of memory to allocate for private storage. 
+@@ -229,15 +231,33 @@ void mdiobus_free(struct mii_bus *bus) + } + EXPORT_SYMBOL(mdiobus_free); + ++static void mdiobus_setup_phydev_from_boardinfo(struct mii_bus *bus, ++ struct phy_device *phydev, ++ struct mdio_board_info *bi) ++{ ++ if (strcmp(bus->id, bi->bus_id) || ++ bi->phy_addr != phydev->addr) ++ return; ++ ++ phydev->dev.platform_data = (void *) bi->platform_data; ++} ++ + struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) + { + struct phy_device *phydev; ++ struct mdio_board_entry *be; + int err; + + phydev = get_phy_device(bus, addr, false); + if (IS_ERR(phydev) || phydev == NULL) + return phydev; + ++ mutex_lock(&__mdio_board_lock); ++ list_for_each_entry(be, &__mdio_board_list, list) ++ mdiobus_setup_phydev_from_boardinfo(bus, phydev, ++ &be->board_info); ++ mutex_unlock(&__mdio_board_lock); ++ + err = phy_device_register(phydev); + if (err) { + phy_device_free(phydev); +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -575,4 +575,22 @@ int __init mdio_bus_init(void); + void mdio_bus_exit(void); + + extern struct bus_type mdio_bus_type; ++ ++struct mdio_board_info { ++ const char *bus_id; ++ int phy_addr; ++ ++ const void *platform_data; ++}; ++ ++#ifdef CONFIG_MDIO_BOARDINFO ++int mdiobus_register_board_info(const struct mdio_board_info *info, unsigned n); ++#else ++static inline int ++mdiobus_register_board_info(const struct mdio_board_info *info, unsigned n) ++{ ++ return 0; ++} ++#endif ++ + #endif /* __PHY_H */ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -13,6 +13,10 @@ menuconfig PHYLIB + + if PHYLIB + ++config MDIO_BOARDINFO ++ bool ++ default y ++ + config SWCONFIG + tristate "Switch configuration API" + ---help--- +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -2,6 +2,8 @@ + + libphy-objs := phy.o phy_device.o mdio_bus.o + ++obj-$(CONFIG_MDIO_BOARDINFO) += mdio-boardinfo.o ++ + obj-$(CONFIG_PHYLIB) += libphy.o + obj-$(CONFIG_SWCONFIG) += swconfig.o + obj-$(CONFIG_MARVELL_PHY) += marvell.o +--- /dev/null ++++ b/drivers/net/phy/mdio-boardinfo.c +@@ -0,0 +1,58 @@ ++/* ++ * mdio-boardinfo.c - collect pre-declarations of PHY devices ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/phy.h> ++#include <linux/slab.h> ++#include <linux/export.h> ++#include <linux/mutex.h> ++#include <linux/phy.h> ++ ++#include "mdio-boardinfo.h" ++ ++/* ++ * These symbols are exported ONLY FOR the mdio_bus component. ++ * No other users will be supported. ++ */ ++ ++LIST_HEAD(__mdio_board_list); ++EXPORT_SYMBOL_GPL(__mdio_board_list); ++ ++DEFINE_MUTEX(__mdio_board_lock); ++EXPORT_SYMBOL_GPL(__mdio_board_lock); ++ ++/** ++ * mdio_register_board_info - register PHY devices for a given board ++ * @info: array of chip descriptors ++ * @n: how many descriptors are provided ++ * Context: can sleep ++ * ++ * The board info passed can safely be __initdata ... but be careful of ++ * any embedded pointers (platform_data, etc), they're copied as-is. 
++ */ ++int __init ++mdiobus_register_board_info(struct mdio_board_info const *info, unsigned n) ++{ ++ struct mdio_board_entry *be; ++ int i; ++ ++ be = kzalloc(n * sizeof(*be), GFP_KERNEL); ++ if (!be) ++ return -ENOMEM; ++ ++ for (i = 0; i < n; i++, be++, info++) { ++ memcpy(&be->board_info, info, sizeof(*info)); ++ mutex_lock(&__mdio_board_lock); ++ list_add_tail(&be->list, &__mdio_board_list); ++ mutex_unlock(&__mdio_board_lock); ++ } ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/net/phy/mdio-boardinfo.h +@@ -0,0 +1,22 @@ ++/* ++ * mdio-boardinfo.h - boardinfo interface internal to the mdio_bus component ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ */ ++ ++#include <linux/mutex.h> ++ ++struct mdio_board_entry { ++ struct list_head list; ++ struct mdio_board_info board_info; ++}; ++ ++/* __mdio_board_lock protects __mdio_board_list ++ * only mdio_bus components are allowed to use these symbols. ++ */ ++extern struct mutex __mdio_board_lock; ++extern struct list_head __mdio_board_list; +--- a/drivers/net/Makefile ++++ b/drivers/net/Makefile +@@ -15,7 +15,7 @@ obj-$(CONFIG_MII) += mii.o + obj-$(CONFIG_MDIO) += mdio.o + obj-$(CONFIG_NET) += Space.o loopback.o + obj-$(CONFIG_NETCONSOLE) += netconsole.o +-obj-$(CONFIG_PHYLIB) += phy/ ++obj-y += phy/ + obj-$(CONFIG_RIONET) += rionet.o + obj-$(CONFIG_NET_TEAM) += team/ + obj-$(CONFIG_TUN) += tun.o diff --git a/target/linux/generic/patches-3.8/720-phy_adm6996.patch b/target/linux/generic/patches-3.8/720-phy_adm6996.patch new file mode 100644 index 0000000000..04d49e9cca --- /dev/null +++ b/target/linux/generic/patches-3.8/720-phy_adm6996.patch @@ -0,0 +1,26 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -121,6 +121,13 @@ config MICREL_PHY + ---help--- + Supports the KSZ9021, VSC8201, KS8001 PHYs. + ++config ADM6996_PHY ++ tristate "Driver for ADM6996 switches" ++ select SWCONFIG ++ ---help--- ++ Currently supports the ADM6996FC and ADM6996M switches. ++ Support for FC is very limited. 
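As a usage sketch for the MDIO board-info interface introduced just above (struct mdio_board_info and mdiobus_register_board_info()), board support code could pre-declare PHY platform data along these lines; the bus id, PHY address and platform data below are assumptions made up for illustration, not taken from any of these patches:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/phy.h>

/* hypothetical driver-specific settings handed to the PHY/switch driver */
struct example_switch_platform_data {
	int cpu_port;
};

static struct example_switch_platform_data example_switch_pdata = {
	.cpu_port = 5,
};

/* safe to mark __initdata: the entries are copied by the registration call */
static struct mdio_board_info example_mdio_board_info[] __initdata = {
	{
		.bus_id        = "example-mdio.0",	/* must match bus->id */
		.phy_addr      = 4,
		.platform_data = &example_switch_pdata,
	},
};

static int __init example_board_setup(void)
{
	return mdiobus_register_board_info(example_mdio_board_info,
					   ARRAY_SIZE(example_mdio_board_info));
}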
++ + config FIXED_PHY + bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + depends on PHYLIB=y +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -17,6 +17,7 @@ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o + obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o + obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o + obj-$(CONFIG_ICPLUS_PHY) += icplus.o ++obj-$(CONFIG_ADM6996_PHY) += adm6996.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_FIXED_PHY) += fixed.o diff --git a/target/linux/generic/patches-3.8/721-phy_packets.patch b/target/linux/generic/patches-3.8/721-phy_packets.patch new file mode 100644 index 0000000000..1aa814857b --- /dev/null +++ b/target/linux/generic/patches-3.8/721-phy_packets.patch @@ -0,0 +1,175 @@ +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1090,6 +1090,11 @@ struct net_device { + const struct net_device_ops *netdev_ops; + const struct ethtool_ops *ethtool_ops; + ++#ifdef CONFIG_ETHERNET_PACKET_MANGLE ++ void (*eth_mangle_rx)(struct net_device *dev, struct sk_buff *skb); ++ struct sk_buff *(*eth_mangle_tx)(struct net_device *dev, struct sk_buff *skb); ++#endif ++ + /* Hardware header description */ + const struct header_ops *header_ops; + +@@ -1146,6 +1151,9 @@ struct net_device { + void *ax25_ptr; /* AX.25 specific data */ + struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, + assign before registering */ ++#ifdef CONFIG_ETHERNET_PACKET_MANGLE ++ void *phy_ptr; /* PHY device specific data */ ++#endif + + /* + * Cache lines mostly used on receive path (including eth_type_trans()) +--- a/include/uapi/linux/if.h ++++ b/include/uapi/linux/if.h +@@ -83,6 +83,7 @@ + #define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */ + #define IFF_LIVE_ADDR_CHANGE 0x100000 /* device supports hardware address + * change when it's running */ ++#define IFF_NO_IP_ALIGN 0x200000 /* do not ip-align allocated rx pkts */ + + + #define IF_GET_IFACE 0x0001 /* for querying only */ +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -1752,6 +1752,10 @@ static inline int pskb_trim(struct sk_bu + return (len < skb->len) ? __pskb_trim(skb, len) : 0; + } + ++extern struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length, gfp_t gfp); ++ ++ + /** + * pskb_trim_unique - remove end from a paged unique (not cloned) buffer + * @skb: buffer to alter +@@ -1854,16 +1858,6 @@ static inline struct sk_buff *dev_alloc_ + } + + +-static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, +- unsigned int length, gfp_t gfp) +-{ +- struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); +- +- if (NET_IP_ALIGN && skb) +- skb_reserve(skb, NET_IP_ALIGN); +- return skb; +-} +- + static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) + { +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -23,6 +23,12 @@ menuconfig NET + + if NET + ++config ETHERNET_PACKET_MANGLE ++ bool ++ help ++ This option can be selected by phy drivers that need to mangle ++ packets going in or out of an ethernet device. 
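The hooks this option enables are the eth_mangle_rx/eth_mangle_tx pointers added to struct net_device earlier in this patch. A minimal sketch of how a switch driver might attach them is shown below; the function names are hypothetical, and the semantics (a NULL return from the tx hook means the skb was consumed) follow the net/core/dev.c change later in the same patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* called from eth_type_trans(): e.g. strip a trailing switch tag
 * before the frame reaches the network stack */
static void example_mangle_rx(struct net_device *dev, struct sk_buff *skb)
{
}

/* called from dev_hard_start_xmit(): e.g. insert a per-port tag;
 * returning NULL tells the core the skb was consumed here */
static struct sk_buff *example_mangle_tx(struct net_device *dev,
					 struct sk_buff *skb)
{
	return skb;
}

/* only valid when CONFIG_ETHERNET_PACKET_MANGLE is selected */
static void example_attach_hooks(struct net_device *dev)
{
	dev->eth_mangle_rx = example_mangle_rx;
	dev->eth_mangle_tx = example_mangle_tx;
}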
++ + config WANT_COMPAT_NETLINK_MESSAGES + bool + help +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2361,9 +2361,19 @@ int dev_hard_start_xmit(struct sk_buff * + if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(skb, dev); + +- skb_len = skb->len; +- rc = ops->ndo_start_xmit(skb, dev); +- trace_net_dev_xmit(skb, rc, dev, skb_len); ++#ifdef CONFIG_ETHERNET_PACKET_MANGLE ++ if (!dev->eth_mangle_tx || ++ (skb = dev->eth_mangle_tx(dev, skb)) != NULL) ++#else ++ if (1) ++#endif ++ { ++ skb_len = skb->len; ++ rc = ops->ndo_start_xmit(skb, dev); ++ trace_net_dev_xmit(skb, rc, dev, skb_len); ++ } else { ++ rc = NETDEV_TX_OK; ++ } + if (rc == NETDEV_TX_OK) + txq_trans_update(txq); + return rc; +@@ -2386,9 +2396,19 @@ gso: + if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(nskb, dev); + +- skb_len = nskb->len; +- rc = ops->ndo_start_xmit(nskb, dev); +- trace_net_dev_xmit(nskb, rc, dev, skb_len); ++#ifdef CONFIG_ETHERNET_PACKET_MANGLE ++ if (!dev->eth_mangle_tx || ++ (nskb = dev->eth_mangle_tx(dev, nskb)) != NULL) ++#else ++ if (1) ++#endif ++ { ++ skb_len = nskb->len; ++ rc = ops->ndo_start_xmit(nskb, dev); ++ trace_net_dev_xmit(nskb, rc, dev, skb_len); ++ } else { ++ rc = NETDEV_TX_OK; ++ } + if (unlikely(rc != NETDEV_TX_OK)) { + if (rc & ~NETDEV_TX_MASK) + goto out_kfree_gso_skb; +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -60,6 +60,7 @@ + #include <linux/scatterlist.h> + #include <linux/errqueue.h> + #include <linux/prefetch.h> ++#include <linux/if.h> + + #include <net/protocol.h> + #include <net/dst.h> +@@ -455,6 +456,22 @@ struct sk_buff *__netdev_alloc_skb(struc + } + EXPORT_SYMBOL(__netdev_alloc_skb); + ++struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length, gfp_t gfp) ++{ ++ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); ++ ++#ifdef CONFIG_ETHERNET_PACKET_MANGLE ++ if (dev->priv_flags & IFF_NO_IP_ALIGN) ++ return skb; ++#endif ++ ++ if (NET_IP_ALIGN && skb) ++ skb_reserve(skb, NET_IP_ALIGN); ++ return skb; ++} ++EXPORT_SYMBOL(__netdev_alloc_skb_ip_align); ++ + void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, + int size, unsigned int truesize) + { +--- a/net/ethernet/eth.c ++++ b/net/ethernet/eth.c +@@ -159,6 +159,12 @@ __be16 eth_type_trans(struct sk_buff *sk + struct ethhdr *eth; + + skb->dev = dev; ++ ++#ifdef CONFIG_ETHERNET_PACKET_MANGLE ++ if (dev->eth_mangle_rx) ++ dev->eth_mangle_rx(dev, skb); ++#endif ++ + skb_reset_mac_header(skb); + skb_pull_inline(skb, ETH_HLEN); + eth = eth_hdr(skb); diff --git a/target/linux/generic/patches-3.8/722-phy_mvswitch.patch b/target/linux/generic/patches-3.8/722-phy_mvswitch.patch new file mode 100644 index 0000000000..fe6a7d311a --- /dev/null +++ b/target/linux/generic/patches-3.8/722-phy_mvswitch.patch @@ -0,0 +1,23 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -128,6 +128,10 @@ config ADM6996_PHY + Currently supports the ADM6996FC and ADM6996M switches. + Support for FC is very limited. 
+ ++config MVSWITCH_PHY ++ tristate "Driver for Marvell 88E6060 switches" ++ select ETHERNET_PACKET_MANGLE ++ + config FIXED_PHY + bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + depends on PHYLIB=y +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -18,6 +18,7 @@ obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o + obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o + obj-$(CONFIG_ICPLUS_PHY) += icplus.o + obj-$(CONFIG_ADM6996_PHY) += adm6996.o ++obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_FIXED_PHY) += fixed.o diff --git a/target/linux/generic/patches-3.8/723-phy_ip175c.patch b/target/linux/generic/patches-3.8/723-phy_ip175c.patch new file mode 100644 index 0000000000..0d84caf2de --- /dev/null +++ b/target/linux/generic/patches-3.8/723-phy_ip175c.patch @@ -0,0 +1,23 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -132,6 +132,10 @@ config MVSWITCH_PHY + tristate "Driver for Marvell 88E6060 switches" + select ETHERNET_PACKET_MANGLE + ++config IP17XX_PHY ++ tristate "Driver for IC+ IP17xx switches" ++ select SWCONFIG ++ + config FIXED_PHY + bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + depends on PHYLIB=y +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -19,6 +19,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o + obj-$(CONFIG_ICPLUS_PHY) += icplus.o + obj-$(CONFIG_ADM6996_PHY) += adm6996.o + obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o ++obj-$(CONFIG_IP17XX_PHY) += ip17xx.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_FIXED_PHY) += fixed.o diff --git a/target/linux/generic/patches-3.8/724-phy_ar8216.patch b/target/linux/generic/patches-3.8/724-phy_ar8216.patch new file mode 100644 index 0000000000..d196032bc1 --- /dev/null +++ b/target/linux/generic/patches-3.8/724-phy_ar8216.patch @@ -0,0 +1,24 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -136,6 +136,11 @@ config IP17XX_PHY + tristate "Driver for IC+ IP17xx switches" + select SWCONFIG + ++config AR8216_PHY ++ tristate "Driver for Atheros AR8216 switches" ++ select ETHERNET_PACKET_MANGLE ++ select SWCONFIG ++ + config FIXED_PHY + bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + depends on PHYLIB=y +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -21,6 +21,7 @@ obj-$(CONFIG_ADM6996_PHY) += adm6996.o + obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o + obj-$(CONFIG_IP17XX_PHY) += ip17xx.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o ++obj-$(CONFIG_AR8216_PHY) += ar8216.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_FIXED_PHY) += fixed.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o diff --git a/target/linux/generic/patches-3.8/725-phy_rtl8306.patch b/target/linux/generic/patches-3.8/725-phy_rtl8306.patch new file mode 100644 index 0000000000..aeab9fbb48 --- /dev/null +++ b/target/linux/generic/patches-3.8/725-phy_rtl8306.patch @@ -0,0 +1,23 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -141,6 +141,10 @@ config AR8216_PHY + select ETHERNET_PACKET_MANGLE + select SWCONFIG + ++config RTL8306_PHY ++ tristate "Driver for Realtek RTL8306S switches" ++ select SWCONFIG ++ + config FIXED_PHY + bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + depends on PHYLIB=y +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -22,6 +22,7 @@ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o + obj-$(CONFIG_IP17XX_PHY) += ip17xx.o + 
obj-$(CONFIG_REALTEK_PHY) += realtek.o + obj-$(CONFIG_AR8216_PHY) += ar8216.o ++obj-$(CONFIG_RTL8306_PHY) += rtl8306.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_FIXED_PHY) += fixed.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o diff --git a/target/linux/generic/patches-3.8/726-phy_rtl8366.patch b/target/linux/generic/patches-3.8/726-phy_rtl8366.patch new file mode 100644 index 0000000000..12f8a36258 --- /dev/null +++ b/target/linux/generic/patches-3.8/726-phy_rtl8366.patch @@ -0,0 +1,45 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -215,6 +215,30 @@ config MDIO_BUS_MUX_MMIOREG + + Currently, only 8-bit registers are supported. + ++config RTL8366_SMI ++ tristate "Driver for the RTL8366 SMI interface" ++ depends on GENERIC_GPIO ++ ---help--- ++ This module implements the SMI interface protocol which is used ++ by some RTL8366 ethernet switch devices via the generic GPIO API. ++ ++if RTL8366_SMI ++ ++config RTL8366_SMI_DEBUG_FS ++ bool "RTL8366 SMI interface debugfs support" ++ depends on DEBUG_FS ++ default n ++ ++config RTL8366S_PHY ++ tristate "Driver for the Realtek RTL8366S switch" ++ select SWCONFIG ++ ++config RTL8366RB_PHY ++ tristate "Driver for the Realtek RTL8366RB switch" ++ select SWCONFIG ++ ++endif # RTL8366_SMI ++ + endif # PHYLIB + + config MICREL_KS8995MA +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -23,6 +23,9 @@ obj-$(CONFIG_IP17XX_PHY) += ip17xx.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o + obj-$(CONFIG_AR8216_PHY) += ar8216.o + obj-$(CONFIG_RTL8306_PHY) += rtl8306.o ++obj-$(CONFIG_RTL8366_SMI) += rtl8366_smi.o ++obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o ++obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_FIXED_PHY) += fixed.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o diff --git a/target/linux/generic/patches-3.8/727-phy-rtl8367.patch b/target/linux/generic/patches-3.8/727-phy-rtl8367.patch new file mode 100644 index 0000000000..a9fe6d4fb8 --- /dev/null +++ b/target/linux/generic/patches-3.8/727-phy-rtl8367.patch @@ -0,0 +1,23 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -237,6 +237,10 @@ config RTL8366RB_PHY + tristate "Driver for the Realtek RTL8366RB switch" + select SWCONFIG + ++config RTL8367_PHY ++ tristate "Driver for the Realtek RTL8367R/M switches" ++ select SWCONFIG ++ + endif # RTL8366_SMI + + endif # PHYLIB +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -26,6 +26,7 @@ obj-$(CONFIG_RTL8306_PHY) += rtl8306.o + obj-$(CONFIG_RTL8366_SMI) += rtl8366_smi.o + obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o + obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o ++obj-$(CONFIG_RTL8367_PHY) += rtl8367.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_FIXED_PHY) += fixed.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o diff --git a/target/linux/generic/patches-3.8/728-phy-micrel.patch b/target/linux/generic/patches-3.8/728-phy-micrel.patch new file mode 100644 index 0000000000..6172e236f5 --- /dev/null +++ b/target/linux/generic/patches-3.8/728-phy-micrel.patch @@ -0,0 +1,24 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -145,6 +145,11 @@ config RTL8306_PHY + tristate "Driver for Realtek RTL8306S switches" + select SWCONFIG + ++config MICREL_PHY ++ tristate "Drivers for Micrel/Kendin PHYs" ++ ---help--- ++ Currently has a driver for the KSZ8041 ++ + config FIXED_PHY + bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + depends on PHYLIB=y +--- a/drivers/net/phy/Makefile 
++++ b/drivers/net/phy/Makefile +@@ -28,6 +28,7 @@ obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o + obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o + obj-$(CONFIG_RTL8367_PHY) += rtl8367.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o ++obj-$(CONFIG_MICREL_PHY) += micrel.o + obj-$(CONFIG_FIXED_PHY) += fixed.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o + obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o diff --git a/target/linux/generic/patches-3.8/729-phy-rtl8367b.patch b/target/linux/generic/patches-3.8/729-phy-rtl8367b.patch new file mode 100644 index 0000000000..4ab329fb79 --- /dev/null +++ b/target/linux/generic/patches-3.8/729-phy-rtl8367b.patch @@ -0,0 +1,23 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -246,6 +246,10 @@ config RTL8367_PHY + tristate "Driver for the Realtek RTL8367R/M switches" + select SWCONFIG + ++config RTL8367B_PHY ++ tristate "Driver fot the Realtek RTL8367R-VB switch" ++ select SWCONFIG ++ + endif # RTL8366_SMI + + endif # PHYLIB +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -27,6 +27,7 @@ obj-$(CONFIG_RTL8366_SMI) += rtl8366_smi + obj-$(CONFIG_RTL8366S_PHY) += rtl8366s.o + obj-$(CONFIG_RTL8366RB_PHY) += rtl8366rb.o + obj-$(CONFIG_RTL8367_PHY) += rtl8367.o ++obj-$(CONFIG_RTL8367B_PHY) += rtl8367b.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_MICREL_PHY) += micrel.o + obj-$(CONFIG_FIXED_PHY) += fixed.o diff --git a/target/linux/generic/patches-3.8/729-phy-tantos.patch b/target/linux/generic/patches-3.8/729-phy-tantos.patch new file mode 100644 index 0000000000..bc8be2b0dc --- /dev/null +++ b/target/linux/generic/patches-3.8/729-phy-tantos.patch @@ -0,0 +1,21 @@ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -257,3 +257,8 @@ endif # PHYLIB + config MICREL_KS8995MA + tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch" + depends on SPI ++ ++config PSB6970_PHY ++ tristate "Lantiq XWAY Tantos (PSB6970) Ethernet switch" ++ select SWCONFIG ++ select ETHERNET_PACKET_MANGLE +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -30,6 +30,7 @@ obj-$(CONFIG_RTL8367_PHY) += rtl8367.o + obj-$(CONFIG_RTL8367B_PHY) += rtl8367b.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o + obj-$(CONFIG_MICREL_PHY) += micrel.o ++obj-$(CONFIG_PSB6970_PHY) += psb6970.o + obj-$(CONFIG_FIXED_PHY) += fixed.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o + obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o diff --git a/target/linux/generic/patches-3.8/750-hostap_txpower.patch b/target/linux/generic/patches-3.8/750-hostap_txpower.patch new file mode 100644 index 0000000000..814406df11 --- /dev/null +++ b/target/linux/generic/patches-3.8/750-hostap_txpower.patch @@ -0,0 +1,154 @@ +--- a/drivers/net/wireless/hostap/hostap_ap.c ++++ b/drivers/net/wireless/hostap/hostap_ap.c +@@ -2340,13 +2340,13 @@ int prism2_ap_get_sta_qual(local_info_t + addr[count].sa_family = ARPHRD_ETHER; + memcpy(addr[count].sa_data, sta->addr, ETH_ALEN); + if (sta->last_rx_silence == 0) +- qual[count].qual = sta->last_rx_signal < 27 ? +- 0 : (sta->last_rx_signal - 27) * 92 / 127; ++ qual[count].qual = (sta->last_rx_signal - 156) == 0 ? 
++ 0 : (sta->last_rx_signal - 156) * 92 / 64; + else +- qual[count].qual = sta->last_rx_signal - +- sta->last_rx_silence - 35; +- qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal); +- qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence); ++ qual[count].qual = (sta->last_rx_signal - ++ sta->last_rx_silence) * 92 / 64; ++ qual[count].level = sta->last_rx_signal; ++ qual[count].noise = sta->last_rx_silence; + qual[count].updated = sta->last_rx_updated; + + sta->last_rx_updated = IW_QUAL_DBM; +@@ -2412,13 +2412,13 @@ int prism2_ap_translate_scan(struct net_ + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVQUAL; + if (sta->last_rx_silence == 0) +- iwe.u.qual.qual = sta->last_rx_signal < 27 ? +- 0 : (sta->last_rx_signal - 27) * 92 / 127; ++ iwe.u.qual.qual = (sta->last_rx_signal -156) == 0 ? ++ 0 : (sta->last_rx_signal - 156) * 92 / 64; + else +- iwe.u.qual.qual = sta->last_rx_signal - +- sta->last_rx_silence - 35; +- iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal); +- iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence); ++ iwe.u.qual.qual = (sta->last_rx_signal - ++ sta->last_rx_silence) * 92 / 64; ++ iwe.u.qual.level = sta->last_rx_signal; ++ iwe.u.qual.noise = sta->last_rx_silence; + iwe.u.qual.updated = sta->last_rx_updated; + iwe.len = IW_EV_QUAL_LEN; + current_ev = iwe_stream_add_event(info, current_ev, end_buf, +--- a/drivers/net/wireless/hostap/hostap_config.h ++++ b/drivers/net/wireless/hostap/hostap_config.h +@@ -45,4 +45,9 @@ + */ + /* #define PRISM2_NO_STATION_MODES */ + ++/* Enable TX power Setting functions ++ * (min att = -128 , max att = 127) ++ */ ++#define RAW_TXPOWER_SETTING ++ + #endif /* HOSTAP_CONFIG_H */ +--- a/drivers/net/wireless/hostap/hostap.h ++++ b/drivers/net/wireless/hostap/hostap.h +@@ -90,6 +90,7 @@ extern const struct iw_handler_def hosta + extern const struct ethtool_ops prism2_ethtool_ops; + + int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); ++int hostap_restore_power(struct net_device *dev); + + + #endif /* HOSTAP_H */ +--- a/drivers/net/wireless/hostap/hostap_hw.c ++++ b/drivers/net/wireless/hostap/hostap_hw.c +@@ -928,6 +928,7 @@ static int hfa384x_set_rid(struct net_de + prism2_hw_reset(dev); + } + ++ hostap_restore_power(dev); + return res; + } + +--- a/drivers/net/wireless/hostap/hostap_info.c ++++ b/drivers/net/wireless/hostap/hostap_info.c +@@ -435,6 +435,11 @@ static void handle_info_queue_linkstatus + } + + /* Get BSSID if we have a valid AP address */ ++ ++ if ( val == HFA384X_LINKSTATUS_CONNECTED || ++ val == HFA384X_LINKSTATUS_DISCONNECTED ) ++ hostap_restore_power(local->dev); ++ + if (connected) { + netif_carrier_on(local->dev); + netif_carrier_on(local->ddev); +--- a/drivers/net/wireless/hostap/hostap_ioctl.c ++++ b/drivers/net/wireless/hostap/hostap_ioctl.c +@@ -1479,23 +1479,20 @@ static int prism2_txpower_hfa386x_to_dBm + val = 255; + + tmp = val; +- tmp >>= 2; + +- return -12 - tmp; ++ return tmp; + } + + static u16 prism2_txpower_dBm_to_hfa386x(int val) + { + signed char tmp; + +- if (val > 20) +- return 128; +- else if (val < -43) ++ if (val > 127) + return 127; ++ else if (val < -128) ++ return 128; + + tmp = val; +- tmp = -12 - tmp; +- tmp <<= 2; + + return (unsigned char) tmp; + } +@@ -4052,3 +4049,35 @@ int hostap_ioctl(struct net_device *dev, + + return ret; + } ++ ++/* BUG FIX: Restore power setting value when lost due to F/W bug */ ++ ++int hostap_restore_power(struct net_device *dev) ++{ ++ struct hostap_interface *iface = netdev_priv(dev); ++ local_info_t *local = 
iface->local; ++ ++ u16 val; ++ int ret = 0; ++ ++ if (local->txpower_type == PRISM2_TXPOWER_OFF) { ++ val = 0xff; /* use all standby and sleep modes */ ++ ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, ++ HFA386X_CR_A_D_TEST_MODES2, ++ &val, NULL); ++ } ++ ++#ifdef RAW_TXPOWER_SETTING ++ if (local->txpower_type == PRISM2_TXPOWER_FIXED) { ++ val = HFA384X_TEST_CFG_BIT_ALC; ++ local->func->cmd(dev, HFA384X_CMDCODE_TEST | ++ (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL); ++ val = prism2_txpower_dBm_to_hfa386x(local->txpower); ++ ret = (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, ++ HFA386X_CR_MANUAL_TX_POWER, &val, NULL)); ++ } ++#endif /* RAW_TXPOWER_SETTING */ ++ return (ret ? -EOPNOTSUPP : 0); ++} ++ ++EXPORT_SYMBOL(hostap_restore_power); diff --git a/target/linux/generic/patches-3.8/810-pci_disable_common_quirks.patch b/target/linux/generic/patches-3.8/810-pci_disable_common_quirks.patch new file mode 100644 index 0000000000..cd8551bc74 --- /dev/null +++ b/target/linux/generic/patches-3.8/810-pci_disable_common_quirks.patch @@ -0,0 +1,51 @@ +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -63,6 +63,12 @@ config XEN_PCIDEV_FRONTEND + The PCI device frontend driver allows the kernel to import arbitrary + PCI devices from a PCI backend to support PCI driver domains. + ++config PCI_DISABLE_COMMON_QUIRKS ++ bool "PCI disable common quirks" ++ depends on PCI ++ help ++ If you don't know what to do here, say N. ++ + config HT_IRQ + bool "Interrupts on hypertransport devices" + default y +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -44,6 +44,7 @@ static void quirk_mmio_always_on(struct + DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS + /* The Mellanox Tavor device gives false positive parity errors + * Mark this device with a broken_parity_status, to allow + * PCI scanning code to "skip" this now blacklisted device. +@@ -2886,6 +2887,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); + ++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ + + static ktime_t fixup_debug_start(struct pci_dev *dev, + void (*fn)(struct pci_dev *dev)) +@@ -2917,6 +2919,8 @@ static void fixup_debug_report(struct pc + } + } + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS ++ + /* + * Some BIOS implementations leave the Intel GPU interrupts enabled, + * even though no one is handling them (f.e. i915 driver is never loaded). 
+@@ -2951,6 +2955,8 @@ static void disable_igfx_irq(struct pci_ + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); + ++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */ ++ + /* + * Some devices may pass our check in pci_intx_mask_supported if + * PCI_COMMAND_INTX_DISABLE works though they actually do not properly diff --git a/target/linux/generic/patches-3.8/811-pci_disable_usb_common_quirks.patch b/target/linux/generic/patches-3.8/811-pci_disable_usb_common_quirks.patch new file mode 100644 index 0000000000..a2e2239077 --- /dev/null +++ b/target/linux/generic/patches-3.8/811-pci_disable_usb_common_quirks.patch @@ -0,0 +1,80 @@ + +--- a/drivers/usb/host/pci-quirks.c ++++ b/drivers/usb/host/pci-quirks.c +@@ -79,6 +79,8 @@ + #define USB_INTEL_USB3_PSSEN 0xD8 + #define USB_INTEL_USB3PRM 0xDC + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS ++ + static struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; +@@ -353,6 +355,10 @@ void usb_amd_dev_put(void) + } + EXPORT_SYMBOL_GPL(usb_amd_dev_put); + ++#endif /* CONFIG_PCI_DISABLE_COMMON_QUIRKS */ ++ ++#if IS_ENABLED(CONFIG_USB_UHCI_HCD) ++ + /* + * Make sure the controller is completely inactive, unable to + * generate interrupts or do DMA. +@@ -432,8 +438,17 @@ reset_needed: + uhci_reset_hc(pdev, base); + return 1; + } ++#else ++int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base) ++{ ++ return 0; ++} ++ ++#endif + EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc); + ++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS ++ + static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask) + { + u16 cmd; +@@ -973,3 +988,4 @@ static void quirk_usb_early_handoff(stru + } + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); ++#endif +--- a/drivers/usb/host/pci-quirks.h ++++ b/drivers/usb/host/pci-quirks.h +@@ -4,18 +4,26 @@ + #ifdef CONFIG_PCI + void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); + int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); ++bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); ++void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); ++void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); ++#else ++static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} ++#endif /* CONFIG_PCI */ ++ ++#if defined(CONFIG_PCI) && !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS) + int usb_amd_find_chipset_info(void); + void usb_amd_dev_put(void); + void usb_amd_quirk_pll_disable(void); + void usb_amd_quirk_pll_enable(void); +-bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); +-void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +-void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); + #else ++static inline int usb_amd_find_chipset_info(void) ++{ ++ return 0; ++} + static inline void usb_amd_quirk_pll_disable(void) {} + static inline void usb_amd_quirk_pll_enable(void) {} + static inline void usb_amd_dev_put(void) {} +-static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} +-#endif /* CONFIG_PCI */ ++#endif + + #endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/target/linux/generic/patches-3.8/820-usb_add_usb_find_device_by_name.patch b/target/linux/generic/patches-3.8/820-usb_add_usb_find_device_by_name.patch new file mode 100644 index 0000000000..72b2237898 --- /dev/null +++ b/target/linux/generic/patches-3.8/820-usb_add_usb_find_device_by_name.patch @@ -0,0 +1,84 @@ +--- 
a/drivers/usb/core/usb.c ++++ b/drivers/usb/core/usb.c +@@ -644,6 +644,71 @@ int __usb_get_extra_descriptor(char *buf + } + EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); + ++static struct usb_device *match_device_name(struct usb_device *dev, ++ const char *name) ++{ ++ struct usb_device *ret_dev = NULL; ++ struct usb_device *childdev = NULL; ++ int child; ++ ++ dev_dbg(&dev->dev, "check for name %s ...\n", name); ++ ++ /* see if this device matches */ ++ if (strcmp(dev_name(&dev->dev), name) == 0 ) { ++ dev_dbg(&dev->dev, "matched this device!\n"); ++ ret_dev = usb_get_dev(dev); ++ goto exit; ++ } ++ /* look through all of the children of this device */ ++ usb_hub_for_each_child(dev, child, childdev) { ++ if (childdev) { ++ usb_lock_device(childdev); ++ ret_dev = match_device_name(childdev, name); ++ usb_unlock_device(childdev); ++ if (ret_dev) ++ goto exit; ++ } ++ } ++exit: ++ return ret_dev; ++} ++ ++/** ++ * usb_find_device_by_name - find a specific usb device in the system ++ * @name: the name of the device to find ++ * ++ * Returns a pointer to a struct usb_device if such a specified usb ++ * device is present in the system currently. The usage count of the ++ * device will be incremented if a device is found. Make sure to call ++ * usb_put_dev() when the caller is finished with the device. ++ * ++ * If a device with the specified bus id is not found, NULL is returned. ++ */ ++struct usb_device *usb_find_device_by_name(const char *name) ++{ ++ struct list_head *buslist; ++ struct usb_bus *bus; ++ struct usb_device *dev = NULL; ++ ++ mutex_lock(&usb_bus_list_lock); ++ for (buslist = usb_bus_list.next; ++ buslist != &usb_bus_list; ++ buslist = buslist->next) { ++ bus = container_of(buslist, struct usb_bus, bus_list); ++ if (!bus->root_hub) ++ continue; ++ usb_lock_device(bus->root_hub); ++ dev = match_device_name(bus->root_hub, name); ++ usb_unlock_device(bus->root_hub); ++ if (dev) ++ goto exit; ++ } ++exit: ++ mutex_unlock(&usb_bus_list_lock); ++ return dev; ++} ++EXPORT_SYMBOL_GPL(usb_find_device_by_name); ++ + /** + * usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP + * @dev: device the buffer will be used with +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -675,6 +675,7 @@ static inline bool usb_device_supports_l + return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT; + } + ++extern struct usb_device *usb_find_device_by_name(const char *name); + + /*-------------------------------------------------------------------------*/ + diff --git a/target/linux/generic/patches-3.8/830-ledtrig_morse.patch b/target/linux/generic/patches-3.8/830-ledtrig_morse.patch new file mode 100644 index 0000000000..a5639d9fe3 --- /dev/null +++ b/target/linux/generic/patches-3.8/830-ledtrig_morse.patch @@ -0,0 +1,28 @@ +--- a/drivers/leds/Kconfig ++++ b/drivers/leds/Kconfig +@@ -561,4 +561,8 @@ config LEDS_TRIGGER_TRANSIENT + GPIO/PWM based hardware. + If unsure, say Y. 
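The usb_find_device_by_name() helper added above returns a reference-counted device, so a caller has to balance the lookup with usb_put_dev(). A usage sketch follows; the calling function and the bus name "1-1.2" are purely illustrative:

#include <linux/errno.h>
#include <linux/usb.h>

static int example_poke_device(void)
{
	struct usb_device *udev;

	udev = usb_find_device_by_name("1-1.2");	/* name as per dev_name() */
	if (!udev)
		return -ENODEV;

	/* ... use the device ... */

	usb_put_dev(udev);	/* drop the reference taken by the lookup */
	return 0;
}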
+ ++config LEDS_TRIGGER_MORSE ++ tristate "LED Morse Trigger" ++ depends on LEDS_TRIGGERS ++ + endif # NEW_LEDS +--- a/drivers/leds/Makefile ++++ b/drivers/leds/Makefile +@@ -65,3 +65,4 @@ obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledt + obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o + obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o + obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o ++obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o +--- a/drivers/leds/ledtrig-morse.c ++++ b/drivers/leds/ledtrig-morse.c +@@ -26,7 +26,6 @@ + #include <linux/list.h> + #include <linux/spinlock.h> + #include <linux/device.h> +-#include <linux/sysdev.h> + #include <linux/timer.h> + #include <linux/ctype.h> + #include <linux/leds.h> diff --git a/target/linux/generic/patches-3.8/831-ledtrig_netdev.patch b/target/linux/generic/patches-3.8/831-ledtrig_netdev.patch new file mode 100644 index 0000000000..85937d8798 --- /dev/null +++ b/target/linux/generic/patches-3.8/831-ledtrig_netdev.patch @@ -0,0 +1,51 @@ +--- a/drivers/leds/Kconfig ++++ b/drivers/leds/Kconfig +@@ -565,4 +565,11 @@ config LEDS_TRIGGER_MORSE + tristate "LED Morse Trigger" + depends on LEDS_TRIGGERS + ++config LEDS_TRIGGER_NETDEV ++ tristate "LED Netdev Trigger" ++ depends on NET && LEDS_TRIGGERS ++ help ++ This allows LEDs to be controlled by network device activity. ++ If unsure, say Y. ++ + endif # NEW_LEDS +--- a/drivers/leds/Makefile ++++ b/drivers/leds/Makefile +@@ -66,3 +66,4 @@ obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtr + obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o + obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o + obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o ++obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o +--- a/drivers/leds/ledtrig-netdev.c ++++ b/drivers/leds/ledtrig-netdev.c +@@ -22,7 +22,6 @@ + #include <linux/list.h> + #include <linux/spinlock.h> + #include <linux/device.h> +-#include <linux/sysdev.h> + #include <linux/netdevice.h> + #include <linux/timer.h> + #include <linux/ctype.h> +@@ -307,8 +306,9 @@ done: + static void netdev_trig_timer(unsigned long arg) + { + struct led_netdev_data *trigger_data = (struct led_netdev_data *)arg; +- const struct net_device_stats *dev_stats; ++ struct rtnl_link_stats64 *dev_stats; + unsigned new_activity; ++ struct rtnl_link_stats64 temp; + + write_lock(&trigger_data->lock); + +@@ -318,7 +318,7 @@ static void netdev_trig_timer(unsigned l + goto no_restart; + } + +- dev_stats = dev_get_stats(trigger_data->net_dev); ++ dev_stats = dev_get_stats(trigger_data->net_dev, &temp); + new_activity = + ((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) + + ((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0); diff --git a/target/linux/generic/patches-3.8/832-ledtrig_usbdev.patch b/target/linux/generic/patches-3.8/832-ledtrig_usbdev.patch new file mode 100644 index 0000000000..5b07f17a90 --- /dev/null +++ b/target/linux/generic/patches-3.8/832-ledtrig_usbdev.patch @@ -0,0 +1,31 @@ +--- a/drivers/leds/Kconfig ++++ b/drivers/leds/Kconfig +@@ -572,4 +572,11 @@ config LEDS_TRIGGER_NETDEV + This allows LEDs to be controlled by network device activity. + If unsure, say Y. + ++config LEDS_TRIGGER_USBDEV ++ tristate "LED USB device Trigger" ++ depends on USB && LEDS_TRIGGERS ++ help ++ This allows LEDs to be controlled by the presence/activity of ++ an USB device. If unsure, say N. 
++ + endif # NEW_LEDS +--- a/drivers/leds/Makefile ++++ b/drivers/leds/Makefile +@@ -67,3 +67,4 @@ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += + obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o + obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o + obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o ++obj-$(CONFIG_LEDS_TRIGGER_USBDEV) += ledtrig-usbdev.o +--- a/drivers/leds/ledtrig-usbdev.c ++++ b/drivers/leds/ledtrig-usbdev.c +@@ -24,7 +24,6 @@ + #include <linux/list.h> + #include <linux/spinlock.h> + #include <linux/device.h> +-#include <linux/sysdev.h> + #include <linux/timer.h> + #include <linux/ctype.h> + #include <linux/slab.h> diff --git a/target/linux/generic/patches-3.8/840-rtc7301.patch b/target/linux/generic/patches-3.8/840-rtc7301.patch new file mode 100644 index 0000000000..6c56dd714f --- /dev/null +++ b/target/linux/generic/patches-3.8/840-rtc7301.patch @@ -0,0 +1,250 @@ +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -773,6 +773,15 @@ config RTC_DRV_NUC900 + If you say yes here you get support for the RTC subsystem of the + NUC910/NUC920 used in embedded systems. + ++config RTC_DRV_RTC7301 ++ tristate "Epson RTC-7301 SF/DG" ++ help ++ If you say Y here you will get support for the ++ Epson RTC-7301 SF/DG RTC chips. ++ ++ This driver can also be built as a module. If so, the module ++ will be called rtc-7301. ++ + comment "on-CPU RTC drivers" + + config RTC_DRV_DAVINCI +--- a/drivers/rtc/Makefile ++++ b/drivers/rtc/Makefile +@@ -94,6 +94,7 @@ obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c + obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o + obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o + obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o ++obj-$(CONFIG_RTC_DRV_RTC7301) += rtc-rtc7301.o + obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o + obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o + obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o +--- /dev/null ++++ b/drivers/rtc/rtc-rtc7301.c +@@ -0,0 +1,219 @@ ++/* ++ * Driver for Epson RTC-7301SF/DG ++ * ++ * Copyright (C) 2009 Jose Vasconcellos ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
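For context, the rtc7301 driver added below binds to a memory-mapped platform device and expects a single IORESOURCE_MEM resource. A board might declare it roughly as follows; the base address is an assumption for illustration only:

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource rtc7301_resources[] = {
	{
		.start = 0x10000000,		/* illustrative base address */
		.end   = 0x10000000 + 0x0fff,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device rtc7301_device = {
	.name          = "rtc7301",		/* matches the driver name below */
	.id            = -1,
	.resource      = rtc7301_resources,
	.num_resources = ARRAY_SIZE(rtc7301_resources),
};

/* registered from board init with platform_device_register(&rtc7301_device) */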
++ */ ++ ++#include <linux/module.h> ++#include <linux/rtc.h> ++#include <linux/platform_device.h> ++#include <linux/io.h> ++#include <linux/delay.h> ++#include <linux/bcd.h> ++ ++#define RTC_NAME "rtc7301" ++#define RTC_VERSION "0.1" ++ ++/* Epson RTC-7301 register addresses */ ++#define RTC7301_SEC 0x00 ++#define RTC7301_SEC10 0x01 ++#define RTC7301_MIN 0x02 ++#define RTC7301_MIN10 0x03 ++#define RTC7301_HOUR 0x04 ++#define RTC7301_HOUR10 0x05 ++#define RTC7301_WEEKDAY 0x06 ++#define RTC7301_DAY 0x07 ++#define RTC7301_DAY10 0x08 ++#define RTC7301_MON 0x09 ++#define RTC7301_MON10 0x0A ++#define RTC7301_YEAR 0x0B ++#define RTC7301_YEAR10 0x0C ++#define RTC7301_YEAR100 0x0D ++#define RTC7301_YEAR1000 0x0E ++#define RTC7301_CTRLREG 0x0F ++ ++static uint8_t __iomem *rtc7301_base; ++ ++#define read_reg(offset) (readb(rtc7301_base + offset) & 0xf) ++#define write_reg(offset, data) writeb(data, rtc7301_base + (offset)) ++ ++#define rtc7301_isbusy() (read_reg(RTC7301_CTRLREG) & 1) ++ ++static void rtc7301_init_settings(void) ++{ ++ int i; ++ ++ write_reg(RTC7301_CTRLREG, 2); ++ write_reg(RTC7301_YEAR1000, 2); ++ udelay(122); ++ ++ /* bank 1 */ ++ write_reg(RTC7301_CTRLREG, 6); ++ for (i=0; i<15; i++) ++ write_reg(i, 0); ++ ++ /* bank 2 */ ++ write_reg(RTC7301_CTRLREG, 14); ++ for (i=0; i<15; i++) ++ write_reg(i, 0); ++ write_reg(RTC7301_CTRLREG, 0); ++} ++ ++static int rtc7301_get_datetime(struct device *dev, struct rtc_time *dt) ++{ ++ int cnt; ++ uint8_t buf[16]; ++ ++ cnt = 0; ++ while (rtc7301_isbusy()) { ++ udelay(244); ++ if (cnt++ > 100) { ++ dev_err(dev, "%s: timeout error %x\n", __func__, rtc7301_base[RTC7301_CTRLREG]); ++ return -EIO; ++ } ++ } ++ ++ for (cnt=0; cnt<16; cnt++) ++ buf[cnt] = read_reg(cnt); ++ ++ if (buf[RTC7301_SEC10] & 8) { ++ dev_err(dev, "%s: RTC not set\n", __func__); ++ return -EINVAL; ++ } ++ ++ memset(dt, 0, sizeof(*dt)); ++ ++ dt->tm_sec = buf[RTC7301_SEC] + buf[RTC7301_SEC10]*10; ++ dt->tm_min = buf[RTC7301_MIN] + buf[RTC7301_MIN10]*10; ++ dt->tm_hour = buf[RTC7301_HOUR] + buf[RTC7301_HOUR10]*10; ++ ++ dt->tm_mday = buf[RTC7301_DAY] + buf[RTC7301_DAY10]*10; ++ dt->tm_mon = buf[RTC7301_MON] + buf[RTC7301_MON10]*10 - 1; ++ dt->tm_year = buf[RTC7301_YEAR] + buf[RTC7301_YEAR10]*10 + ++ buf[RTC7301_YEAR100]*100 + ++ ((buf[RTC7301_YEAR1000] & 3)*1000) - 1900; ++ ++ /* the rtc device may contain illegal values on power up ++ * according to the data sheet. make sure they are valid. 
++ */ ++ ++ return rtc_valid_tm(dt); ++} ++ ++static int rtc7301_set_datetime(struct device *dev, struct rtc_time *dt) ++{ ++ int data; ++ ++ data = dt->tm_year + 1900; ++ if (data >= 2100 || data < 1900) ++ return -EINVAL; ++ ++ write_reg(RTC7301_CTRLREG, 2); ++ udelay(122); ++ ++ data = bin2bcd(dt->tm_sec); ++ write_reg(RTC7301_SEC, data); ++ write_reg(RTC7301_SEC10, (data >> 4)); ++ ++ data = bin2bcd(dt->tm_min); ++ write_reg(RTC7301_MIN, data ); ++ write_reg(RTC7301_MIN10, (data >> 4)); ++ ++ data = bin2bcd(dt->tm_hour); ++ write_reg(RTC7301_HOUR, data); ++ write_reg(RTC7301_HOUR10, (data >> 4)); ++ ++ data = bin2bcd(dt->tm_mday); ++ write_reg(RTC7301_DAY, data); ++ write_reg(RTC7301_DAY10, (data>> 4)); ++ ++ data = bin2bcd(dt->tm_mon + 1); ++ write_reg(RTC7301_MON, data); ++ write_reg(RTC7301_MON10, (data >> 4)); ++ ++ data = bin2bcd(dt->tm_year % 100); ++ write_reg(RTC7301_YEAR, data); ++ write_reg(RTC7301_YEAR10, (data >> 4)); ++ data = bin2bcd((1900 + dt->tm_year) / 100); ++ write_reg(RTC7301_YEAR100, data); ++ ++ data = bin2bcd(dt->tm_wday); ++ write_reg(RTC7301_WEEKDAY, data); ++ ++ write_reg(RTC7301_CTRLREG, 0); ++ ++ return 0; ++} ++ ++static const struct rtc_class_ops rtc7301_rtc_ops = { ++ .read_time = rtc7301_get_datetime, ++ .set_time = rtc7301_set_datetime, ++}; ++ ++static int __devinit rtc7301_probe(struct platform_device *pdev) ++{ ++ struct rtc_device *rtc; ++ struct resource *res; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) ++ return -ENOENT; ++ ++ rtc7301_base = ioremap_nocache(res->start, 0x1000 /*res->end - res->start + 1*/); ++ if (!rtc7301_base) ++ return -EINVAL; ++ ++ rtc = rtc_device_register(RTC_NAME, &pdev->dev, ++ &rtc7301_rtc_ops, THIS_MODULE); ++ if (IS_ERR(rtc)) { ++ iounmap(rtc7301_base); ++ return PTR_ERR(rtc); ++ } ++ ++ platform_set_drvdata(pdev, rtc); ++ ++ rtc7301_init_settings(); ++ return 0; ++} ++ ++static int __devexit rtc7301_remove(struct platform_device *pdev) ++{ ++ struct rtc_device *rtc = platform_get_drvdata(pdev); ++ ++ if (rtc) ++ rtc_device_unregister(rtc); ++ if (rtc7301_base) ++ iounmap(rtc7301_base); ++ return 0; ++} ++ ++static struct platform_driver rtc7301_driver = { ++ .driver = { ++ .name = RTC_NAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = rtc7301_probe, ++ .remove = __devexit_p(rtc7301_remove), ++}; ++ ++static __init int rtc7301_init(void) ++{ ++ return platform_driver_register(&rtc7301_driver); ++} ++module_init(rtc7301_init); ++ ++static __exit void rtc7301_exit(void) ++{ ++ platform_driver_unregister(&rtc7301_driver); ++} ++module_exit(rtc7301_exit); ++ ++MODULE_DESCRIPTION("Epson 7301 RTC driver"); ++MODULE_AUTHOR("Jose Vasconcellos <jvasco@verizon.net>"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:" RTC_NAME); ++MODULE_VERSION(RTC_VERSION); diff --git a/target/linux/generic/patches-3.8/841-rtc_pt7c4338.patch b/target/linux/generic/patches-3.8/841-rtc_pt7c4338.patch new file mode 100644 index 0000000000..3beeb1383f --- /dev/null +++ b/target/linux/generic/patches-3.8/841-rtc_pt7c4338.patch @@ -0,0 +1,247 @@ +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -428,6 +428,15 @@ config RTC_DRV_RV3029C2 + This driver can also be built as a module. If so, the module + will be called rtc-rv3029c2. + ++config RTC_DRV_PT7C4338 ++ tristate "Pericom Technology Inc. PT7C4338 RTC" ++ help ++ If you say yes here you get support for the Pericom Technology ++ Inc. PT7C4338 RTC chip. ++ ++ This driver can also be built as a module. If so, the module ++ will be called rtc-pt7c4338. 
++ + endif # I2C + + comment "SPI RTC drivers" +--- a/drivers/rtc/Makefile ++++ b/drivers/rtc/Makefile +@@ -86,6 +86,7 @@ obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030 + obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o + obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o + obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o ++obj-$(CONFIG_RTC_DRV_PT7C4338) += rtc-pt7c4338.o + obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o + obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o + obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o +--- /dev/null ++++ b/drivers/rtc/rtc-pt7c4338.c +@@ -0,0 +1,216 @@ ++/* ++ * Copyright 2010 Freescale Semiconductor, Inc. ++ * ++ * Author: Priyanka Jain <Priyanka.Jain@freescale.com> ++ * ++ * See file CREDITS for list of people who contributed to this ++ * project. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of ++ * the License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, ++ * MA 02111-1307 USA ++ */ ++ ++/* ++ * This file provides Date & Time support (no alarms) for PT7C4338 chip. ++ * ++ * This file is based on drivers/rtc/rtc-ds1307.c ++ * ++ * PT7C4338 chip is manufactured by Pericom Technology Inc. ++ * It is a serial real-time clock which provides ++ * 1)Low-power clock/calendar. ++ * 2)Programmable square-wave output. ++ * It has 56 bytes of nonvolatile RAM. 
++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/slab.h> ++#include <linux/i2c.h> ++#include <linux/rtc.h> ++#include <linux/bcd.h> ++ ++/* RTC register addresses */ ++#define PT7C4338_REG_SECONDS 0x00 ++#define PT7C4338_REG_MINUTES 0x01 ++#define PT7C4338_REG_HOURS 0x02 ++#define PT7C4338_REG_AMPM 0x02 ++#define PT7C4338_REG_DAY 0x03 ++#define PT7C4338_REG_DATE 0x04 ++#define PT7C4338_REG_MONTH 0x05 ++#define PT7C4338_REG_YEAR 0x06 ++#define PT7C4338_REG_CTRL_STAT 0x07 ++ ++/* RTC second register address bit */ ++#define PT7C4338_SEC_BIT_CH 0x80 /*Clock Halt (in Register 0)*/ ++ ++/* RTC control and status register bits */ ++#define PT7C4338_CTRL_STAT_BIT_RS0 0x1 /*Rate select 0*/ ++#define PT7C4338_CTRL_STAT_BIT_RS1 0x2 /*Rate select 1*/ ++#define PT7C4338_CTRL_STAT_BIT_SQWE 0x10 /*Square Wave Enable*/ ++#define PT7C4338_CTRL_STAT_BIT_OSF 0x20 /*Oscillator Stop Flag*/ ++#define PT7C4338_CTRL_STAT_BIT_OUT 0x80 /*Output Level Control*/ ++ ++static const struct i2c_device_id pt7c4338_id[] = { ++ { "pt7c4338", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, pt7c4338_id); ++ ++struct pt7c4338{ ++ struct i2c_client *client; ++ struct rtc_device *rtc; ++}; ++ ++static int pt7c4338_read_time(struct device *dev, struct rtc_time *time) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ int ret; ++ u8 buf[7]; ++ u8 year, month, day, hour, minute, second; ++ u8 week, twelve_hr, am_pm; ++ ++ ret = i2c_smbus_read_i2c_block_data(client, ++ PT7C4338_REG_SECONDS, 7, buf); ++ if (ret < 0) ++ return ret; ++ if (ret < 7) ++ return -EIO; ++ ++ second = buf[0]; ++ minute = buf[1]; ++ hour = buf[2]; ++ week = buf[3]; ++ day = buf[4]; ++ month = buf[5]; ++ year = buf[6]; ++ ++ /* Extract additional information for AM/PM */ ++ twelve_hr = hour & 0x40; ++ am_pm = hour & 0x20; ++ ++ /* Write to rtc_time structure */ ++ time->tm_sec = bcd2bin(second & 0x7f); ++ time->tm_min = bcd2bin(minute & 0x7f); ++ if (twelve_hr) { ++ /* Convert to 24 hr */ ++ if (am_pm) ++ time->tm_hour = bcd2bin(hour & 0x10) + 12; ++ else ++ time->tm_hour = bcd2bin(hour & 0xBF); ++ } else { ++ time->tm_hour = bcd2bin(hour); ++ } ++ ++ time->tm_wday = bcd2bin(week & 0x07) - 1; ++ time->tm_mday = bcd2bin(day & 0x3f); ++ time->tm_mon = bcd2bin(month & 0x1F) - 1; ++ /* assume 20YY not 19YY */ ++ time->tm_year = bcd2bin(year) + 100; ++ ++ return 0; ++} ++ ++static int pt7c4338_set_time(struct device *dev, struct rtc_time *time) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ u8 buf[7]; ++ ++ /* Extract time from rtc_time and load into pt7c4338*/ ++ buf[0] = bin2bcd(time->tm_sec); ++ buf[1] = bin2bcd(time->tm_min); ++ buf[2] = bin2bcd(time->tm_hour); ++ buf[3] = bin2bcd(time->tm_wday + 1); /* Day of the week */ ++ buf[4] = bin2bcd(time->tm_mday); /* Date */ ++ buf[5] = bin2bcd(time->tm_mon + 1); ++ ++ /* assume 20YY not 19YY */ ++ if (time->tm_year >= 100) ++ buf[6] = bin2bcd(time->tm_year - 100); ++ else ++ buf[6] = bin2bcd(time->tm_year); ++ ++ return i2c_smbus_write_i2c_block_data(client, ++ PT7C4338_REG_SECONDS, 7, buf); ++} ++ ++static const struct rtc_class_ops pt7c4338_rtc_ops = { ++ .read_time = pt7c4338_read_time, ++ .set_time = pt7c4338_set_time, ++}; ++ ++static int pt7c4338_probe(struct i2c_client *client, ++ const struct i2c_device_id *id) ++{ ++ struct pt7c4338 *pt7c4338; ++ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); ++ int ret; ++ ++ pt7c4338 = kzalloc(sizeof(struct pt7c4338), GFP_KERNEL); ++ if (!pt7c4338) ++ return -ENOMEM; ++ ++ pt7c4338->client = client; ++ 
i2c_set_clientdata(client, pt7c4338); ++ pt7c4338->rtc = rtc_device_register(client->name, &client->dev, ++ &pt7c4338_rtc_ops, THIS_MODULE); ++ if (IS_ERR(pt7c4338->rtc)) { ++ ret = PTR_ERR(pt7c4338->rtc); ++ dev_err(&client->dev, "unable to register the class device\n"); ++ goto out_free; ++ } ++ ++ return 0; ++out_free: ++ i2c_set_clientdata(client, NULL); ++ kfree(pt7c4338); ++ return ret; ++} ++ ++static int __devexit pt7c4338_remove(struct i2c_client *client) ++{ ++ struct pt7c4338 *pt7c4338 = i2c_get_clientdata(client); ++ ++ rtc_device_unregister(pt7c4338->rtc); ++ i2c_set_clientdata(client, NULL); ++ kfree(pt7c4338); ++ return 0; ++} ++ ++static struct i2c_driver pt7c4338_driver = { ++ .driver = { ++ .name = "rtc-pt7c4338", ++ .owner = THIS_MODULE, ++ }, ++ .probe = pt7c4338_probe, ++ .remove = __devexit_p(pt7c4338_remove), ++ .id_table = pt7c4338_id, ++}; ++ ++static int __init pt7c4338_init(void) ++{ ++ return i2c_add_driver(&pt7c4338_driver); ++} ++ ++static void __exit pt7c4338_exit(void) ++{ ++ i2c_del_driver(&pt7c4338_driver); ++} ++ ++module_init(pt7c4338_init); ++module_exit(pt7c4338_exit); ++ ++MODULE_AUTHOR("Priyanka Jain <Priyanka.Jain@freescale.com>"); ++MODULE_DESCRIPTION("pericom Technology Inc. PT7C4338 RTC Driver"); ++MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/patches-3.8/850-glamo_headers.patch b/target/linux/generic/patches-3.8/850-glamo_headers.patch new file mode 100644 index 0000000000..363070d0af --- /dev/null +++ b/target/linux/generic/patches-3.8/850-glamo_headers.patch @@ -0,0 +1,21 @@ +--- a/include/uapi/linux/fb.h ++++ b/include/uapi/linux/fb.h +@@ -122,6 +122,7 @@ + #define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ + #define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ + #define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ ++#define FB_ACCEL_GLAMO 50 /* SMedia Glamo */ + #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ + #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ + #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ +--- a/include/uapi/linux/Kbuild ++++ b/include/uapi/linux/Kbuild +@@ -130,6 +130,8 @@ header-y += gen_stats.h + header-y += genetlink.h + header-y += gfs2_ondisk.h + header-y += gigaset_dev.h ++header-y += glamofb.h ++header-y += glamo-engine.h + header-y += hdlc.h + header-y += hdlcdrv.h + header-y += hdreg.h diff --git a/target/linux/generic/patches-3.8/861-04_spi_gpio_implement_spi_delay.patch b/target/linux/generic/patches-3.8/861-04_spi_gpio_implement_spi_delay.patch new file mode 100644 index 0000000000..4be08956d7 --- /dev/null +++ b/target/linux/generic/patches-3.8/861-04_spi_gpio_implement_spi_delay.patch @@ -0,0 +1,58 @@ +Implement the SPI-GPIO delay function for busses that need speed limitation. 
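The delay only takes effect when a device actually requests a clock ceiling; with spi_device.max_speed_hz and spi_transfer.speed_hz left at 0 the driver keeps the old "as fast as we can" behaviour. A sketch of how a board might request the limit (the device and numbers are made up, not part of this patch):

	#include <linux/init.h>
	#include <linux/spi/spi.h>

	/* Illustrative only: cap the bitbanged bus at 1 MHz so the new
	 * spidelay() path inserts per-bit delays for this device.
	 */
	static struct spi_board_info board_spi_devs[] __initdata = {
		{
			.modalias	= "spidev",
			.max_speed_hz	= 1000000,	/* 1 MHz */
			.bus_num	= 0,
			.chip_select	= 0,
			.mode		= SPI_MODE_0,
		},
	};

	static void __init board_declare_spi(void)
	{
		spi_register_board_info(board_spi_devs,
					ARRAY_SIZE(board_spi_devs));
	}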
+ +--mb + + + +--- a/drivers/spi/spi-gpio.c ++++ b/drivers/spi/spi-gpio.c +@@ -22,6 +22,7 @@ + #include <linux/init.h> + #include <linux/platform_device.h> + #include <linux/gpio.h> ++#include <linux/delay.h> + #include <linux/of_device.h> + #include <linux/of_gpio.h> + +@@ -73,6 +74,7 @@ struct spi_gpio { + * #define SPI_MOSI_GPIO 120 + * #define SPI_SCK_GPIO 121 + * #define SPI_N_CHIPSEL 4 ++ * #undef NEED_SPIDELAY + * #include "spi-gpio.c" + */ + +@@ -80,6 +82,7 @@ struct spi_gpio { + #define DRIVER_NAME "spi_gpio" + + #define GENERIC_BITBANG /* vs tight inlines */ ++#define NEED_SPIDELAY 1 + + /* all functions referencing these symbols must define pdata */ + #define SPI_MISO_GPIO ((pdata)->miso) +@@ -130,12 +133,20 @@ static inline int getmiso(const struct s + #undef pdata + + /* +- * NOTE: this clocks "as fast as we can". It "should" be a function of the +- * requested device clock. Software overhead means we usually have trouble +- * reaching even one Mbit/sec (except when we can inline bitops), so for now +- * we'll just assume we never need additional per-bit slowdowns. ++ * NOTE: to clock "as fast as we can", set spi_device.max_speed_hz ++ * and spi_transfer.speed_hz to 0. ++ * Otherwise this is a function of the requested device clock. ++ * Software overhead means we usually have trouble ++ * reaching even one Mbit/sec (except when we can inline bitops). So on small ++ * embedded devices with fast SPI slaves you usually don't need a delay. + */ +-#define spidelay(nsecs) do {} while (0) ++static inline void spidelay(unsigned nsecs) ++{ ++#ifdef NEED_SPIDELAY ++ if (unlikely(nsecs)) ++ ndelay(nsecs); ++#endif /* NEED_SPIDELAY */ ++} + + #include "spi-bitbang-txrx.h" + diff --git a/target/linux/generic/patches-3.8/862-gpio_spi_driver.patch b/target/linux/generic/patches-3.8/862-gpio_spi_driver.patch new file mode 100644 index 0000000000..86ae86df82 --- /dev/null +++ b/target/linux/generic/patches-3.8/862-gpio_spi_driver.patch @@ -0,0 +1,373 @@ +THIS CODE IS DEPRECATED. + +Please use the new mainline SPI-GPIO driver, as of 2.6.29. + +--mb + + + +--- + drivers/spi/Kconfig | 9 + + drivers/spi/Makefile | 1 + drivers/spi/spi_gpio_old.c | 251 +++++++++++++++++++++++++++++++++++++++ + include/linux/spi/spi_gpio_old.h | 73 +++++++++++ + 4 files changed, 334 insertions(+) + +--- /dev/null ++++ b/include/linux/spi/spi_gpio_old.h +@@ -0,0 +1,73 @@ ++/* ++ * spi_gpio interface to platform code ++ * ++ * Copyright (c) 2008 Piotr Skamruk ++ * Copyright (c) 2008 Michael Buesch ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#ifndef _LINUX_SPI_SPI_GPIO ++#define _LINUX_SPI_SPI_GPIO ++ ++#include <linux/types.h> ++#include <linux/spi/spi.h> ++ ++ ++/** ++ * struct spi_gpio_platform_data - Data definitions for a SPI-GPIO device. ++ * ++ * This structure holds information about a GPIO-based SPI device. ++ * ++ * @pin_clk: The GPIO pin number of the CLOCK pin. ++ * ++ * @pin_miso: The GPIO pin number of the MISO pin. ++ * ++ * @pin_mosi: The GPIO pin number of the MOSI pin. ++ * ++ * @pin_cs: The GPIO pin number of the CHIPSELECT pin. ++ * ++ * @cs_activelow: If true, the chip is selected when the CS line is low. ++ * ++ * @no_spi_delay: If true, no delay is done in the lowlevel bitbanging. ++ * Note that doing no delay is not standards compliant, ++ * but it might be needed to speed up transfers on some ++ * slow embedded machines. 
++ * ++ * @boardinfo_setup: This callback is called after the ++ * SPI master device was registered, but before the ++ * device is registered. ++ * @boardinfo_setup_data: Data argument passed to boardinfo_setup(). ++ */ ++struct spi_gpio_platform_data { ++ unsigned int pin_clk; ++ unsigned int pin_miso; ++ unsigned int pin_mosi; ++ unsigned int pin_cs; ++ bool cs_activelow; ++ bool no_spi_delay; ++ int (*boardinfo_setup)(struct spi_board_info *bi, ++ struct spi_master *master, ++ void *data); ++ void *boardinfo_setup_data; ++}; ++ ++/** ++ * SPI_GPIO_PLATDEV_NAME - The platform device name string. ++ * ++ * The name string that has to be used for platform_device_alloc ++ * when allocating a spi-gpio device. ++ */ ++#define SPI_GPIO_PLATDEV_NAME "spi-gpio" ++ ++/** ++ * spi_gpio_next_id - Get another platform device ID number. ++ * ++ * This returns the next platform device ID number that has to be used ++ * for platform_device_alloc. The ID is opaque and should not be used for ++ * anything else. ++ */ ++int spi_gpio_next_id(void); ++ ++#endif /* _LINUX_SPI_SPI_GPIO */ +--- /dev/null ++++ b/drivers/spi/spi_gpio_old.c +@@ -0,0 +1,251 @@ ++/* ++ * Bitbanging SPI bus driver using GPIO API ++ * ++ * Copyright (c) 2008 Piotr Skamruk ++ * Copyright (c) 2008 Michael Buesch ++ * ++ * based on spi_s3c2410_gpio.c ++ * Copyright (c) 2006 Ben Dooks ++ * Copyright (c) 2006 Simtec Electronics ++ * and on i2c-gpio.c ++ * Copyright (C) 2007 Atmel Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/init.h> ++#include <linux/delay.h> ++#include <linux/spinlock.h> ++#include <linux/workqueue.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/spi/spi.h> ++#include <linux/spi/spi_bitbang.h> ++#include <linux/spi/spi_gpio_old.h> ++#include <linux/gpio.h> ++#include <asm/atomic.h> ++ ++ ++struct spi_gpio { ++ struct spi_bitbang bitbang; ++ struct spi_gpio_platform_data *info; ++ struct platform_device *pdev; ++ struct spi_board_info bi; ++}; ++ ++ ++static inline struct spi_gpio *spidev_to_sg(struct spi_device *dev) ++{ ++ return dev->controller_data; ++} ++ ++static inline void setsck(struct spi_device *dev, int val) ++{ ++ struct spi_gpio *sp = spidev_to_sg(dev); ++ gpio_set_value(sp->info->pin_clk, val ? 1 : 0); ++} ++ ++static inline void setmosi(struct spi_device *dev, int val) ++{ ++ struct spi_gpio *sp = spidev_to_sg(dev); ++ gpio_set_value(sp->info->pin_mosi, val ? 1 : 0); ++} ++ ++static inline u32 getmiso(struct spi_device *dev) ++{ ++ struct spi_gpio *sp = spidev_to_sg(dev); ++ return gpio_get_value(sp->info->pin_miso) ? 1 : 0; ++} ++ ++static inline void do_spidelay(struct spi_device *dev, unsigned nsecs) ++{ ++ struct spi_gpio *sp = spidev_to_sg(dev); ++ ++ if (!sp->info->no_spi_delay) ++ ndelay(nsecs); ++} ++ ++#define spidelay(nsecs) do { \ ++ /* Steal the spi_device pointer from our caller. \ ++ * The bitbang-API should probably get fixed here... 
*/ \ ++ do_spidelay(spi, nsecs); \ ++ } while (0) ++ ++#define EXPAND_BITBANG_TXRX ++#include "spi-bitbang-txrx.h" ++ ++static u32 spi_gpio_txrx_mode0(struct spi_device *spi, ++ unsigned nsecs, u32 word, u8 bits) ++{ ++ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); ++} ++ ++static u32 spi_gpio_txrx_mode1(struct spi_device *spi, ++ unsigned nsecs, u32 word, u8 bits) ++{ ++ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); ++} ++ ++static u32 spi_gpio_txrx_mode2(struct spi_device *spi, ++ unsigned nsecs, u32 word, u8 bits) ++{ ++ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); ++} ++ ++static u32 spi_gpio_txrx_mode3(struct spi_device *spi, ++ unsigned nsecs, u32 word, u8 bits) ++{ ++ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); ++} ++ ++static void spi_gpio_chipselect(struct spi_device *dev, int on) ++{ ++ struct spi_gpio *sp = spidev_to_sg(dev); ++ ++ if (sp->info->cs_activelow) ++ on = !on; ++ gpio_set_value(sp->info->pin_cs, on ? 1 : 0); ++} ++ ++static int spi_gpio_probe(struct platform_device *pdev) ++{ ++ struct spi_master *master; ++ struct spi_gpio_platform_data *pdata; ++ struct spi_gpio *sp; ++ struct spi_device *spidev; ++ int err; ++ ++ pdata = pdev->dev.platform_data; ++ if (!pdata) ++ return -ENXIO; ++ ++ err = -ENOMEM; ++ master = spi_alloc_master(&pdev->dev, sizeof(struct spi_gpio)); ++ if (!master) ++ goto err_alloc_master; ++ ++ sp = spi_master_get_devdata(master); ++ platform_set_drvdata(pdev, sp); ++ sp->info = pdata; ++ ++ err = gpio_request(pdata->pin_clk, "spi_clock"); ++ if (err) ++ goto err_request_clk; ++ err = gpio_request(pdata->pin_mosi, "spi_mosi"); ++ if (err) ++ goto err_request_mosi; ++ err = gpio_request(pdata->pin_miso, "spi_miso"); ++ if (err) ++ goto err_request_miso; ++ err = gpio_request(pdata->pin_cs, "spi_cs"); ++ if (err) ++ goto err_request_cs; ++ ++ sp->bitbang.master = spi_master_get(master); ++ sp->bitbang.master->bus_num = -1; ++ sp->bitbang.master->num_chipselect = 1; ++ sp->bitbang.chipselect = spi_gpio_chipselect; ++ sp->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_mode0; ++ sp->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_mode1; ++ sp->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_mode2; ++ sp->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_mode3; ++ ++ gpio_direction_output(pdata->pin_clk, 0); ++ gpio_direction_output(pdata->pin_mosi, 0); ++ gpio_direction_output(pdata->pin_cs, ++ pdata->cs_activelow ? 
1 : 0); ++ gpio_direction_input(pdata->pin_miso); ++ ++ err = spi_bitbang_start(&sp->bitbang); ++ if (err) ++ goto err_no_bitbang; ++ err = pdata->boardinfo_setup(&sp->bi, master, ++ pdata->boardinfo_setup_data); ++ if (err) ++ goto err_bi_setup; ++ sp->bi.controller_data = sp; ++ spidev = spi_new_device(master, &sp->bi); ++ if (!spidev) ++ goto err_new_dev; ++ ++ return 0; ++ ++err_new_dev: ++err_bi_setup: ++ spi_bitbang_stop(&sp->bitbang); ++err_no_bitbang: ++ spi_master_put(sp->bitbang.master); ++ gpio_free(pdata->pin_cs); ++err_request_cs: ++ gpio_free(pdata->pin_miso); ++err_request_miso: ++ gpio_free(pdata->pin_mosi); ++err_request_mosi: ++ gpio_free(pdata->pin_clk); ++err_request_clk: ++ kfree(master); ++ ++err_alloc_master: ++ return err; ++} ++ ++static int __devexit spi_gpio_remove(struct platform_device *pdev) ++{ ++ struct spi_gpio *sp; ++ struct spi_gpio_platform_data *pdata; ++ ++ pdata = pdev->dev.platform_data; ++ sp = platform_get_drvdata(pdev); ++ ++ gpio_free(pdata->pin_clk); ++ gpio_free(pdata->pin_mosi); ++ gpio_free(pdata->pin_miso); ++ gpio_free(pdata->pin_cs); ++ spi_bitbang_stop(&sp->bitbang); ++ spi_master_put(sp->bitbang.master); ++ ++ return 0; ++} ++ ++static struct platform_driver spi_gpio_driver = { ++ .driver = { ++ .name = SPI_GPIO_PLATDEV_NAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = spi_gpio_probe, ++ .remove = __devexit_p(spi_gpio_remove), ++}; ++ ++int spi_gpio_next_id(void) ++{ ++ static atomic_t counter = ATOMIC_INIT(-1); ++ ++ return atomic_inc_return(&counter); ++} ++EXPORT_SYMBOL(spi_gpio_next_id); ++ ++static int __init spi_gpio_init(void) ++{ ++ int err; ++ ++ err = platform_driver_register(&spi_gpio_driver); ++ if (err) ++ printk(KERN_ERR "spi-gpio: register failed: %d\n", err); ++ ++ return err; ++} ++module_init(spi_gpio_init); ++ ++static void __exit spi_gpio_exit(void) ++{ ++ platform_driver_unregister(&spi_gpio_driver); ++} ++module_exit(spi_gpio_exit); ++ ++MODULE_AUTHOR("Piot Skamruk <piotr.skamruk at gmail.com>"); ++MODULE_AUTHOR("Michael Buesch"); ++MODULE_DESCRIPTION("Platform independent GPIO bitbanging SPI driver"); ++MODULE_LICENSE("GPL v2"); +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -177,6 +177,15 @@ config SPI_GPIO + GPIO operations, you should be able to leverage that for better + speed with a custom version of this driver; see the source code. + ++config SPI_GPIO_OLD ++ tristate "Old GPIO API based bitbanging SPI controller (DEPRECATED)" ++ depends on SPI_MASTER && GENERIC_GPIO ++ select SPI_BITBANG ++ help ++ This code is deprecated. Please use the new mainline SPI-GPIO driver. ++ ++ If unsure, say N. ++ + config SPI_IMX + tristate "Freescale i.MX SPI controllers" + depends on ARCH_MXC +--- a/drivers/spi/Makefile ++++ b/drivers/spi/Makefile +@@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-li + obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o + obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o + obj-$(CONFIG_SPI_GPIO) += spi-gpio.o ++obj-$(CONFIG_SPI_GPIO_OLD) += spi_gpio_old.o + obj-$(CONFIG_SPI_IMX) += spi-imx.o + obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o + obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o diff --git a/target/linux/generic/patches-3.8/863-gpiommc.patch b/target/linux/generic/patches-3.8/863-gpiommc.patch new file mode 100644 index 0000000000..7dfe2fa287 --- /dev/null +++ b/target/linux/generic/patches-3.8/863-gpiommc.patch @@ -0,0 +1,844 @@ +--- /dev/null ++++ b/drivers/mmc/host/gpiommc.c +@@ -0,0 +1,609 @@ ++/* ++ * Driver an MMC/SD card on a bitbanging GPIO SPI bus. 
++ * This module hooks up the mmc_spi and spi_gpio modules and also ++ * provides a configfs interface. ++ * ++ * Copyright 2008 Michael Buesch <mb@bu3sch.de> ++ * ++ * Licensed under the GNU/GPL. See COPYING for details. ++ */ ++ ++#include <linux/module.h> ++#include <linux/mmc/gpiommc.h> ++#include <linux/platform_device.h> ++#include <linux/list.h> ++#include <linux/mutex.h> ++#include <linux/spi/spi_gpio_old.h> ++#include <linux/configfs.h> ++#include <linux/gpio.h> ++#include <asm/atomic.h> ++ ++ ++#define PFX "gpio-mmc: " ++ ++ ++struct gpiommc_device { ++ struct platform_device *pdev; ++ struct platform_device *spi_pdev; ++ struct spi_board_info boardinfo; ++}; ++ ++ ++MODULE_DESCRIPTION("GPIO based MMC driver"); ++MODULE_AUTHOR("Michael Buesch"); ++MODULE_LICENSE("GPL"); ++ ++ ++static int gpiommc_boardinfo_setup(struct spi_board_info *bi, ++ struct spi_master *master, ++ void *data) ++{ ++ struct gpiommc_device *d = data; ++ struct gpiommc_platform_data *pdata = d->pdev->dev.platform_data; ++ ++ /* Bind the SPI master to the MMC-SPI host driver. */ ++ strlcpy(bi->modalias, "mmc_spi", sizeof(bi->modalias)); ++ ++ bi->max_speed_hz = pdata->max_bus_speed; ++ bi->bus_num = master->bus_num; ++ bi->mode = pdata->mode; ++ ++ return 0; ++} ++ ++static int gpiommc_probe(struct platform_device *pdev) ++{ ++ struct gpiommc_platform_data *mmc_pdata = pdev->dev.platform_data; ++ struct spi_gpio_platform_data spi_pdata; ++ struct gpiommc_device *d; ++ int err; ++ ++ err = -ENXIO; ++ if (!mmc_pdata) ++ goto error; ++ ++#ifdef CONFIG_MMC_SPI_MODULE ++ err = request_module("mmc_spi"); ++ if (err) { ++ printk(KERN_WARNING PFX ++ "Failed to request mmc_spi module.\n"); ++ } ++#endif /* CONFIG_MMC_SPI_MODULE */ ++ ++ /* Allocate the GPIO-MMC device */ ++ err = -ENOMEM; ++ d = kzalloc(sizeof(*d), GFP_KERNEL); ++ if (!d) ++ goto error; ++ d->pdev = pdev; ++ ++ /* Create the SPI-GPIO device */ ++ d->spi_pdev = platform_device_alloc(SPI_GPIO_PLATDEV_NAME, ++ spi_gpio_next_id()); ++ if (!d->spi_pdev) ++ goto err_free_d; ++ ++ memset(&spi_pdata, 0, sizeof(spi_pdata)); ++ spi_pdata.pin_clk = mmc_pdata->pins.gpio_clk; ++ spi_pdata.pin_miso = mmc_pdata->pins.gpio_do; ++ spi_pdata.pin_mosi = mmc_pdata->pins.gpio_di; ++ spi_pdata.pin_cs = mmc_pdata->pins.gpio_cs; ++ spi_pdata.cs_activelow = mmc_pdata->pins.cs_activelow; ++ spi_pdata.no_spi_delay = mmc_pdata->no_spi_delay; ++ spi_pdata.boardinfo_setup = gpiommc_boardinfo_setup; ++ spi_pdata.boardinfo_setup_data = d; ++ ++ err = platform_device_add_data(d->spi_pdev, &spi_pdata, ++ sizeof(spi_pdata)); ++ if (err) ++ goto err_free_pdev; ++ err = platform_device_add(d->spi_pdev); ++ if (err) ++ goto err_free_pdata; ++ platform_set_drvdata(pdev, d); ++ ++ printk(KERN_INFO PFX "MMC-Card \"%s\" " ++ "attached to GPIO pins di=%u, do=%u, clk=%u, cs=%u\n", ++ mmc_pdata->name, mmc_pdata->pins.gpio_di, ++ mmc_pdata->pins.gpio_do, ++ mmc_pdata->pins.gpio_clk, ++ mmc_pdata->pins.gpio_cs); ++ ++ return 0; ++ ++err_free_pdata: ++ kfree(d->spi_pdev->dev.platform_data); ++ d->spi_pdev->dev.platform_data = NULL; ++err_free_pdev: ++ platform_device_put(d->spi_pdev); ++err_free_d: ++ kfree(d); ++error: ++ return err; ++} ++ ++static int gpiommc_remove(struct platform_device *pdev) ++{ ++ struct gpiommc_device *d = platform_get_drvdata(pdev); ++ struct gpiommc_platform_data *pdata = d->pdev->dev.platform_data; ++ ++ platform_device_unregister(d->spi_pdev); ++ printk(KERN_INFO PFX "GPIO based MMC-Card \"%s\" removed\n", ++ pdata->name); ++ platform_device_put(d->spi_pdev); ++ ++ 
return 0; ++} ++ ++#ifdef CONFIG_GPIOMMC_CONFIGFS ++ ++/* A device that was created through configfs */ ++struct gpiommc_configfs_device { ++ struct config_item item; ++ /* The platform device, after registration. */ ++ struct platform_device *pdev; ++ /* The configuration */ ++ struct gpiommc_platform_data pdata; ++}; ++ ++#define GPIO_INVALID -1 ++ ++static inline bool gpiommc_is_registered(struct gpiommc_configfs_device *dev) ++{ ++ return (dev->pdev != NULL); ++} ++ ++static inline struct gpiommc_configfs_device *ci_to_gpiommc(struct config_item *item) ++{ ++ return item ? container_of(item, struct gpiommc_configfs_device, item) : NULL; ++} ++ ++static struct configfs_attribute gpiommc_attr_DI = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "gpio_data_in", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_DO = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "gpio_data_out", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_CLK = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "gpio_clock", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_CS = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "gpio_chipselect", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_CS_activelow = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "gpio_chipselect_activelow", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_spimode = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "spi_mode", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_spidelay = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "spi_delay", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_max_bus_speed = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "max_bus_speed", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute gpiommc_attr_register = { ++ .ca_owner = THIS_MODULE, ++ .ca_name = "register", ++ .ca_mode = S_IRUGO | S_IWUSR, ++}; ++ ++static struct configfs_attribute *gpiommc_config_attrs[] = { ++ &gpiommc_attr_DI, ++ &gpiommc_attr_DO, ++ &gpiommc_attr_CLK, ++ &gpiommc_attr_CS, ++ &gpiommc_attr_CS_activelow, ++ &gpiommc_attr_spimode, ++ &gpiommc_attr_spidelay, ++ &gpiommc_attr_max_bus_speed, ++ &gpiommc_attr_register, ++ NULL, ++}; ++ ++static ssize_t gpiommc_config_attr_show(struct config_item *item, ++ struct configfs_attribute *attr, ++ char *page) ++{ ++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item); ++ ssize_t count = 0; ++ unsigned int gpio; ++ int err = 0; ++ ++ if (attr == &gpiommc_attr_DI) { ++ gpio = dev->pdata.pins.gpio_di; ++ if (gpio == GPIO_INVALID) ++ count = snprintf(page, PAGE_SIZE, "not configured\n"); ++ else ++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_DO) { ++ gpio = dev->pdata.pins.gpio_do; ++ if (gpio == GPIO_INVALID) ++ count = snprintf(page, PAGE_SIZE, "not configured\n"); ++ else ++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_CLK) { ++ gpio = dev->pdata.pins.gpio_clk; ++ if (gpio == GPIO_INVALID) ++ count = snprintf(page, PAGE_SIZE, "not configured\n"); ++ else ++ count = snprintf(page, PAGE_SIZE, "%u\n", gpio); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_CS) { ++ gpio = dev->pdata.pins.gpio_cs; ++ if (gpio == GPIO_INVALID) ++ count = snprintf(page, PAGE_SIZE, "not configured\n"); ++ else ++ count = snprintf(page, PAGE_SIZE, 
"%u\n", gpio); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_CS_activelow) { ++ count = snprintf(page, PAGE_SIZE, "%u\n", ++ dev->pdata.pins.cs_activelow); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_spimode) { ++ count = snprintf(page, PAGE_SIZE, "%u\n", ++ dev->pdata.mode); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_spidelay) { ++ count = snprintf(page, PAGE_SIZE, "%u\n", ++ !dev->pdata.no_spi_delay); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_max_bus_speed) { ++ count = snprintf(page, PAGE_SIZE, "%u\n", ++ dev->pdata.max_bus_speed); ++ goto out; ++ } ++ if (attr == &gpiommc_attr_register) { ++ count = snprintf(page, PAGE_SIZE, "%u\n", ++ gpiommc_is_registered(dev)); ++ goto out; ++ } ++ WARN_ON(1); ++ err = -ENOSYS; ++out: ++ return err ? err : count; ++} ++ ++static int gpiommc_do_register(struct gpiommc_configfs_device *dev, ++ const char *name) ++{ ++ int err; ++ ++ if (gpiommc_is_registered(dev)) ++ return 0; ++ ++ if (!gpio_is_valid(dev->pdata.pins.gpio_di) || ++ !gpio_is_valid(dev->pdata.pins.gpio_do) || ++ !gpio_is_valid(dev->pdata.pins.gpio_clk) || ++ !gpio_is_valid(dev->pdata.pins.gpio_cs)) { ++ printk(KERN_ERR PFX ++ "configfs: Invalid GPIO pin number(s)\n"); ++ return -EINVAL; ++ } ++ ++ strlcpy(dev->pdata.name, name, ++ sizeof(dev->pdata.name)); ++ ++ dev->pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME, ++ gpiommc_next_id()); ++ if (!dev->pdev) ++ return -ENOMEM; ++ err = platform_device_add_data(dev->pdev, &dev->pdata, ++ sizeof(dev->pdata)); ++ if (err) { ++ platform_device_put(dev->pdev); ++ return err; ++ } ++ err = platform_device_add(dev->pdev); ++ if (err) { ++ platform_device_put(dev->pdev); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static void gpiommc_do_unregister(struct gpiommc_configfs_device *dev) ++{ ++ if (!gpiommc_is_registered(dev)) ++ return; ++ ++ platform_device_unregister(dev->pdev); ++ dev->pdev = NULL; ++} ++ ++static ssize_t gpiommc_config_attr_store(struct config_item *item, ++ struct configfs_attribute *attr, ++ const char *page, size_t count) ++{ ++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item); ++ int err = -EINVAL; ++ unsigned long data; ++ ++ if (attr == &gpiommc_attr_register) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (data == 1) ++ err = gpiommc_do_register(dev, item->ci_name); ++ if (data == 0) { ++ gpiommc_do_unregister(dev); ++ err = 0; ++ } ++ goto out; ++ } ++ ++ if (gpiommc_is_registered(dev)) { ++ /* The rest of the config parameters can only be set ++ * as long as the device is not registered, yet. 
*/ ++ err = -EBUSY; ++ goto out; ++ } ++ ++ if (attr == &gpiommc_attr_DI) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (!gpio_is_valid(data)) ++ goto out; ++ dev->pdata.pins.gpio_di = data; ++ err = 0; ++ goto out; ++ } ++ if (attr == &gpiommc_attr_DO) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (!gpio_is_valid(data)) ++ goto out; ++ dev->pdata.pins.gpio_do = data; ++ err = 0; ++ goto out; ++ } ++ if (attr == &gpiommc_attr_CLK) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (!gpio_is_valid(data)) ++ goto out; ++ dev->pdata.pins.gpio_clk = data; ++ err = 0; ++ goto out; ++ } ++ if (attr == &gpiommc_attr_CS) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (!gpio_is_valid(data)) ++ goto out; ++ dev->pdata.pins.gpio_cs = data; ++ err = 0; ++ goto out; ++ } ++ if (attr == &gpiommc_attr_CS_activelow) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (data != 0 && data != 1) ++ goto out; ++ dev->pdata.pins.cs_activelow = data; ++ err = 0; ++ goto out; ++ } ++ if (attr == &gpiommc_attr_spimode) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ switch (data) { ++ case 0: ++ dev->pdata.mode = SPI_MODE_0; ++ break; ++ case 1: ++ dev->pdata.mode = SPI_MODE_1; ++ break; ++ case 2: ++ dev->pdata.mode = SPI_MODE_2; ++ break; ++ case 3: ++ dev->pdata.mode = SPI_MODE_3; ++ break; ++ default: ++ goto out; ++ } ++ err = 0; ++ goto out; ++ } ++ if (attr == &gpiommc_attr_spidelay) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (data != 0 && data != 1) ++ goto out; ++ dev->pdata.no_spi_delay = !data; ++ err = 0; ++ goto out; ++ } ++ if (attr == &gpiommc_attr_max_bus_speed) { ++ err = strict_strtoul(page, 10, &data); ++ if (err) ++ goto out; ++ err = -EINVAL; ++ if (data > UINT_MAX) ++ goto out; ++ dev->pdata.max_bus_speed = data; ++ err = 0; ++ goto out; ++ } ++ WARN_ON(1); ++ err = -ENOSYS; ++out: ++ return err ? 
err : count; ++} ++ ++static void gpiommc_config_item_release(struct config_item *item) ++{ ++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item); ++ ++ kfree(dev); ++} ++ ++static struct configfs_item_operations gpiommc_config_item_ops = { ++ .release = gpiommc_config_item_release, ++ .show_attribute = gpiommc_config_attr_show, ++ .store_attribute = gpiommc_config_attr_store, ++}; ++ ++static struct config_item_type gpiommc_dev_ci_type = { ++ .ct_item_ops = &gpiommc_config_item_ops, ++ .ct_attrs = gpiommc_config_attrs, ++ .ct_owner = THIS_MODULE, ++}; ++ ++static struct config_item *gpiommc_make_item(struct config_group *group, ++ const char *name) ++{ ++ struct gpiommc_configfs_device *dev; ++ ++ if (strlen(name) > GPIOMMC_MAX_NAMELEN) { ++ printk(KERN_ERR PFX "configfs: device name too long\n"); ++ return NULL; ++ } ++ ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ if (!dev) ++ return NULL; ++ ++ config_item_init_type_name(&dev->item, name, ++ &gpiommc_dev_ci_type); ++ ++ /* Assign default configuration */ ++ dev->pdata.pins.gpio_di = GPIO_INVALID; ++ dev->pdata.pins.gpio_do = GPIO_INVALID; ++ dev->pdata.pins.gpio_clk = GPIO_INVALID; ++ dev->pdata.pins.gpio_cs = GPIO_INVALID; ++ dev->pdata.pins.cs_activelow = 1; ++ dev->pdata.mode = SPI_MODE_0; ++ dev->pdata.no_spi_delay = 0; ++ dev->pdata.max_bus_speed = 5000000; /* 5 MHz */ ++ ++ return &(dev->item); ++} ++ ++static void gpiommc_drop_item(struct config_group *group, ++ struct config_item *item) ++{ ++ struct gpiommc_configfs_device *dev = ci_to_gpiommc(item); ++ ++ gpiommc_do_unregister(dev); ++ kfree(dev); ++} ++ ++static struct configfs_group_operations gpiommc_ct_group_ops = { ++ .make_item = gpiommc_make_item, ++ .drop_item = gpiommc_drop_item, ++}; ++ ++static struct config_item_type gpiommc_ci_type = { ++ .ct_group_ops = &gpiommc_ct_group_ops, ++ .ct_owner = THIS_MODULE, ++}; ++ ++static struct configfs_subsystem gpiommc_subsys = { ++ .su_group = { ++ .cg_item = { ++ .ci_namebuf = GPIOMMC_PLATDEV_NAME, ++ .ci_type = &gpiommc_ci_type, ++ }, ++ }, ++ .su_mutex = __MUTEX_INITIALIZER(gpiommc_subsys.su_mutex), ++}; ++ ++#endif /* CONFIG_GPIOMMC_CONFIGFS */ ++ ++static struct platform_driver gpiommc_plat_driver = { ++ .probe = gpiommc_probe, ++ .remove = gpiommc_remove, ++ .driver = { ++ .name = GPIOMMC_PLATDEV_NAME, ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++int gpiommc_next_id(void) ++{ ++ static atomic_t counter = ATOMIC_INIT(-1); ++ ++ return atomic_inc_return(&counter); ++} ++EXPORT_SYMBOL(gpiommc_next_id); ++ ++static int __init gpiommc_modinit(void) ++{ ++ int err; ++ ++ err = platform_driver_register(&gpiommc_plat_driver); ++ if (err) ++ return err; ++ ++#ifdef CONFIG_GPIOMMC_CONFIGFS ++ config_group_init(&gpiommc_subsys.su_group); ++ err = configfs_register_subsystem(&gpiommc_subsys); ++ if (err) { ++ platform_driver_unregister(&gpiommc_plat_driver); ++ return err; ++ } ++#endif /* CONFIG_GPIOMMC_CONFIGFS */ ++ ++ return 0; ++} ++module_init(gpiommc_modinit); ++ ++static void __exit gpiommc_modexit(void) ++{ ++#ifdef CONFIG_GPIOMMC_CONFIGFS ++ configfs_unregister_subsystem(&gpiommc_subsys); ++#endif ++ platform_driver_unregister(&gpiommc_plat_driver); ++} ++module_exit(gpiommc_modexit); +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -459,6 +459,31 @@ config MMC_SDHI + This provides support for the SDHI SD/SDIO controller found in + SuperH and ARM SH-Mobile SoCs + ++config GPIOMMC ++ tristate "MMC/SD over GPIO-based SPI" ++ depends on MMC && MMC_SPI && SPI_GPIO_OLD ++ help ++ This driver hooks up the 
mmc_spi and spi_gpio modules so that ++ MMC/SD cards can be used on a GPIO based bus by bitbanging ++ the SPI protocol in software. ++ ++ This driver provides a configfs interface to dynamically create ++ and destroy GPIO-based MMC/SD card devices. It also provides ++ a platform device interface API. ++ See Documentation/gpiommc.txt for details. ++ ++ The module will be called gpiommc. ++ ++ If unsure, say N. ++ ++config GPIOMMC_CONFIGFS ++ bool ++ depends on GPIOMMC && CONFIGFS_FS ++ default y ++ help ++ This option automatically enables configfs support for gpiommc ++ if configfs is available. ++ + config MMC_CB710 + tristate "ENE CB710 MMC/SD Interface support" + depends on PCI +--- a/drivers/mmc/host/Makefile ++++ b/drivers/mmc/host/Makefile +@@ -36,6 +36,7 @@ tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_S + obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o + obj-$(CONFIG_MMC_CB710) += cb710-mmc.o + obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o ++obj-$(CONFIG_GPIOMMC) += gpiommc.o + obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o + obj-$(CONFIG_MMC_DW) += dw_mmc.o + obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o +--- /dev/null ++++ b/include/linux/mmc/gpiommc.h +@@ -0,0 +1,71 @@ ++/* ++ * Device driver for MMC/SD cards driven over a GPIO bus. ++ * ++ * Copyright (c) 2008 Michael Buesch ++ * ++ * Licensed under the GNU/GPL version 2. ++ */ ++#ifndef LINUX_GPIOMMC_H_ ++#define LINUX_GPIOMMC_H_ ++ ++#include <linux/types.h> ++ ++ ++#define GPIOMMC_MAX_NAMELEN 15 ++#define GPIOMMC_MAX_NAMELEN_STR __stringify(GPIOMMC_MAX_NAMELEN) ++ ++/** ++ * struct gpiommc_pins - Hardware pin assignments ++ * ++ * @gpio_di: The GPIO number of the DATA IN pin ++ * @gpio_do: The GPIO number of the DATA OUT pin ++ * @gpio_clk: The GPIO number of the CLOCK pin ++ * @gpio_cs: The GPIO number of the CHIPSELECT pin ++ * @cs_activelow: If true, the chip is considered selected if @gpio_cs is low. ++ */ ++struct gpiommc_pins { ++ unsigned int gpio_di; ++ unsigned int gpio_do; ++ unsigned int gpio_clk; ++ unsigned int gpio_cs; ++ bool cs_activelow; ++}; ++ ++/** ++ * struct gpiommc_platform_data - Platform data for a MMC-over-SPI-GPIO device. ++ * ++ * @name: The unique name string of the device. ++ * @pins: The hardware pin assignments. ++ * @mode: The hardware mode. This is either SPI_MODE_0, ++ * SPI_MODE_1, SPI_MODE_2 or SPI_MODE_3. See the SPI documentation. ++ * @no_spi_delay: Do not use delays in the lowlevel SPI bitbanging code. ++ * This is not standards compliant, but may be required for some ++ * embedded machines to gain reasonable speed. ++ * @max_bus_speed: The maximum speed of the SPI bus, in Hertz. ++ */ ++struct gpiommc_platform_data { ++ char name[GPIOMMC_MAX_NAMELEN + 1]; ++ struct gpiommc_pins pins; ++ u8 mode; ++ bool no_spi_delay; ++ unsigned int max_bus_speed; ++}; ++ ++/** ++ * GPIOMMC_PLATDEV_NAME - The platform device name string. ++ * ++ * The name string that has to be used for platform_device_alloc ++ * when allocating a gpiommc device. ++ */ ++#define GPIOMMC_PLATDEV_NAME "gpiommc" ++ ++/** ++ * gpiommc_next_id - Get another platform device ID number. ++ * ++ * This returns the next platform device ID number that has to be used ++ * for platform_device_alloc. The ID is opaque and should not be used for ++ * anything else. 
++ */ ++int gpiommc_next_id(void); ++ ++#endif /* LINUX_GPIOMMC_H_ */ +--- /dev/null ++++ b/Documentation/gpiommc.txt +@@ -0,0 +1,97 @@ ++GPIOMMC - Driver for an MMC/SD card on a bitbanging GPIO SPI bus ++================================================================ ++ ++The gpiommc module hooks up the mmc_spi and spi_gpio modules for running an ++MMC or SD card on GPIO pins. ++ ++Two interfaces for registering a new MMC/SD card device are provided: ++A static platform-device based mechanism and a dynamic configfs based interface. ++ ++ ++Registering devices via platform-device ++======================================= ++ ++The platform-device interface is used for registering MMC/SD devices that are ++part of the hardware platform. This is most useful only for embedded machines ++with MMC/SD devices statically connected to the platform GPIO bus. ++ ++The data structures are declared in <linux/mmc/gpiommc.h>. ++ ++To register a new device, define an instance of struct gpiommc_platform_data. ++This structure holds any information about how the device is hooked up to the ++GPIO pins and what hardware modes the device supports. See the docbook-style ++documentation in the header file for more information on the struct fields. ++ ++Then allocate a new instance of a platform device by doing: ++ ++ pdev = platform_device_alloc(GPIOMMC_PLATDEV_NAME, gpiommc_next_id()); ++ ++This will allocate the platform device data structures and hook it up to the ++gpiommc driver. ++Then add the gpiommc_platform_data to the platform device. ++ ++ err = platform_device_add_data(pdev, pdata, sizeof(struct gpiommc_platform_data)); ++ ++You may free the local instance of struct gpiommc_platform_data now. (So the ++struct may be allocated on the stack, too). ++Now simply register the platform device. ++ ++ err = platform_device_add(pdev); ++ ++Done. The gpiommc probe routine will be invoked now and you should see a kernel ++log message for the added device. ++ ++ ++Registering devices via configfs ++================================ ++ ++MMC/SD cards connected via GPIO often are a pretty dynamic thing, as for example ++selfmade hacks for soldering an MMC/SD card to standard GPIO pins on embedded ++hardware are a common situation. ++So we provide a dynamic interface to conveniently handle adding and removing ++devices from userspace, without the need to recompile the kernel. ++ ++The "gpiommc" subdirectory at the configfs mountpoint is used for handling ++the dynamic configuration. ++ ++To create a new device, it must first be allocated with mkdir. ++The following command will allocate a device named "my_mmc": ++ mkdir /config/gpiommc/my_mmc ++ ++There are several configuration files available in the new ++/config/gpiommc/my_mmc/ directory: ++ ++gpio_data_in = The SPI data-IN GPIO pin number. ++gpio_data_out = The SPI data-OUT GPIO pin number. ++gpio_clock = The SPI Clock GPIO pin number. ++gpio_chipselect = The SPI Chipselect GPIO pin number. ++gpio_chipselect_activelow = Boolean. If 0, Chipselect is active-HIGH. ++ If 1, Chipselect is active-LOW. ++spi_mode = The SPI data mode. Can be 0-3. ++spi_delay = Enable all delays in the lowlevel bitbanging. ++max_bus_speed = The maximum SPI bus speed. In Hertz. ++ ++register = Not a configuration parameter. ++ Used to register the configured card ++ with the kernel. ++ ++The device must first get configured and then registered by writing "1" to ++the "register" file. 
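A complete session might look like the following, with placeholder GPIO numbers (use whatever pins the card is actually wired to) and configfs mounted on /config as in the examples above:

	mkdir /config/gpiommc/my_mmc
	echo 1 > /config/gpiommc/my_mmc/gpio_data_in
	echo 2 > /config/gpiommc/my_mmc/gpio_data_out
	echo 3 > /config/gpiommc/my_mmc/gpio_clock
	echo 4 > /config/gpiommc/my_mmc/gpio_chipselect
	echo 1 > /config/gpiommc/my_mmc/register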
++The configuration parameters "gpio_data_in", "gpio_data_out", "gpio_clock" ++and "gpio_chipselect" are essential and _must_ be configured before writing ++"1" to the "register" file. The registration will fail, otherwise. ++ ++The default values for the other parameters are: ++gpio_chipselect_activelow = 1 (CS active-LOW) ++spi_mode = 0 (SPI_MODE_0) ++spi_delay = 1 (enabled) ++max_bus_speed = 5000000 (5 Mhz) ++ ++Configuration values can not be changed after registration. To unregister ++the device, write a "0" to the "register" file. The configuration can be ++changed again after unregistering. ++ ++To completely remove the device, simply rmdir the directory ++(/config/gpiommc/my_mmc in this example). ++There's no need to first unregister the device before removing it. That will ++be done automatically. +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -3478,6 +3478,11 @@ L: linuxppc-dev@lists.ozlabs.org + S: Odd Fixes + F: drivers/tty/hvc/ + ++GPIOMMC DRIVER ++P: Michael Buesch ++M: mb@bu3sch.de ++S: Maintained ++ + HARDWARE MONITORING + M: Jean Delvare <khali@linux-fr.org> + M: Guenter Roeck <linux@roeck-us.net> diff --git a/target/linux/generic/patches-3.8/864-gpiommc_configfs_locking.patch b/target/linux/generic/patches-3.8/864-gpiommc_configfs_locking.patch new file mode 100644 index 0000000000..d4201eb508 --- /dev/null +++ b/target/linux/generic/patches-3.8/864-gpiommc_configfs_locking.patch @@ -0,0 +1,58 @@ +The gpiommc configfs context structure needs locking, as configfs +does not lock access between files. + +--- a/drivers/mmc/host/gpiommc.c ++++ b/drivers/mmc/host/gpiommc.c +@@ -144,6 +144,8 @@ struct gpiommc_configfs_device { + struct platform_device *pdev; + /* The configuration */ + struct gpiommc_platform_data pdata; ++ /* Mutex to protect this structure */ ++ struct mutex mutex; + }; + + #define GPIO_INVALID -1 +@@ -234,6 +236,8 @@ static ssize_t gpiommc_config_attr_show( + unsigned int gpio; + int err = 0; + ++ mutex_lock(&dev->mutex); ++ + if (attr == &gpiommc_attr_DI) { + gpio = dev->pdata.pins.gpio_di; + if (gpio == GPIO_INVALID) +@@ -294,6 +298,8 @@ static ssize_t gpiommc_config_attr_show( + WARN_ON(1); + err = -ENOSYS; + out: ++ mutex_unlock(&dev->mutex); ++ + return err ? err : count; + } + +@@ -353,6 +359,8 @@ static ssize_t gpiommc_config_attr_store + int err = -EINVAL; + unsigned long data; + ++ mutex_lock(&dev->mutex); ++ + if (attr == &gpiommc_attr_register) { + err = strict_strtoul(page, 10, &data); + if (err) +@@ -478,6 +486,8 @@ static ssize_t gpiommc_config_attr_store + WARN_ON(1); + err = -ENOSYS; + out: ++ mutex_unlock(&dev->mutex); ++ + return err ? 
err : count; + } + +@@ -514,6 +524,7 @@ static struct config_item *gpiommc_make_ + if (!dev) + return NULL; + ++ mutex_init(&dev->mutex); + config_item_init_type_name(&dev->item, name, + &gpiommc_dev_ci_type); + diff --git a/target/linux/generic/patches-3.8/865-gpiopwm.patch b/target/linux/generic/patches-3.8/865-gpiopwm.patch new file mode 100644 index 0000000000..f899d1a63a --- /dev/null +++ b/target/linux/generic/patches-3.8/865-gpiopwm.patch @@ -0,0 +1,21 @@ +--- a/drivers/Kconfig ++++ b/drivers/Kconfig +@@ -62,6 +62,8 @@ source "drivers/pinctrl/Kconfig" + + source "drivers/gpio/Kconfig" + ++source "drivers/pwm/Kconfig" ++ + source "drivers/w1/Kconfig" + + source "drivers/power/Kconfig" +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -13,6 +13,7 @@ obj-y += pinctrl/ + obj-y += gpio/ + obj-y += pwm/ + obj-$(CONFIG_PCI) += pci/ ++obj-$(CONFIG_GENERIC_PWM) += pwm/ + obj-$(CONFIG_PARISC) += parisc/ + obj-$(CONFIG_RAPIDIO) += rapidio/ + obj-y += video/ diff --git a/target/linux/generic/patches-3.8/870-hifn795x_byteswap.patch b/target/linux/generic/patches-3.8/870-hifn795x_byteswap.patch new file mode 100644 index 0000000000..3a37c951ec --- /dev/null +++ b/target/linux/generic/patches-3.8/870-hifn795x_byteswap.patch @@ -0,0 +1,17 @@ +--- a/drivers/crypto/hifn_795x.c ++++ b/drivers/crypto/hifn_795x.c +@@ -682,12 +682,12 @@ static inline u32 hifn_read_1(struct hif + + static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) + { +- writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg); ++ writel(val, dev->bar[0] + reg); + } + + static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val) + { +- writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg); ++ writel(val, dev->bar[1] + reg); + } + + static void hifn_wait_puc(struct hifn_device *dev) diff --git a/target/linux/generic/patches-3.8/880-gateworks_system_controller.patch b/target/linux/generic/patches-3.8/880-gateworks_system_controller.patch new file mode 100644 index 0000000000..dd78ef8d07 --- /dev/null +++ b/target/linux/generic/patches-3.8/880-gateworks_system_controller.patch @@ -0,0 +1,339 @@ +--- a/drivers/hwmon/Kconfig ++++ b/drivers/hwmon/Kconfig +@@ -444,6 +444,15 @@ config SENSORS_GL520SM + This driver can also be built as a module. If so, the module + will be called gl520sm. + ++config SENSORS_GSC ++ tristate "Gateworks System Controller" ++ depends on I2C && (ARCH_DAVINCI || ARCH_CNS3XXX || ARCH_IXP4XX) ++ help ++ If you say yes here you get support for the Gateworks System Controller. ++ ++ This driver can also be built as a module. If so, the module ++ will be called gsc. ++ + config SENSORS_GPIO_FAN + tristate "GPIO fan" + depends on GPIOLIB +--- a/drivers/hwmon/Makefile ++++ b/drivers/hwmon/Makefile +@@ -132,6 +132,7 @@ obj-$(CONFIG_SENSORS_W83L785TS) += w83l7 + obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o + obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o + obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o ++obj-$(CONFIG_SENSORS_GSC) += gsc.o + + obj-$(CONFIG_PMBUS) += pmbus/ + +--- /dev/null ++++ b/drivers/hwmon/gsc.c +@@ -0,0 +1,308 @@ ++/* ++ * A hwmon driver for the Gateworks System Controller ++ * Copyright (C) 2009 Gateworks Corporation ++ * ++ * Author: Chris Lang <clang@gateworks.com> ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, ++ * as published by the Free Software Foundation - version 2. 
++ */ ++ ++#include <linux/module.h> ++#include <linux/i2c.h> ++#include <linux/hwmon.h> ++#include <linux/hwmon-sysfs.h> ++#include <linux/err.h> ++#include <linux/slab.h> ++ ++#define DRV_VERSION "0.2" ++ ++enum chips { gsp }; ++ ++/* AD7418 registers */ ++#define GSP_REG_TEMP_IN 0x00 ++#define GSP_REG_VIN 0x02 ++#define GSP_REG_3P3 0x05 ++#define GSP_REG_BAT 0x08 ++#define GSP_REG_5P0 0x0b ++#define GSP_REG_CORE 0x0e ++#define GSP_REG_CPU1 0x11 ++#define GSP_REG_CPU2 0x14 ++#define GSP_REG_DRAM 0x17 ++#define GSP_REG_EXT_BAT 0x1a ++#define GSP_REG_IO1 0x1d ++#define GSP_REG_IO2 0x20 ++#define GSP_REG_PCIE 0x23 ++#define GSP_REG_CURRENT 0x26 ++#define GSP_FAN_0 0x2C ++#define GSP_FAN_1 0x2E ++#define GSP_FAN_2 0x30 ++#define GSP_FAN_3 0x32 ++#define GSP_FAN_4 0x34 ++#define GSP_FAN_5 0x36 ++ ++struct gsp_sensor_info { ++ const char* name; ++ int reg; ++}; ++ ++static const struct gsp_sensor_info gsp_sensors[] = { ++ {"temp", GSP_REG_TEMP_IN}, ++ {"vin", GSP_REG_VIN}, ++ {"3p3", GSP_REG_3P3}, ++ {"bat", GSP_REG_BAT}, ++ {"5p0", GSP_REG_5P0}, ++ {"core", GSP_REG_CORE}, ++ {"cpu1", GSP_REG_CPU1}, ++ {"cpu2", GSP_REG_CPU2}, ++ {"dram", GSP_REG_DRAM}, ++ {"ext_bat", GSP_REG_EXT_BAT}, ++ {"io1", GSP_REG_IO1}, ++ {"io2", GSP_REG_IO2}, ++ {"pci2", GSP_REG_PCIE}, ++ {"current", GSP_REG_CURRENT}, ++ {"fan_point0", GSP_FAN_0}, ++ {"fan_point1", GSP_FAN_1}, ++ {"fan_point2", GSP_FAN_2}, ++ {"fan_point3", GSP_FAN_3}, ++ {"fan_point4", GSP_FAN_4}, ++ {"fan_point5", GSP_FAN_5}, ++}; ++ ++struct gsp_data { ++ struct device *hwmon_dev; ++ struct attribute_group attrs; ++ enum chips type; ++}; ++ ++static int gsp_probe(struct i2c_client *client, ++ const struct i2c_device_id *id); ++static int gsp_remove(struct i2c_client *client); ++ ++static const struct i2c_device_id gsp_id[] = { ++ { "gsp", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, gsp_id); ++ ++static struct i2c_driver gsp_driver = { ++ .driver = { ++ .name = "gsp", ++ }, ++ .probe = gsp_probe, ++ .remove = gsp_remove, ++ .id_table = gsp_id, ++}; ++ ++/* All registers are word-sized, except for the configuration registers. ++ * AD7418 uses a high-byte first convention. Do NOT use those functions to ++ * access the configuration registers CONF and CONF2, as they are byte-sized. 
++ */ ++static inline int gsp_read(struct i2c_client *client, u8 reg) ++{ ++ unsigned int adc = 0; ++ if (reg == GSP_REG_TEMP_IN || reg > GSP_REG_CURRENT) ++ { ++ adc |= i2c_smbus_read_byte_data(client, reg); ++ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; ++ return adc; ++ } ++ else ++ { ++ adc |= i2c_smbus_read_byte_data(client, reg); ++ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; ++ adc |= i2c_smbus_read_byte_data(client, reg + 2) << 16; ++ return adc; ++ } ++} ++ ++static inline int gsp_write(struct i2c_client *client, u8 reg, u16 value) ++{ ++ i2c_smbus_write_byte_data(client, reg, value & 0xff); ++ i2c_smbus_write_byte_data(client, reg + 1, ((value >> 8) & 0xff)); ++ return 1; ++} ++ ++static ssize_t show_adc(struct device *dev, struct device_attribute *devattr, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct i2c_client *client = to_i2c_client(dev); ++ return sprintf(buf, "%d\n", gsp_read(client, gsp_sensors[attr->index].reg)); ++} ++ ++static ssize_t show_label(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ ++ return sprintf(buf, "%s\n", gsp_sensors[attr->index].name); ++} ++ ++static ssize_t store_fan(struct device *dev, ++ struct device_attribute *devattr, const char *buf, size_t count) ++{ ++ u16 val; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct i2c_client *client = to_i2c_client(dev); ++ val = simple_strtoul(buf, NULL, 10); ++ gsp_write(client, gsp_sensors[attr->index].reg, val); ++ return count; ++} ++ ++static SENSOR_DEVICE_ATTR(temp0_input, S_IRUGO, show_adc, NULL, 0); ++static SENSOR_DEVICE_ATTR(temp0_label, S_IRUGO, show_label, NULL, 0); ++ ++static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_adc, NULL, 1); ++static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL, 1); ++static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_adc, NULL, 2); ++static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_label, NULL, 2); ++static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 3); ++static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_label, NULL, 3); ++static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 4); ++static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 4); ++static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 5); ++static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_label, NULL, 5); ++static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_adc, NULL, 6); ++static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_label, NULL, 6); ++static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_adc, NULL, 7); ++static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_label, NULL, 7); ++static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_adc, NULL, 8); ++static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 8); ++static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_adc, NULL, 9); ++static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 9); ++static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_adc, NULL, 10); ++static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 10); ++static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_adc, NULL, 11); ++static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, show_label, NULL, 11); ++static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_adc, NULL, 12); ++static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, show_label, NULL, 12); ++static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_adc, NULL, 13); ++static 
SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, show_label, NULL, 13); ++ ++static SENSOR_DEVICE_ATTR(fan0_point0, S_IRUGO | S_IWUSR, show_adc, store_fan, 14); ++static SENSOR_DEVICE_ATTR(fan0_point1, S_IRUGO | S_IWUSR, show_adc, store_fan, 15); ++static SENSOR_DEVICE_ATTR(fan0_point2, S_IRUGO | S_IWUSR, show_adc, store_fan, 16); ++static SENSOR_DEVICE_ATTR(fan0_point3, S_IRUGO | S_IWUSR, show_adc, store_fan, 17); ++static SENSOR_DEVICE_ATTR(fan0_point4, S_IRUGO | S_IWUSR, show_adc, store_fan, 18); ++static SENSOR_DEVICE_ATTR(fan0_point5, S_IRUGO | S_IWUSR, show_adc, store_fan, 19); ++ ++static struct attribute *gsp_attributes[] = { ++ &sensor_dev_attr_temp0_input.dev_attr.attr, ++ &sensor_dev_attr_in0_input.dev_attr.attr, ++ &sensor_dev_attr_in1_input.dev_attr.attr, ++ &sensor_dev_attr_in2_input.dev_attr.attr, ++ &sensor_dev_attr_in3_input.dev_attr.attr, ++ &sensor_dev_attr_in4_input.dev_attr.attr, ++ &sensor_dev_attr_in5_input.dev_attr.attr, ++ &sensor_dev_attr_in6_input.dev_attr.attr, ++ &sensor_dev_attr_in7_input.dev_attr.attr, ++ &sensor_dev_attr_in8_input.dev_attr.attr, ++ &sensor_dev_attr_in9_input.dev_attr.attr, ++ &sensor_dev_attr_in10_input.dev_attr.attr, ++ &sensor_dev_attr_in11_input.dev_attr.attr, ++ &sensor_dev_attr_in12_input.dev_attr.attr, ++ ++ &sensor_dev_attr_temp0_label.dev_attr.attr, ++ &sensor_dev_attr_in0_label.dev_attr.attr, ++ &sensor_dev_attr_in1_label.dev_attr.attr, ++ &sensor_dev_attr_in2_label.dev_attr.attr, ++ &sensor_dev_attr_in3_label.dev_attr.attr, ++ &sensor_dev_attr_in4_label.dev_attr.attr, ++ &sensor_dev_attr_in5_label.dev_attr.attr, ++ &sensor_dev_attr_in6_label.dev_attr.attr, ++ &sensor_dev_attr_in7_label.dev_attr.attr, ++ &sensor_dev_attr_in8_label.dev_attr.attr, ++ &sensor_dev_attr_in9_label.dev_attr.attr, ++ &sensor_dev_attr_in10_label.dev_attr.attr, ++ &sensor_dev_attr_in11_label.dev_attr.attr, ++ &sensor_dev_attr_in12_label.dev_attr.attr, ++ ++ &sensor_dev_attr_fan0_point0.dev_attr.attr, ++ &sensor_dev_attr_fan0_point1.dev_attr.attr, ++ &sensor_dev_attr_fan0_point2.dev_attr.attr, ++ &sensor_dev_attr_fan0_point3.dev_attr.attr, ++ &sensor_dev_attr_fan0_point4.dev_attr.attr, ++ &sensor_dev_attr_fan0_point5.dev_attr.attr, ++ NULL ++}; ++ ++ ++static int gsp_probe(struct i2c_client *client, ++ const struct i2c_device_id *id) ++{ ++ struct i2c_adapter *adapter = client->adapter; ++ struct gsp_data *data; ++ int err; ++ ++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | ++ I2C_FUNC_SMBUS_WORD_DATA)) { ++ err = -EOPNOTSUPP; ++ goto exit; ++ } ++ ++ if (!(data = kzalloc(sizeof(struct gsp_data), GFP_KERNEL))) { ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ i2c_set_clientdata(client, data); ++ ++ data->type = id->driver_data; ++ ++ switch (data->type) { ++ case 0: ++ data->attrs.attrs = gsp_attributes; ++ break; ++ } ++ ++ dev_info(&client->dev, "%s chip found\n", client->name); ++ ++ /* Register sysfs hooks */ ++ if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) ++ goto exit_free; ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ err = PTR_ERR(data->hwmon_dev); ++ goto exit_remove; ++ } ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &data->attrs); ++exit_free: ++ kfree(data); ++exit: ++ return err; ++} ++ ++static int gsp_remove(struct i2c_client *client) ++{ ++ struct gsp_data *data = i2c_get_clientdata(client); ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&client->dev.kobj, &data->attrs); ++ kfree(data); ++ return 0; ++} ++ ++static int 
__init gsp_init(void) ++{ ++ return i2c_add_driver(&gsp_driver); ++} ++ ++static void __exit gsp_exit(void) ++{ ++ i2c_del_driver(&gsp_driver); ++} ++ ++module_init(gsp_init); ++module_exit(gsp_exit); ++ ++MODULE_AUTHOR("Chris Lang <clang@gateworks.com>"); ++MODULE_DESCRIPTION("GSC HWMON driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(DRV_VERSION); ++ diff --git a/target/linux/generic/patches-3.8/900-slab_maxsize.patch b/target/linux/generic/patches-3.8/900-slab_maxsize.patch new file mode 100644 index 0000000000..8c404ea6c3 --- /dev/null +++ b/target/linux/generic/patches-3.8/900-slab_maxsize.patch @@ -0,0 +1,13 @@ +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -156,8 +156,8 @@ void kmem_cache_free(struct kmem_cache * + * to do various tricks to work around compiler limitations in order to + * ensure proper constant folding. + */ +-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \ +- (MAX_ORDER + PAGE_SHIFT - 1) : 25) ++#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 17 ? \ ++ (MAX_ORDER + PAGE_SHIFT - 1) : 17) + + #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH) + #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) diff --git a/target/linux/generic/patches-3.8/901-debloat_sock_diag.patch b/target/linux/generic/patches-3.8/901-debloat_sock_diag.patch new file mode 100644 index 0000000000..4065f8aeae --- /dev/null +++ b/target/linux/generic/patches-3.8/901-debloat_sock_diag.patch @@ -0,0 +1,46 @@ +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -87,6 +87,9 @@ source "net/netlabel/Kconfig" + + endif # if INET + ++config SOCK_DIAG ++ bool ++ + config NETWORK_SECMARK + bool "Security Marking" + help +--- a/net/core/Makefile ++++ b/net/core/Makefile +@@ -8,9 +8,9 @@ obj-y := sock.o request_sock.o skbuff.o + obj-$(CONFIG_SYSCTL) += sysctl_net_core.o + + obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ +- neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ +- sock_diag.o ++ neighbour.o rtnetlink.o utils.o link_watch.o filter.o + ++obj-$(CONFIG_SOCK_DIAG) += sock_diag.o + obj-$(CONFIG_XFRM) += flow.o + obj-y += net-sysfs.o + obj-$(CONFIG_NET_PKTGEN) += pktgen.o +--- a/net/ipv4/Kconfig ++++ b/net/ipv4/Kconfig +@@ -404,6 +404,7 @@ config INET_LRO + + config INET_DIAG + tristate "INET: socket monitoring interface" ++ select SOCK_DIAG + default y + ---help--- + Support for INET (TCP, DCCP, etc) socket monitoring interface used by +--- a/net/unix/Kconfig ++++ b/net/unix/Kconfig +@@ -22,6 +22,7 @@ config UNIX + config UNIX_DIAG + tristate "UNIX: socket monitoring interface" + depends on UNIX ++ select SOCK_DIAG + default n + ---help--- + Support for UNIX socket monitoring interface used by the ss tool. diff --git a/target/linux/generic/patches-3.8/902-debloat_proc.patch b/target/linux/generic/patches-3.8/902-debloat_proc.patch new file mode 100644 index 0000000000..e994c5b883 --- /dev/null +++ b/target/linux/generic/patches-3.8/902-debloat_proc.patch @@ -0,0 +1,351 @@ +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2267,6 +2267,8 @@ static const struct file_operations proc + + static int __init proc_locks_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; + proc_create("locks", 0, NULL, &proc_locks_operations); + return 0; + } +--- a/fs/proc/Kconfig ++++ b/fs/proc/Kconfig +@@ -67,3 +67,8 @@ config PROC_PAGE_MONITOR + /proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap, + /proc/kpagecount, and /proc/kpageflags. Disabling these + interfaces will reduce the size of the kernel by approximately 4kb. 
++ ++config PROC_STRIPPED ++ default n ++ depends on EXPERT ++ bool "Strip non-essential /proc functionality to reduce code size" +--- a/fs/proc/consoles.c ++++ b/fs/proc/consoles.c +@@ -108,6 +108,9 @@ static const struct file_operations proc + + static int __init proc_consoles_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; ++ + proc_create("consoles", 0, NULL, &proc_consoles_operations); + return 0; + } +--- a/fs/proc/proc_tty.c ++++ b/fs/proc/proc_tty.c +@@ -143,7 +143,10 @@ static const struct file_operations proc + void proc_tty_register_driver(struct tty_driver *driver) + { + struct proc_dir_entry *ent; +- ++ ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + if (!driver->driver_name || driver->proc_entry || + !driver->ops->proc_fops) + return; +@@ -160,6 +163,9 @@ void proc_tty_unregister_driver(struct t + { + struct proc_dir_entry *ent; + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + ent = driver->proc_entry; + if (!ent) + return; +@@ -174,6 +180,9 @@ void proc_tty_unregister_driver(struct t + */ + void __init proc_tty_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + if (!proc_mkdir("tty", NULL)) + return; + proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL); +--- a/kernel/exec_domain.c ++++ b/kernel/exec_domain.c +@@ -173,6 +173,8 @@ static const struct file_operations exec + + static int __init proc_execdomains_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; + proc_create("execdomains", 0, NULL, &execdomains_proc_fops); + return 0; + } +--- a/kernel/irq/proc.c ++++ b/kernel/irq/proc.c +@@ -311,6 +311,9 @@ void register_irq_proc(unsigned int irq, + { + char name [MAX_NAMELEN]; + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) + return; + +@@ -347,6 +350,9 @@ void unregister_irq_proc(unsigned int ir + { + char name [MAX_NAMELEN]; + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + if (!root_irq_dir || !desc->dir) + return; + #ifdef CONFIG_SMP +@@ -386,6 +392,9 @@ void init_irq_proc(void) + unsigned int irq; + struct irq_desc *desc; + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + /* create /proc/irq */ + root_irq_dir = proc_mkdir("irq", NULL); + if (!root_irq_dir) +--- a/kernel/time/timer_list.c ++++ b/kernel/time/timer_list.c +@@ -293,6 +293,8 @@ static int __init init_timer_list_procfs + { + struct proc_dir_entry *pe; + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; + pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); + if (!pe) + return -ENOMEM; +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -2638,6 +2638,8 @@ static const struct file_operations proc + + static int __init proc_vmalloc_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; + proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); + return 0; + } +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -1238,10 +1238,12 @@ static int __init setup_vmstat(void) + start_cpu_timer(cpu); + #endif + #ifdef CONFIG_PROC_FS +- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); +- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED)) { ++ proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); ++ proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); ++ proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); ++ } + proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); +- 
proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); + #endif + return 0; + } +--- a/net/8021q/vlanproc.c ++++ b/net/8021q/vlanproc.c +@@ -127,6 +127,9 @@ void vlan_proc_cleanup(struct net *net) + { + struct vlan_net *vn = net_generic(net, vlan_net_id); + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + if (vn->proc_vlan_conf) + remove_proc_entry(name_conf, vn->proc_vlan_dir); + +@@ -146,6 +149,9 @@ int __net_init vlan_proc_init(struct net + { + struct vlan_net *vn = net_generic(net, vlan_net_id); + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; ++ + vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net); + if (!vn->proc_vlan_dir) + goto err; +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -4578,9 +4578,11 @@ static int __net_init dev_proc_net_init( + + if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) + goto out; +- if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED) && ++ !proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) + goto out_dev; +- if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED) && ++ !proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) + goto out_softnet; + + if (wext_proc_init(net)) +@@ -4589,9 +4591,11 @@ static int __net_init dev_proc_net_init( + out: + return rc; + out_ptype: +- proc_net_remove(net, "ptype"); ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ proc_net_remove(net, "ptype"); + out_softnet: +- proc_net_remove(net, "softnet_stat"); ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ proc_net_remove(net, "softnet_stat"); + out_dev: + proc_net_remove(net, "dev"); + goto out; +@@ -4601,8 +4605,10 @@ static void __net_exit dev_proc_net_exit + { + wext_proc_exit(net); + +- proc_net_remove(net, "ptype"); +- proc_net_remove(net, "softnet_stat"); ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED)) { ++ proc_net_remove(net, "ptype"); ++ proc_net_remove(net, "softnet_stat"); ++ } + proc_net_remove(net, "dev"); + } + +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2837,6 +2837,8 @@ static __net_initdata struct pernet_oper + + static int __init proto_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; + return register_pernet_subsys(&proto_net_ops); + } + +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -2607,10 +2607,12 @@ static const struct file_operations fib_ + + int __net_init fib_proc_init(struct net *net) + { +- if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops)) ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED) && ++ !proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops)) + goto out1; + +- if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO, ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED) && ++ !proc_net_fops_create(net, "fib_triestat", S_IRUGO, + &fib_triestat_fops)) + goto out2; + +@@ -2629,8 +2631,10 @@ out1: + + void __net_exit fib_proc_exit(struct net *net) + { +- proc_net_remove(net, "fib_trie"); +- proc_net_remove(net, "fib_triestat"); ++ if (!IS_ENABLED(CONFIG_PROC_STRIPPED)) { ++ proc_net_remove(net, "fib_trie"); ++ proc_net_remove(net, "fib_triestat"); ++ } + proc_net_remove(net, "route"); + } + +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -2673,6 +2673,8 @@ static struct pernet_operations igmp_net + + int __init igmp_mc_proc_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; + return register_pernet_subsys(&igmp_net_ops); + } + #endif +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -71,6 +71,10 @@ + 
#define CONFIG_IP_PIMSM 1 + #endif + ++#ifdef CONFIG_PROC_STRIPPED ++#undef CONFIG_PROC_FS ++#endif ++ + struct mr_table { + struct list_head list; + #ifdef CONFIG_NET_NS +--- a/net/ipv4/proc.c ++++ b/net/ipv4/proc.c +@@ -502,6 +502,9 @@ static __net_initdata struct pernet_oper + + int __init ip_misc_proc_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; ++ + return register_pernet_subsys(&ip_proc_ops); + } + +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -427,6 +427,9 @@ static struct pernet_operations ip_rt_pr + + static int __init ip_rt_proc_init(void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return 0; ++ + return register_pernet_subsys(&ip_rt_proc_ops); + } + +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -135,6 +135,9 @@ void __init msg_init(void) + printk(KERN_INFO "msgmni has been set to %d\n", + init_ipc_ns.msg_ctlmni); + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + ipc_init_proc_interface("sysvipc/msg", + " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", + IPC_MSG_IDS, sysvipc_msg_proc_show); +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -185,6 +185,8 @@ void sem_exit_ns(struct ipc_namespace *n + void __init sem_init (void) + { + sem_init_ns(&init_ipc_ns); ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; + ipc_init_proc_interface("sysvipc/sem", + " key semid perms nsems uid gid cuid cgid otime ctime\n", + IPC_SEM_IDS, sysvipc_sem_proc_show); +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -115,6 +115,8 @@ pure_initcall(ipc_ns_init); + + void __init shm_init (void) + { ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; + ipc_init_proc_interface("sysvipc/shm", + #if BITS_PER_LONG <= 32 + " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", +--- a/ipc/util.c ++++ b/ipc/util.c +@@ -148,6 +148,9 @@ void __init ipc_init_proc_interface(cons + struct proc_dir_entry *pde; + struct ipc_proc_iface *iface; + ++ if (IS_ENABLED(CONFIG_PROC_STRIPPED)) ++ return; ++ + iface = kmalloc(sizeof(*iface), GFP_KERNEL); + if (!iface) + return; diff --git a/target/linux/generic/patches-3.8/903-debloat_direct_io.patch b/target/linux/generic/patches-3.8/903-debloat_direct_io.patch new file mode 100644 index 0000000000..30d0238fde --- /dev/null +++ b/target/linux/generic/patches-3.8/903-debloat_direct_io.patch @@ -0,0 +1,84 @@ +--- a/fs/Kconfig ++++ b/fs/Kconfig +@@ -62,6 +62,11 @@ config FILE_LOCKING + for filesystems like NFS and for the flock() system + call. Disabling this option saves about 11k. + ++config DIRECT_IO ++ bool "Enable O_DIRECT support" if EXPERT ++ depends on BLOCK ++ default y ++ + source "fs/notify/Kconfig" + + source "fs/quota/Kconfig" +--- a/fs/Makefile ++++ b/fs/Makefile +@@ -14,7 +14,8 @@ obj-y := open.o read_write.o file_table. 
+ stack.o fs_struct.o statfs.o + + ifeq ($(CONFIG_BLOCK),y) +-obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o ++obj-y += buffer.o bio.o block_dev.o mpage.o ioprio.o ++obj-$(CONFIG_DIRECT_IO) += direct-io.o + else + obj-y += no-block.o + endif +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -2442,12 +2442,26 @@ enum { + DIO_SKIP_HOLES = 0x02, + }; + ++#ifdef CONFIG_DIRECT_IO + void dio_end_io(struct bio *bio, int error); + + ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, const struct iovec *iov, loff_t offset, + unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, + dio_submit_t submit_io, int flags); ++#else ++static inline void dio_end_io(struct bio *bio, int error) ++{ ++} ++static inline ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, ++ struct block_device *bdev, const struct iovec *iov, loff_t offset, ++ unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, ++ dio_submit_t submit_io, int flags) ++{ ++ return -EOPNOTSUPP; ++} ++#endif ++ + + static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, + struct inode *inode, const struct iovec *iov, loff_t offset, +--- a/fs/fcntl.c ++++ b/fs/fcntl.c +@@ -51,8 +51,10 @@ static int setfl(int fd, struct file * f + arg |= O_NONBLOCK; + + if (arg & O_DIRECT) { ++#ifdef CONFIG_DIRECT_IO + if (!filp->f_mapping || !filp->f_mapping->a_ops || + !filp->f_mapping->a_ops->direct_IO) ++#endif + return -EINVAL; + } + +--- a/fs/open.c ++++ b/fs/open.c +@@ -665,9 +665,12 @@ int open_check_o_direct(struct file *f) + { + /* NB: we're sure to have correct a_ops only after f_op->open */ + if (f->f_flags & O_DIRECT) { ++#ifdef CONFIG_DIRECT_IO + if (!f->f_mapping->a_ops || + ((!f->f_mapping->a_ops->direct_IO) && +- (!f->f_mapping->a_ops->get_xip_mem))) { ++ (!f->f_mapping->a_ops->get_xip_mem))) ++#endif ++ { + return -EINVAL; + } + } diff --git a/target/linux/generic/patches-3.8/910-kobject_uevent.patch b/target/linux/generic/patches-3.8/910-kobject_uevent.patch new file mode 100644 index 0000000000..9ee9cff670 --- /dev/null +++ b/target/linux/generic/patches-3.8/910-kobject_uevent.patch @@ -0,0 +1,21 @@ +--- a/lib/kobject_uevent.c ++++ b/lib/kobject_uevent.c +@@ -51,6 +51,18 @@ static const char *kobject_actions[] = { + [KOBJ_OFFLINE] = "offline", + }; + ++u64 uevent_next_seqnum(void) ++{ ++ u64 seq; ++ ++ mutex_lock(&uevent_sock_mutex); ++ seq = ++uevent_seqnum; ++ mutex_unlock(&uevent_sock_mutex); ++ ++ return seq; ++} ++EXPORT_SYMBOL_GPL(uevent_next_seqnum); ++ + /** + * kobject_action_type - translate action string to numeric type + * diff --git a/target/linux/generic/patches-3.8/911-kobject_add_broadcast_uevent.patch b/target/linux/generic/patches-3.8/911-kobject_add_broadcast_uevent.patch new file mode 100644 index 0000000000..14c55bc6d2 --- /dev/null +++ b/target/linux/generic/patches-3.8/911-kobject_add_broadcast_uevent.patch @@ -0,0 +1,65 @@ +--- a/include/linux/kobject.h ++++ b/include/linux/kobject.h +@@ -31,6 +31,8 @@ + #define UEVENT_NUM_ENVP 32 /* number of env pointers */ + #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ + ++struct sk_buff; ++ + /* path to the userspace helper executed on an event */ + extern char uevent_helper[]; + +@@ -213,4 +215,7 @@ int add_uevent_var(struct kobj_uevent_en + int kobject_action_type(const char *buf, size_t count, + enum kobject_action *type); + ++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group, ++ gfp_t allocation); ++ + #endif 
/* _KOBJECT_H_ */ +--- a/lib/kobject_uevent.c ++++ b/lib/kobject_uevent.c +@@ -382,6 +382,43 @@ int add_uevent_var(struct kobj_uevent_en + EXPORT_SYMBOL_GPL(add_uevent_var); + + #if defined(CONFIG_NET) ++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group, ++ gfp_t allocation) ++{ ++ struct uevent_sock *ue_sk; ++ int err = 0; ++ ++ /* send netlink message */ ++ mutex_lock(&uevent_sock_mutex); ++ list_for_each_entry(ue_sk, &uevent_sock_list, list) { ++ struct sock *uevent_sock = ue_sk->sk; ++ struct sk_buff *skb2; ++ ++ skb2 = skb_clone(skb, allocation); ++ if (!skb2) ++ break; ++ ++ err = netlink_broadcast(uevent_sock, skb2, pid, group, ++ allocation); ++ if (err) ++ break; ++ } ++ mutex_unlock(&uevent_sock_mutex); ++ ++ kfree_skb(skb); ++ return err; ++} ++#else ++int broadcast_uevent(struct sk_buff *skb, __u32 pid, __u32 group, ++ gfp_t allocation) ++{ ++ kfree_skb(skb); ++ return 0; ++} ++#endif ++EXPORT_SYMBOL_GPL(broadcast_uevent); ++ ++#if defined(CONFIG_NET) + static int uevent_net_init(struct net *net) + { + struct uevent_sock *ue_sk; diff --git a/target/linux/generic/patches-3.8/920-unable_to_open_console.patch b/target/linux/generic/patches-3.8/920-unable_to_open_console.patch new file mode 100644 index 0000000000..5a1e2f4a65 --- /dev/null +++ b/target/linux/generic/patches-3.8/920-unable_to_open_console.patch @@ -0,0 +1,11 @@ +--- a/init/main.c ++++ b/init/main.c +@@ -878,7 +878,7 @@ static void __init kernel_init_freeable( + + /* Open the /dev/console on the rootfs, this should never fail */ + if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) +- printk(KERN_WARNING "Warning: unable to open an initial console.\n"); ++ printk(KERN_WARNING "Please be patient, while OpenWrt loads ...\n"); + + (void) sys_dup(0); + (void) sys_dup(0); diff --git a/target/linux/generic/patches-3.8/921-use_preinit_as_init.patch b/target/linux/generic/patches-3.8/921-use_preinit_as_init.patch new file mode 100644 index 0000000000..4cc3695684 --- /dev/null +++ b/target/linux/generic/patches-3.8/921-use_preinit_as_init.patch @@ -0,0 +1,14 @@ +--- a/init/main.c ++++ b/init/main.c +@@ -835,10 +835,7 @@ static int __ref kernel_init(void *unuse + printk(KERN_WARNING "Failed to execute %s. Attempting " + "defaults...\n", execute_command); + } +- if (!run_init_process("/sbin/init") || +- !run_init_process("/etc/init") || +- !run_init_process("/bin/init") || +- !run_init_process("/bin/sh")) ++ if (!run_init_process("/etc/preinit")) + return 0; + + panic("No init found. Try passing init= option to kernel. " diff --git a/target/linux/generic/patches-3.8/930-crashlog.patch b/target/linux/generic/patches-3.8/930-crashlog.patch new file mode 100644 index 0000000000..453161c4c2 --- /dev/null +++ b/target/linux/generic/patches-3.8/930-crashlog.patch @@ -0,0 +1,276 @@ +--- /dev/null ++++ b/include/linux/crashlog.h +@@ -0,0 +1,17 @@ ++#ifndef __CRASHLOG_H ++#define __CRASHLOG_H ++ ++#ifdef CONFIG_CRASHLOG ++void crashlog_init_bootmem(struct bootmem_data *bdata); ++void crashlog_init_memblock(phys_addr_t addr, phys_addr_t size); ++#else ++static inline void crashlog_init_bootmem(struct bootmem_data *bdata) ++{ ++} ++ ++static inline void crashlog_init_memblock(phys_addr_t addr, phys_addr_t size) ++{ ++} ++#endif ++ ++#endif +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1154,6 +1154,10 @@ config RELAY + + If unsure, say N. 
+ ++config CRASHLOG ++ bool "Crash logging" ++ depends on (!NO_BOOTMEM || HAVE_MEMBLOCK) && !(ARM || SPARC || PPC) ++ + config BLK_DEV_INITRD + bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" + depends on BROKEN || !FRV +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -111,6 +111,7 @@ obj-$(CONFIG_PADATA) += padata.o + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o + obj-$(CONFIG_JUMP_LABEL) += jump_label.o + obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o ++obj-$(CONFIG_CRASHLOG) += crashlog.o + + $(obj)/configs.o: $(obj)/config_data.h + +--- /dev/null ++++ b/kernel/crashlog.c +@@ -0,0 +1,181 @@ ++/* ++ * Crash information logger ++ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> ++ * ++ * Based on ramoops.c ++ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com> ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ * ++ */ ++ ++#include <linux/module.h> ++#include <linux/bootmem.h> ++#include <linux/memblock.h> ++#include <linux/debugfs.h> ++#include <linux/crashlog.h> ++#include <linux/kmsg_dump.h> ++#include <linux/module.h> ++#include <linux/pfn.h> ++#include <asm/io.h> ++ ++#define CRASHLOG_PAGES 4 ++#define CRASHLOG_SIZE (CRASHLOG_PAGES * PAGE_SIZE) ++#define CRASHLOG_MAGIC 0xa1eedead ++ ++/* ++ * Start the log at 1M before the end of RAM, as some boot loaders like ++ * to use the end of the RAM for stack usage and other things ++ * If this fails, fall back to using the last part. 
++ */ ++#define CRASHLOG_OFFSET (1024 * 1024) ++ ++struct crashlog_data { ++ u32 magic; ++ u32 len; ++ u8 data[]; ++}; ++ ++static struct debugfs_blob_wrapper crashlog_blob; ++static unsigned long crashlog_addr = 0; ++static struct crashlog_data *crashlog_buf; ++static struct kmsg_dumper dump; ++static bool first = true; ++ ++extern struct list_head *crashlog_modules; ++ ++#ifndef CONFIG_NO_BOOTMEM ++void __init crashlog_init_bootmem(bootmem_data_t *bdata) ++{ ++ unsigned long addr; ++ ++ if (crashlog_addr) ++ return; ++ ++ addr = PFN_PHYS(bdata->node_low_pfn) - CRASHLOG_OFFSET; ++ if (reserve_bootmem(addr, CRASHLOG_SIZE, BOOTMEM_EXCLUSIVE) < 0) { ++ printk("Crashlog failed to allocate RAM at address 0x%lx\n", addr); ++ bdata->node_low_pfn -= CRASHLOG_PAGES; ++ addr = PFN_PHYS(bdata->node_low_pfn); ++ } ++ crashlog_addr = addr; ++} ++#endif ++ ++#ifdef CONFIG_HAVE_MEMBLOCK ++void __meminit crashlog_init_memblock(phys_addr_t addr, phys_addr_t size) ++{ ++ if (crashlog_addr) ++ return; ++ ++ addr += size - CRASHLOG_OFFSET; ++ if (memblock_reserve(addr, CRASHLOG_SIZE)) { ++ printk("Crashlog failed to allocate RAM at address 0x%lx\n", (unsigned long) addr); ++ return; ++ } ++ ++ crashlog_addr = addr; ++} ++#endif ++ ++static void __init crashlog_copy(void) ++{ ++ if (crashlog_buf->magic != CRASHLOG_MAGIC) ++ return; ++ ++ if (!crashlog_buf->len || crashlog_buf->len > ++ CRASHLOG_SIZE - sizeof(*crashlog_buf)) ++ return; ++ ++ crashlog_blob.size = crashlog_buf->len; ++ crashlog_blob.data = kmemdup(crashlog_buf->data, ++ crashlog_buf->len, GFP_KERNEL); ++ ++ debugfs_create_blob("crashlog", 0700, NULL, &crashlog_blob); ++} ++ ++static int get_maxlen(void) ++{ ++ return CRASHLOG_SIZE - sizeof(*crashlog_buf) - crashlog_buf->len; ++} ++ ++static void crashlog_printf(const char *fmt, ...) 
++{ ++ va_list args; ++ int len = get_maxlen(); ++ ++ if (!len) ++ return; ++ ++ va_start(args, fmt); ++ crashlog_buf->len += vsnprintf( ++ &crashlog_buf->data[crashlog_buf->len], ++ len, fmt, args); ++ va_end(args); ++} ++ ++static void crashlog_do_dump(struct kmsg_dumper *dumper, ++ enum kmsg_dump_reason reason) ++{ ++ struct timeval tv; ++ struct module *m; ++ char *buf; ++ size_t len; ++ ++ if (!first) ++ crashlog_printf("\n===================================\n"); ++ ++ do_gettimeofday(&tv); ++ crashlog_printf("Time: %lu.%lu\n", ++ (long)tv.tv_sec, (long)tv.tv_usec); ++ ++ if (first) { ++ crashlog_printf("Modules:"); ++ list_for_each_entry(m, crashlog_modules, list) { ++ crashlog_printf("\t%s@%p+%x", m->name, ++ m->module_core, m->core_size, ++ m->module_init, m->init_size); ++ } ++ crashlog_printf("\n"); ++ first = false; ++ } ++ ++ buf = (char *)&crashlog_buf->data[crashlog_buf->len]; ++ ++ kmsg_dump_get_buffer(dumper, true, buf, get_maxlen(), &len); ++ ++ crashlog_buf->len += len; ++} ++ ++ ++int __init crashlog_init_fs(void) ++{ ++ if (!crashlog_addr) ++ return -ENOMEM; ++ ++ crashlog_buf = ioremap(crashlog_addr, CRASHLOG_SIZE); ++ ++ crashlog_copy(); ++ ++ crashlog_buf->magic = CRASHLOG_MAGIC; ++ crashlog_buf->len = 0; ++ ++ dump.max_reason = KMSG_DUMP_OOPS; ++ dump.dump = crashlog_do_dump; ++ kmsg_dump_register(&dump); ++ ++ return 0; ++} ++module_init(crashlog_init_fs); +--- a/mm/bootmem.c ++++ b/mm/bootmem.c +@@ -15,6 +15,7 @@ + #include <linux/export.h> + #include <linux/kmemleak.h> + #include <linux/range.h> ++#include <linux/crashlog.h> + #include <linux/memblock.h> + + #include <asm/bug.h> +@@ -177,6 +178,7 @@ static unsigned long __init free_all_boo + if (!bdata->node_bootmem_map) + return 0; + ++ crashlog_init_bootmem(bdata); + start = bdata->node_min_pfn; + end = bdata->node_low_pfn; + +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -106,6 +106,9 @@ static LIST_HEAD(modules); + #ifdef CONFIG_KGDB_KDB + struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ + #endif /* CONFIG_KGDB_KDB */ ++#ifdef CONFIG_CRASHLOG ++struct list_head *crashlog_modules = &modules; ++#endif + + #ifdef CONFIG_MODULE_SIG + #ifdef CONFIG_MODULE_SIG_FORCE +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -19,6 +19,7 @@ + #include <linux/debugfs.h> + #include <linux/seq_file.h> + #include <linux/memblock.h> ++#include <linux/crashlog.h> + + static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; + static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; +@@ -342,6 +343,8 @@ static void __init_memblock memblock_ins + memblock_set_region_node(rgn, nid); + type->cnt++; + type->total_size += size; ++ if (type == &memblock.memory && idx == 0) ++ crashlog_init_memblock(base, size); + } + + /** diff --git a/target/linux/generic/patches-3.8/940-ocf_kbuild_integration.patch b/target/linux/generic/patches-3.8/940-ocf_kbuild_integration.patch new file mode 100644 index 0000000000..99781e16d0 --- /dev/null +++ b/target/linux/generic/patches-3.8/940-ocf_kbuild_integration.patch @@ -0,0 +1,20 @@ +--- a/crypto/Kconfig ++++ b/crypto/Kconfig +@@ -1268,3 +1268,6 @@ source "drivers/crypto/Kconfig" + source crypto/asymmetric_keys/Kconfig + + endif # if CRYPTO ++ ++source "crypto/ocf/Kconfig" ++ +--- a/crypto/Makefile ++++ b/crypto/Makefile +@@ -94,6 +94,8 @@ obj-$(CONFIG_CRYPTO_USER_API) += af_alg. 
+ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o + obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o + ++obj-$(CONFIG_OCF_OCF) += ocf/ ++ + # + # generic algorithms and the async_tx api + # diff --git a/target/linux/generic/patches-3.8/941-ocf_20120127.patch b/target/linux/generic/patches-3.8/941-ocf_20120127.patch new file mode 100644 index 0000000000..3c2ba0d38d --- /dev/null +++ b/target/linux/generic/patches-3.8/941-ocf_20120127.patch @@ -0,0 +1,166 @@ +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -139,6 +139,9 @@ + * that might otherwise be identical and have very little entropy + * available to them (particularly common in the embedded world). + * ++ * void random_input_words(__u32 *buf, size_t wordcount, int ent_count) ++ * int random_input_wait(void); ++ * + * add_input_randomness() uses the input layer interrupt timing, as well as + * the event type information from the hardware. + * +@@ -152,6 +155,13 @@ + * seek times do not make for good sources of entropy, as their seek + * times are usually fairly consistent. + * ++ * random_input_words() just provides a raw block of entropy to the input ++ * pool, such as from a hardware entropy generator. ++ * ++ * random_input_wait() suspends the caller until such time as the ++ * entropy pool falls below the write threshold, and returns a count of how ++ * much entropy (in bits) is needed to sustain the pool. ++ * + * All of these routines try to estimate how many bits of randomness a + * particular randomness source. They do this by keeping track of the + * first and second order deltas of the event timings. +@@ -796,6 +806,63 @@ void add_disk_randomness(struct gendisk + } + #endif + ++/* ++ * random_input_words - add bulk entropy to pool ++ * ++ * @buf: buffer to add ++ * @wordcount: number of __u32 words to add ++ * @ent_count: total amount of entropy (in bits) to credit ++ * ++ * this provides bulk input of entropy to the input pool ++ * ++ */ ++void random_input_words(__u32 *buf, size_t wordcount, int ent_count) ++{ ++ mix_pool_bytes(&input_pool, buf, wordcount*4, NULL); ++ ++ credit_entropy_bits(&input_pool, ent_count); ++ ++ DEBUG_ENT("crediting %d bits => %d\n", ++ ent_count, input_pool.entropy_count); ++ /* ++ * Wake up waiting processes if we have enough ++ * entropy. ++ */ ++ if (input_pool.entropy_count >= random_read_wakeup_thresh) ++ wake_up_interruptible(&random_read_wait); ++} ++EXPORT_SYMBOL(random_input_words); ++ ++/* ++ * random_input_wait - wait until random needs entropy ++ * ++ * this function sleeps until the /dev/random subsystem actually ++ * needs more entropy, and then return the amount of entropy ++ * that it would be nice to have added to the system. 
++ */ ++int random_input_wait(void) ++{ ++ int count; ++ ++ wait_event_interruptible(random_write_wait, ++ input_pool.entropy_count < random_write_wakeup_thresh); ++ ++ count = random_write_wakeup_thresh - input_pool.entropy_count; ++ ++ /* likely we got woken up due to a signal */ ++ if (count <= 0) count = random_read_wakeup_thresh; ++ ++ DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n", ++ count, ++ input_pool.entropy_count, random_write_wakeup_thresh); ++ ++ return count; ++} ++EXPORT_SYMBOL(random_input_wait); ++ ++ ++#define EXTRACT_SIZE 10 ++ + /********************************************************************* + * + * Entropy extraction routines +--- a/fs/fcntl.c ++++ b/fs/fcntl.c +@@ -148,6 +148,7 @@ pid_t f_getown(struct file *filp) + read_unlock(&filp->f_owner.lock); + return pid; + } ++EXPORT_SYMBOL(sys_dup); + + static int f_setown_ex(struct file *filp, unsigned long arg) + { +--- a/include/linux/miscdevice.h ++++ b/include/linux/miscdevice.h +@@ -19,6 +19,7 @@ + #define APOLLO_MOUSE_MINOR 7 + #define PC110PAD_MINOR 9 + /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ ++#define CRYPTODEV_MINOR 70 /* /dev/crypto */ + #define WATCHDOG_MINOR 130 /* Watchdog timer */ + #define TEMP_MINOR 131 /* Temperature Sensor */ + #define RTC_MINOR 135 +--- a/include/uapi/linux/random.h ++++ b/include/uapi/linux/random.h +@@ -34,6 +34,30 @@ + /* Clear the entropy pool and associated counters. (Superuser only.) */ + #define RNDCLEARPOOL _IO( 'R', 0x06 ) + ++#ifdef CONFIG_FIPS_RNG ++ ++/* Size of seed value - equal to AES blocksize */ ++#define AES_BLOCK_SIZE_BYTES 16 ++#define SEED_SIZE_BYTES AES_BLOCK_SIZE_BYTES ++/* Size of AES key */ ++#define KEY_SIZE_BYTES 16 ++ ++/* ioctl() structure used by FIPS 140-2 Tests */ ++struct rand_fips_test { ++ unsigned char key[KEY_SIZE_BYTES]; /* Input */ ++ unsigned char datetime[SEED_SIZE_BYTES]; /* Input */ ++ unsigned char seed[SEED_SIZE_BYTES]; /* Input */ ++ unsigned char result[SEED_SIZE_BYTES]; /* Output */ ++}; ++ ++/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */ ++#define RNDFIPSVST _IOWR('R', 0x10, struct rand_fips_test) ++ ++/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) 
*/ ++#define RNDFIPSMCT _IOWR('R', 0x11, struct rand_fips_test) ++ ++#endif /* #ifdef CONFIG_FIPS_RNG */ ++ + struct rand_pool_info { + int entropy_count; + int buf_size; +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -14,6 +14,10 @@ extern void add_input_randomness(unsigne + unsigned int value); + extern void add_interrupt_randomness(int irq, int irq_flags); + ++extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count); ++extern int random_input_wait(void); ++#define HAS_RANDOM_INPUT_WAIT 1 ++ + extern void get_random_bytes(void *buf, int nbytes); + extern void get_random_bytes_arch(void *buf, int nbytes); + void generate_random_uuid(unsigned char uuid_out[16]); +--- a/kernel/pid.c ++++ b/kernel/pid.c +@@ -421,6 +421,7 @@ void transfer_pid(struct task_struct *ol + new->pids[type].pid = old->pids[type].pid; + hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); + } ++EXPORT_SYMBOL(find_task_by_vpid); + + struct task_struct *pid_task(struct pid *pid, enum pid_type type) + { diff --git a/target/linux/generic/patches-3.8/950-vm_exports.patch b/target/linux/generic/patches-3.8/950-vm_exports.patch new file mode 100644 index 0000000000..1d8f954f6b --- /dev/null +++ b/target/linux/generic/patches-3.8/950-vm_exports.patch @@ -0,0 +1,101 @@ +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2865,6 +2865,15 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range); + + /* common code */ + ++void shmem_set_file(struct vm_area_struct *vma, struct file *file) ++{ ++ if (vma->vm_file) ++ fput(vma->vm_file); ++ vma->vm_file = file; ++ vma->vm_ops = &shmem_vm_ops; ++} ++EXPORT_SYMBOL_GPL(shmem_set_file); ++ + /** + * shmem_file_setup - get an unlinked file living in tmpfs + * @name: name for dentry (to be seen in /proc/<pid>/maps +@@ -2941,11 +2950,8 @@ int shmem_zero_setup(struct vm_area_stru + file = shmem_file_setup("dev/zero", size, vma->vm_flags); + if (IS_ERR(file)) + return PTR_ERR(file); ++ shmem_set_file(vma, file); + +- if (vma->vm_file) +- fput(vma->vm_file); +- vma->vm_file = file; +- vma->vm_ops = &shmem_vm_ops; + return 0; + } + +--- a/fs/file.c ++++ b/fs/file.c +@@ -270,6 +270,7 @@ static inline void __set_open_fd(int fd, + { + __set_bit(fd, fdt->open_fds); + } ++EXPORT_SYMBOL_GPL(expand_files); + + static inline void __clear_open_fd(int fd, struct fdtable *fdt) + { +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -499,6 +499,8 @@ static void exit_mm(struct task_struct * + mm_update_next_owner(mm); + mmput(mm); + } ++EXPORT_SYMBOL_GPL(get_files_struct); ++EXPORT_SYMBOL_GPL(put_files_struct); + + /* + * When we die, we re-parent all our children, and try to: +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3633,6 +3633,7 @@ int can_nice(const struct task_struct *p + return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || + capable(CAP_SYS_NICE)); + } ++EXPORT_SYMBOL_GPL(can_nice); + + #ifdef __ARCH_WANT_SYS_NICE + +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -1403,6 +1403,7 @@ void zap_page_range(struct vm_area_struc + mmu_notifier_invalidate_range_end(mm, start, end); + tlb_finish_mmu(&tlb, start, end); + } ++EXPORT_SYMBOL_GPL(zap_page_range); + + /** + * zap_page_range_single - remove user pages in a given range +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -1266,6 +1266,7 @@ void unmap_kernel_range(unsigned long ad + vunmap_page_range(addr, end); + flush_tlb_kernel_range(addr, end); + } ++EXPORT_SYMBOL_GPL(unmap_kernel_range); + + int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages) + { +@@ -1403,6 +1404,7 @@ struct vm_struct 
*get_vm_area(unsigned l + return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, + -1, GFP_KERNEL, __builtin_return_address(0)); + } ++EXPORT_SYMBOL_GPL(get_vm_area); + + struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, + const void *caller) +--- a/include/linux/shmem_fs.h ++++ b/include/linux/shmem_fs.h +@@ -47,6 +47,8 @@ extern int shmem_init(void); + extern int shmem_fill_super(struct super_block *sb, void *data, int silent); + extern struct file *shmem_file_setup(const char *name, + loff_t size, unsigned long flags); ++ ++extern void shmem_set_file(struct vm_area_struct *vma, struct file *file); + extern int shmem_zero_setup(struct vm_area_struct *); + extern int shmem_lock(struct file *file, int lock, struct user_struct *user); + extern void shmem_unlock_mapping(struct address_space *mapping); diff --git a/target/linux/generic/patches-3.8/960-decompress_unlzo_fix.patch b/target/linux/generic/patches-3.8/960-decompress_unlzo_fix.patch new file mode 100644 index 0000000000..ee7c118ef0 --- /dev/null +++ b/target/linux/generic/patches-3.8/960-decompress_unlzo_fix.patch @@ -0,0 +1,23 @@ +--- /dev/null ++++ b/include/linux/decompress/unlzo_mm.h +@@ -0,0 +1,10 @@ ++#ifndef UNLZO_MM_H ++#define UNLZO_MM_H ++ ++#ifdef STATIC ++#define INIT ++#else ++#define INIT __init ++#endif ++ ++#endif +--- a/lib/decompress_unlzo.c ++++ b/lib/decompress_unlzo.c +@@ -38,6 +38,7 @@ + + #include <linux/types.h> + #include <linux/lzo.h> ++#include <linux/decompress/unlzo_mm.h> + #include <linux/decompress/mm.h> + + #include <linux/compiler.h> diff --git a/target/linux/generic/patches-3.8/970-remove-unsane-filenames-from-deps_initramfs-list.patch b/target/linux/generic/patches-3.8/970-remove-unsane-filenames-from-deps_initramfs-list.patch new file mode 100644 index 0000000000..65c6ef7d0c --- /dev/null +++ b/target/linux/generic/patches-3.8/970-remove-unsane-filenames-from-deps_initramfs-list.patch @@ -0,0 +1,28 @@ +--- a/usr/Makefile ++++ b/usr/Makefile +@@ -50,20 +50,22 @@ ifneq ($(wildcard $(obj)/.initramfs_data + include $(obj)/.initramfs_data.cpio.d + endif + ++deps_initramfs_sane := $(foreach v,$(deps_initramfs),$(if $(findstring :,$(v)),,$(v))) ++ + quiet_cmd_initfs = GEN $@ + cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) + + targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.xz initramfs_data.cpio.lzo initramfs_data.cpio + # do not try to update files included in initramfs +-$(deps_initramfs): ; ++$(deps_initramfs_sane): ; + +-$(deps_initramfs): klibcdirs ++$(deps_initramfs_sane): klibcdirs + # We rebuild initramfs_data.cpio if: + # 1) Any included file is newer then initramfs_data.cpio + # 2) There are changes in which files are included (added or deleted) + # 3) If gen_init_cpio are newer than initramfs_data.cpio + # 4) arguments to gen_initramfs.sh changes +-$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs ++$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs_sane) klibcdirs + $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d + $(call if_changed,initfs) + diff --git a/target/linux/generic/patches-3.8/980-arm_openwrt_machtypes.patch b/target/linux/generic/patches-3.8/980-arm_openwrt_machtypes.patch new file mode 100644 index 0000000000..3d5590756b --- /dev/null +++ b/target/linux/generic/patches-3.8/980-arm_openwrt_machtypes.patch @@ -0,0 +1,34 @@ +--- a/arch/arm/tools/mach-types ++++ b/arch/arm/tools/mach-types 
+@@ -1204,3 +1204,31 @@ baileys MACH_BAILEYS BAILEYS 4169 + familybox MACH_FAMILYBOX FAMILYBOX 4170 + ensemble_mx35 MACH_ENSEMBLE_MX35 ENSEMBLE_MX35 4171 + sc_sps_1 MACH_SC_SPS_1 SC_SPS_1 4172 ++# ++# Additional mach-types supported by OpenWrt ++# ++wg302v1 MACH_WG302V1 WG302V1 889 ++pronghorn MACH_PRONGHORN PRONGHORN 928 ++pronghorn_metro MACH_PRONGHORNMETRO PRONGHORNMETRO 1040 ++sidewinder MACH_SIDEWINDER SIDEWINDER 1041 ++wrt300nv2 MACH_WRT300NV2 WRT300NV2 1077 ++compex42x MACH_COMPEXWP18 COMPEXWP18 1273 ++goldfish MACH_GOLDFISH GOLDFISH 1441 ++cambria MACH_CAMBRIA CAMBRIA 1468 ++dt2 MACH_DT2 DT2 1514 ++ap1000 MACH_AP1000 AP1000 1543 ++tw2662 MACH_TW2662 TW2662 1658 ++tw5334 MACH_TW5334 TW5334 1664 ++usr8200 MACH_USR8200 USR8200 1762 ++mi424wr MACH_MI424WR MI424WR 1778 ++gw2388 MACH_GW2388 GW2388 2635 ++iconnect MACH_ICONNECT ICONNECT 2870 ++nsb3ast MACH_NSB3AST NSB3AST 2917 ++goflexnet MACH_GOFLEXNET GOFLEXNET 3089 ++nas6210 MACH_NAS6210 NAS6210 3104 ++ns_k330 MACH_NS_K330 NS_K330 3108 ++bcm2708 MACH_BCM2708 BCM2708 3138 ++wn802t MACH_WN802T WN802T 3306 ++nsa310 MACH_NSA310 NSA310 4022 ++wnr3500 MACH_WNR3500 WNR3500 4407 ++ap42x MACH_AP42X AP42X 4418 diff --git a/target/linux/generic/patches-3.8/992-mpcore_wdt_fix_watchdog_counter_loading.patch b/target/linux/generic/patches-3.8/992-mpcore_wdt_fix_watchdog_counter_loading.patch new file mode 100644 index 0000000000..8d7390ebda --- /dev/null +++ b/target/linux/generic/patches-3.8/992-mpcore_wdt_fix_watchdog_counter_loading.patch @@ -0,0 +1,68 @@ +Although the commit "98af057092f8f0dabe63c5df08adc2bbfbddb1d2 + ARM: 6126/1: ARM mpcore_wdt: fix build failure and other fixes" +resolved long standing mpcore_wdt driver build problems, it +introduced an error in the relationship between the MPcore watchdog +timer clock rate and mpcore_margin, "MPcore timer margin in seconds", +such that watchdog timeouts are now arbitrary rather than the number +of seconds specified by mpcore_margin. + +This change restores mpcore_wdt_keepalive() to its equivalent +implementation prior to commit 98af057 such that watchdog timeouts now +occur as specified by mpcore_margin. + +The variable 'mpcore_timer_rate' which caused that build failure was +replaced by 'twd_timer_rate'. Adding exported function to obtain +'twd_timer_rate' value in mpcore_wdt driver. + +MPCORE_WATCHDOG needed to build 'mpcore_wdt' already depends on +HAVE_ARM_TWD needed to build 'smp_twd', so from the point of view of +'mpcore_wdt' driver the exported function will always exist. 
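
Editor's note, not part of the patch: the fix described above comes down to one arithmetic change in mpcore_wdt_keepalive(). A minimal C sketch of the reload value it now computes, assuming the prescaler is left at 256 as the driver comment states; the helper name and the 400 MHz clock figure are only illustrative:

/* Reload value written to TWD_WDOG_LOAD by the fixed keepalive path.
 * With the prescaler at 256 the watchdog counts down at
 * twd_timer_get_rate() / 256 Hz, so this many ticks span margin_secs
 * seconds of wall-clock time.  For a (hypothetical) 400 MHz TWD clock
 * and a 60 s margin: (400000000 / 256) * 60 = 93,750,000 ticks. */
static u32 mpcore_wdt_reload_ticks(unsigned int margin_secs)
{
	return (twd_timer_get_rate() / 256) * margin_secs;
}
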
+ +Signed-off-by: Valentine Barshak <vbarshak@mvista.com> +Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com> +--- + + arch/arm/include/asm/smp_twd.h | 1 + + arch/arm/kernel/smp_twd.c | 7 +++++++ + drivers/watchdog/mpcore_wdt.c | 4 +--- + 3 files changed, 9 insertions(+), 3 deletions(-) + +--- a/arch/arm/include/asm/smp_twd.h ++++ b/arch/arm/include/asm/smp_twd.h +@@ -33,6 +33,7 @@ struct twd_local_timer name __initdata = + }; + + int twd_local_timer_register(struct twd_local_timer *); ++unsigned long twd_timer_get_rate(void); + + #ifdef CONFIG_HAVE_ARM_TWD + void twd_local_timer_of_register(void); +--- a/arch/arm/kernel/smp_twd.c ++++ b/arch/arm/kernel/smp_twd.c +@@ -376,6 +376,13 @@ int __init twd_local_timer_register(stru + return twd_local_timer_common_register(); + } + ++/* Needed by mpcore_wdt */ ++unsigned long twd_timer_get_rate(void) ++{ ++ return twd_timer_rate; ++} ++EXPORT_SYMBOL_GPL(twd_timer_get_rate); ++ + #ifdef CONFIG_OF + const static struct of_device_id twd_of_match[] __initconst = { + { .compatible = "arm,cortex-a9-twd-timer", }, +--- a/drivers/watchdog/mpcore_wdt.c ++++ b/drivers/watchdog/mpcore_wdt.c +@@ -101,9 +101,7 @@ static void mpcore_wdt_keepalive(struct + + spin_lock(&wdt_lock); + /* Assume prescale is set to 256 */ +- count = __raw_readl(wdt->base + TWD_WDOG_COUNTER); +- count = (0xFFFFFFFFU - count) * (HZ / 5); +- count = (count / 256) * mpcore_margin; ++ count = (twd_timer_get_rate() / 256) * mpcore_margin; + + /* Reload the counter */ + writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); diff --git a/target/linux/generic/patches-3.8/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch b/target/linux/generic/patches-3.8/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch new file mode 100644 index 0000000000..c986ac8f21 --- /dev/null +++ b/target/linux/generic/patches-3.8/993-mpcore_wdt_fix_wdioc_setoptions_handling.patch @@ -0,0 +1,29 @@ +According to the include/linux/watchdog.h WDIOC_SETOPTIONS is +classified as 'read from device' ioctl call: + #define WDIOC_SETOPTIONS _IOR(WATCHDOG_IOCTL_BASE, 4, int) + +However, the driver 'mpcore_wdt' performs 'copy_from_user' only if +_IOC_WRITE is set, thus the local variable 'uarg' which is used in +WDIOC_SETOPTIONS handling remains uninitialized. + +The proper way to fix this is to bind WDIOC_SETOPTIONS to _IOW, +but this will break compatibility. +So adding additional condition for performing 'copy_from_user'. + +Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com> +--- + drivers/watchdog/mpcore_wdt.c | 3 ++- + 1 files changed, 2 insertions(+), 1 deletions(-) + +--- a/drivers/watchdog/mpcore_wdt.c ++++ b/drivers/watchdog/mpcore_wdt.c +@@ -235,7 +235,8 @@ static long mpcore_wdt_ioctl(struct file + if (_IOC_DIR(cmd) && _IOC_SIZE(cmd) > sizeof(uarg)) + return -ENOTTY; + +- if (_IOC_DIR(cmd) & _IOC_WRITE) { ++ if ((_IOC_DIR(cmd) & _IOC_WRITE) ++ || cmd == WDIOC_SETOPTIONS) { + ret = copy_from_user(&uarg, (void __user *)arg, _IOC_SIZE(cmd)); + if (ret) + return -EFAULT; diff --git a/target/linux/generic/patches-3.8/994-mpcore_wdt_fix_timer_mode_setup.patch b/target/linux/generic/patches-3.8/994-mpcore_wdt_fix_timer_mode_setup.patch new file mode 100644 index 0000000000..ecc2452cab --- /dev/null +++ b/target/linux/generic/patches-3.8/994-mpcore_wdt_fix_timer_mode_setup.patch @@ -0,0 +1,57 @@ +Allow watchdog to set its iterrupt as pending when it is configured +for timer mode (in other words, allow emitting interrupt). +Also add macros for all Watchdog Control Register flags. 
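
Editor's note, not part of the patch: with the new macros the magic constants the driver used to write to TWD_WDOG_CONTROL decode cleanly. A sketch of the composition, assuming the prescale field stays at 255 (divide by 256) as before:

/* Old constants, decoded with the macros the patch adds:
 *   0x0000FF01 (timer mode)    == (255 << 8) | TWD_WDOG_CONTROL_ENABLE
 *   0x0000FF09 (watchdog mode) == (255 << 8) | TWD_WDOG_CONTROL_ENABLE
 *                                           | TWD_WDOG_CONTROL_WATCHDOG_MODE
 * The patch keeps both values and, in timer mode, additionally sets
 * TWD_WDOG_CONTROL_IT_ENABLE (giving 0x0000FF05) so that an expiry can
 * raise an interrupt, as the commit message describes. */
u32 mode = (255 << 8) | TWD_WDOG_CONTROL_ENABLE;

if (mpcore_noboot)
	mode |= TWD_WDOG_CONTROL_TIMER_MODE | TWD_WDOG_CONTROL_IT_ENABLE;
else
	mode |= TWD_WDOG_CONTROL_WATCHDOG_MODE;

writel(mode, wdt->base + TWD_WDOG_CONTROL);
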
+ +Signed-off-by: Vitaly Kuzmichev <vkuzmichev@mvista.com> +--- + arch/arm/include/asm/smp_twd.h | 6 ++++++ + drivers/watchdog/mpcore_wdt.c | 15 +++++++++++---- + 2 files changed, 17 insertions(+), 4 deletions(-) + +--- a/arch/arm/include/asm/smp_twd.h ++++ b/arch/arm/include/asm/smp_twd.h +@@ -18,6 +18,12 @@ + #define TWD_TIMER_CONTROL_PERIODIC (1 << 1) + #define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) + ++#define TWD_WDOG_CONTROL_ENABLE (1 << 0) ++#define TWD_WDOG_CONTROL_PERIODIC (1 << 1) ++#define TWD_WDOG_CONTROL_IT_ENABLE (1 << 2) ++#define TWD_WDOG_CONTROL_TIMER_MODE (0 << 3) ++#define TWD_WDOG_CONTROL_WATCHDOG_MODE (1 << 3) ++ + #include <linux/ioport.h> + + struct twd_local_timer { +--- a/drivers/watchdog/mpcore_wdt.c ++++ b/drivers/watchdog/mpcore_wdt.c +@@ -120,18 +120,25 @@ static void mpcore_wdt_stop(struct mpcor + + static void mpcore_wdt_start(struct mpcore_wdt *wdt) + { ++ u32 mode; ++ + dev_info(wdt->dev, "enabling watchdog\n"); + + /* This loads the count register but does NOT start the count yet */ + mpcore_wdt_keepalive(wdt); + ++ /* Setup watchdog - prescale=256, enable=1 */ ++ mode = (255 << 8) | TWD_WDOG_CONTROL_ENABLE; ++ + if (mpcore_noboot) { +- /* Enable watchdog - prescale=256, watchdog mode=0, enable=1 */ +- writel(0x0000FF01, wdt->base + TWD_WDOG_CONTROL); ++ /* timer mode, send interrupt */ ++ mode |= TWD_WDOG_CONTROL_TIMER_MODE | ++ TWD_WDOG_CONTROL_IT_ENABLE; + } else { +- /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */ +- writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL); ++ /* watchdog mode */ ++ mode |= TWD_WDOG_CONTROL_WATCHDOG_MODE; + } ++ writel(mode, wdt->base + TWD_WDOG_CONTROL); + } + + static int mpcore_wdt_set_heartbeat(int t) |
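
Editor's note on 993-mpcore_wdt_fix_wdioc_setoptions_handling.patch above, not part of any patch: the reason the extra condition is needed is visible from the ioctl direction bits. A short sketch; the helper name is hypothetical and only mirrors the check the patch adds:

#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/watchdog.h>	/* WDIOC_SETOPTIONS is declared _IOR(WATCHDOG_IOCTL_BASE, 4, int) */

/* WDIOC_SETOPTIONS carries its argument in from user space even though it
 * is declared _IOR, so _IOC_DIR() reports only _IOC_READ for it.  A driver
 * that gates copy_from_user() purely on _IOC_WRITE therefore leaves the
 * argument uninitialised; the patch special-cases the command rather than
 * changing the ABI-visible definition to _IOW. */
static bool wdt_ioctl_needs_copy_in(unsigned int cmd)
{
	return (_IOC_DIR(cmd) & _IOC_WRITE) || cmd == WDIOC_SETOPTIONS;
}
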