From ac001ced90c7f5bd6bc7a03273b9dd59e4c44e5e Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 9 Feb 2016 11:15:14 -0800 Subject: futex: Remove requirement for lock_page() in get_futex_key() commit 65d8fc777f6dcfee12785c057a6b57f679641c90 upstream. When dealing with key handling for shared futexes, we can drastically reduce the usage/need of the page lock. 1) For anonymous pages, the associated futex object is the mm_struct which does not require the page lock. 2) For inode based, keys, we can check under RCU read lock if the page mapping is still valid and take reference to the inode. This just leaves one rare race that requires the page lock in the slow path when examining the swapcache. Additionally realtime users currently have a problem with the page lock being contended for unbounded periods of time during futex operations. Task A get_futex_key() lock_page() ---> preempted Now any other task trying to lock that page will have to wait until task A gets scheduled back in, which is an unbound time. With this patch, we pretty much have a lockless futex_get_key(). Experiments show that this patch can boost/speedup the hashing of shared futexes with the perf futex benchmarks (which is good for measuring such change) by up to 45% when there are high (> 100) thread counts on a 60 core Westmere. Lower counts are pretty much in the noise range or less than 10%, but mid range can be seen at over 30% overall throughput (hash ops/sec). This makes anon-mem shared futexes much closer to its private counterpart. Signed-off-by: Mel Gorman [ Ported on top of thp refcount rework, changelog, comments, fixes. ] Signed-off-by: Davidlohr Bueso Reviewed-by: Thomas Gleixner Cc: Chris Mason Cc: Darren Hart Cc: Hugh Dickins Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: dave@stgolabs.net Link: http://lkml.kernel.org/r/1455045314-8305-3-git-send-email-dave@stgolabs.net Signed-off-by: Ingo Molnar Signed-off-by: Chenbo Feng Signed-off-by: Greg Kroah-Hartman --- kernel/futex.c | 98 +++++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 91 insertions(+), 7 deletions(-) diff --git a/kernel/futex.c b/kernel/futex.c index 3057dabf726f..a03c7763d5f7 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -470,6 +470,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page, *page_head; + struct address_space *mapping; int err, ro = 0; /* @@ -555,7 +556,19 @@ again: } #endif - lock_page(page_head); + /* + * The treatment of mapping from this point on is critical. The page + * lock protects many things but in this context the page lock + * stabilizes mapping, prevents inode freeing in the shared + * file-backed region case and guards against movement to swap cache. + * + * Strictly speaking the page lock is not needed in all cases being + * considered here and page lock forces unnecessarily serialization + * From this point on, mapping will be re-verified if necessary and + * page lock will be acquired only if it is unavoidable + */ + + mapping = READ_ONCE(page_head->mapping); /* * If page_head->mapping is NULL, then it cannot be a PageAnon @@ -572,18 +585,31 @@ again: * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for page_head->mapping. 
*/ - if (!page_head->mapping) { - int shmem_swizzled = PageSwapCache(page_head); + if (unlikely(!mapping)) { + int shmem_swizzled; + + /* + * Page lock is required to identify which special case above + * applies. If this is really a shmem page then the page lock + * will prevent unexpected transitions. + */ + lock_page(page); + shmem_swizzled = PageSwapCache(page) || page->mapping; unlock_page(page_head); put_page(page_head); + if (shmem_swizzled) goto again; + return -EFAULT; } /* * Private mappings are handled in a simple way. * + * If the futex key is stored on an anonymous page, then the associated + * object is the mm which is implicitly pinned by the calling process. + * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. @@ -601,16 +627,74 @@ again: key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; + + get_futex_key_refs(key); /* implies smp_mb(); (B) */ + } else { + struct inode *inode; + + /* + * The associated futex object in this case is the inode and + * the page->mapping must be traversed. Ordinarily this should + * be stabilised under page lock but it's not strictly + * necessary in this case as we just want to pin the inode, not + * update the radix tree or anything like that. + * + * The RCU read lock is taken as the inode is finally freed + * under RCU. If the mapping still matches expectations then the + * mapping->host can be safely accessed as being a valid inode. + */ + rcu_read_lock(); + + if (READ_ONCE(page_head->mapping) != mapping) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + inode = READ_ONCE(mapping->host); + if (!inode) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + /* + * Take a reference unless it is about to be freed. Previously + * this reference was taken by ihold under the page lock + * pinning the inode in place so i_lock was unnecessary. The + * only way for this check to fail is if the inode was + * truncated in parallel so warn for now if this happens. + * + * We are not calling into get_futex_key_refs() in file-backed + * cases, therefore a successful atomic_inc return below will + * guarantee that get_futex_key() will still imply smp_mb(); (B). + */ + if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + /* Should be impossible but lets be paranoid for now */ + if (WARN_ON_ONCE(inode->i_mapping != mapping)) { + err = -EFAULT; + rcu_read_unlock(); + iput(inode); + + goto out; + } + key->both.offset |= FUT_OFF_INODE; /* inode-based key */ - key->shared.inode = page_head->mapping->host; + key->shared.inode = inode; key->shared.pgoff = basepage_index(page); + rcu_read_unlock(); } - get_futex_key_refs(key); /* implies MB (B) */ - out: - unlock_page(page_head); put_page(page_head); return err; } -- cgit v1.2.3 From d13f0dc13b4df7bd79529927663cbdeb1d4e7432 Mon Sep 17 00:00:00 2001 From: "Felipe F. Tonello" Date: Tue, 10 Nov 2015 17:52:05 +0000 Subject: usb: gadget: define free_ep_req as universal function commit 079fe5a6da616891cca1a26e803e1df2a87e9ae5 upstream. This function is shared between gadget functions, so this avoid unnecessary duplicated code and potentially avoid memory leaks. Reviewed-by: Robert Baldyga Signed-off-by: Felipe F. 
Tonello Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_midi.c | 6 ------ drivers/usb/gadget/function/f_sourcesink.c | 6 ------ drivers/usb/gadget/function/g_zero.h | 1 - drivers/usb/gadget/u_f.c | 1 - drivers/usb/gadget/u_f.h | 10 ++++++++-- 5 files changed, 8 insertions(+), 16 deletions(-) diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 315c2690afe1..514912a694a4 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -201,12 +201,6 @@ static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep, return alloc_ep_req(ep, length, length); } -static void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static const uint8_t f_midi_cin_length[] = { 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 }; diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 9f3ced62d916..67b243989938 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -303,12 +303,6 @@ static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) return alloc_ep_req(ep, len, ss->buflen); } -void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) { int value; diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h index 15f180904f8a..5ed90b437f18 100644 --- a/drivers/usb/gadget/function/g_zero.h +++ b/drivers/usb/gadget/function/g_zero.h @@ -59,7 +59,6 @@ void lb_modexit(void); int lb_modinit(void); /* common utilities */ -void free_ep_req(struct usb_ep *ep, struct usb_request *req); void disable_endpoints(struct usb_composite_dev *cdev, struct usb_ep *in, struct usb_ep *out, struct usb_ep *iso_in, struct usb_ep *iso_out); diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c index c6276f0268ae..4bc7eea8bfc8 100644 --- a/drivers/usb/gadget/u_f.c +++ b/drivers/usb/gadget/u_f.c @@ -11,7 +11,6 @@ * published by the Free Software Foundation. */ -#include #include "u_f.h" struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len) diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 1d5f0eb68552..4247cc098a89 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -16,6 +16,8 @@ #ifndef __U_F_H__ #define __U_F_H__ +#include + /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 #define vla_group_size(groupname) groupname##__next @@ -45,8 +47,12 @@ struct usb_ep; struct usb_request; +/* Requests allocated via alloc_ep_req() must be freed by free_ep_req(). */ struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len); +static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} #endif /* __U_F_H__ */ - - -- cgit v1.2.3 From 4cde5710ca7156ff2634ffc5c1118d422b85f946 Mon Sep 17 00:00:00 2001 From: Krzysztof Opasiak Date: Thu, 19 Jan 2017 18:55:28 +0100 Subject: usb: gadget: f_hid: fix: Prevent accessing released memory commit aa65d11aa008f4de58a9cee7e121666d9d68505e upstream. 
When we unlock our spinlock to copy data to user we may get disabled by USB host and free the whole list of completed out requests including the one from which we are copying the data to user memory. To prevent from this let's remove our working element from the list and place it back only if there is sth left when we finish with it. Fixes: 99c515005857 ("usb: gadget: hidg: register OUT INT endpoint for SET_REPORT") Cc: stable@vger.kernel.org Tested-by: David Lechner Signed-off-by: Krzysztof Opasiak Signed-off-by: Felipe Balbi Cc: Jerry Zhang Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_hid.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index ee579ba2b59e..a5dae5bb62ab 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -223,6 +223,13 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, /* pick the first one */ list = list_first_entry(&hidg->completed_out_req, struct f_hidg_req_list, list); + + /* + * Remove this from list to protect it from beign free() + * while host disables our function + */ + list_del(&list->list); + req = list->req; count = min_t(unsigned int, count, req->actual - list->pos); spin_unlock_irqrestore(&hidg->spinlock, flags); @@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, * call, taking into account its current read position. */ if (list->pos == req->actual) { - spin_lock_irqsave(&hidg->spinlock, flags); - list_del(&list->list); kfree(list); - spin_unlock_irqrestore(&hidg->spinlock, flags); req->length = hidg->report_length; ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); - if (ret < 0) + if (ret < 0) { + free_ep_req(hidg->out_ep, req); return ret; + } + } else { + spin_lock_irqsave(&hidg->spinlock, flags); + list_add(&list->list, &hidg->completed_out_req); + spin_unlock_irqrestore(&hidg->spinlock, flags); + + wake_up(&hidg->read_queue); } return count; @@ -490,14 +502,18 @@ static void hidg_disable(struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); struct f_hidg_req_list *list, *next; + unsigned long flags; usb_ep_disable(hidg->in_ep); usb_ep_disable(hidg->out_ep); + spin_lock_irqsave(&hidg->spinlock, flags); list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { + free_ep_req(hidg->out_ep, list->req); list_del(&list->list); kfree(list); } + spin_unlock_irqrestore(&hidg->spinlock, flags); } static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) -- cgit v1.2.3 From 26e3393de736ad0a3feb8238ceaf4ffd3d4bc1d4 Mon Sep 17 00:00:00 2001 From: Li Jinyue Date: Thu, 14 Dec 2017 17:04:54 +0800 Subject: futex: Prevent overflow by strengthen input validation commit fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a upstream. UBSAN reports signed integer overflow in kernel/futex.c: UBSAN: Undefined behaviour in kernel/futex.c:2041:18 signed integer overflow: 0 - -2147483648 cannot be represented in type 'int' Add a sanity check to catch negative values of nr_wake and nr_requeue. 
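For illustration only (not part of the patch, a hedged sketch): both counts come
straight from user space, so a negative value reaches futex_requeue() as easily
as a positive one. FUTEX_CMP_REQUEUE passes val as nr_wake and the reinterpreted
timeout argument as nr_requeue:

  #include <linux/futex.h>
  #include <limits.h>
  #include <stdint.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          uint32_t f1 = 0, f2 = 0;

          /*
           * nr_wake and nr_requeue are plain ints inside the kernel; before
           * this patch INT_MIN flowed straight into futex_requeue()'s signed
           * arithmetic, after it the call fails early with EINVAL.
           */
          syscall(SYS_futex, &f1, FUTEX_CMP_REQUEUE, INT_MIN,
                  (unsigned long)INT_MIN /* val2 == nr_requeue */, &f2, 0);
          return 0;
  }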
Signed-off-by: Li Jinyue Signed-off-by: Thomas Gleixner Cc: peterz@infradead.org Cc: dvhart@infradead.org Link: https://lkml.kernel.org/r/1513242294-31786-1-git-send-email-lijinyue@huawei.com Signed-off-by: Greg Kroah-Hartman --- kernel/futex.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/futex.c b/kernel/futex.c index a03c7763d5f7..0d21619ac806 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1705,6 +1705,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, struct futex_q *this, *next; WAKE_Q(wake_q); + if (nr_wake < 0 || nr_requeue < 0) + return -EINVAL; + if (requeue_pi) { /* * Requeue PI only works on two distinct uaddrs. This -- cgit v1.2.3 From 0d9365e835b6a220933d477c48a73b74ce575f1f Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 10 May 2017 09:53:40 +0200 Subject: scsi: sg: don't return bogus Sg_requests commit 48ae8484e9fc324b4968d33c585e54bc98e44d61 upstream. If the list search in sg_get_rq_mark() fails to find a valid request, we return a bogus element. This then can later lead to a GPF in sg_remove_scat(). So don't return bogus Sg_requests in sg_get_rq_mark() but NULL in case the list search doesn't find a valid request. Signed-off-by: Johannes Thumshirn Reported-by: Andrey Konovalov Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Doug Gilbert Reviewed-by: Hannes Reinecke Acked-by: Doug Gilbert Signed-off-by: Martin K. Petersen Cc: Tony Battersby Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/sg.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 6514636431ab..72afe05be576 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2048,11 +2048,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ - break; + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return resp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return resp; + return NULL; } /* always adds to end of list */ -- cgit v1.2.3 From eb153c65260e88debc9803fa1dcd9daa21c1e36a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 10 Apr 2018 15:21:44 +0200 Subject: ARM: amba: Fix race condition with driver_override commit 6a7228d90d42bcacfe38786756ba62762b91c20a upstream. The driver_override implementation is susceptible to a race condition when different threads are reading vs storing a different driver override. Add locking to avoid this race condition. Cfr. commits 6265539776a0810b ("driver core: platform: fix race condition with driver_override") and 9561475db680f714 ("PCI: Fix race condition with driver_override"). 
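As a purely illustrative, hedged user-space analogue (not part of the patch):
the bug class is a reader walking a heap-allocated string while another thread
frees and replaces it. The mutex below plays the role device_lock() plays in
the fix, and all names are invented for the sketch:

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  static char *override;          /* stands in for dev->driver_override */
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  /* analogue of driver_override_show(): read only under the lock */
  static void *reader(void *unused)
  {
          pthread_mutex_lock(&lock);
          if (override)
                  printf("%s\n", override);
          pthread_mutex_unlock(&lock);
          return NULL;
  }

  /* analogue of driver_override_store(): swap under the lock, free outside */
  static void *writer(void *unused)
  {
          char *new = strdup("new-driver"), *old;

          pthread_mutex_lock(&lock);
          old = override;
          override = new;
          pthread_mutex_unlock(&lock);
          free(old);
          return NULL;
  }

  int main(void)
  {
          pthread_t r, w;

          override = strdup("old-driver");
          pthread_create(&r, NULL, reader, NULL);
          pthread_create(&w, NULL, writer, NULL);
          pthread_join(r, NULL);
          pthread_join(w, NULL);
          free(override);
          return 0;
  }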
Change-Id: I1e97d22641e30706f2f748833ce1d01127e020a5 Fixes: 3cf385713460eb2b ("ARM: 8256/1: driver coamba: add device binding path 'driver_override'") Signed-off-by: Geert Uytterhoeven Reviewed-by: Todd Kjos Cc: stable Signed-off-by: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman --- drivers/amba/bus.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 42086ad535c5..35603b7f0e90 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -68,11 +68,15 @@ static ssize_t driver_override_show(struct device *_dev, struct device_attribute *attr, char *buf) { struct amba_device *dev = to_amba_device(_dev); + ssize_t len; if (!dev->driver_override) return 0; - return sprintf(buf, "%s\n", dev->driver_override); + device_lock(_dev); + len = sprintf(buf, "%s\n", dev->driver_override); + device_unlock(_dev); + return len; } static ssize_t driver_override_store(struct device *_dev, @@ -80,7 +84,7 @@ static ssize_t driver_override_store(struct device *_dev, const char *buf, size_t count) { struct amba_device *dev = to_amba_device(_dev); - char *driver_override, *old = dev->driver_override, *cp; + char *driver_override, *old, *cp; /* We need to keep extra room for a newline */ if (count >= (PAGE_SIZE - 1)) @@ -94,12 +98,15 @@ static ssize_t driver_override_store(struct device *_dev, if (cp) *cp = '\0'; + device_lock(_dev); + old = dev->driver_override; if (strlen(driver_override)) { dev->driver_override = driver_override; } else { kfree(driver_override); dev->driver_override = NULL; } + device_unlock(_dev); kfree(old); -- cgit v1.2.3 From c5f3f40625c3318f18ecb045f8c26fc5fb3a00f8 Mon Sep 17 00:00:00 2001 From: Seunghun Han Date: Tue, 6 Mar 2018 15:21:43 +0100 Subject: x86/MCE: Serialize sysfs changes commit b3b7c4795ccab5be71f080774c45bbbcc75c2aaf upstream. The check_interval file in /sys/devices/system/machinecheck/machinecheck directory is a global timer value for MCE polling. If it is changed by one CPU, mce_restart() broadcasts the event to other CPUs to delete and restart the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the mce_timer variable. If more than one CPU writes a specific value to the check_interval file concurrently, mce_timer is not protected from such concurrent accesses and all kinds of explosions happen. Since only root can write to those sysfs variables, the issue is not a big deal security-wise. However, concurrent writes to these configuration variables is void of reason so the proper thing to do is to serialize the access with a mutex. 
Boris: - Make store_int_with_restart() use device_store_ulong() to filter out negative intervals - Limit min interval to 1 second - Correct locking - Massage commit message Signed-off-by: Seunghun Han Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Greg Kroah-Hartman Cc: Tony Luck Cc: linux-edac Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20180302202706.9434-1-kkamagui@gmail.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/mcheck/mce.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 7e8a736d09db..645342c8bc96 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -60,6 +60,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); smp_load_acquire(&(p)); \ }) +/* sysfs synchronization */ +static DEFINE_MUTEX(mce_sysfs_mutex); + #define CREATE_TRACE_POINTS #include @@ -2215,6 +2218,7 @@ static ssize_t set_ignore_ce(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.ignore_ce ^ !!new) { if (new) { /* disable ce features */ @@ -2227,6 +2231,8 @@ static ssize_t set_ignore_ce(struct device *s, on_each_cpu(mce_enable_ce, (void *)1, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2239,6 +2245,7 @@ static ssize_t set_cmci_disabled(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.cmci_disabled ^ !!new) { if (new) { /* disable cmci */ @@ -2250,6 +2257,8 @@ static ssize_t set_cmci_disabled(struct device *s, on_each_cpu(mce_enable_ce, NULL, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2257,8 +2266,19 @@ static ssize_t store_int_with_restart(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { - ssize_t ret = device_store_int(s, attr, buf, size); + unsigned long old_check_interval = check_interval; + ssize_t ret = device_store_ulong(s, attr, buf, size); + + if (check_interval == old_check_interval) + return ret; + + if (check_interval < 1) + check_interval = 1; + + mutex_lock(&mce_sysfs_mutex); mce_restart(); + mutex_unlock(&mce_sysfs_mutex); + return ret; } -- cgit v1.2.3 From dd5ce79caedcc7585314ef7243d589682f35f179 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 7 Feb 2018 13:46:25 +0100 Subject: netfilter: add back stackpointer size checks commit 57ebd808a97d7c5b1e1afb937c2db22beba3c1f8 upstream. The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, its possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and crash occurs because no jumpstack was allocated. 
Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: syzbot+e783f671527912cd9403@syzkaller.appspotmail.com Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/ipv4/netfilter/arp_tables.c | 4 ++++ net/ipv4/netfilter/ip_tables.c | 4 ++++ net/ipv6/netfilter/ip6_tables.c | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 6e3e0e8b1ce3..cff58f7ee559 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -329,6 +329,10 @@ unsigned int arpt_do_table(struct sk_buff *skb, } if (table_base + v != arpt_next_entry(e)) { + if (unlikely(stackidx >= private->stacksize)) { + verdict = NF_DROP; + break; + } jumpstack[stackidx++] = e; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index a399c5419622..760c81dfbee8 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -408,6 +408,10 @@ ipt_do_table(struct sk_buff *skb, } if (table_base + v != ipt_next_entry(e) && !(e->ip.flags & IPT_F_GOTO)) { + if (unlikely(stackidx >= private->stacksize)) { + verdict = NF_DROP; + break; + } jumpstack[stackidx++] = e; pr_debug("Pushed %p into pos %u\n", e, stackidx - 1); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 22f39e00bef3..8f676ca6ce86 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -425,6 +425,10 @@ ip6t_do_table(struct sk_buff *skb, } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { + if (unlikely(stackidx >= private->stacksize)) { + verdict = NF_DROP; + break; + } jumpstack[stackidx++] = e; } -- cgit v1.2.3