The supplied buffer might be on the stack, in which case we get the
following error message:
[ 3.312058] at91_i2c e0070600.i2c: rejecting DMA map of vmalloc memory
Use i2c_{get,put}_dma_safe_msg_buf() to get a DMA-able memory region if
necessary.
Cc: stable(a)vger.kernel.org
Signed-off-by: Michael Walle <michael(a)walle.cc>
---
I'm not sure if, or which, Fixes: tag I should add to this patch. The issue
seems to have been around for a very long time, but nobody seems to have
triggered it.
FWIW, I'm using the sff,sfp driver, which triggers this.
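For reference, the general pattern these helpers are designed for looks
roughly like this (a minimal sketch with made-up example_* names, not the
at91 code itself):

static int example_xfer_one(struct example_i2c_dev *dev, struct i2c_msg *msg)
{
	u8 *dma_buf = NULL;
	int ret;

	if (dev->use_dma) {
		/* Returns msg->buf if it is already DMA safe, else a bounce buffer. */
		dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
		if (!dma_buf)
			return -ENOMEM;
		dev->buf = dma_buf;
	}

	ret = example_do_transfer(dev);

	/* Frees a bounce buffer, copying it back to msg->buf for successful reads. */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, !ret);

	return ret;
}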
drivers/i2c/busses/i2c-at91-master.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index b0eae94909f4..a7a22fedbaba 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -656,6 +656,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
unsigned int_addr_flag = 0;
struct i2c_msg *m_start = msg;
bool is_read;
+ u8 *dma_buf = NULL;
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
@@ -703,7 +704,18 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
dev->msg = m_start;
dev->recv_len_abort = false;
+ if (dev->use_dma) {
+ dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
+ if (!dma_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dev->buf = dma_buf;
+ }
+
+
ret = at91_do_twi_transfer(dev);
+ i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);
ret = (ret < 0) ? ret : num;
out:
--
2.30.2
Make the two places where the exportfs helpers check permission to look up
a given inode aware of idmapped mounts by switching them to the lookup_one()
helpers. This is a bugfix for the open_by_handle_at() system call, which
currently doesn't take idmapped mounts into account. It's not tied to a
specific commit, so we'll just Cc stable.
In addition, this is required to support idmapped base layers in overlayfs.
The overlay filesystem uses exportfs to encode and decode file handles for
its index=on and nfs_export=on mount options.
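For context, the only difference between the old and new helpers is the
extra user namespace argument taken from the mount via mnt_user_ns(); the
prototypes in the base kernel of this series look roughly as follows
(newer kernels pass a struct mnt_idmap * instead):

struct dentry *lookup_one_len_unlocked(const char *name,
				       struct dentry *base, int len);
struct dentry *lookup_one_unlocked(struct user_namespace *mnt_userns,
				   const char *name,
				   struct dentry *base, int len);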
Cc: <stable(a)vger.kernel.org>
Cc: <linux-fsdevel(a)vger.kernel.org>
Tested-by: Giuseppe Scrivano <gscrivan(a)redhat.com>
Reviewed-by: Amir Goldstein <amir73il(a)gmail.com>
Reviewed-by: Christoph Hellwig <hch(a)lst.de>
Signed-off-by: Christian Brauner (Microsoft) <brauner(a)kernel.org>
---
/* v2 */
unchanged
/* v3 */
unchanged
/* v4 */
unchanged
/* v5 */
unchanged
---
fs/exportfs/expfs.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 0106eba46d5a..3ef80d000e13 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -145,7 +145,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
if (err)
goto out_err;
dprintk("%s: found name: %s\n", __func__, nbuf);
- tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
+ tmp = lookup_one_unlocked(mnt_user_ns(mnt), nbuf, parent, strlen(nbuf));
if (IS_ERR(tmp)) {
dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
err = PTR_ERR(tmp);
@@ -525,7 +525,8 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
}
inode_lock(target_dir->d_inode);
- nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ nresult = lookup_one(mnt_user_ns(mnt), nbuf,
+ target_dir, strlen(nbuf));
if (!IS_ERR(nresult)) {
if (unlikely(nresult->d_inode != result->d_inode)) {
dput(nresult);
--
2.32.0
The two bugs are here:
if (encoder) {
if (bridge && bridge->timings)
The list iterator values 'encoder'/'bridge' will *always* be set and
non-NULL after drm_for_each_encoder()/list_for_each_entry(), so it is
incorrect to assume that the iterator value will be NULL if the list is
empty or no element is found.
To fix the bug, use new variables '*_iter' as the list iterators and keep
the old variables 'encoder'/'bridge' as dedicated pointers to the found
elements.
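As a generic illustration of the pattern (hypothetical struct and helper
names, not the ltdc code):

struct item {
	int id;
	struct list_head node;
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *pos, *found = NULL;

	list_for_each_entry(pos, head, node)
		if (pos->id == id) {
			found = pos;
			break;
		}

	/*
	 * 'pos' is never NULL here: if nothing matched, it points at the
	 * memory surrounding 'head' itself, so only the dedicated 'found'
	 * pointer may be tested and dereferenced by callers.
	 */
	return found;
}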
Cc: stable(a)vger.kernel.org
Fixes: 99e360442f223 ("drm/stm: Fix bus_flags handling")
Signed-off-by: Xiaomeng Tong <xiam0nd.tong(a)gmail.com>
---
drivers/gpu/drm/stm/ltdc.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index dbdee954692a..d6124aa873e5 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -528,8 +528,8 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct drm_connector_list_iter iter;
struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- struct drm_bridge *bridge = NULL;
+ struct drm_encoder *encoder = NULL, *en_iter;
+ struct drm_bridge *bridge = NULL, *br_iter;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
@@ -538,15 +538,19 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
int ret;
/* get encoder from crtc */
- drm_for_each_encoder(encoder, ddev)
- if (encoder->crtc == crtc)
+ drm_for_each_encoder(en_iter, ddev)
+ if (en_iter->crtc == crtc) {
+ encoder = en_iter;
break;
+ }
if (encoder) {
/* get bridge from encoder */
- list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
- if (bridge->encoder == encoder)
+ list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
+ if (br_iter->encoder == encoder) {
+ bridge = br_iter;
break;
+ }
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
--
2.17.1
From: Bob Peterson <rpeterso(a)redhat.com>
[ Upstream commit 428f651cb80b227af47fc302e4931791f2fb4741 ]
Before this patch, function read_rindex_entry called compute_bitstructs
before it allocated a glock for the rgrp. But if compute_bitstructs found
a problem with the rgrp, it called gfs2_consist_rgrpd, and that called
gfs2_dump_glock for rgd->rd_gl which had not yet been assigned.
read_rindex_entry
compute_bitstructs
gfs2_consist_rgrpd
gfs2_dump_glock <---------rgd->rd_gl was not set.
This patch changes read_rindex_entry to assign the rgrp glock before
calling compute_bitstructs, so gfs2_dump_glock does not reference an
unassigned pointer. If an error is discovered afterwards, the glock must
also be put, so a new goto and label were added.
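Sketched with hypothetical example_* names (the real change is in the diff
below), the resulting structure is:

static int example_read_entry(struct example_rgrp *rgd)
{
	int error;

	error = example_glock_get(rgd);		/* assigns rgd->rd_gl first */
	if (error)
		goto fail;			/* no glock to put yet */

	error = example_compute_bitstructs(rgd);	/* may now dump rgd->rd_gl safely */
	if (error)
		goto fail_glock;

	return 0;

fail_glock:
	example_glock_put(rgd);
fail:
	return error;
}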
Reported-by: syzbot+c6fd14145e2f62ca0784(a)syzkaller.appspotmail.com
Signed-off-by: Bob Peterson <rpeterso(a)redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba(a)redhat.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
fs/gfs2/rgrp.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 0fb3c01bc557..9b04a570c582 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -922,15 +922,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
spin_lock_init(&rgd->rd_rsspin);
mutex_init(&rgd->rd_mutex);
- error = compute_bitstructs(rgd);
- if (error)
- goto fail;
-
error = gfs2_glock_get(sdp, rgd->rd_addr,
&gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
if (error)
goto fail;
+ error = compute_bitstructs(rgd);
+ if (error)
+ goto fail_glock;
+
rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -944,6 +944,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
}
error = 0; /* someone else read in the rgrp; free it and ignore it */
+fail_glock:
gfs2_glock_put(rgd->rd_gl);
fail:
--
2.35.1
From: Guo Ren <guoren(a)linux.alibaba.com>
These patch_text implementations use the stop_machine_cpuslocked
infrastructure with an atomic cpu_count. The original idea: while the
master CPU runs patch_text, the other CPUs should be waiting for it. But
the current implementation uses the first CPU as the master, which cannot
guarantee that the remaining CPUs are already waiting. This patch makes
the last CPU the master instead, closing that potential race.
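The rendezvous under stop_machine() then looks roughly like this (a generic
sketch with made-up names, paraphrasing the existing callbacks; the same
reasoning applies to the xtensa and riscv patches below):

static int patch_text_cb_sketch(void *data)
{
	struct patch_ctx *ctx = data;

	if (atomic_inc_return(&ctx->cpu_count) == num_online_cpus()) {
		/* The last CPU to arrive patches; all others are already spinning. */
		do_patch(ctx->addr, ctx->insn);
		atomic_inc(&ctx->cpu_count);	/* release the waiters */
	} else {
		while (atomic_read(&ctx->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	return 0;
}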
Fixes: 33e53ae1ce41 ("csky: Add kprobes supported")
Signed-off-by: Guo Ren <guoren(a)linux.alibaba.com>
Signed-off-by: Guo Ren <guoren(a)kernel.org>
Reviewed-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
---
arch/csky/kernel/probes/kprobes.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index 42920f25e73c..34ba684d5962 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -30,7 +30,7 @@ static int __kprobes patch_text_cb(void *priv)
struct csky_insn_patch *param = priv;
unsigned int addr = (unsigned int)param->addr;
- if (atomic_inc_return(&param->cpu_count) == 1) {
+ if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
*(u16 *) addr = cpu_to_le16(param->opcode);
dcache_wb_range(addr, addr + 2);
atomic_inc(&param->cpu_count);
--
2.25.1
From: Guo Ren <guoren(a)linux.alibaba.com>
These patch_text implementations use the stop_machine_cpuslocked
infrastructure with an atomic cpu_count. The original idea: while the
master CPU runs patch_text, the other CPUs should be waiting for it. But
the current implementation uses the first CPU as the master, which cannot
guarantee that the remaining CPUs are already waiting. This patch makes
the last CPU the master instead, closing that potential race.
Fixes: 64711f9a47d4 ("xtensa: implement jump_label support")
Signed-off-by: Guo Ren <guoren(a)linux.alibaba.com>
Signed-off-by: Guo Ren <guoren(a)kernel.org>
Reviewed-by: Max Filippov <jcmvbkbc(a)gmail.com>
Reviewed-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
---
arch/xtensa/kernel/jump_label.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
index 0dde21e0d3de..ad1841cecdfb 100644
--- a/arch/xtensa/kernel/jump_label.c
+++ b/arch/xtensa/kernel/jump_label.c
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
{
struct patch *patch = data;
- if (atomic_inc_return(&patch->cpu_count) == 1) {
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
local_patch_text(patch->addr, patch->data, patch->sz);
atomic_inc(&patch->cpu_count);
} else {
--
2.25.1
From: Guo Ren <guoren(a)linux.alibaba.com>
These patch_text implementations use the stop_machine_cpuslocked
infrastructure with an atomic cpu_count. The original idea: while the
master CPU runs patch_text, the other CPUs should be waiting for it. But
the current implementation uses the first CPU as the master, which cannot
guarantee that the remaining CPUs are already waiting. This patch makes
the last CPU the master instead, closing that potential race.
Fixes: 043cb41a85de ("riscv: introduce interfaces to patch kernel code")
Signed-off-by: Guo Ren <guoren(a)linux.alibaba.com>
Signed-off-by: Guo Ren <guoren(a)kernel.org>
Reviewed-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
---
arch/riscv/kernel/patch.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 0b552873a577..765004b60513 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -104,7 +104,7 @@ static int patch_text_cb(void *data)
struct patch_insn *patch = data;
int ret = 0;
- if (atomic_inc_return(&patch->cpu_count) == 1) {
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
ret =
patch_text_nosync(patch->addr, &patch->insn,
GET_INSN_LENGTH(patch->insn));
--
2.25.1