On 22/07/12 12:54PM, Jan Kara wrote:
Remove unnecessary else (and thus indentation level) from a code block in ext4_xattr_block_set(). It will also make following code changes easier. No functional changes.
The patch looks good to me. Just a note: while applying this patch on the ext4-dev tree, I found a minor conflict with the patch below.
ext4: use kmemdup() to replace kmalloc + memcpy
Replace kmalloc + memcpy with kmemdup()
-ritesh
CC: stable@vger.kernel.org
Fixes: 82939d7999df ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
fs/ext4/xattr.c | 79 ++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 40 deletions(-)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 7fc40fb1e6b3..aadfae53d055 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1850,6 +1850,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, #define header(x) ((struct ext4_xattr_header *)(x))
if (s->base) {
int offset = (char *)s->here - bs->bh->b_data;
- BUFFER_TRACE(bs->bh, "get_write_access"); error = ext4_journal_get_write_access(handle, sb, bs->bh, EXT4_JTR_NONE);
@@ -1882,50 +1884,47 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, if (error) goto cleanup; goto inserted;
} else {
int offset = (char *)s->here - bs->bh->b_data;
}
unlock_buffer(bs->bh);
ea_bdebug(bs->bh, "cloning");
s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
error = -ENOMEM;
if (s->base == NULL)
goto cleanup;
memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
s->first = ENTRY(header(s->base)+1);
header(s->base)->h_refcount = cpu_to_le32(1);
s->here = ENTRY(s->base + offset);
s->end = s->base + bs->bh->b_size;
unlock_buffer(bs->bh);
ea_bdebug(bs->bh, "cloning");
s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
error = -ENOMEM;
if (s->base == NULL)
/*
* If existing entry points to an xattr inode, we need
* to prevent ext4_xattr_set_entry() from decrementing
* ref count on it because the reference belongs to the
* original block. In this case, make the entry look
* like it has an empty value.
*/
if (!s->not_found && s->here->e_value_inum) {
ea_ino = le32_to_cpu(s->here->e_value_inum);
error = ext4_xattr_inode_iget(inode, ea_ino,
le32_to_cpu(s->here->e_hash),
&tmp_inode);
if (error) goto cleanup;
memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
s->first = ENTRY(header(s->base)+1);
header(s->base)->h_refcount = cpu_to_le32(1);
s->here = ENTRY(s->base + offset);
s->end = s->base + bs->bh->b_size;
/*
* If existing entry points to an xattr inode, we need
* to prevent ext4_xattr_set_entry() from decrementing
* ref count on it because the reference belongs to the
* original block. In this case, make the entry look
* like it has an empty value.
*/
if (!s->not_found && s->here->e_value_inum) {
ea_ino = le32_to_cpu(s->here->e_value_inum);
error = ext4_xattr_inode_iget(inode, ea_ino,
le32_to_cpu(s->here->e_hash),
&tmp_inode);
if (error)
goto cleanup;
if (!ext4_test_inode_state(tmp_inode,
EXT4_STATE_LUSTRE_EA_INODE)) {
/*
* Defer quota free call for previous
* inode until success is guaranteed.
*/
old_ea_inode_quota = le32_to_cpu(
s->here->e_value_size);
}
iput(tmp_inode);
s->here->e_value_inum = 0;
s->here->e_value_size = 0;
if (!ext4_test_inode_state(tmp_inode,
EXT4_STATE_LUSTRE_EA_INODE)) {
/*
* Defer quota free call for previous
* inode until success is guaranteed.
*/
old_ea_inode_quota = le32_to_cpu(
s->here->e_value_size); }
iput(tmp_inode);
s->here->e_value_inum = 0;
} } else { /* Allocate a buffer where we construct the new block. */s->here->e_value_size = 0;
-- 2.35.3