On Fri, Aug 31, 2012 at 2:53 PM, Zhangfei Gao <zhangfei.gao@marvell.com> wrote:
ion_shrink is called when an allocation fails with ION_CARVEOUT_ALLOCATE_FAIL.

How to test:
  mount -t debugfs none /mnt
  cat /mnt/ion/carveout_heap
  echo adj > /mnt/ion/carveout_heap
Writing an adj value sends SIGKILL to the task with a higher adj that is using an
ion buffer. All other tasks referring to the same buffers are killed as well, so
that no client is left using an empty buffer.
Example:

cat /mnt/ion/carveout_heap
          client              pid             size    oom_score_adj
        ion_test              191             4096                0
        ion_test              192             4096                0
        ion_test              193             4096                0

echo -1 > /mnt/ion/carveout_heap
[ 1333.689318] SIGKILL pid: 191
[ 1333.692192] SIGKILL pid: 192
[ 1333.695436] SIGKILL pid: 193
[ 1333.698312] SIGKILL pid: 193 size: 4096 adj: 0
[1]+  Killed                  ./ion_test 2

cat /mnt/ion/carveout_heap
          client              pid             size    oom_score_adj
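
The same sequence can also be driven from a small user-space program; a rough
sketch, not part of the patch (it assumes debugfs is already mounted at /mnt as
above):

	#include <stdio.h>

	int main(void)
	{
		FILE *f;
		char line[256];

		/* dump the current clients, like "cat /mnt/ion/carveout_heap" */
		f = fopen("/mnt/ion/carveout_heap", "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);

		/* trigger the shrink for adj -1, like "echo -1 > /mnt/ion/carveout_heap" */
		f = fopen("/mnt/ion/carveout_heap", "w");
		if (!f)
			return 1;
		fprintf(f, "-1\n");
		fclose(f);

		return 0;
	}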
Signed-off-by: Zhangfei Gao <zhangfei.gao@marvell.com>
---
 drivers/gpu/ion/ion.c |  118 +++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 114 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 34c12df..7ffb642 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -32,6 +32,8 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
+#include <linux/oom.h>
+#include <linux/delay.h>

 #include "ion_priv.h"

@@ -336,6 +338,8 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
 	rb_insert_color(&handle->node, &client->handles);
 }

+static int ion_shrink(struct ion_heap *heap, int kill_adj);
+
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 			     size_t align, unsigned int heap_mask,
 			     unsigned int flags)
@@ -344,6 +348,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 	struct ion_handle *handle;
 	struct ion_device *dev = client->dev;
 	struct ion_buffer *buffer = NULL;
+	struct ion_heap *heap = NULL;

 	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
 		 align, heap_mask, flags);
@@ -358,9 +363,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,

 	len = PAGE_ALIGN(len);

+retry:
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
-		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
+		heap = rb_entry(n, struct ion_heap, node);
 		/* if the client doesn't support this heap type */
 		if (!((1 << heap->type) & client->heap_mask))
 			continue;
@@ -373,6 +379,11 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 	}
 	mutex_unlock(&dev->lock);

+	if (buffer == ERR_PTR(-ENOMEM)) {
+		if (!ion_shrink(heap, 0))
+			goto retry;
+	}
+
 	if (buffer == NULL)
 		return ERR_PTR(-ENODEV);
@@ -1144,7 +1155,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
 	struct ion_device *dev = heap->dev;
 	struct rb_node *n;

-	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+	seq_printf(s, "%16.s %16.s %16.s %16.s\n",
+		   "client", "pid", "size", "oom_score_adj");
 	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
 		struct ion_client *client = rb_entry(n, struct ion_client,
@@ -1156,8 +1168,9 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
 			char task_comm[TASK_COMM_LEN];

 			get_task_comm(task_comm, client->task);
-			seq_printf(s, "%16.s %16u %16u\n", task_comm,
-				   client->pid, size);
+			seq_printf(s, "%16.s %16u %16u %16u\n", task_comm,
+				   client->pid, size,
+				   client->task->signal->oom_score_adj);
The oom_score_adj is of int type and can have negative values. %16d would be a better format-specifier.
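
Something like this, perhaps (untested, just to illustrate the %16d suggestion):

			seq_printf(s, "%16.s %16u %16u %16d\n", task_comm,
				   client->pid, size,
				   client->task->signal->oom_score_adj);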
 		} else {
 			seq_printf(s, "%16.s %16u %16u\n", client->name,
 				   client->pid, size);
@@ -1171,13 +1184,110 @@ static int ion_debug_heap_open(struct inode *inode, struct file *file)
 	return single_open(file, ion_debug_heap_show, inode->i_private);
 }

+static ssize_t
+ion_debug_heap_write(struct file *file, const char __user *ubuf,
+		     size_t count, loff_t *ppos)
+{
+	struct ion_heap *heap =
+		((struct seq_file *)file->private_data)->private;
+	char buf[16];
+	long kill_adj, ret;
+
+	memset(buf, 0, sizeof(buf));
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	ret = kstrtol(buf, 10, &kill_adj);
+	if (ret)
+		return ret;
+
+	ion_shrink(heap, kill_adj);
+
+	return count;
+}
+
 static const struct file_operations debug_heap_fops = {
 	.open = ion_debug_heap_open,
+	.write = ion_debug_heap_write,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = single_release,
 };
+/*
+ * ion_shrink
+ * kill all tasks that refer to the buffers of the selected task
+ */
+static int ion_shrink(struct ion_heap *heap, int kill_adj)
+{
+	struct rb_node *n;
+	struct ion_client *client = NULL;
+	struct ion_device *dev = heap->dev;
+	struct task_struct *selected = NULL;
+	int selected_size = 0;
+	int selected_oom_score_adj = 0;
+
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		size_t size;
+		struct task_struct *p;
+
+		client = rb_entry(n, struct ion_client, node);
+		if (!client->task)
+			continue;
+		p = client->task;
+
+		if ((p->signal->oom_score_adj <= kill_adj) ||
+		    (p->signal->oom_score_adj < selected_oom_score_adj))
+			continue;
+
+		size = ion_debug_heap_total(client, heap->type);
+		if (!size)
+			continue;
+		if (size < selected_size)
+			continue;
+
+		selected = p;
+		selected_size = size;
+		selected_oom_score_adj = p->signal->oom_score_adj;
+	}
+
+	if (selected) {
+		/* kill all processes that refer to buffers shared with this client */
+		mutex_lock(&client->lock);
+		for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+			struct rb_node *r;
+			struct ion_client *c;
+			struct ion_handle *handle = rb_entry(n,
+							     struct ion_handle,
+							     node);
+
+			for (r = rb_first(&dev->clients); r; r = rb_next(r)) {
+				struct ion_handle *h;
+
+				c = rb_entry(r, struct ion_client, node);
+				h = ion_handle_lookup(c, handle->buffer);
+				if (!IS_ERR_OR_NULL(h)) {
+					send_sig(SIGKILL, c->task, 0);
+					pr_info("SIGKILL pid: %u\n",
+						c->task->pid);
+				}
+			}
+		}
+		mutex_unlock(&client->lock);
+
+		send_sig(SIGKILL, selected, 0);
+		set_tsk_thread_flag(selected, TIF_MEMDIE);
+		pr_info("SIGKILL pid: %u size: %u adj: %u\n",
+			selected->pid, selected_size,
+			selected_oom_score_adj);
+		msleep(20);
+
+		return 0;
+	}
+
+	return -EAGAIN;
+}
+
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
 	struct rb_node **p = &dev->heaps.rb_node;
--
1.7.1
Linaro-mm-sig mailing list
Linaro-mm-sig@lists.linaro.org
http://lists.linaro.org/mailman/listinfo/linaro-mm-sig