This is a note to let you know that I've just added the patch titled
Revert "mlx5: move affinity hints assignments to generic code"
to the 4.14-stable tree which can be found at: http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git%3Ba=su...
The filename of the patch is: revert-mlx5-move-affinity-hints-assignments-to-generic-code.patch and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
From foo@baz Sun Dec 31 11:12:48 CET 2017
From: Saeed Mahameed saeedm@mellanox.com Date: Fri, 10 Nov 2017 15:59:52 +0900 Subject: Revert "mlx5: move affinity hints assignments to generic code"
From: Saeed Mahameed saeedm@mellanox.com
[ Upstream commit 231243c82793428467524227ae02ca451e6a98e7 ]
Before the offending commit, mlx5 core did the IRQ affinity itself, and it seems that the new generic code has some drawbacks, one of them being the lack of user ability to modify irq affinity after the initial affinity values got assigned.
The issue is still being discussed and a solution in the new generic code is required, until then we need to revert this patch.
This fixes the following issue: echo <new affinity> > /proc/irq/<x>/smp_affinity fails with -EIO
This reverts commit a435393acafbf0ecff4deb3e3cb554b34f0d0664. Note: kept mlx5_get_vector_affinity in include/linux/mlx5/driver.h since it is used in mlx5_ib driver.
Fixes: a435393acafb ("mlx5: move affinity hints assignments to generic code") Cc: Sagi Grimberg sagi@grimberg.me Cc: Thomas Gleixner tglx@linutronix.de Cc: Jes Sorensen jsorensen@fb.com Reported-by: Jes Sorensen jsorensen@fb.com Signed-off-by: Saeed Mahameed saeedm@mellanox.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 45 ++++++------- drivers/net/ethernet/mellanox/mlx5/core/main.c | 75 ++++++++++++++++++++-- include/linux/mlx5/driver.h | 1 4 files changed, 93 insertions(+), 29 deletions(-)
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -590,6 +590,7 @@ struct mlx5e_channel { struct mlx5_core_dev *mdev; struct mlx5e_tstamp *tstamp; int ix; + int cpu; };
struct mlx5e_channels { --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -71,11 +71,6 @@ struct mlx5e_channel_param { struct mlx5e_cq_param icosq_cq; };
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix) -{ - return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix); -} - static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { return MLX5_CAP_GEN(mdev, striding_rq) && @@ -452,17 +447,16 @@ static int mlx5e_rq_alloc_mpwqe_info(str int wq_sz = mlx5_wq_ll_get_size(&rq->wq); int mtt_sz = mlx5e_get_wqe_mtt_sz(); int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; - int node = mlx5e_get_node(c->priv, c->ix); int i;
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), - GFP_KERNEL, node); + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) goto err_out;
/* We allocate more than mtt_sz as we will align the pointer */ - rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, - GFP_KERNEL, node); + rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL, + cpu_to_node(c->cpu)); if (unlikely(!rq->mpwqe.mtt_no_align)) goto err_free_wqe_info;
@@ -570,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_c int err; int i;
- rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + rqp->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); @@ -636,8 +630,7 @@ static int mlx5e_alloc_rq(struct mlx5e_c default: /* MLX5_WQ_TYPE_LINKED_LIST */ rq->wqe.frag_info = kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), - GFP_KERNEL, - mlx5e_get_node(c->priv, c->ix)); + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->wqe.frag_info) { err = -ENOMEM; goto err_rq_wq_destroy; @@ -1007,13 +1000,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5 sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode;
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy;
@@ -1060,13 +1053,13 @@ static int mlx5e_alloc_icosq(struct mlx5 sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map;
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy;
@@ -1132,13 +1125,13 @@ static int mlx5e_alloc_txqsq(struct mlx5 if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy;
@@ -1510,8 +1503,8 @@ static int mlx5e_alloc_cq(struct mlx5e_c struct mlx5_core_dev *mdev = c->priv->mdev; int err;
- param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix); - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.buf_numa_node = cpu_to_node(c->cpu); + param->wq.db_numa_node = cpu_to_node(c->cpu); param->eq_ix = c->ix;
err = mlx5e_alloc_cq_common(mdev, param, cq); @@ -1610,6 +1603,11 @@ static void mlx5e_close_cq(struct mlx5e_ mlx5e_free_cq(cq); }
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) +{ + return cpumask_first(priv->mdev->priv.irq_info[ix].mask); +} + static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@ -1758,12 +1756,13 @@ static int mlx5e_open_channel(struct mlx { struct mlx5e_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; unsigned int irq; int err; int eqn;
- c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix)); + c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM;
@@ -1771,6 +1770,7 @@ static int mlx5e_open_channel(struct mlx c->mdev = priv->mdev; c->tstamp = &priv->tstamp; c->ix = ix; + c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); @@ -1859,8 +1859,7 @@ static void mlx5e_activate_channel(struc for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); mlx5e_activate_rq(&c->rq); - netif_set_xps_queue(c->netdev, - mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix); + netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix); }
static void mlx5e_deactivate_channel(struct mlx5e_channel *c) --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -316,9 +316,6 @@ static int mlx5_alloc_irq_vectors(struct { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; - struct irq_affinity irqdesc = { - .pre_vectors = MLX5_EQ_VEC_COMP_BASE, - }; int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec;
@@ -332,10 +329,9 @@ static int mlx5_alloc_irq_vectors(struct if (!priv->irq_info) goto err_free_msix;
- nvec = pci_alloc_irq_vectors_affinity(dev->pdev, + nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1, nvec, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, - &irqdesc); + PCI_IRQ_MSIX); if (nvec < 0) return nvec;
@@ -621,6 +617,63 @@ u64 mlx5_read_internal_timer(struct mlx5 return (u64)timer_l | (u64)timer_h1 << 32; }
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), + priv->irq_info[i].mask); + + if (IS_ENABLED(CONFIG_SMP) && + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); + + return 0; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + irq_set_affinity_hint(irq, NULL); + free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { + err = mlx5_irq_set_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + mlx5_irq_clear_affinity_hint(mdev, i); + + return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) + mlx5_irq_clear_affinity_hint(mdev, i); +} + int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn) { @@ -1093,6 +1146,12 @@ static int mlx5_load_one(struct mlx5_cor goto err_stop_eqs; }
+ err = mlx5_irq_set_affinity_hints(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); + goto err_affinity_hints; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1150,6 +1209,9 @@ err_sriov: mlx5_cleanup_fs(dev);
err_fs: + mlx5_irq_clear_affinity_hints(dev); + +err_affinity_hints: free_comp_eqs(dev);
err_stop_eqs: @@ -1218,6 +1280,7 @@ static int mlx5_unload_one(struct mlx5_c
mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); + mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_put_uars_page(dev, priv->uar); --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -546,6 +546,7 @@ struct mlx5_core_sriov { };
struct mlx5_irq_info { + cpumask_var_t mask; char name[MLX5_MAX_IRQ_NAME]; };
Patches currently in stable-queue which might be from saeedm@mellanox.com are
queue-4.14/net-mlx5-fix-rate-limit-packet-pacing-naming-and-struct.patch queue-4.14/revert-mlx5-move-affinity-hints-assignments-to-generic-code.patch queue-4.14/net-mlx5-fpga-return-einval-if-size-is-zero.patch queue-4.14/net-mlx5e-fix-possible-deadlock-of-vxlan-lock.patch queue-4.14/net-mlx5e-prevent-possible-races-in-vxlan-control-flow.patch queue-4.14/net-mlx5-fix-error-flow-in-create_qp-command.patch queue-4.14/net-mlx5e-add-refcount-to-vxlan-structure.patch queue-4.14/net-mlx5e-fix-features-check-of-ipv6-traffic.patch
This is a note to let you know that I've just added the patch titled
Revert "mlx5: move affinity hints assignments to generic code"
to the 4.14-stable tree which can be found at: http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git%3Ba=su...
The filename of the patch is: revert-mlx5-move-affinity-hints-assignments-to-generic-code.patch and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
This patch in its current form, breaks nvme-rdma affinity settings. I've mentioned that in the patch review and Saeed promised to send a fix for it (which I haven't seen so far).
Saeed?
On Sun, Dec 31, 2017 at 12:47:57PM +0200, Sagi Grimberg wrote:
This is a note to let you know that I've just added the patch titled
Revert "mlx5: move affinity hints assignments to generic code"
to the 4.14-stable tree which can be found at: http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git%3Ba=su...
The filename of the patch is: revert-mlx5-move-affinity-hints-assignments-to-generic-code.patch and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
This patch in its current form, breaks nvme-rdma affinity settings. I've mentioned that in the patch review and Saeed promised to send a fix for it (which I haven't seen so far).
So things are broken in Linus's tree?
Usually reverting is the safe thing to do, we should just be now back at the state the code was in before the patch, or has something else changed here?
thanks,
greg k-h
So things are broken in Linus's tree?
I wasn't aware that this landed in Linus tree.
CC'ing DaveM.
Usually reverting is the safe thing to do, we should just be now back at the state the code was in before the patch, or has something else changed here?
The patch title is misleading, it's not a revert, but rather a partial unwind of code addition. The breakage lives on the RDMA interface of the driver (which has interfaces to net and infiniband) as some RDMA kernel consumers are relying on the affinity assignments done when passing PCI_IRQ_AFFINITY to pci_alloc_irq_vectors(). The fix is pretty simple, but as said, wasn't submitted yet.
I assume that this went in because the review came after Dave took it.
On Sun, Dec 31, 2017 at 01:37:53PM +0200, Sagi Grimberg wrote:
So things are broken in Linus's tree?
I wasn't aware that this landed in Linus tree.
That's the only way it would show up in the stable tree :)
Please work on resolving this upstream and I will be glad to take whatever patch happens there in the stable tree afterward.
thanks,
greg k-h
So things are broken in Linus's tree?
I wasn't aware that this landed in Linus tree.
That's the only way it would show up in the stable tree :)
I know, was just expressing my surprise..
Please work on resolving this upstream and I will be glad to take whatever patch happens there in the stable tree afterward.
Saeed, are you submitting a patch for this or should I?
On 12/31/2017 4:00 AM, Sagi Grimberg wrote:
So things are broken in Linus's tree?
I wasn't aware that this landed in Linus tree.
That's the only way it would show up in the stable tree :)
I know, was just expressing my surprise..
Please work on resolving this upstream and I will be glad to take whatever patch happens there in the stable tree afterward.
Saeed, are you submitting a patch for this or should I?
Yes, I will submit the fix this week I will CC you and Greg, so you will ack it.
Thanks, Saeed.
On 12/31/2017 3:37 AM, Sagi Grimberg wrote:
So things are broken in Linus's tree?
I wasn't aware that this landed in Linus tree.
CC'ing DaveM.
Usually reverting is the safe thing to do, we should just be now back at the state the code was in before the patch, or has something else changed here?
The patch title is misleading, it's not a revert, but rather a partial unwind of code addition. The breakage lives on the RDMA interface of
Hi Sagi,
The commit message perfectly states that this revert is slightly modified to keep compilation working, it is not partial.
the driver (which has interfaces to net and infiniband) as some RDMA kernel consumers are relying on the affinity assignments done when passing PCI_IRQ_AFFINITY to pci_alloc_irq_vectors(). The fix is pretty simple, but as said, wasn't submitted yet.
I assume that this went in because the review came after Dave took it.
We had two options here 1. revert the whole series that introduced the vector affinity infrastructure for RDMA. 2. revert only mlx5 core patch "mlx5: move affinity hints assignments to generic code" with a little change to make it compile with the RDMA infrastructure.
I believe the required fix on top of this revert is to remove the RDMA get_vector_affinity callback from the mlx5_ib ib_dev structure, see below.
I can send it; all I need to know is where to send it. It should go through the rdma-rc tree, if there is such a thing?
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5d6fba986fa5..ddd0b881a4be 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4256,14 +4256,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev) mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); }
-static const struct cpumask * -mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) -{ - struct mlx5_ib_dev *dev = to_mdev(ibdev); - - return mlx5_get_vector_affinity(dev->mdev, comp_vector); -} - /* The mlx5_ib_multiport_mutex should be held when calling this function */ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi) @@ -4632,7 +4624,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
On 1/3/2018 1:59 PM, Saeed Mahameed wrote:
On 12/31/2017 3:37 AM, Sagi Grimberg wrote:
So things are broken in Linus's tree?
I wasn't aware that this landed in Linus tree.
CC'ing DaveM.
Usually reverting is the safe thing to do, we should just be now back at the state the code was in before the patch, or has something else changed here?
The patch title is misleading, it's not a revert, but rather a partial unwind of code addition. The breakage lives on the RDMA interface of
Hi Sagi,
The commit message perfectly states that this revert is slightly modified to keep compilation working, it is not partial.
the driver (which has interfaces to net and infiniband) as some RDMA kernel consumers are relying on the affinity assignments done when passing PCI_IRQ_AFFINITY to pci_alloc_irq_vectors(). The fix is pretty simple, but as said, wasn't submitted yet.
I assume that this went in because the review came after Dave took it.
We had two options here
- revert the whole series that introduced the vector affinity
infrastructure for RDMA. 2. revert only mlx5 core patch "mlx5: move affinity hints assignments to generic code" with a little change to make it compile with the RDMA infrastructure.
I believe the required fix on top of this revert is to remove the RDMA get_vector_affinity callback from the mlx5_ib ib_dev structure, see below.
I can send it; all I need to know is where to send it. It should go through the rdma-rc tree, if there is such a thing?
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5d6fba986fa5..ddd0b881a4be 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4256,14 +4256,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev) mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); }
-static const struct cpumask * -mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) -{ - struct mlx5_ib_dev *dev = to_mdev(ibdev);
- return mlx5_get_vector_affinity(dev->mdev, comp_vector); -}
Oh, I just remembered that I had suggested to use cpumask_first(mdev->priv.irq_info[vector].mask); instead of the current broken implementation in mlx5, I see that you agreed to test it in another email thread.
I will send a patch very soon, but someone has to test it ASAP so we don't miss 4.15.
/* The mlx5_ib_multiport_mutex should be held when calling this function */ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi) @@ -4632,7 +4624,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
linux-stable-mirror@lists.linaro.org