block/blk-cgroup.c  +40 −44

@@ -376,9 +376,7 @@ int blkiocg_del_blkio_group(struct blkio_group *blkg)
 
 	rcu_read_lock();
 	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-	if (!css)
-		goto out;
-
+	if (css) {
 	blkcg = container_of(css, struct blkio_cgroup, css);
 	spin_lock_irqsave(&blkcg->lock, flags);
 	if (!hlist_unhashed(&blkg->blkcg_node)) {
@@ -386,7 +384,8 @@ int blkiocg_del_blkio_group(struct blkio_group *blkg)
 		ret = 0;
 	}
 	spin_unlock_irqrestore(&blkcg->lock, flags);
-out:
+	}
+
 	rcu_read_unlock();
 	return ret;
 }
@@ -815,17 +814,15 @@ static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
 
 	seq_printf(m, "dev\tweight\n");
 
 	blkcg = cgroup_to_blkio_cgroup(cgrp);
-	if (list_empty(&blkcg->policy_list))
-		goto out;
-
+	if (!list_empty(&blkcg->policy_list)) {
 	spin_lock_irq(&blkcg->lock);
 	list_for_each_entry(pn, &blkcg->policy_list, node) {
 		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
 				MINOR(pn->dev), pn->weight);
 	}
 	spin_unlock_irq(&blkcg->lock);
+	}
 
-out:
 	return 0;
 }
@@ -917,12 +914,12 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 	struct blkio_policy_node *pn, *pntmp;
 
 	rcu_read_lock();
-remove_entry:
+	do {
 	spin_lock_irqsave(&blkcg->lock, flags);
 
 	if (hlist_empty(&blkcg->blkg_list)) {
 		spin_unlock_irqrestore(&blkcg->lock, flags);
-		goto done;
+		break;
 	}
 
 	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
@@ -933,24 +930,23 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 	spin_unlock_irqrestore(&blkcg->lock, flags);
 
 	/*
-	 * This blkio_group is being unlinked as associated cgroup is going
-	 * away. Let all the IO controlling policies know about this event.
-	 *
-	 * Currently this is static call to one io controlling policy. Once
-	 * we have more policies in place, we need some dynamic registration
-	 * of callback function.
+	 * This blkio_group is being unlinked as associated cgroup is
+	 * going away. Let all the IO controlling policies know about
+	 * this event. Currently this is static call to one io
+	 * controlling policy. Once we have more policies in place, we
+	 * need some dynamic registration of callback function.
 	 */
 	spin_lock(&blkio_list_lock);
 	list_for_each_entry(blkiop, &blkio_list, list)
 		blkiop->ops.blkio_unlink_group_fn(key, blkg);
 	spin_unlock(&blkio_list_lock);
-	goto remove_entry;
+	} while (1);
 
-done:
 	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
 		blkio_policy_delete_node(pn);
 		kfree(pn);
 	}
+
 	free_css_id(&blkio_subsys, &blkcg->css);
 	rcu_read_unlock();
 	if (blkcg != &blkio_root_cgroup)
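Taken together, the hunks above are a pure control-flow cleanup: the early "goto out" exits become ordinary if-blocks, and the remove_entry:/goto remove_entry back-edge in blkiocg_destroy() becomes a do { ... } while (1) loop that leaves via break. A minimal, self-contained userspace sketch of the same before/after rewrite (hypothetical function names, not kernel code) could look like this:

#include <stdio.h>

/* Old style: early exit and loop expressed with labels and goto. */
static int count_positive_goto(const int *v, int n)
{
	int i = 0, count = 0;

	if (n <= 0)
		goto out;		/* early exit via label */
again:
	if (v[i] > 0)
		count++;
	if (++i < n)
		goto again;		/* loop via backward goto */
out:
	return count;
}

/* New style: the same logic with an if-block and a do/while loop. */
static int count_positive_structured(const int *v, int n)
{
	int i = 0, count = 0;

	if (n > 0) {			/* was: if (n <= 0) goto out; */
		do {
			if (v[i] > 0)
				count++;
		} while (++i < n);	/* was: goto again; */
	}

	return count;
}

int main(void)
{
	int v[] = { 3, -1, 4, -1, 5 };
	int n = (int)(sizeof(v) / sizeof(v[0]));

	/* Both variants print the same count. */
	printf("%d %d\n", count_positive_goto(v, n),
	       count_positive_structured(v, n));
	return 0;
}

Both variants compute the same result; the structured form simply drops the labels, which is all the patch does for the three functions touched here.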