@@ -3076,11 +3076,28 @@ __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
void *type_data, bool err_stop, bool rtnl_held)
{
+ bool take_rtnl = false;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
+
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
@@ -3095,9 +3112,23 @@ int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
enum tc_setup_type type, void *type_data, bool err_stop,
u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
+ bool take_rtnl = false;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
/* Make sure all netdevs sharing this block are offload-capable. */
if (block->nooffloaddevcnt && err_stop) {
ok_count = -EOPNOTSUPP;
@@ -3114,6 +3145,8 @@ int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
}
errout:
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_add);
@@ -3130,9 +3163,23 @@ int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
u32 *new_flags, unsigned int *new_in_hw_count,
bool rtnl_held)
{
+ bool take_rtnl = false;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
/* Make sure all netdevs sharing this block are offload-capable. */
if (block->nooffloaddevcnt && err_stop) {
ok_count = -EOPNOTSUPP;
@@ -3153,6 +3200,8 @@ int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
}
errout:
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_replace);
@@ -3165,9 +3214,23 @@ int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
enum tc_setup_type type, void *type_data, bool err_stop,
u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
+ bool take_rtnl = false;
int ok_count;
+retry:
+ if (take_rtnl)
+ rtnl_lock();
down_read(&block->cb_lock);
+ /* Need to obtain rtnl lock if block is bound to devs that require it.
+ * In block bind code cb_lock is obtained while holding rtnl, so we must
+ * obtain the locks in the same order here.
+ */
+ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
+ up_read(&block->cb_lock);
+ take_rtnl = true;
+ goto retry;
+ }
+
ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
@@ -3175,6 +3238,8 @@ int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
tp->ops->hw_del(tp, type_data);
up_read(&block->cb_lock);
+ if (take_rtnl)
+ rtnl_unlock();
return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);