From patchwork Mon Feb 1 08:34:42 2016 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Amir Vadai X-Patchwork-Id: 576369 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming@ozlabs.org Delivered-To: patchwork-incoming@ozlabs.org Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 07BA0140784 for ; Mon, 1 Feb 2016 19:36:57 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752307AbcBAIgx (ORCPT ); Mon, 1 Feb 2016 03:36:53 -0500 Received: from [193.47.165.129] ([193.47.165.129]:32903 "EHLO mellanox.co.il" rhost-flags-FAIL-FAIL-OK-FAIL) by vger.kernel.org with ESMTP id S1752262AbcBAIgv (ORCPT ); Mon, 1 Feb 2016 03:36:51 -0500 Received: from Internal Mail-Server by MTLPINE1 (envelope-from amir@vadai.me) with ESMTPS (AES256-SHA encrypted); 1 Feb 2016 10:35:47 +0200 Received: from dev-h-vrt-095.mth.labs.mlnx (dev-h-vrt-095.mth.labs.mlnx [10.194.95.1]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id u118ZkWj020157; Mon, 1 Feb 2016 10:35:47 +0200 From: Amir Vadai To: "David S. Miller" , netdev@vger.kernel.org, John Fastabend Cc: Or Gerlitz , Hadar Har-Zion , Jiri Pirko , Jamal Hadi Salim , Amir Vadai Subject: [RFC net-next 6/9] net/cls_flower: Introduce hardware offloading Date: Mon, 1 Feb 2016 08:34:42 +0000 Message-Id: <1454315685-32202-7-git-send-email-amir@vadai.me> X-Mailer: git-send-email 2.7.0 In-Reply-To: <1454315685-32202-1-git-send-email-amir@vadai.me> References: <1454315685-32202-1-git-send-email-amir@vadai.me> Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org During initialization, tcf_exts_offload_init() is called to initialize the list of action descriptions. Later on, the classifier description is prepared and sent to the switchdev using switchdev_port_flow_add(). 
When offloaded, fl_classify() is a NOP - already done in hardware. Signed-off-by: Amir Vadai --- include/uapi/linux/pkt_cls.h | 1 + net/sched/cls_flower.c | 54 ++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 4398737..c18e82d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -416,6 +416,7 @@ enum { TCA_FLOWER_KEY_TCP_DST, /* be16 */ TCA_FLOWER_KEY_UDP_SRC, /* be16 */ TCA_FLOWER_KEY_UDP_DST, /* be16 */ + TCA_FLOWER_OFFLOAD, /* flag */ __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 95b0212..e36d408 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -22,6 +22,7 @@ #include #include #include +#include struct fl_flow_key { int indev_ifindex; @@ -56,6 +57,7 @@ struct cls_fl_head { struct list_head filters; struct rhashtable_params ht_params; struct rcu_head rcu; + bool offload; }; struct cls_fl_filter { @@ -67,6 +69,7 @@ struct cls_fl_filter { struct list_head list; u32 handle; struct rcu_head rcu; + struct net_device *indev; }; static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) @@ -123,6 +126,9 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct fl_flow_key skb_key; struct fl_flow_key skb_mkey; + if (head->offload) + return -1; + fl_clear_masked_range(&skb_key, &head->mask); skb_key.indev_ifindex = skb->skb_iif; /* skb_flow_dissect() does not set n_proto in case an unknown protocol, @@ -174,6 +180,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force) return false; list_for_each_entry_safe(f, next, &head->filters, list) { + if (head->offload) + switchdev_port_flow_del(f->indev, (unsigned long)f); + list_del_rcu(&f->list); call_rcu(&f->rcu, fl_destroy_filter); } @@ -396,9 +405,11 @@ static int fl_check_assign_mask(struct cls_fl_head *head, } static int fl_set_parms(struct net *net, struct tcf_proto *tp, 
+ struct cls_fl_head *head, struct cls_fl_filter *f, struct fl_flow_mask *mask, unsigned long base, struct nlattr **tb, - struct nlattr *est, bool ovr) + struct nlattr *est, bool ovr, + struct switchdev_obj_port_flow_act *actions) { struct tcf_exts e; int err; @@ -413,6 +424,8 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, tcf_bind_filter(tp, &f->res, base); } + head->offload = nla_get_flag(tb[TCA_FLOWER_OFFLOAD]); + err = fl_set_key(net, tb, &f->key, &mask->key); if (err) goto errout; @@ -420,6 +433,24 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, fl_mask_update_range(mask); fl_set_masked_key(&f->mkey, &f->key, mask); + if (head->offload) { + if (!f->key.indev_ifindex) { + pr_err("indev must be set when using offloaded filter\n"); + err = -EINVAL; + goto errout; + } + + f->indev = __dev_get_by_index(net, f->key.indev_ifindex); + if (!f->indev) { + err = -EINVAL; + goto errout; + } + + err = tcf_exts_offload_init(&e, actions); + if (err) + goto errout; + } + tcf_exts_change(tp, &f->exts, &e); return 0; @@ -459,6 +490,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, struct cls_fl_filter *fnew; struct nlattr *tb[TCA_FLOWER_MAX + 1]; struct fl_flow_mask mask = {}; + struct switchdev_obj_port_flow_act actions = {}; int err; if (!tca[TCA_OPTIONS]) @@ -486,7 +518,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } fnew->handle = handle; - err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr); + err = fl_set_parms(net, tp, head, fnew, &mask, base, tb, + tca[TCA_RATE], ovr, &actions); if (err) goto errout; @@ -494,6 +527,17 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, if (err) goto errout; + if (head->offload) { + err = switchdev_port_flow_add(fnew->indev, + &head->dissector, + &mask.key, + &fnew->key, + &actions, + (unsigned long)fnew); + if (err) + goto errout; + } + err = rhashtable_insert_fast(&head->ht, &fnew->ht_node, head->ht_params); if (err) @@ -505,6 
+549,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, *arg = (unsigned long) fnew; if (fold) { + if (head->offload) { + err = switchdev_port_flow_del(fold->indev, + (unsigned long)fold); + if (err) + goto errout; + } list_replace_rcu(&fold->list, &fnew->list); tcf_unbind_filter(tp, &fold->res); call_rcu(&fold->rcu, fl_destroy_filter);