From patchwork Tue Mar  1 14:24:43 2016
X-Patchwork-Submitter: Amir Vadai
X-Patchwork-Id: 590545
X-Patchwork-Delegate: davem@davemloft.net
From: Amir Vadai
To: "David S. Miller"
Cc: netdev@vger.kernel.org, Or Gerlitz, John Fastabend, Saeed Mahameed,
	Hadar Har-Zion, Jiri Pirko, Amir Vadai
Subject: [PATCH net-next 1/8] net/flower: Introduce hardware offload support
Date: Tue, 1 Mar 2016 16:24:43 +0200
Message-Id: <1456842290-7844-2-git-send-email-amir@vadai.me>
X-Mailer: git-send-email 2.7.0
In-Reply-To: <1456842290-7844-1-git-send-email-amir@vadai.me>
References: <1456842290-7844-1-git-send-email-amir@vadai.me>

This patch is based on a patch by John Fastabend. It adds hardware
offload support for cls_flower. A filter that is successfully offloaded
to hardware is not added to the hashtable and will not be processed in
software.
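To illustrate the intended driver-side contract (this sketch is not
part of the patch; the foo_* driver structures and helpers are
hypothetical), a driver would accept the new TC_SETUP_CLSFLOWER type in
its ndo_setup_tc() callback and dispatch on the command:

	static int foo_setup_tc(struct net_device *dev, u32 handle,
				__be16 proto, struct tc_to_netdev *tc)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (tc->type != TC_SETUP_CLSFLOWER)
			return -EOPNOTSUPP;

		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			/* Translate dissector/key/mask/exts into a
			 * hardware rule, remembered under the opaque
			 * cookie supplied by cls_flower.
			 */
			return foo_flower_replace(priv, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			/* Tear down the hardware rule previously
			 * installed for this cookie.
			 */
			return foo_flower_destroy(priv,
						  tc->cls_flower->cookie);
		default:
			return -EOPNOTSUPP;
		}
	}

A non-zero return from the TC_CLSFLOWER_REPLACE command makes
cls_flower fall back to inserting the filter into its hashtable, i.e.
to pure software classification.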
Suggested-by: John Fastabend
Signed-off-by: Amir Vadai
---
 include/linux/netdevice.h    |  2 ++
 include/net/pkt_cls.h        | 14 +++++++++
 include/uapi/linux/pkt_cls.h |  2 ++
 net/sched/cls_flower.c       | 75 +++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 88 insertions(+), 5 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e52077f..0fd329a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -785,6 +785,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
 enum {
 	TC_SETUP_MQPRIO,
 	TC_SETUP_CLSU32,
+	TC_SETUP_CLSFLOWER,
 };
 
 struct tc_cls_u32_offload;
@@ -794,6 +795,7 @@ struct tc_to_netdev {
 	union {
 		u8 tc;
 		struct tc_cls_u32_offload *cls_u32;
+		struct tc_cls_flower_offload *cls_flower;
 	};
 };
 
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index bea14ee..beb2ee1 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -409,4 +409,18 @@ static inline bool tc_should_offload(struct net_device *dev, u32 flags)
 	return true;
 }
 
+enum {
+	TC_CLSFLOWER_REPLACE,
+	TC_CLSFLOWER_DESTROY,
+};
+
+struct tc_cls_flower_offload {
+	int command;
+	u64 cookie;
+	struct flow_dissector *dissector;
+	struct fl_flow_key *mask;
+	struct fl_flow_key *key;
+	struct tcf_exts *exts;
+};
+
 #endif
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 9874f568..c43c5f7 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -417,6 +417,8 @@ enum {
 	TCA_FLOWER_KEY_TCP_DST,		/* be16 */
 	TCA_FLOWER_KEY_UDP_SRC,		/* be16 */
 	TCA_FLOWER_KEY_UDP_DST,		/* be16 */
+
+	TCA_FLOWER_FLAGS,
 	__TCA_FLOWER_MAX,
 };
 
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 95b0212..e599bea 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -165,6 +165,53 @@ static void fl_destroy_filter(struct rcu_head *head)
 	kfree(f);
 }
 
+static int fl_hw_destroy_filter(struct tcf_proto *tp, u64 cookie)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_flower_offload offload = {0};
+	struct tc_to_netdev tc;
+
+	if (!tc_should_offload(dev, 0))
+		return -ENOTSUPP;
+
+	offload.command = TC_CLSFLOWER_DESTROY;
+	offload.cookie = cookie;
+
+	tc.type = TC_SETUP_CLSFLOWER;
+	tc.cls_flower = &offload;
+
+	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+					     &tc);
+}
+
+static int fl_hw_replace_filter(struct tcf_proto *tp,
+				struct flow_dissector *dissector,
+				struct fl_flow_key *mask,
+				struct fl_flow_key *key,
+				struct tcf_exts *actions,
+				u64 cookie, u32 flags)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_flower_offload offload = {0};
+	struct tc_to_netdev tc;
+
+	if (!tc_should_offload(dev, flags))
+		return -ENOTSUPP;
+
+	offload.command = TC_CLSFLOWER_REPLACE;
+	offload.cookie = cookie;
+	offload.dissector = dissector;
+	offload.mask = mask;
+	offload.key = key;
+	offload.exts = actions;
+
+	tc.type = TC_SETUP_CLSFLOWER;
+	tc.cls_flower = &offload;
+
+	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+					     &tc);
+}
+
 static bool fl_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -174,6 +221,7 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
 		return false;
 
 	list_for_each_entry_safe(f, next, &head->filters, list) {
+		fl_hw_destroy_filter(tp, (u64)f);
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, fl_destroy_filter);
 	}
@@ -459,6 +507,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	struct cls_fl_filter *fnew;
 	struct nlattr *tb[TCA_FLOWER_MAX + 1];
 	struct fl_flow_mask mask = {};
+	u32 flags = 0;
 	int err;
 
 	if (!tca[TCA_OPTIONS])
@@ -494,13 +543,28 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (err)
 		goto errout;
 
-	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
-				     head->ht_params);
-	if (err)
-		goto errout;
-
-	if (fold)
+	if (tb[TCA_FLOWER_FLAGS])
+		flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+	err = fl_hw_replace_filter(tp,
+				   &head->dissector,
+				   &mask.key,
+				   &fnew->key,
+				   &fnew->exts,
+				   (u64)fnew,
+				   flags);
+	if (err) {
+		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
+					     head->ht_params);
+		if (err)
+			goto errout;
+	}
+
+	if (fold) {
 		rhashtable_remove_fast(&head->ht, &fold->ht_node,
 				       head->ht_params);
+		fl_hw_destroy_filter(tp, (u64)fold);
+	}
 
 	*arg = (unsigned long) fnew;
@@ -527,6 +591,7 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg)
 	rhashtable_remove_fast(&head->ht, &f->ht_node,
 			       head->ht_params);
 	list_del_rcu(&f->list);
+	fl_hw_destroy_filter(tp, (u64)f);
 	tcf_unbind_filter(tp, &f->res);
 	call_rcu(&f->rcu, fl_destroy_filter);
 	return 0;
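
A note on the cookie, with a hypothetical sketch (not part of the
patch): cls_flower passes the filter pointer cast to u64 as the cookie,
and TC_CLSFLOWER_DESTROY carries only that cookie. A driver therefore
needs its own cookie-to-hardware-rule lookup; all names below (foo_*,
hw_index) are made up for illustration:

	#include <linux/hashtable.h>

	struct foo_flower_rule {
		struct hlist_node node;
		u64 cookie;	/* cls_flower's (u64)filter cookie */
		u32 hw_index;	/* hypothetical hardware rule id */
	};

	static DEFINE_HASHTABLE(foo_rules, 8);

	/* Remember a rule installed on TC_CLSFLOWER_REPLACE. */
	static void foo_rule_add(struct foo_flower_rule *rule)
	{
		hash_add(foo_rules, &rule->node, rule->cookie);
	}

	/* Find the rule to remove on TC_CLSFLOWER_DESTROY. */
	static struct foo_flower_rule *foo_rule_find(u64 cookie)
	{
		struct foo_flower_rule *rule;

		hash_for_each_possible(foo_rules, rule, node, cookie)
			if (rule->cookie == cookie)
				return rule;
		return NULL;
	}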