Lines Matching +full:pre +full:- +full:verified

1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
103 key_len = nfp_flow->meta.key_len; in nfp_flower_xmit_flow()
104 mask_len = nfp_flow->meta.mask_len; in nfp_flower_xmit_flow()
105 act_len = nfp_flow->meta.act_len; in nfp_flower_xmit_flow()
112 nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ; in nfp_flower_xmit_flow()
113 nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ; in nfp_flower_xmit_flow()
114 nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ; in nfp_flower_xmit_flow()
118 return -ENOMEM; in nfp_flower_xmit_flow()
121 memcpy(msg, &nfp_flow->meta, meta_len); in nfp_flower_xmit_flow()
122 memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len); in nfp_flower_xmit_flow()
123 memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len); in nfp_flower_xmit_flow()
125 nfp_flow->action_data, act_len); in nfp_flower_xmit_flow()
130 nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ; in nfp_flower_xmit_flow()
131 nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ; in nfp_flower_xmit_flow()
132 nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ; in nfp_flower_xmit_flow()
134 nfp_ctrl_tx(app->ctrl, skb); in nfp_flower_xmit_flow()
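
(Note: the nfp_flower_xmit_flow() lines above lay the control message out as metadata, unmasked key, mask, then action data, converting the three byte lengths in nfp_flow->meta to firmware long-words via NFP_FL_LW_SIZ only while the message is built and shifting them back afterwards. The following is a minimal userspace sketch of that packing, not the driver's code; it assumes NFP_FL_LW_SIZ is the shift to 4-byte words and uses simplified stand-in types.)

#include <stdint.h>
#include <string.h>

#define NFP_FL_LW_SIZ 2                  /* assumed: bytes -> 32-bit long-words shift */

/* Simplified stand-ins for the driver structures (illustrative only). */
struct flow_meta { uint16_t key_len, mask_len, act_len; };

static size_t pack_flow_msg(uint8_t *msg, const struct flow_meta *meta,
			    const uint8_t *key, const uint8_t *mask,
			    const uint8_t *acts)
{
	struct flow_meta lw = *meta;
	size_t off = 0;

	/* Lengths travel as long-words inside the message header. */
	lw.key_len  >>= NFP_FL_LW_SIZ;
	lw.mask_len >>= NFP_FL_LW_SIZ;
	lw.act_len  >>= NFP_FL_LW_SIZ;

	memcpy(msg + off, &lw, sizeof(lw));		off += sizeof(lw);
	memcpy(msg + off, key,  meta->key_len);		off += meta->key_len;
	memcpy(msg + off, mask, meta->mask_len);	off += meta->mask_len;
	memcpy(msg + off, acts, meta->act_len);		off += meta->act_len;
	return off;					/* total message size */
}
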
158 if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY || in nfp_flower_calc_opt_layer()
159 (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) { in nfp_flower_calc_opt_layer()
161 return -EOPNOTSUPP; in nfp_flower_calc_opt_layer()
164 if (enc_opts->len > 0) { in nfp_flower_calc_opt_layer()
182 switch (enc_ports->dst) { in nfp_flower_calc_udp_tun_layer()
198 return -EOPNOTSUPP; in nfp_flower_calc_udp_tun_layer()
202 if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) { in nfp_flower_calc_udp_tun_layer()
204 return -EOPNOTSUPP; in nfp_flower_calc_udp_tun_layer()
220 if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) { in nfp_flower_calc_udp_tun_layer()
222 return -EOPNOTSUPP; in nfp_flower_calc_udp_tun_layer()
231 return -EOPNOTSUPP; in nfp_flower_calc_udp_tun_layer()
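
(Note: nfp_flower_calc_udp_tun_layer() above selects the tunnel key layer from the match's destination UDP port and rejects Geneve, and Geneve options, when the firmware has not advertised the corresponding NFP_FL_FEATS_* bit. A small illustrative model follows; the port numbers are the IANA defaults and stand in for the driver's NFP_FL_VXLAN_PORT/NFP_FL_GENEVE_PORT constants.)

#include <stdint.h>

#define VXLAN_PORT  4789	/* assumed IANA default */
#define GENEVE_PORT 6081	/* assumed IANA default */

enum tun_layer { TUN_NONE, TUN_VXLAN, TUN_GENEVE };

/* Pick a tunnel key layer from the match's destination UDP port, refusing
 * Geneve when the firmware did not advertise the feature (illustrative). */
static int pick_udp_tun_layer(uint16_t dst_port, int fw_has_geneve,
			      enum tun_layer *layer)
{
	switch (dst_port) {
	case VXLAN_PORT:
		*layer = TUN_VXLAN;
		return 0;
	case GENEVE_PORT:
		if (!fw_has_geneve)
			return -1;	/* -EOPNOTSUPP in the driver */
		*layer = TUN_GENEVE;
		return 0;
	default:
		return -1;		/* unsupported tunnel port */
	}
}
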
245 struct flow_dissector *dissector = rule->match.dissector; in nfp_flower_calculate_key_layers()
247 struct nfp_flower_priv *priv = app->priv; in nfp_flower_calculate_key_layers()
253 if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) { in nfp_flower_calculate_key_layers()
255 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
259 if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR && in nfp_flower_calculate_key_layers()
260 (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R) in nfp_flower_calculate_key_layers()
262 (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) in nfp_flower_calculate_key_layers()
265 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
283 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) && in nfp_flower_calculate_key_layers()
284 vlan.key->vlan_priority) { in nfp_flower_calculate_key_layers()
286 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
288 if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ && in nfp_flower_calculate_key_layers()
300 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { in nfp_flower_calculate_key_layers()
302 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
324 if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags, in nfp_flower_calculate_key_layers()
326 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
328 if (enc_ctl.mask->addr_type != 0xffff) { in nfp_flower_calculate_key_layers()
330 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
333 ipv6_tun = enc_ctl.key->addr_type == in nfp_flower_calculate_key_layers()
336 !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) { in nfp_flower_calculate_key_layers()
338 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
342 enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) { in nfp_flower_calculate_key_layers()
344 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
349 if (memchr_inv(&ipv6_addrs.mask->dst, 0xff, in nfp_flower_calculate_key_layers()
350 sizeof(ipv6_addrs.mask->dst))) { in nfp_flower_calculate_key_layers()
352 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
356 if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) { in nfp_flower_calculate_key_layers()
358 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
368 …ack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels"); in nfp_flower_calculate_key_layers()
369 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
388 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
392 if (enc_ports.mask->dst != cpu_to_be16(~0)) { in nfp_flower_calculate_key_layers()
394 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
412 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
420 if (basic.mask && basic.mask->n_proto) { in nfp_flower_calculate_key_layers()
422 switch (basic.key->n_proto) { in nfp_flower_calculate_key_layers()
438 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
454 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
458 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
461 if (basic.mask && basic.mask->ip_proto) { in nfp_flower_calculate_key_layers()
462 switch (basic.key->ip_proto) { in nfp_flower_calculate_key_layers()
477 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
485 tcp_flags = be16_to_cpu(tcp.key->flags); in nfp_flower_calculate_key_layers()
489 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
498 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
507 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
512 switch (basic.key->n_proto) { in nfp_flower_calculate_key_layers()
525 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
536 ctl.mask->flags, extack)) in nfp_flower_calculate_key_layers()
537 return -EOPNOTSUPP; in nfp_flower_calculate_key_layers()
540 ret_key_ls->key_layer = key_layer; in nfp_flower_calculate_key_layers()
541 ret_key_ls->key_layer_two = key_layer_two; in nfp_flower_calculate_key_layers()
542 ret_key_ls->key_size = key_size; in nfp_flower_calculate_key_layers()
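
(Note: nfp_flower_calculate_key_layers() above walks the rule's flow dissector, rejects any used key outside the whitelist, and otherwise accumulates key-layer bits and the resulting key size into ret_key_ls. The sketch below models that accumulate-or-reject pattern with hypothetical layer bits and sizes; it is not the driver's layout, and for brevity the same bit stands in for both the dissector key and the layer flag.)

#include <stdint.h>

/* Hypothetical layer bits and sizes, standing in for NFP_FLOWER_LAYER_*. */
#define LAYER_PORT 0x01
#define LAYER_MAC  0x02
#define LAYER_TP   0x04
#define LAYER_IPV4 0x08

struct key_ls { uint8_t key_layer; uint16_t key_size; };

/* Accumulate key layers for the match fields a rule uses; anything the
 * whitelist does not cover makes the whole rule non-offloadable. */
static int calc_key_layers(uint64_t used_keys, uint64_t whitelist,
			   struct key_ls *ls)
{
	if (used_keys & ~whitelist)
		return -1;			/* -EOPNOTSUPP in the driver */

	ls->key_layer = LAYER_PORT | LAYER_MAC;	/* always-present layers */
	ls->key_size  = 4 + 12;			/* illustrative sizes */

	if (used_keys & LAYER_IPV4) {
		ls->key_layer |= LAYER_IPV4;
		ls->key_size  += 12;
	}
	if (used_keys & LAYER_TP) {
		ls->key_layer |= LAYER_TP;
		ls->key_size  += 4;
	}
	return 0;
}
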
556 flow_pay->meta.key_len = key_layer->key_size; in nfp_flower_allocate_new()
557 flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL); in nfp_flower_allocate_new()
558 if (!flow_pay->unmasked_data) in nfp_flower_allocate_new()
561 flow_pay->meta.mask_len = key_layer->key_size; in nfp_flower_allocate_new()
562 flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL); in nfp_flower_allocate_new()
563 if (!flow_pay->mask_data) in nfp_flower_allocate_new()
566 flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL); in nfp_flower_allocate_new()
567 if (!flow_pay->action_data) in nfp_flower_allocate_new()
570 flow_pay->nfp_tun_ipv4_addr = 0; in nfp_flower_allocate_new()
571 flow_pay->nfp_tun_ipv6 = NULL; in nfp_flower_allocate_new()
572 flow_pay->meta.flags = 0; in nfp_flower_allocate_new()
573 INIT_LIST_HEAD(&flow_pay->linked_flows); in nfp_flower_allocate_new()
574 flow_pay->in_hw = false; in nfp_flower_allocate_new()
575 flow_pay->pre_tun_rule.dev = NULL; in nfp_flower_allocate_new()
580 kfree(flow_pay->mask_data); in nfp_flower_allocate_new()
582 kfree(flow_pay->unmasked_data); in nfp_flower_allocate_new()
608 while (act_off < flow->meta.act_len) { in nfp_flower_update_merge_with_actions()
609 a = (struct nfp_fl_act_head *)&flow->action_data[act_off]; in nfp_flower_update_merge_with_actions()
610 act_id = a->jump_id; in nfp_flower_update_merge_with_actions()
619 if (push_vlan->vlan_tci) in nfp_flower_update_merge_with_actions()
620 merge->tci = cpu_to_be16(0xffff); in nfp_flower_update_merge_with_actions()
623 merge->tci = cpu_to_be16(0); in nfp_flower_update_merge_with_actions()
627 eth_broadcast_addr(&merge->l2.mac_dst[0]); in nfp_flower_update_merge_with_actions()
628 eth_broadcast_addr(&merge->l2.mac_src[0]); in nfp_flower_update_merge_with_actions()
629 memset(&merge->l4, 0xff, in nfp_flower_update_merge_with_actions()
632 memset(&merge->ipv6, 0xff, in nfp_flower_update_merge_with_actions()
635 memset(&merge->ipv4, 0xff, in nfp_flower_update_merge_with_actions()
641 merge->l2.mac_dst[i] |= eth->eth_addr_mask[i]; in nfp_flower_update_merge_with_actions()
643 merge->l2.mac_src[i] |= in nfp_flower_update_merge_with_actions()
644 eth->eth_addr_mask[ETH_ALEN + i]; in nfp_flower_update_merge_with_actions()
648 merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask; in nfp_flower_update_merge_with_actions()
649 merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask; in nfp_flower_update_merge_with_actions()
653 merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask; in nfp_flower_update_merge_with_actions()
654 merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask; in nfp_flower_update_merge_with_actions()
659 merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |= in nfp_flower_update_merge_with_actions()
660 ipv6_add->ipv6[i].mask; in nfp_flower_update_merge_with_actions()
665 merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |= in nfp_flower_update_merge_with_actions()
666 ipv6_add->ipv6[i].mask; in nfp_flower_update_merge_with_actions()
670 merge->ipv6.ip_ext.ttl |= in nfp_flower_update_merge_with_actions()
671 ipv6_tc_hl_fl->ipv6_hop_limit_mask; in nfp_flower_update_merge_with_actions()
672 merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask; in nfp_flower_update_merge_with_actions()
673 merge->ipv6.ipv6_flow_label_exthdr |= in nfp_flower_update_merge_with_actions()
674 ipv6_tc_hl_fl->ipv6_label_mask; in nfp_flower_update_merge_with_actions()
679 ports = (u8 *)&merge->l4.port_src; in nfp_flower_update_merge_with_actions()
681 ports[i] |= tport->tp_port_mask[i]; in nfp_flower_update_merge_with_actions()
685 ipv6_tun = be16_to_cpu(pre_tun->flags) & in nfp_flower_update_merge_with_actions()
692 return -EOPNOTSUPP; in nfp_flower_update_merge_with_actions()
695 act_off += a->len_lw << NFP_FL_LW_SIZ; in nfp_flower_update_merge_with_actions()
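
(Note: nfp_flower_update_merge_with_actions() above, like the later pre-action and pre-tunnel checks, walks the compiled action list through an nfp_fl_act_head-style header, dispatching on jump_id and advancing by len_lw long-words. A self-contained model of that walk, with made-up opcode values:)

#include <stdint.h>
#include <stdio.h>

#define LW_SHIFT 2	/* 4-byte long-words, cf. NFP_FL_LW_SIZ */

/* Mirror of the two-field action header the driver walks. */
struct act_head { uint8_t jump_id; uint8_t len_lw; };

/* Walk a packed action list: dispatch on jump_id, advance by the header's
 * length in long-words.  Unknown opcodes abort the walk (illustrative). */
static int walk_actions(const uint8_t *acts, size_t act_len)
{
	size_t off = 0;

	while (off < act_len) {
		const struct act_head *a = (const void *)(acts + off);

		switch (a->jump_id) {
		case 0:	/* e.g. output */
		case 1:	/* e.g. push_vlan */
			printf("action %u, %u bytes\n",
			       (unsigned)a->jump_id,
			       (unsigned)(a->len_lw << LW_SHIFT));
			break;
		default:
			return -1;	/* unexpected action */
		}
		off += (size_t)a->len_lw << LW_SHIFT;
	}
	return 0;
}
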
710 u8 *mask = flow->mask_data; in nfp_flower_populate_merge_match()
716 key_layer = meta_tci->nfp_flow_key_layer; in nfp_flower_populate_merge_match()
719 return -EOPNOTSUPP; in nfp_flower_populate_merge_match()
721 merge->tci = meta_tci->tci; in nfp_flower_populate_merge_match()
731 memcpy(&merge->l2, mask, match_size); in nfp_flower_populate_merge_match()
737 memcpy(&merge->l4, mask, match_size); in nfp_flower_populate_merge_match()
743 memcpy(&merge->ipv4, mask, match_size); in nfp_flower_populate_merge_match()
748 memcpy(&merge->ipv6, mask, match_size); in nfp_flower_populate_merge_match()
784 return -EOPNOTSUPP; in nfp_flower_can_merge()
793 return -EINVAL; in nfp_flower_can_merge()
808 act_len = a->len_lw << NFP_FL_LW_SIZ; in nfp_flower_copy_pre_actions()
809 act_id = a->jump_id; in nfp_flower_copy_pre_actions()
838 if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off) in nfp_fl_verify_post_tun_acts()
840 else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) in nfp_fl_verify_post_tun_acts()
841 return -EOPNOTSUPP; in nfp_fl_verify_post_tun_acts()
843 act_off += a->len_lw << NFP_FL_LW_SIZ; in nfp_fl_verify_post_tun_acts()
848 return -EOPNOTSUPP; in nfp_fl_verify_post_tun_acts()
863 if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) { in nfp_fl_push_vlan_after_tun()
865 tun->outer_vlan_tpid = vlan->vlan_tpid; in nfp_fl_push_vlan_after_tun()
866 tun->outer_vlan_tci = vlan->vlan_tci; in nfp_fl_push_vlan_after_tun()
871 act_off += a->len_lw << NFP_FL_LW_SIZ; in nfp_fl_push_vlan_after_tun()
875 return -EOPNOTSUPP; in nfp_fl_push_vlan_after_tun()
889 /* The last action of sub_flow1 must be output - do not merge this. */ in nfp_flower_merge_action()
890 sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output); in nfp_flower_merge_action()
891 sub2_act_len = sub_flow2->meta.act_len; in nfp_flower_merge_action()
894 return -EINVAL; in nfp_flower_merge_action()
897 return -EINVAL; in nfp_flower_merge_action()
901 merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); in nfp_flower_merge_action()
903 merge_flow->meta.shortcut = sub_flow2->meta.shortcut; in nfp_flower_merge_action()
905 merge_flow->meta.act_len = sub1_act_len + sub2_act_len; in nfp_flower_merge_action()
906 merge_act = merge_flow->action_data; in nfp_flower_merge_action()
908 /* Copy any pre-actions to the start of merge flow action list. */ in nfp_flower_merge_action()
910 sub_flow1->action_data, in nfp_flower_merge_action()
913 sub1_act_len -= pre_off1; in nfp_flower_merge_action()
915 sub_flow2->action_data, in nfp_flower_merge_action()
918 sub2_act_len -= pre_off2; in nfp_flower_merge_action()
925 char *post_tun_acts = &sub_flow2->action_data[pre_off2]; in nfp_flower_merge_action()
934 sub2_act_len -= sizeof(*post_tun_push_vlan); in nfp_flower_merge_action()
939 memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); in nfp_flower_merge_action()
948 merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan); in nfp_flower_merge_action()
952 memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len); in nfp_flower_merge_action()
960 list_del(&link->merge_flow.list); in nfp_flower_unlink_flow()
961 list_del(&link->sub_flow.list); in nfp_flower_unlink_flow()
970 list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) in nfp_flower_unlink_flows()
971 if (link->sub_flow.flow == sub_flow) { in nfp_flower_unlink_flows()
984 return -ENOMEM; in nfp_flower_link_flows()
986 link->merge_flow.flow = merge_flow; in nfp_flower_link_flows()
987 list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows); in nfp_flower_link_flows()
988 link->sub_flow.flow = sub_flow; in nfp_flower_link_flows()
989 list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows); in nfp_flower_link_flows()
995 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
1009 struct nfp_flower_priv *priv = app->priv; in nfp_flower_merge_offloaded_flows()
1019 return -EINVAL; in nfp_flower_merge_offloaded_flows()
1022 parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32; in nfp_flower_merge_offloaded_flows()
1023 parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id)); in nfp_flower_merge_offloaded_flows()
1024 if (rhashtable_lookup_fast(&priv->merge_table, in nfp_flower_merge_offloaded_flows()
1034 merge_key_ls.key_size = sub_flow1->meta.key_len; in nfp_flower_merge_offloaded_flows()
1038 return -ENOMEM; in nfp_flower_merge_offloaded_flows()
1040 merge_flow->tc_flower_cookie = (unsigned long)merge_flow; in nfp_flower_merge_offloaded_flows()
1041 merge_flow->ingress_dev = sub_flow1->ingress_dev; in nfp_flower_merge_offloaded_flows()
1043 memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data, in nfp_flower_merge_offloaded_flows()
1044 sub_flow1->meta.key_len); in nfp_flower_merge_offloaded_flows()
1045 memcpy(merge_flow->mask_data, sub_flow1->mask_data, in nfp_flower_merge_offloaded_flows()
1046 sub_flow1->meta.mask_len); in nfp_flower_merge_offloaded_flows()
1060 err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow, in nfp_flower_merge_offloaded_flows()
1061 merge_flow->ingress_dev, NULL); in nfp_flower_merge_offloaded_flows()
1065 err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node, in nfp_flower_merge_offloaded_flows()
1072 err = -ENOMEM; in nfp_flower_merge_offloaded_flows()
1075 merge_info->parent_ctx = parent_ctx; in nfp_flower_merge_offloaded_flows()
1076 err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node, in nfp_flower_merge_offloaded_flows()
1086 merge_flow->in_hw = true; in nfp_flower_merge_offloaded_flows()
1087 sub_flow1->in_hw = false; in nfp_flower_merge_offloaded_flows()
1092 WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, in nfp_flower_merge_offloaded_flows()
1093 &merge_info->ht_node, in nfp_flower_merge_offloaded_flows()
1098 WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, in nfp_flower_merge_offloaded_flows()
1099 &merge_flow->fl_node, in nfp_flower_merge_offloaded_flows()
1108 kfree(merge_flow->action_data); in nfp_flower_merge_offloaded_flows()
1109 kfree(merge_flow->mask_data); in nfp_flower_merge_offloaded_flows()
1110 kfree(merge_flow->unmasked_data); in nfp_flower_merge_offloaded_flows()
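
(Note: near the top of nfp_flower_merge_offloaded_flows() the merge_table key is built by packing sub_flow1's host context ID into the upper 32 bits of parent_ctx and sub_flow2's into the lower 32 bits, so a given sub-flow pair can only be merged once. A one-line sketch of that packing, assuming host-order IDs:)

#include <stdint.h>

/* Build the 64-bit key the merge table is indexed by: the first sub-flow's
 * host context ID in the upper 32 bits, the second's in the lower 32 bits. */
static uint64_t merge_parent_ctx(uint32_t ctx_id1, uint32_t ctx_id2)
{
	return ((uint64_t)ctx_id1 << 32) | ctx_id2;
}
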
1122 * Verifies the flow as a pre-tunnel rule.
1124 * Return: negative value on error, 0 if verified.
1132 struct nfp_flower_priv *priv = app->priv; in nfp_flower_validate_pre_tun_rule()
1135 u8 *ext = flow->unmasked_data; in nfp_flower_validate_pre_tun_rule()
1137 u8 *mask = flow->mask_data; in nfp_flower_validate_pre_tun_rule()
1142 meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; in nfp_flower_validate_pre_tun_rule()
1143 key_layer = key_ls->key_layer; in nfp_flower_validate_pre_tun_rule()
1144 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { in nfp_flower_validate_pre_tun_rule()
1145 if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { in nfp_flower_validate_pre_tun_rule()
1146 u16 vlan_tci = be16_to_cpu(meta_tci->tci); in nfp_flower_validate_pre_tun_rule()
1149 flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); in nfp_flower_validate_pre_tun_rule()
1152 flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); in nfp_flower_validate_pre_tun_rule()
1157 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); in nfp_flower_validate_pre_tun_rule()
1158 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1159 } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) { in nfp_flower_validate_pre_tun_rule()
1160 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields"); in nfp_flower_validate_pre_tun_rule()
1161 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1165 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); in nfp_flower_validate_pre_tun_rule()
1166 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1171 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); in nfp_flower_validate_pre_tun_rule()
1172 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1176 flow->pre_tun_rule.is_ipv6 = true; in nfp_flower_validate_pre_tun_rule()
1178 flow->pre_tun_rule.is_ipv6 = false; in nfp_flower_validate_pre_tun_rule()
1183 if (key_ls->key_layer_two) { in nfp_flower_validate_pre_tun_rule()
1192 if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { in nfp_flower_validate_pre_tun_rule()
1193 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); in nfp_flower_validate_pre_tun_rule()
1194 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1201 if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { in nfp_flower_validate_pre_tun_rule()
1203 if (!is_broadcast_ether_addr(&mac->mac_src[0])) { in nfp_flower_validate_pre_tun_rule()
1205 "unsupported pre-tunnel rule: source MAC field must not be masked"); in nfp_flower_validate_pre_tun_rule()
1206 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1210 if (mac->mpls_lse) { in nfp_flower_validate_pre_tun_rule()
1211 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); in nfp_flower_validate_pre_tun_rule()
1212 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1217 if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { in nfp_flower_validate_pre_tun_rule()
1219 "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); in nfp_flower_validate_pre_tun_rule()
1220 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1224 memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN); in nfp_flower_validate_pre_tun_rule()
1225 memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN); in nfp_flower_validate_pre_tun_rule()
1245 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); in nfp_flower_validate_pre_tun_rule()
1246 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1252 if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { in nfp_flower_validate_pre_tun_rule()
1253 if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) { in nfp_flower_validate_pre_tun_rule()
1260 vlan_tci = be16_to_cpu(vlan_tags->outer_tci); in nfp_flower_validate_pre_tun_rule()
1261 vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid); in nfp_flower_validate_pre_tun_rule()
1264 flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); in nfp_flower_validate_pre_tun_rule()
1265 flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid); in nfp_flower_validate_pre_tun_rule()
1268 flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); in nfp_flower_validate_pre_tun_rule()
1269 flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff); in nfp_flower_validate_pre_tun_rule()
1275 act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; in nfp_flower_validate_pre_tun_rule()
1277 if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { in nfp_flower_validate_pre_tun_rule()
1278 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); in nfp_flower_validate_pre_tun_rule()
1279 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1282 act_offset += act->len_lw << NFP_FL_LW_SIZ; in nfp_flower_validate_pre_tun_rule()
1283 act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; in nfp_flower_validate_pre_tun_rule()
1286 if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { in nfp_flower_validate_pre_tun_rule()
1287 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); in nfp_flower_validate_pre_tun_rule()
1288 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
1291 act_offset += act->len_lw << NFP_FL_LW_SIZ; in nfp_flower_validate_pre_tun_rule()
1294 if (act_offset != flow->meta.act_len) { in nfp_flower_validate_pre_tun_rule()
1295 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); in nfp_flower_validate_pre_tun_rule()
1296 return -EOPNOTSUPP; in nfp_flower_validate_pre_tun_rule()
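
(Note: nfp_flower_validate_pre_tun_rule() above records the VLAN the pre-tunnel rule expects: when the rule matches a VLAN, the TCI is stored with the presence bit cleared; otherwise an all-ones TCI (and TPID) acts as a "no specific VLAN" sentinel. A sketch of that encoding follows; the presence-flag value is an assumed stand-in for the driver's NFP_FLOWER_MASK_VLAN_PRESENT.)

#include <stdint.h>

#define VLAN_PRESENT_FLAG 0x1000	/* assumed stand-in for the driver flag */

/* Record the VLAN a pre-tunnel rule expects: a real TCI with the presence
 * flag stripped, or 0xffff as the "no specific VLAN" sentinel. */
static uint16_t pre_tun_vlan_tci(int rule_matches_vlan, uint16_t tci)
{
	if (rule_matches_vlan)
		return tci & ~VLAN_PRESENT_FLAG;
	return 0xffff;			/* sentinel: any / no VLAN */
}
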
1305 struct flow_dissector *dissector = rule->match.dissector; in offload_pre_check()
1308 if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) { in offload_pre_check()
1315 if (flow->common.chain_index) in offload_pre_check()
1322 * nfp_flower_add_offload() - Adds a new flow to hardware.
1337 struct nfp_flower_priv *priv = app->priv; in nfp_flower_add_offload()
1344 extack = flow->common.extack; in nfp_flower_add_offload()
1355 return -EOPNOTSUPP; in nfp_flower_add_offload()
1359 return -ENOMEM; in nfp_flower_add_offload()
1368 err = -ENOMEM; in nfp_flower_add_offload()
1381 if (flow_pay->pre_tun_rule.dev) { in nfp_flower_add_offload()
1387 err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack); in nfp_flower_add_offload()
1391 flow_pay->tc_flower_cookie = flow->cookie; in nfp_flower_add_offload()
1392 err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, in nfp_flower_add_offload()
1399 if (flow_pay->pre_tun_rule.dev) { in nfp_flower_add_offload()
1400 if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { in nfp_flower_add_offload()
1405 err = -ENOMEM; in nfp_flower_add_offload()
1408 predt->flow_pay = flow_pay; in nfp_flower_add_offload()
1409 INIT_LIST_HEAD(&predt->nn_list); in nfp_flower_add_offload()
1410 spin_lock_bh(&priv->predt_lock); in nfp_flower_add_offload()
1411 list_add(&predt->list_head, &priv->predt_list); in nfp_flower_add_offload()
1412 flow_pay->pre_tun_rule.predt = predt; in nfp_flower_add_offload()
1414 spin_unlock_bh(&priv->predt_lock); in nfp_flower_add_offload()
1427 port->tc_offload_cnt++; in nfp_flower_add_offload()
1429 flow_pay->in_hw = true; in nfp_flower_add_offload()
1437 WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, in nfp_flower_add_offload()
1438 &flow_pay->fl_node, in nfp_flower_add_offload()
1443 if (flow_pay->nfp_tun_ipv6) in nfp_flower_add_offload()
1444 nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6); in nfp_flower_add_offload()
1445 kfree(flow_pay->action_data); in nfp_flower_add_offload()
1446 kfree(flow_pay->mask_data); in nfp_flower_add_offload()
1447 kfree(flow_pay->unmasked_data); in nfp_flower_add_offload()
1459 struct nfp_flower_priv *priv = app->priv; in nfp_flower_remove_merge_flow()
1467 link = list_first_entry(&merge_flow->linked_flows, in nfp_flower_remove_merge_flow()
1469 origin = link->sub_flow.flow; in nfp_flower_remove_merge_flow()
1471 /* Re-add rule the merge had overwritten if it has not been deleted. */ in nfp_flower_remove_merge_flow()
1494 origin->in_hw = true; in nfp_flower_remove_merge_flow()
1499 list_for_each_entry_safe(link, temp, &merge_flow->linked_flows, in nfp_flower_remove_merge_flow()
1501 u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id); in nfp_flower_remove_merge_flow()
1507 merge_info = rhashtable_lookup_fast(&priv->merge_table, in nfp_flower_remove_merge_flow()
1511 WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, in nfp_flower_remove_merge_flow()
1512 &merge_info->ht_node, in nfp_flower_remove_merge_flow()
1517 kfree(merge_flow->action_data); in nfp_flower_remove_merge_flow()
1518 kfree(merge_flow->mask_data); in nfp_flower_remove_merge_flow()
1519 kfree(merge_flow->unmasked_data); in nfp_flower_remove_merge_flow()
1520 WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, in nfp_flower_remove_merge_flow()
1521 &merge_flow->fl_node, in nfp_flower_remove_merge_flow()
1533 list_for_each_entry_safe(link, temp, &sub_flow->linked_flows, in nfp_flower_del_linked_merge_flows()
1536 link->merge_flow.flow); in nfp_flower_del_linked_merge_flows()
1540 * nfp_flower_del_offload() - Removes a flow from hardware.
1554 struct nfp_flower_priv *priv = app->priv; in nfp_flower_del_offload()
1561 extack = flow->common.extack; in nfp_flower_del_offload()
1566 ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie, in nfp_flower_del_offload()
1573 nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); in nfp_flower_del_offload()
1576 return -ENOENT; in nfp_flower_del_offload()
1583 if (nfp_flow->nfp_tun_ipv4_addr) in nfp_flower_del_offload()
1584 nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); in nfp_flower_del_offload()
1586 if (nfp_flow->nfp_tun_ipv6) in nfp_flower_del_offload()
1587 nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6); in nfp_flower_del_offload()
1589 if (!nfp_flow->in_hw) { in nfp_flower_del_offload()
1594 if (nfp_flow->pre_tun_rule.dev) { in nfp_flower_del_offload()
1595 if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { in nfp_flower_del_offload()
1598 predt = nfp_flow->pre_tun_rule.predt; in nfp_flower_del_offload()
1600 spin_lock_bh(&priv->predt_lock); in nfp_flower_del_offload()
1602 list_del(&predt->list_head); in nfp_flower_del_offload()
1603 spin_unlock_bh(&priv->predt_lock); in nfp_flower_del_offload()
1618 port->tc_offload_cnt--; in nfp_flower_del_offload()
1619 kfree(nfp_flow->action_data); in nfp_flower_del_offload()
1620 kfree(nfp_flow->mask_data); in nfp_flower_del_offload()
1621 kfree(nfp_flow->unmasked_data); in nfp_flower_del_offload()
1622 WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, in nfp_flower_del_offload()
1623 &nfp_flow->fl_node, in nfp_flower_del_offload()
1633 struct nfp_flower_priv *priv = app->priv; in __nfp_flower_update_merge_stats()
1639 ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id); in __nfp_flower_update_merge_stats()
1640 pkts = priv->stats[ctx_id].pkts; in __nfp_flower_update_merge_stats()
1644 bytes = priv->stats[ctx_id].bytes; in __nfp_flower_update_merge_stats()
1645 used = priv->stats[ctx_id].used; in __nfp_flower_update_merge_stats()
1648 priv->stats[ctx_id].pkts = 0; in __nfp_flower_update_merge_stats()
1649 priv->stats[ctx_id].bytes = 0; in __nfp_flower_update_merge_stats()
1655 list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) { in __nfp_flower_update_merge_stats()
1656 sub_flow = link->sub_flow.flow; in __nfp_flower_update_merge_stats()
1657 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); in __nfp_flower_update_merge_stats()
1658 priv->stats[ctx_id].pkts += pkts; in __nfp_flower_update_merge_stats()
1659 priv->stats[ctx_id].bytes += bytes; in __nfp_flower_update_merge_stats()
1660 priv->stats[ctx_id].used = max_t(u64, used, in __nfp_flower_update_merge_stats()
1661 priv->stats[ctx_id].used); in __nfp_flower_update_merge_stats()
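
(Note: __nfp_flower_update_merge_stats() above drains the merge flow's packet and byte counters and credits them to every linked sub-flow, keeping the most recent "used" timestamp. A small userspace model of that propagation:)

#include <stdint.h>
#include <stddef.h>

struct flow_stats { uint64_t pkts, bytes, used; };

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* Drain the merge flow's counters and credit them to each sub-flow, keeping
 * the most recent "last used" stamp (illustrative model of the propagation). */
static void propagate_merge_stats(struct flow_stats *merge,
				  struct flow_stats *subs, size_t n_subs)
{
	uint64_t pkts = merge->pkts, bytes = merge->bytes, used = merge->used;
	size_t i;

	merge->pkts = 0;
	merge->bytes = 0;

	for (i = 0; i < n_subs; i++) {
		subs[i].pkts  += pkts;
		subs[i].bytes += bytes;
		subs[i].used   = max_u64(used, subs[i].used);
	}
}
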
1672 list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list) in nfp_flower_update_merge_stats()
1673 __nfp_flower_update_merge_stats(app, link->merge_flow.flow); in nfp_flower_update_merge_stats()
1677 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
1691 struct nfp_flower_priv *priv = app->priv; in nfp_flower_get_stats()
1698 ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie, in nfp_flower_get_stats()
1703 extack = flow->common.extack; in nfp_flower_get_stats()
1704 nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); in nfp_flower_get_stats()
1707 return -EINVAL; in nfp_flower_get_stats()
1710 ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); in nfp_flower_get_stats()
1712 spin_lock_bh(&priv->stats_lock); in nfp_flower_get_stats()
1714 if (!list_empty(&nfp_flow->linked_flows)) in nfp_flower_get_stats()
1717 flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, in nfp_flower_get_stats()
1718 priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used, in nfp_flower_get_stats()
1721 priv->stats[ctx_id].pkts = 0; in nfp_flower_get_stats()
1722 priv->stats[ctx_id].bytes = 0; in nfp_flower_get_stats()
1723 spin_unlock_bh(&priv->stats_lock); in nfp_flower_get_stats()
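
(Note: nfp_flower_get_stats() above reports the per-context counters to TC and then zeroes them under priv->stats_lock, so each query returns only the traffic seen since the previous one. The sketch below models that read-and-reset step with a pthread mutex standing in for the spinlock.)

#include <stdint.h>
#include <pthread.h>

struct ctx_stats { uint64_t pkts, bytes, used; };

/* Read-and-reset: report the counters accumulated since the last query and
 * clear them, all under the lock that the stats updater also takes. */
static void get_and_reset_stats(pthread_mutex_t *lock, struct ctx_stats *s,
				uint64_t *pkts, uint64_t *bytes, uint64_t *used)
{
	pthread_mutex_lock(lock);
	*pkts  = s->pkts;
	*bytes = s->bytes;
	*used  = s->used;
	s->pkts = 0;
	s->bytes = 0;
	pthread_mutex_unlock(lock);
}
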
1732 struct nfp_flower_priv *priv = app->priv; in nfp_flower_repr_offload()
1735 if (!eth_proto_is_802_3(flower->common.protocol)) in nfp_flower_repr_offload()
1736 return -EOPNOTSUPP; in nfp_flower_repr_offload()
1738 mutex_lock(&priv->nfp_fl_lock); in nfp_flower_repr_offload()
1739 switch (flower->command) { in nfp_flower_repr_offload()
1750 ret = -EOPNOTSUPP; in nfp_flower_repr_offload()
1753 mutex_unlock(&priv->nfp_fl_lock); in nfp_flower_repr_offload()
1764 if (!tc_can_offload_extack(repr->netdev, common->extack)) in nfp_flower_setup_tc_block_cb()
1765 return -EOPNOTSUPP; in nfp_flower_setup_tc_block_cb()
1769 return nfp_flower_repr_offload(repr->app, repr->netdev, in nfp_flower_setup_tc_block_cb()
1772 return nfp_flower_setup_qos_offload(repr->app, repr->netdev, in nfp_flower_setup_tc_block_cb()
1775 return -EOPNOTSUPP; in nfp_flower_setup_tc_block_cb()
1788 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) in nfp_flower_setup_tc_block()
1789 return -EOPNOTSUPP; in nfp_flower_setup_tc_block()
1791 repr_priv = repr->app_priv; in nfp_flower_setup_tc_block()
1792 repr_priv->block_shared = f->block_shared; in nfp_flower_setup_tc_block()
1793 f->driver_block_list = &nfp_block_cb_list; in nfp_flower_setup_tc_block()
1794 f->unlocked_driver_cb = true; in nfp_flower_setup_tc_block()
1796 switch (f->command) { in nfp_flower_setup_tc_block()
1800 return -EBUSY; in nfp_flower_setup_tc_block()
1808 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); in nfp_flower_setup_tc_block()
1811 block_cb = flow_block_cb_lookup(f->block, in nfp_flower_setup_tc_block()
1815 return -ENOENT; in nfp_flower_setup_tc_block()
1818 list_del(&block_cb->driver_list); in nfp_flower_setup_tc_block()
1821 return -EOPNOTSUPP; in nfp_flower_setup_tc_block()
1832 return -EOPNOTSUPP; in nfp_flower_setup_tc()
1847 struct nfp_flower_priv *priv = app->priv; in nfp_flower_indr_block_cb_priv_lookup()
1849 list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) in nfp_flower_indr_block_cb_priv_lookup()
1850 if (cb_priv->netdev == netdev) in nfp_flower_indr_block_cb_priv_lookup()
1863 return nfp_flower_repr_offload(priv->app, priv->netdev, in nfp_flower_setup_indr_block_cb()
1866 return -EOPNOTSUPP; in nfp_flower_setup_indr_block_cb()
1874 list_del(&priv->list); in nfp_flower_setup_indr_tc_release()
1884 struct nfp_flower_priv *priv = app->priv; in nfp_flower_setup_indr_tc_block()
1887 if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && in nfp_flower_setup_indr_tc_block()
1889 (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && in nfp_flower_setup_indr_tc_block()
1891 return -EOPNOTSUPP; in nfp_flower_setup_indr_tc_block()
1893 f->unlocked_driver_cb = true; in nfp_flower_setup_indr_tc_block()
1895 switch (f->command) { in nfp_flower_setup_indr_tc_block()
1902 return -EBUSY; in nfp_flower_setup_indr_tc_block()
1906 return -ENOMEM; in nfp_flower_setup_indr_tc_block()
1908 cb_priv->netdev = netdev; in nfp_flower_setup_indr_tc_block()
1909 cb_priv->app = app; in nfp_flower_setup_indr_tc_block()
1910 list_add(&cb_priv->list, &priv->indr_block_cb_priv); in nfp_flower_setup_indr_tc_block()
1917 list_del(&cb_priv->list); in nfp_flower_setup_indr_tc_block()
1923 list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); in nfp_flower_setup_indr_tc_block()
1928 return -ENOENT; in nfp_flower_setup_indr_tc_block()
1930 block_cb = flow_block_cb_lookup(f->block, in nfp_flower_setup_indr_tc_block()
1934 return -ENOENT; in nfp_flower_setup_indr_tc_block()
1937 list_del(&block_cb->driver_list); in nfp_flower_setup_indr_tc_block()
1940 return -EOPNOTSUPP; in nfp_flower_setup_indr_tc_block()
1949 return -EOPNOTSUPP; in nfp_setup_tc_no_dev()
1955 return -EOPNOTSUPP; in nfp_setup_tc_no_dev()
1969 return -EOPNOTSUPP; in nfp_flower_indr_setup_tc_cb()
1976 return -EOPNOTSUPP; in nfp_flower_indr_setup_tc_cb()