Lines matching "cpsw-switch"
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
6 * Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
7 * Interspersed Express Traffic (IET - P802.3br/D2.0)
17 #include "am65-cpsw-nuss.h"
18 #include "am65-cpsw-qos.h"
19 #include "am65-cpts.h"
47 writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio)); in am65_cpsw_tx_pn_shaper_reset()
48 writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio)); in am65_cpsw_tx_pn_shaper_reset()
54 struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio; in am65_cpsw_tx_pn_shaper_apply()
55 struct am65_cpsw_common *common = port->common; in am65_cpsw_tx_pn_shaper_apply()
61 mqprio = &p_mqprio->mqprio_hw; in am65_cpsw_tx_pn_shaper_apply()
63 if (p_mqprio->max_rate_total > port->qos.link_speed) in am65_cpsw_tx_pn_shaper_apply()
68 enable = p_mqprio->shaper_en && !shaper_susp; in am65_cpsw_tx_pn_shaper_apply()
73 * for CPSW, rate limit can be applied per priority in am65_cpsw_tx_pn_shaper_apply()
80 for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { in am65_cpsw_tx_pn_shaper_apply()
83 rate_mbps = TO_MBPS(mqprio->min_rate[tc]); in am65_cpsw_tx_pn_shaper_apply()
85 common->bus_freq); in am65_cpsw_tx_pn_shaper_apply()
87 port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio)); in am65_cpsw_tx_pn_shaper_apply()
91 if (mqprio->max_rate[tc]) { in am65_cpsw_tx_pn_shaper_apply()
92 rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc]; in am65_cpsw_tx_pn_shaper_apply()
95 common->bus_freq); in am65_cpsw_tx_pn_shaper_apply()
99 port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio)); in am65_cpsw_tx_pn_shaper_apply()
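/*
 * Illustrative sketch (not taken from the driver): how a per-TC min/max
 * rate pair from mqprio maps onto the CIR/EIR values written above.
 * CIR carries the committed (min) rate and EIR only the excess above it.
 * The bytes-per-second input unit mirrors what TO_MBPS() suggests; the
 * struct and function names are placeholders, and the final register
 * scaling (am65_cpsw_qos_tx_rate_calc()) is not modeled.
 */
#include <stdint.h>

struct shaper_pair {
	uint32_t cir_mbps;	/* committed rate: the TC's min_rate */
	uint32_t eir_mbps;	/* excess rate: max_rate above min_rate */
};

static struct shaper_pair rates_to_shaper(uint64_t min_rate_bytes_ps,
					  uint64_t max_rate_bytes_ps)
{
	const uint64_t bytes_per_mbit = 1000000 / 8;
	struct shaper_pair p = { 0, 0 };

	p.cir_mbps = (uint32_t)((min_rate_bytes_ps + bytes_per_mbit - 1) /
				bytes_per_mbit);
	if (max_rate_bytes_ps > min_rate_bytes_ps)
		p.eir_mbps = (uint32_t)((max_rate_bytes_ps - min_rate_bytes_ps +
					 bytes_per_mbit - 1) / bytes_per_mbit);
	return p;
}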
106 struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio; in am65_cpsw_mqprio_verify_shaper()
107 struct netlink_ext_ack *extack = mqprio->extack; in am65_cpsw_mqprio_verify_shaper()
113 if (!(mqprio->flags & TC_MQPRIO_F_SHAPER)) in am65_cpsw_mqprio_verify_shaper()
116 if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) in am65_cpsw_mqprio_verify_shaper()
119 has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE); in am65_cpsw_mqprio_verify_shaper()
120 has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE); in am65_cpsw_mqprio_verify_shaper()
124 return -EOPNOTSUPP; in am65_cpsw_mqprio_verify_shaper()
130 num_tc = mqprio->qopt.num_tc; in am65_cpsw_mqprio_verify_shaper()
132 for (i = num_tc - 1; i >= 0; i--) { in am65_cpsw_mqprio_verify_shaper()
135 if (mqprio->min_rate[i]) in am65_cpsw_mqprio_verify_shaper()
137 min_rate_total += mqprio->min_rate[i]; in am65_cpsw_mqprio_verify_shaper()
140 if (mqprio->max_rate[i]) in am65_cpsw_mqprio_verify_shaper()
142 max_rate_total += mqprio->max_rate[i]; in am65_cpsw_mqprio_verify_shaper()
144 if (!mqprio->min_rate[i] && mqprio->max_rate[i]) { in am65_cpsw_mqprio_verify_shaper()
148 return -EINVAL; in am65_cpsw_mqprio_verify_shaper()
151 if (mqprio->max_rate[i] && in am65_cpsw_mqprio_verify_shaper()
152 mqprio->max_rate[i] < mqprio->min_rate[i]) { in am65_cpsw_mqprio_verify_shaper()
155 i, mqprio->min_rate[i], in am65_cpsw_mqprio_verify_shaper()
156 mqprio->max_rate[i]); in am65_cpsw_mqprio_verify_shaper()
157 return -EINVAL; in am65_cpsw_mqprio_verify_shaper()
161 ch_msk = GENMASK(num_tc - 1, i); in am65_cpsw_mqprio_verify_shaper()
164 "Min rate must be set sequentially hi->lo tx_rate_msk%x", in am65_cpsw_mqprio_verify_shaper()
166 return -EINVAL; in am65_cpsw_mqprio_verify_shaper()
171 "Max rate must be set sequentially hi->lo tx_rate_msk%x", in am65_cpsw_mqprio_verify_shaper()
173 return -EINVAL; in am65_cpsw_mqprio_verify_shaper()
180 p_mqprio->shaper_en = true; in am65_cpsw_mqprio_verify_shaper()
181 p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total); in am65_cpsw_mqprio_verify_shaper()
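/*
 * Illustrative sketch (placeholder helper, not in the driver): the loop
 * above walks traffic classes from num_tc - 1 down and compares the
 * accumulated rate bitmap against GENMASK(num_tc - 1, i), which amounts
 * to requiring that rates are set on a contiguous run of the highest
 * TCs.  An equivalent standalone test:
 */
#include <stdbool.h>
#include <stdint.h>

static bool rates_set_hi_to_lo(const uint64_t *rate, int num_tc)
{
	bool gap_seen = false;	/* a TC without a rate was encountered */
	int i;

	for (i = num_tc - 1; i >= 0; i--) {
		if (!rate[i])
			gap_seen = true;
		else if (gap_seen)
			return false;	/* rate set below a gap: not hi->lo */
	}
	return true;
}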
189 struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio; in am65_cpsw_reset_tc_mqprio()
191 p_mqprio->shaper_en = false; in am65_cpsw_reset_tc_mqprio()
192 p_mqprio->max_rate_total = 0; in am65_cpsw_reset_tc_mqprio()
198 writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP); in am65_cpsw_reset_tc_mqprio()
206 struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio; in am65_cpsw_setup_mqprio()
208 struct am65_cpsw_common *common = port->common; in am65_cpsw_setup_mqprio()
209 struct tc_mqprio_qopt *qopt = &mqprio->qopt; in am65_cpsw_setup_mqprio()
211 u8 num_tc = qopt->num_tc; in am65_cpsw_setup_mqprio()
214 memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio)); in am65_cpsw_setup_mqprio()
216 ret = pm_runtime_get_sync(common->dev); in am65_cpsw_setup_mqprio()
218 pm_runtime_put_noidle(common->dev); in am65_cpsw_setup_mqprio()
243 * set the switch priority in pn_tx_pri_map. in am65_cpsw_setup_mqprio()
252 for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++) in am65_cpsw_setup_mqprio()
255 count = qopt->count[tc]; in am65_cpsw_setup_mqprio()
256 offset = qopt->offset[tc]; in am65_cpsw_setup_mqprio()
260 writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP); in am65_cpsw_setup_mqprio()
263 am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs); in am65_cpsw_setup_mqprio()
266 pm_runtime_put(common->dev); in am65_cpsw_setup_mqprio()
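/*
 * Illustrative sketch: AM65_CPSW_PN_REG_TX_PRI_MAP written above is a
 * priority map register; the usual CPSW layout is one 4-bit entry per
 * header priority (pri 0 in bits 3:0 ... pri 7 in bits 31:28), which is
 * assumed here rather than taken from the TRM.  pack_pri_map() only
 * shows how eight nibble-sized entries fold into the 32-bit value.
 */
#include <stdint.h>

static uint32_t pack_pri_map(const uint8_t map[8])
{
	uint32_t val = 0;
	int pri;

	for (pri = 0; pri < 8; pri++)
		val |= (uint32_t)(map[pri] & 0xf) << (pri * 4);

	return val;
}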
273 int verify_time_ms = port->qos.iet.verify_time_ms; in am65_cpsw_iet_set_verify_timeout_count()
286 return -EINVAL; in am65_cpsw_iet_set_verify_timeout_count()
288 writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY); in am65_cpsw_iet_set_verify_timeout_count()
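/*
 * Illustrative sketch: the -EINVAL above guards a conversion of
 * verify_time_ms into a wireside clock count before it is written to
 * AM65_CPSW_PN_REG_IET_VERIFY.  A plausible standalone form, assuming a
 * 125 MHz wireside clock (1G mode) and a 24-bit count field, both of
 * which are assumptions for illustration:
 */
#include <stdint.h>

#define VERIFY_CNT_MAX	0xffffffu	/* assumed width of the count field */

static int verify_ms_to_count(unsigned int verify_time_ms, uint32_t *count)
{
	/* clocks per millisecond at 125 MHz, scaled by the timeout */
	uint64_t cnt = (125000000ull / 1000) * verify_time_ms;

	if (cnt > VERIFY_CNT_MAX)
		return -1;	/* timeout does not fit the register field */

	*count = (uint32_t)cnt;
	return 0;
}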
303 ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); in am65_cpsw_iet_verify_wait()
305 writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL); in am65_cpsw_iet_verify_wait()
308 ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); in am65_cpsw_iet_verify_wait()
310 writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL); in am65_cpsw_iet_verify_wait()
312 msleep(port->qos.iet.verify_time_ms); in am65_cpsw_iet_verify_wait()
314 status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS); in am65_cpsw_iet_verify_wait()
319 netdev_dbg(port->ndev, in am65_cpsw_iet_verify_wait()
325 netdev_dbg(port->ndev, "MAC Merge respond error\n"); in am65_cpsw_iet_verify_wait()
326 return -ENODEV; in am65_cpsw_iet_verify_wait()
330 netdev_dbg(port->ndev, "MAC Merge verify error\n"); in am65_cpsw_iet_verify_wait()
331 return -ENODEV; in am65_cpsw_iet_verify_wait()
333 } while (try-- > 0); in am65_cpsw_iet_verify_wait()
335 netdev_dbg(port->ndev, "MAC Merge verify timeout\n"); in am65_cpsw_iet_verify_wait()
336 return -ETIMEDOUT; in am65_cpsw_iet_verify_wait()
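/*
 * Illustrative sketch: the retry loop above has a simple poll-and-retry
 * shape: kick verification, sleep for the verify interval, decode the
 * status, and give up after a bounded number of tries.  The callbacks
 * and enum below are placeholders; only the control flow mirrors the
 * fragment.
 */
#include <errno.h>

enum mm_verify_state { MM_VERIFIED, MM_RESPOND_ERR, MM_VERIFY_ERR, MM_PENDING };

static int wait_for_verify(enum mm_verify_state (*poll)(void *ctx), void *ctx,
			   void (*sleep_ms)(unsigned int ms),
			   unsigned int verify_ms, int tries)
{
	do {
		sleep_ms(verify_ms);

		switch (poll(ctx)) {
		case MM_VERIFIED:
			return 0;
		case MM_RESPOND_ERR:
		case MM_VERIFY_ERR:
			return -ENODEV;	/* peer reachable, verification failed */
		case MM_PENDING:
			break;
		}
	} while (tries-- > 0);

	return -ETIMEDOUT;
}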
343 val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); in am65_cpsw_iet_set_preempt_mask()
346 writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL); in am65_cpsw_iet_set_preempt_mask()
359 for (i = 0; i < common->port_num; i++) { in am65_cpsw_iet_common_enable()
360 port = &common->ports[i]; in am65_cpsw_iet_common_enable()
361 val = readl(port->port_base + AM65_CPSW_PN_REG_CTL); in am65_cpsw_iet_common_enable()
367 val = readl(common->cpsw_base + AM65_CPSW_REG_CTL); in am65_cpsw_iet_common_enable()
374 writel(val, common->cpsw_base + AM65_CPSW_REG_CTL); in am65_cpsw_iet_common_enable()
375 common->iet_enabled = rx_enable; in am65_cpsw_iet_common_enable()
378 /* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
388 if (port->qos.link_speed == SPEED_UNKNOWN) in am65_cpsw_iet_commit_preemptible_tcs()
391 val = readl(port->port_base + AM65_CPSW_PN_REG_CTL); in am65_cpsw_iet_commit_preemptible_tcs()
396 am65_cpsw_iet_common_enable(port->common); in am65_cpsw_iet_commit_preemptible_tcs()
401 netdev_err(port->ndev, "couldn't set verify count: %d\n", err); in am65_cpsw_iet_commit_preemptible_tcs()
405 val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); in am65_cpsw_iet_commit_preemptible_tcs()
412 preemptible_tcs = port->qos.iet.preemptible_tcs; in am65_cpsw_iet_commit_preemptible_tcs()
418 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev); in am65_cpsw_iet_change_preemptible_tcs()
420 port->qos.iet.preemptible_tcs = preemptible_tcs; in am65_cpsw_iet_change_preemptible_tcs()
421 mutex_lock(&priv->mm_lock); in am65_cpsw_iet_change_preemptible_tcs()
423 mutex_unlock(&priv->mm_lock); in am65_cpsw_iet_change_preemptible_tcs()
431 mutex_lock(&priv->mm_lock); in am65_cpsw_iet_link_state_update()
433 mutex_unlock(&priv->mm_lock); in am65_cpsw_iet_link_state_update()
438 return port->qos.est_oper || port->qos.est_admin; in am65_cpsw_port_est_enabled()
445 val = readl(common->cpsw_base + AM65_CPSW_REG_CTL); in am65_cpsw_est_enable()
452 writel(val, common->cpsw_base + AM65_CPSW_REG_CTL); in am65_cpsw_est_enable()
453 common->est_enabled = enable; in am65_cpsw_est_enable()
460 val = readl(port->port_base + AM65_CPSW_PN_REG_CTL); in am65_cpsw_port_est_enable()
466 writel(val, port->port_base + AM65_CPSW_PN_REG_CTL); in am65_cpsw_port_est_enable()
476 val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL); in am65_cpsw_port_est_assign_buf_num()
482 writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL); in am65_cpsw_port_est_assign_buf_num()
485 /* am65_cpsw_port_est_is_swapped() - Indicate whether h/w has transitioned
486 * admin -> oper or not
489 * numbers match (est_oper->buf matches est_admin->buf).
492 * and est_oper->buf does not match est_admin->buf).
500 val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS); in am65_cpsw_port_est_is_swapped()
503 val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL); in am65_cpsw_port_est_is_swapped()
509 /* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
512 * Logic is as follows:
515 * as it is in the process of transitioning from admin -> oper. So keep the
526 while (roll--) { in am65_cpsw_port_est_get_free_buf_num()
531 * to touch memory in-flight, by targeting same oper buf. in am65_cpsw_port_est_get_free_buf_num()
535 dev_info(&ndev->dev, in am65_cpsw_port_est_get_free_buf_num()
536 "Prev. EST admin cycle is in transit %d -> %d\n", in am65_cpsw_port_est_get_free_buf_num()
547 devm_kfree(&ndev->dev, port->qos.est_oper); in am65_cpsw_admin_to_oper()
549 port->qos.est_oper = port->qos.est_admin; in am65_cpsw_admin_to_oper()
550 port->qos.est_admin = NULL; in am65_cpsw_admin_to_oper()
559 val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL); in am65_cpsw_port_est_get_buf_num()
561 writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL); in am65_cpsw_port_est_get_buf_num()
563 est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev); in am65_cpsw_port_est_get_buf_num()
566 if (port->qos.est_oper && port->qos.est_admin && in am65_cpsw_port_est_get_buf_num()
567 est_new->buf == port->qos.est_oper->buf) in am65_cpsw_port_est_get_buf_num()
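/*
 * Illustrative sketch: the EST fetch RAM is double buffered, so a new
 * admin schedule has to land in whichever of the two buffers the oper
 * schedule is not currently using (the re-check against est_oper->buf
 * above).  A stripped-down selection rule, with -1 meaning "no schedule
 * loaded" and 0/1 buffer numbering assumed:
 */
static int pick_admin_buf(int oper_buf)
{
	/* nothing operating yet: either buffer works, start with 0 */
	if (oper_buf < 0)
		return 0;

	/* otherwise take the buffer the hardware is not fetching from */
	return oper_buf ? 0 : 1;
}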
574 struct am65_cpsw_common *common = port->common; in am65_cpsw_est_set()
580 for (i = 0; i < common->port_num; i++) in am65_cpsw_est_set()
581 common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]); in am65_cpsw_est_set()
588 * of admin -> oper transition, particularly it's supposed to be used in some
596 if (!port->qos.est_admin) in am65_cpsw_est_update_state()
628 fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX; in am65_cpsw_est_set_sched_cmds()
656 for (i = 0; i < taprio->num_entries; i++) { in am65_cpsw_est_calc_cmd_num()
657 if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) { in am65_cpsw_est_calc_cmd_num()
658 dev_err(&ndev->dev, "Only SET command is supported"); in am65_cpsw_est_calc_cmd_num()
659 return -EINVAL; in am65_cpsw_est_calc_cmd_num()
662 fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval, in am65_cpsw_est_calc_cmd_num()
684 cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio, in am65_cpsw_est_check_scheds()
685 port->qos.link_speed); in am65_cpsw_est_check_scheds()
690 dev_err(&ndev->dev, "No fetch RAM"); in am65_cpsw_est_check_scheds()
691 return -ENOMEM; in am65_cpsw_est_check_scheds()
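/*
 * Illustrative sketch: each taprio interval is turned into a wireside
 * fetch count and, when it exceeds the per-command maximum, split into
 * several back-to-back commands (the "fetch_cnt -= ..._MAX" step above).
 * The ns-to-count formula assumes the count is in wire byte times
 * (link_speed in Mbps, 8 bits per byte) and the limit value is a
 * placeholder; the driver's own conversion is am65_est_cmd_ns_to_cnt().
 */
#include <stdint.h>

#define FETCH_CNT_MAX	0x3fffffu	/* assumed per-command count limit */

static uint64_t interval_ns_to_cnt(uint64_t ns, unsigned int link_speed_mbps)
{
	/* bytes on the wire during "ns" at the given speed, rounded up */
	return (ns * link_speed_mbps + (8 * 1000 - 1)) / (8 * 1000);
}

static unsigned int cmds_for_interval(uint64_t ns, unsigned int link_speed_mbps)
{
	uint64_t cnt = interval_ns_to_cnt(ns, link_speed_mbps);

	/* one command per FETCH_CNT_MAX worth of counts, at least one */
	return cnt ? (unsigned int)((cnt + FETCH_CNT_MAX - 1) / FETCH_CNT_MAX) : 1;
}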
706 ram_addr = port->fetch_ram_base; in am65_cpsw_est_set_sched_list()
708 ram_addr += est_new->buf * ram_size; in am65_cpsw_est_set_sched_list()
711 for (i = 0; i < est_new->taprio.num_entries; i++) { in am65_cpsw_est_set_sched_list()
712 entry = &est_new->taprio.entries[i]; in am65_cpsw_est_set_sched_list()
714 fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval, in am65_cpsw_est_set_sched_list()
715 port->qos.link_speed); in am65_cpsw_est_set_sched_list()
716 fetch_allow = entry->gate_mask; in am65_cpsw_est_set_sched_list()
718 dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n", in am65_cpsw_est_set_sched_list()
724 if (!fetch_cnt && i < est_new->taprio.num_entries - 1) { in am65_cpsw_est_set_sched_list()
725 dev_info(&ndev->dev, in am65_cpsw_est_set_sched_list()
733 /* end cmd: enable the non-timed queues in case the h/w runs past the cycle time */ in am65_cpsw_est_set_sched_list()
745 struct am65_cpsw_common *common = port->common; in am65_cpsw_timer_set()
746 struct am65_cpts *cpts = common->cpts; in am65_cpsw_timer_set()
749 cfg.ns_period = est_new->taprio.cycle_time; in am65_cpsw_timer_set()
750 cfg.ns_start = est_new->taprio.base_time; in am65_cpsw_timer_set()
752 return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg); in am65_cpsw_timer_set()
758 struct am65_cpts *cpts = port->common->cpts; in am65_cpsw_timer_stop()
760 am65_cpts_estf_disable(cpts, port->port_id - 1); in am65_cpsw_timer_stop()
768 struct am65_cpts *cpts = port->common->cpts; in am65_cpsw_timer_act()
772 if (!port->qos.est_oper) in am65_cpsw_timer_act()
775 taprio_new = &est_new->taprio; in am65_cpsw_timer_act()
776 taprio_oper = &port->qos.est_oper->taprio; in am65_cpsw_timer_act()
778 if (taprio_new->cycle_time != taprio_oper->cycle_time) in am65_cpsw_timer_act()
782 if (!taprio_new->base_time && taprio_oper) in am65_cpsw_timer_act()
783 taprio_new->base_time = taprio_oper->base_time; in am65_cpsw_timer_act()
785 if (taprio_new->base_time == taprio_oper->base_time) in am65_cpsw_timer_act()
789 diff = taprio_new->base_time - taprio_oper->base_time; in am65_cpsw_timer_act()
790 diff = diff < 0 ? -diff : diff; in am65_cpsw_timer_act()
791 if (diff % taprio_new->cycle_time) in am65_cpsw_timer_act()
795 if (taprio_new->base_time <= cur_time + taprio_new->cycle_time) in am65_cpsw_timer_act()
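/*
 * Illustrative sketch: the checks above decide whether a new admin
 * schedule can be latched while the oper schedule keeps running: the
 * cycle times must match and the base times must differ by a whole
 * number of cycles (the within-one-cycle-of-now check is left out
 * here).  A standalone version of the alignment test:
 */
#include <stdbool.h>
#include <stdint.h>

static bool bases_phase_aligned(int64_t new_base, int64_t oper_base,
				uint64_t cycle_time)
{
	int64_t diff = new_base - oper_base;

	if (diff < 0)
		diff = -diff;

	return cycle_time && ((uint64_t)diff % cycle_time) == 0;
}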
814 devm_kfree(&ndev->dev, port->qos.est_admin); in am65_cpsw_taprio_destroy()
815 devm_kfree(&ndev->dev, port->qos.est_oper); in am65_cpsw_taprio_destroy()
817 port->qos.est_oper = NULL; in am65_cpsw_taprio_destroy()
818 port->qos.est_admin = NULL; in am65_cpsw_taprio_destroy()
829 for (i = 0; i < from->num_entries; i++) in am65_cpsw_cp_taprio()
830 to->entries[i] = from->entries[i]; in am65_cpsw_cp_taprio()
837 struct netlink_ext_ack *extack = taprio->mqprio.extack; in am65_cpsw_taprio_replace()
839 struct am65_cpts *cpts = common->cpts; in am65_cpsw_taprio_replace()
846 return -ENETDOWN; in am65_cpsw_taprio_replace()
849 if (common->pf_p0_rx_ptype_rrobin) { in am65_cpsw_taprio_replace()
851 "p0-rx-ptype-rrobin flag conflicts with taprio qdisc"); in am65_cpsw_taprio_replace()
852 return -EINVAL; in am65_cpsw_taprio_replace()
855 if (port->qos.link_speed == SPEED_UNKNOWN) in am65_cpsw_taprio_replace()
856 return -ENOLINK; in am65_cpsw_taprio_replace()
858 if (taprio->cycle_time_extension) { in am65_cpsw_taprio_replace()
861 return -EOPNOTSUPP; in am65_cpsw_taprio_replace()
864 est_new = devm_kzalloc(&ndev->dev, in am65_cpsw_taprio_replace()
865 struct_size(est_new, taprio.entries, taprio->num_entries), in am65_cpsw_taprio_replace()
868 return -ENOMEM; in am65_cpsw_taprio_replace()
870 ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio); in am65_cpsw_taprio_replace()
874 am65_cpsw_cp_taprio(taprio, &est_new->taprio); in am65_cpsw_taprio_replace()
886 ret = -EINVAL; in am65_cpsw_taprio_replace()
895 am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf); in am65_cpsw_taprio_replace()
897 /* If the base-time is in the past, start schedule from the time: in am65_cpsw_taprio_replace()
903 if (est_new->taprio.base_time < cur_time) { in am65_cpsw_taprio_replace()
904 n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time); in am65_cpsw_taprio_replace()
905 est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time; in am65_cpsw_taprio_replace()
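/*
 * Illustrative sketch: a standalone version of the adjustment above.
 * When the requested base time lies in the past, it is pushed forward by
 * the smallest whole number of cycles that lands it after the current
 * time, so the gate list starts on a future cycle boundary.
 */
#include <stdint.h>

static uint64_t advance_base_time(uint64_t base_time, uint64_t cycle_time,
				  uint64_t now)
{
	uint64_t n;

	if (base_time >= now || !cycle_time)
		return base_time;

	n = (now - base_time) / cycle_time;	/* whole cycles already elapsed */
	return base_time + (n + 1) * cycle_time;
}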
919 devm_kfree(&ndev->dev, port->qos.est_admin); in am65_cpsw_taprio_replace()
920 port->qos.est_admin = est_new; in am65_cpsw_taprio_replace()
921 am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs); in am65_cpsw_taprio_replace()
927 devm_kfree(&ndev->dev, est_new); in am65_cpsw_taprio_replace()
940 if (port->qos.link_down_time) { in am65_cpsw_est_link_up()
942 delta = ktime_us_delta(cur_time, port->qos.link_down_time); in am65_cpsw_est_link_up()
944 dev_err(&ndev->dev, in am65_cpsw_est_link_up()
961 switch (taprio->cmd) { in am65_cpsw_setup_taprio()
969 err = -EOPNOTSUPP; in am65_cpsw_setup_taprio()
979 switch (base->type) { in am65_cpsw_tc_query_caps()
981 struct tc_mqprio_caps *caps = base->caps; in am65_cpsw_tc_query_caps()
983 caps->validate_queue_counts = true; in am65_cpsw_tc_query_caps()
989 struct tc_taprio_caps *caps = base->caps; in am65_cpsw_tc_query_caps()
991 caps->gate_mask_per_txq = true; in am65_cpsw_tc_query_caps()
996 return -EOPNOTSUPP; in am65_cpsw_tc_query_caps()
1006 struct flow_dissector *dissector = rule->match.dissector; in am65_cpsw_qos_clsflower_add_policer()
1008 struct am65_cpsw_qos *qos = &port->qos; in am65_cpsw_qos_clsflower_add_policer()
1012 if (dissector->used_keys & in am65_cpsw_qos_clsflower_add_policer()
1018 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_add_policer()
1022 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_add_policer()
1026 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_add_policer()
1031 if (!is_zero_ether_addr(match.mask->src)) { in am65_cpsw_qos_clsflower_add_policer()
1034 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_add_policer()
1037 if (is_broadcast_ether_addr(match.key->dst) && in am65_cpsw_qos_clsflower_add_policer()
1038 is_broadcast_ether_addr(match.mask->dst)) { in am65_cpsw_qos_clsflower_add_policer()
1039 ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps); in am65_cpsw_qos_clsflower_add_policer()
1043 qos->ale_bc_ratelimit.cookie = cls->cookie; in am65_cpsw_qos_clsflower_add_policer()
1044 qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps; in am65_cpsw_qos_clsflower_add_policer()
1045 } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) && in am65_cpsw_qos_clsflower_add_policer()
1046 ether_addr_equal_unaligned(match.mask->dst, mc_mac)) { in am65_cpsw_qos_clsflower_add_policer()
1047 ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps); in am65_cpsw_qos_clsflower_add_policer()
1051 qos->ale_mc_ratelimit.cookie = cls->cookie; in am65_cpsw_qos_clsflower_add_policer()
1052 qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps; in am65_cpsw_qos_clsflower_add_policer()
1055 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_add_policer()
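/*
 * Illustrative sketch: the dst MAC match above appears to be honoured in
 * exactly two shapes: an exact broadcast key/mask, or the pair
 * 01:00:00:00:00:00 / 01:00:00:00:00:00 (only the group bit compared),
 * selecting the ALE broadcast or multicast rate limiter respectively.
 * A plain classification of those two cases, with the group-bit pattern
 * for mc_mac being an assumption here:
 */
#include <stdint.h>
#include <string.h>

enum dst_policer { POLICE_BCAST, POLICE_MCAST, POLICE_UNSUPPORTED };

static enum dst_policer classify_dst(const uint8_t key[6], const uint8_t mask[6])
{
	static const uint8_t bcast[6]  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	static const uint8_t mc_mac[6] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if (!memcmp(key, bcast, 6) && !memcmp(mask, bcast, 6))
		return POLICE_BCAST;
	if (!memcmp(key, mc_mac, 6) && !memcmp(mask, mc_mac, 6))
		return POLICE_MCAST;
	return POLICE_UNSUPPORTED;
}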
1065 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { in am65_cpsw_qos_clsflower_policer_validate()
1068 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_policer_validate()
1071 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && in am65_cpsw_qos_clsflower_policer_validate()
1072 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { in am65_cpsw_qos_clsflower_policer_validate()
1075 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_policer_validate()
1078 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && in am65_cpsw_qos_clsflower_policer_validate()
1082 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_policer_validate()
1085 if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps || in am65_cpsw_qos_clsflower_policer_validate()
1086 act->police.avrate || act->police.overhead) { in am65_cpsw_qos_clsflower_policer_validate()
1089 return -EOPNOTSUPP; in am65_cpsw_qos_clsflower_policer_validate()
1099 struct netlink_ext_ack *extack = cls->common.extack; in am65_cpsw_qos_configure_clsflower()
1103 flow_action_for_each(i, act, &rule->action) { in am65_cpsw_qos_configure_clsflower()
1104 switch (act->id) { in am65_cpsw_qos_configure_clsflower()
1106 ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack); in am65_cpsw_qos_configure_clsflower()
1111 act->police.rate_pkt_ps); in am65_cpsw_qos_configure_clsflower()
1115 return -EOPNOTSUPP; in am65_cpsw_qos_configure_clsflower()
1118 return -EOPNOTSUPP; in am65_cpsw_qos_configure_clsflower()
1123 struct am65_cpsw_qos *qos = &port->qos; in am65_cpsw_qos_delete_clsflower()
1125 if (cls->cookie == qos->ale_bc_ratelimit.cookie) { in am65_cpsw_qos_delete_clsflower()
1126 qos->ale_bc_ratelimit.cookie = 0; in am65_cpsw_qos_delete_clsflower()
1127 qos->ale_bc_ratelimit.rate_packet_ps = 0; in am65_cpsw_qos_delete_clsflower()
1128 cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0); in am65_cpsw_qos_delete_clsflower()
1131 if (cls->cookie == qos->ale_mc_ratelimit.cookie) { in am65_cpsw_qos_delete_clsflower()
1132 qos->ale_mc_ratelimit.cookie = 0; in am65_cpsw_qos_delete_clsflower()
1133 qos->ale_mc_ratelimit.rate_packet_ps = 0; in am65_cpsw_qos_delete_clsflower()
1134 cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0); in am65_cpsw_qos_delete_clsflower()
1143 switch (cls_flower->command) { in am65_cpsw_qos_setup_tc_clsflower()
1149 return -EOPNOTSUPP; in am65_cpsw_qos_setup_tc_clsflower()
1157 if (!tc_cls_can_offload_and_chain0(port->ndev, type_data)) in am65_cpsw_qos_setup_tc_block_cb()
1158 return -EOPNOTSUPP; in am65_cpsw_qos_setup_tc_block_cb()
1160 switch (type) { in am65_cpsw_qos_setup_tc_block_cb()
1164 return -EOPNOTSUPP; in am65_cpsw_qos_setup_tc_block_cb()
1187 ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq); in am65_cpsw_qos_tx_p0_rate_apply()
1188 writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch)); in am65_cpsw_qos_tx_p0_rate_apply()
1191 for (i = 0; i < common->port_num; i++) { in am65_cpsw_qos_tx_p0_rate_apply()
1192 struct net_device *ndev = common->ports[i].ndev; in am65_cpsw_qos_tx_p0_rate_apply()
1196 netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps; in am65_cpsw_qos_tx_p0_rate_apply()
1204 struct am65_cpsw_common *common = port->common; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1210 dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n", in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1211 queue, rate_mbps, common->tx_ch_rate_msk); in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1213 if (common->pf_p0_rx_ptype_rrobin) { in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1214 dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n"); in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1215 return -EINVAL; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1218 ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1222 ret = pm_runtime_get_sync(common->dev); in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1224 pm_runtime_put_noidle(common->dev); in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1229 tx_ch_rate_msk_new = common->tx_ch_rate_msk; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1232 ch_msk = GENMASK(common->tx_ch_num - 1, queue); in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1236 ch_msk = queue ? GENMASK(queue - 1, 0) : 0; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1241 …dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rat… in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1242 common->tx_ch_rate_msk, tx_ch_rate_msk_new); in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1243 ret = -EINVAL; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1247 tx_chn = &common->tx_chns[queue]; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1248 tx_chn->rate_mbps = rate_mbps; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1249 common->tx_ch_rate_msk = tx_ch_rate_msk_new; in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1251 if (!common->usage_count) in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
1258 pm_runtime_put(common->dev); in am65_cpsw_qos_ndo_tx_p0_set_maxrate()
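/*
 * Illustrative sketch: the mask bookkeeping above appears to enforce the
 * same hi->lo rule per TX channel: limiting a channel requires every
 * higher channel to be limited already, and removing a limit requires
 * every lower channel to be unlimited.  Equivalently, the resulting
 * bitmap must be empty or a contiguous run of 1s ending at the top
 * channel; a standalone check of that invariant (helper name is a
 * placeholder):
 */
#include <stdbool.h>
#include <stdint.h>

static bool rate_msk_valid(uint32_t msk, int num_ch)
{
	uint32_t all = num_ch >= 32 ? 0xffffffffu : ((1u << num_ch) - 1);
	uint32_t lower;

	if (!msk)
		return true;

	/* the highest channel must itself be rate limited ... */
	if (!(msk & (1u << (num_ch - 1))))
		return false;

	/* ... and the unlimited channels must sit contiguously at the bottom */
	lower = all & ~msk;
	return (lower & (lower + 1)) == 0;
}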
1267 for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) { in am65_cpsw_qos_tx_p0_rate_init()
1268 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch]; in am65_cpsw_qos_tx_p0_rate_init()
1271 if (!tx_chn->rate_mbps) in am65_cpsw_qos_tx_p0_rate_init()
1274 ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps, in am65_cpsw_qos_tx_p0_rate_init()
1275 common->bus_freq); in am65_cpsw_qos_tx_p0_rate_init()
1277 host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch)); in am65_cpsw_qos_tx_p0_rate_init()
1284 switch (type) { in am65_cpsw_qos_ndo_setup_tc()
1294 return -EOPNOTSUPP; in am65_cpsw_qos_ndo_setup_tc()
1302 port->qos.link_speed = link_speed; in am65_cpsw_qos_link_up()
1307 port->qos.link_down_time = 0; in am65_cpsw_qos_link_up()
1314 port->qos.link_speed = SPEED_UNKNOWN; in am65_cpsw_qos_link_down()
1318 if (!port->qos.link_down_time) in am65_cpsw_qos_link_down()
1319 port->qos.link_down_time = ktime_get(); in am65_cpsw_qos_link_down()