WEIGHTED_ROUND_ROBIN,
};
-int max_available_queue[] = {0, 1, 2, 3, 4, 5, 6, 7};
-int default_queue_weights[] = {1, 1, 1, 1, 1, 1, 1, 1};
+int rtldsa_max_available_queue[] = {0, 1, 2, 3, 4, 5, 6, 7};
+int rtldsa_default_queue_weights[] = {1, 1, 1, 1, 1, 1, 1, 1};
int dot1p_priority_remapping[] = {0, 1, 2, 3, 4, 5, 6, 7};
static void rtl839x_read_scheduling_table(int port)
static void rtl83xx_setup_default_prio2queue(void)
{
if (soc_info.family == RTL8380_FAMILY_ID) {
- rtl838x_setup_prio2queue_matrix(max_available_queue);
+ rtl838x_setup_prio2queue_matrix(rtldsa_max_available_queue);
} else {
- rtl839x_setup_prio2queue_matrix(max_available_queue);
+ rtl839x_setup_prio2queue_matrix(rtldsa_max_available_queue);
}
- rtl83xx_setup_prio2queue_cpu_matrix(max_available_queue);
+ rtl83xx_setup_prio2queue_cpu_matrix(rtldsa_max_available_queue);
}
/* Sets the output queue assigned to a port, the port can be the CPU-port */
for (int port = 0; port <= soc_info.cpu_port; port++) {
rtl83xx_set_ingress_priority(port, 0);
rtl839x_set_scheduling_algorithm(priv, port, WEIGHTED_FAIR_QUEUE);
- rtl839x_set_scheduling_queue_weights(priv, port, default_queue_weights);
+ rtl839x_set_scheduling_queue_weights(priv, port, rtldsa_default_queue_weights);
/* Do re-marking based on outer tag */
sw_w32_mask(0, BIT(port % 32), RTL839X_RMK_PORT_DEI_TAG_CTRL(port));
}
rtl839x_config_qos();
rtl839x_rate_control_init(priv);
break;
+ default:
+ if (priv->r->qos_init)
+ priv->r->qos_init(priv);
+ break;
}
}
#define RTL838X_SCHED_LB_CTRL(p) (0xC004 + (((p) << 7)))
#define RTL838X_FC_P_EGR_DROP_CTRL(p) (0x6B1C + (((p) << 2)))
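+/* DSCP remap table: 10 DSCP entries per 32-bit register, 3 bits of
+ * internal priority each
+ */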
+#define RTL930X_REMAP_DSCP(p) (0x9B04 + (((p) / 10) * 4))
+#define RTL931X_REMAP_DSCP(p) (0x9034 + (((p) / 10) * 4))
+#define RTL93XX_REMAP_DSCP_INTPRI_DSCP_OFFSET(p) \
+ (((p) % 10) * 3)
+#define RTL93XX_REMAP_DSCP_INTPRI_DSCP_MASK(index) \
+ (0x7 << RTL93XX_REMAP_DSCP_INTPRI_DSCP_OFFSET(index))
+
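+/* Per-port priority-selection group index: 16 ports per register, 2 bits per port */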
+#define RTL930X_PORT_TBL_IDX_CTRL(port) (0x9B20 + (((port) / 16) * 4))
+#define RTL931X_PORT_TBL_IDX_CTRL(port) (0x9064 + (((port) / 16) * 4))
+#define RTL93XX_PORT_TBL_IDX_CTRL_IDX_OFFSET(port) \
+ (((port) & 0xF) << 1)
+#define RTL93XX_PORT_TBL_IDX_CTRL_IDX_MASK(port) \
+ (0x3 << RTL93XX_PORT_TBL_IDX_CTRL_IDX_OFFSET(port))
+
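+/* Priority source selection: one 4-bit weight per priority source per group */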
+#define RTL93XX_PRI_SEL_GROUP_0 (0)
+#define RTL93XX_PRI_SEL_GROUP_1 (1)
+
+#define RTL930X_PRI_SEL_TBL_CTRL(group) (0x9B28 + ((group) * 4))
+#define RTL931X_PRI_SEL_TBL_CTRL(group) (0x9074 + ((group) * 8))
+#define RTL931X_PRI_SEL_TBL_CTRL_1BR_MASK GENMASK(15, 12)
+#define RTL931X_PRI_SEL_TBL_CTRL_MPLS_MASK GENMASK(11, 8)
+#define RTL931X_PRI_SEL_TBL_CTRL_11E_MASK GENMASK(7, 4)
+#define RTL931X_PRI_SEL_TBL_CTRL_TUNNEL_MASK GENMASK(3, 0)
+
+#define RTL93XX_PRI_SEL_TBL_CTRL_ROUT_MASK GENMASK(31, 28)
+#define RTL93XX_PRI_SEL_TBL_CTRL_PROT_VLAN_MASK GENMASK(27, 24)
+#define RTL93XX_PRI_SEL_TBL_CTRL_MAC_VLAN_MASK GENMASK(23, 20)
+#define RTL93XX_PRI_SEL_TBL_CTRL_OTAG_MASK GENMASK(19, 16)
+#define RTL93XX_PRI_SEL_TBL_CTRL_ITAG_MASK GENMASK(15, 12)
+#define RTL93XX_PRI_SEL_TBL_CTRL_DSCP_MASK GENMASK(11, 8)
+#define RTL93XX_PRI_SEL_TBL_CTRL_VACL_MASK GENMASK(7, 4)
+#define RTL93XX_PRI_SEL_TBL_CTRL_PORT_MASK GENMASK(3, 0)
+
+/* port: 0-23, index: 0-7 */
+#define RTL930X_SCHED_PORT_Q_CTRL_SET0(port, index) \
+ (0x3D48 + ((port) * 384) + ((index) * 4))
+/* port: 24-27, index: 0-11 */
+#define RTL930X_SCHED_PORT_Q_CTRL_SET1(port, index) \
+ ((0xE860 + ((port) - 24) * 48) + ((index) * 4))
+/* port: 0-51, index: 0-7 */
+#define RTL931X_SCHED_PORT_Q_CTRL_SET0(port, index) \
+ (0x2888 + ((port) << 5) + ((index) * 4))
+/* port: 52-55, index: 0-11 */
+#define RTL931X_SCHED_PORT_Q_CTRL_SET1(port, index) \
+ ((0x2F08 + ((port) - 52) * 48) + ((index) * 4))
+
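+/* Internal priority to egress queue assignment, 3-bit fields */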
+#define RTL930X_QM_INTPRI2QID_CTRL (0xA320)
+#define RTL931X_QM_INTPRI2QID_CTRL (0xA9D0)
+
/* Debug features */
#define RTL930X_STAT_PRVTE_DROP_COUNTER0 (0xB5B8)
#define MAX_ROUTER_MACS 64
#define L3_EGRESS_DMACS 2048
#define MAX_SMACS 64
+#define DSCP_MAP_MAX 64
enum phy_type {
PHY_NONE = 0,
void (*set_distribution_algorithm)(int group, int algoidx, u32 algomask);
void (*set_receive_management_action)(int port, rma_ctrl_t type, action_type_t action);
void (*led_init)(struct rtl838x_switch_priv *priv);
+ void (*qos_init)(struct rtl838x_switch_priv *priv);
};
struct rtl838x_switch_priv {
void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv);
void rtl930x_dbgfs_init(struct rtl838x_switch_priv *priv);
+extern int rtldsa_max_available_queue[];
+extern int rtldsa_default_queue_weights[];
+
#endif /* _RTL838X_H */
dev_dbg(dev, "%08x: %08x\n", 0xbb00cc00 + i * 4, sw_r32(0xcc00 + i * 4));
}
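+/* Select which priority-source weight group (0 or 1) the port uses */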
+static void rtldsa_930x_qos_set_group_selector(int port, int group)
+{
+ sw_w32_mask(RTL93XX_PORT_TBL_IDX_CTRL_IDX_MASK(port),
+ group << RTL93XX_PORT_TBL_IDX_CTRL_IDX_OFFSET(port),
+ RTL930X_PORT_TBL_IDX_CTRL(port));
+}
+
+static void rtldsa_930x_qos_setup_default_dscp2queue_map(void)
+{
+ u32 queue;
+
+ /* The default mapping between DSCP and queue is based on the first
+ * 3 bits of the DSCP, which indicate the precedence (prio = dscp >> 3).
+ */
+ for (int i = 0; i < DSCP_MAP_MAX; i++) {
+ queue = (i >> 3) << RTL93XX_REMAP_DSCP_INTPRI_DSCP_OFFSET(i);
+ sw_w32_mask(RTL93XX_REMAP_DSCP_INTPRI_DSCP_MASK(i),
+ queue, RTL930X_REMAP_DSCP(i));
+ }
+}
+
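+/* Write the internal priority to queue assignment, 3 bits per entry */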
+static void rtldsa_930x_qos_prio2queue_matrix(int *min_queues)
+{
+ u32 v = 0;
+
+ for (int i = 0; i < MAX_PRIOS; i++)
+ v |= i << (min_queues[i] * 3);
+
+ sw_w32(v, RTL930X_QM_INTPRI2QID_CTRL);
+}
+
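+/* Program the default scheduling weight of all 8 queues on every user port */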
+static void rtldsa_930x_qos_set_scheduling_queue_weights(struct rtl838x_switch_priv *priv)
+{
+ struct dsa_port *dp;
+ u32 addr;
+
+ dsa_switch_for_each_user_port(dp, priv->ds) {
+ for (int q = 0; q < 8; q++) {
+ if (dp->index < 24)
+ addr = RTL930X_SCHED_PORT_Q_CTRL_SET0(dp->index, q);
+ else
+ addr = RTL930X_SCHED_PORT_Q_CTRL_SET1(dp->index, q);
+
+ sw_w32(rtldsa_default_queue_weights[q], addr);
+ }
+ }
+}
+
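+/* Default QoS setup: all user ports in priority-selection group 0, identity
+ * priority-to-queue map, precedence-based DSCP remapping and equal queue
+ * weights
+ */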
+static void rtldsa_930x_qos_init(struct rtl838x_switch_priv *priv)
+{
+ struct dsa_port *dp;
+ u32 v;
+
+ /* Assign all user ports to group 0 */
+ dsa_switch_for_each_user_port(dp, priv->ds)
+ rtldsa_930x_qos_set_group_selector(dp->index, 0);
+
+ rtldsa_930x_qos_prio2queue_matrix(rtldsa_max_available_queue);
+
+ /* Configure the priority source selection weights for group 0 */
+ v = 0;
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_PORT_MASK, 3);
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_DSCP_MASK, 5);
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_ITAG_MASK, 6);
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_OTAG_MASK, 7);
+
+ sw_w32(v, RTL930X_PRI_SEL_TBL_CTRL(0));
+
+ rtldsa_930x_qos_setup_default_dscp2queue_map();
+ rtldsa_930x_qos_set_scheduling_queue_weights(priv);
+}
+
const struct rtl838x_reg rtl930x_reg = {
.mask_port_reg_be = rtl838x_mask_port_reg,
.set_port_reg_be = rtl838x_set_port_reg,
.enable_learning = rtldsa_930x_enable_learning,
.enable_flood = rtldsa_930x_enable_flood,
.set_receive_management_action = rtldsa_930x_set_receive_management_action,
+ .qos_init = rtldsa_930x_qos_init,
};
dev_dbg(dev, "%08x: %08x\n", 0xbb000600 + i * 4, sw_r32(0x0600 + i * 4));
}
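+/* Select which priority-source weight group (0 or 1) the port uses */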
+static void rtldsa_931x_qos_set_group_selector(int port, int group)
+{
+ sw_w32_mask(RTL93XX_PORT_TBL_IDX_CTRL_IDX_MASK(port),
+ group << RTL93XX_PORT_TBL_IDX_CTRL_IDX_OFFSET(port),
+ RTL931X_PORT_TBL_IDX_CTRL(port));
+}
+
+static void rtldsa_931x_qos_setup_default_dscp2queue_map(void)
+{
+ u32 queue;
+
+ /* The default mapping between DSCP and queue is based on the first
+ * 3 bits of the DSCP, which indicate the precedence (prio = dscp >> 3).
+ */
+ for (int i = 0; i < DSCP_MAP_MAX; i++) {
+ queue = (i >> 3) << RTL93XX_REMAP_DSCP_INTPRI_DSCP_OFFSET(i);
+ sw_w32_mask(RTL93XX_REMAP_DSCP_INTPRI_DSCP_MASK(i),
+ queue, RTL931X_REMAP_DSCP(i));
+ }
+}
+
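+/* Write the internal priority to queue assignment, 3 bits per entry */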
+static void rtldsa_931x_qos_prio2queue_matrix(int *min_queues)
+{
+ u32 v = 0;
+
+ for (int i = 0; i < MAX_PRIOS; i++)
+ v |= i << (min_queues[i] * 3);
+
+ sw_w32(v, RTL931X_QM_INTPRI2QID_CTRL);
+}
+
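+/* Program the default scheduling weight of all 8 queues on every user port */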
+static void rtldsa_931x_qos_set_scheduling_queue_weights(struct rtl838x_switch_priv *priv)
+{
+ struct dsa_port *dp;
+ u32 addr;
+
+ dsa_switch_for_each_user_port(dp, priv->ds) {
+ for (int q = 0; q < 8; q++) {
+ if (dp->index < 52)
+ addr = RTL931X_SCHED_PORT_Q_CTRL_SET0(dp->index, q);
+ else
+ addr = RTL931X_SCHED_PORT_Q_CTRL_SET1(dp->index, q);
+
+ sw_w32(rtldsa_default_queue_weights[q], addr);
+ }
+ }
+}
+
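+/* Default QoS setup: all user ports in priority-selection group 0, identity
+ * priority-to-queue map, precedence-based DSCP remapping and equal queue
+ * weights
+ */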
+static void rtldsa_931x_qos_init(struct rtl838x_switch_priv *priv)
+{
+ struct dsa_port *dp;
+ u32 v;
+
+ /* Assign all user ports to group 0 */
+ dsa_switch_for_each_user_port(dp, priv->ds)
+ rtldsa_931x_qos_set_group_selector(dp->index, 0);
+
+ rtldsa_931x_qos_prio2queue_matrix(rtldsa_max_available_queue);
+
+ /* Configure the priority source selection weights for group 0 */
+ v = 0;
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_PORT_MASK, 3);
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_DSCP_MASK, 5);
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_ITAG_MASK, 6);
+ v |= FIELD_PREP(RTL93XX_PRI_SEL_TBL_CTRL_OTAG_MASK, 7);
+
+ sw_w32(v, RTL931X_PRI_SEL_TBL_CTRL(0) + 4);
+ sw_w32(0, RTL931X_PRI_SEL_TBL_CTRL(0));
+
+ rtldsa_931x_qos_setup_default_dscp2queue_map();
+ rtldsa_931x_qos_set_scheduling_queue_weights(priv);
+}
+
const struct rtl838x_reg rtl931x_reg = {
.mask_port_reg_be = rtl839x_mask_port_reg_be,
.set_port_reg_be = rtl839x_set_port_reg_be,
.enable_learning = rtldsa_931x_enable_learning,
.enable_flood = rtldsa_931x_enable_flood,
.set_receive_management_action = rtldsa_931x_set_receive_management_action,
+ .qos_init = rtldsa_931x_qos_init,
};